Diffstat (limited to 'src/spdk/intel-ipsec-mb')
-rwxr-xr-xsrc/spdk/intel-ipsec-mb/.github/PULL_REQUEST_TEMPLATE.md34
-rw-r--r--src/spdk/intel-ipsec-mb/.gitignore6
-rwxr-xr-xsrc/spdk/intel-ipsec-mb/CONTRIBUTING319
-rw-r--r--src/spdk/intel-ipsec-mb/LICENSE24
-rw-r--r--src/spdk/intel-ipsec-mb/LibPerfApp/Makefile87
-rw-r--r--src/spdk/intel-ipsec-mb/LibPerfApp/README82
-rwxr-xr-xsrc/spdk/intel-ipsec-mb/LibPerfApp/ipsec_diff_tool.py308
-rw-r--r--src/spdk/intel-ipsec-mb/LibPerfApp/ipsec_perf.c2581
-rw-r--r--src/spdk/intel-ipsec-mb/LibPerfApp/msr.c304
-rw-r--r--src/spdk/intel-ipsec-mb/LibPerfApp/msr.h114
-rw-r--r--src/spdk/intel-ipsec-mb/LibPerfApp/win_x64.mak81
-rw-r--r--src/spdk/intel-ipsec-mb/LibTestApp/Makefile131
-rw-r--r--src/spdk/intel-ipsec-mb/LibTestApp/aes_test.c1117
-rw-r--r--src/spdk/intel-ipsec-mb/LibTestApp/api_test.c612
-rw-r--r--src/spdk/intel-ipsec-mb/LibTestApp/ccm_test.c2092
-rw-r--r--src/spdk/intel-ipsec-mb/LibTestApp/chained_test.c511
-rw-r--r--src/spdk/intel-ipsec-mb/LibTestApp/cmac_test.c1354
-rw-r--r--src/spdk/intel-ipsec-mb/LibTestApp/ctr_test.c1497
-rw-r--r--src/spdk/intel-ipsec-mb/LibTestApp/customop_test.c311
-rw-r--r--src/spdk/intel-ipsec-mb/LibTestApp/customop_test.h34
-rw-r--r--src/spdk/intel-ipsec-mb/LibTestApp/des_test.c731
-rw-r--r--src/spdk/intel-ipsec-mb/LibTestApp/direct_api_test.c1093
-rw-r--r--src/spdk/intel-ipsec-mb/LibTestApp/do_test.h302
-rw-r--r--src/spdk/intel-ipsec-mb/LibTestApp/ecb_test.c804
-rw-r--r--src/spdk/intel-ipsec-mb/LibTestApp/gcm_ctr_vectors_test.h78
-rw-r--r--src/spdk/intel-ipsec-mb/LibTestApp/gcm_test.c1423
-rw-r--r--src/spdk/intel-ipsec-mb/LibTestApp/gcm_vectors.h38
-rw-r--r--src/spdk/intel-ipsec-mb/LibTestApp/hmac_md5_test.c558
-rw-r--r--src/spdk/intel-ipsec-mb/LibTestApp/hmac_sha1_test.c537
-rw-r--r--src/spdk/intel-ipsec-mb/LibTestApp/hmac_sha256_sha512_test.c1116
-rw-r--r--src/spdk/intel-ipsec-mb/LibTestApp/ipsec_xvalid.c2055
-rw-r--r--src/spdk/intel-ipsec-mb/LibTestApp/kasumi_test.c1327
-rw-r--r--src/spdk/intel-ipsec-mb/LibTestApp/kasumi_test_vectors.h1159
-rw-r--r--src/spdk/intel-ipsec-mb/LibTestApp/main.c314
-rw-r--r--src/spdk/intel-ipsec-mb/LibTestApp/misc.asm251
-rw-r--r--src/spdk/intel-ipsec-mb/LibTestApp/misc.h58
-rw-r--r--src/spdk/intel-ipsec-mb/LibTestApp/pon_test.c694
-rw-r--r--src/spdk/intel-ipsec-mb/LibTestApp/sha_test.c588
-rw-r--r--src/spdk/intel-ipsec-mb/LibTestApp/snow3g_test.c1979
-rw-r--r--src/spdk/intel-ipsec-mb/LibTestApp/snow3g_test_vectors.h802
-rw-r--r--src/spdk/intel-ipsec-mb/LibTestApp/utils.c70
-rw-r--r--src/spdk/intel-ipsec-mb/LibTestApp/utils.h35
-rw-r--r--src/spdk/intel-ipsec-mb/LibTestApp/win_x64.mak151
-rw-r--r--src/spdk/intel-ipsec-mb/LibTestApp/zuc_test.c660
-rw-r--r--src/spdk/intel-ipsec-mb/LibTestApp/zuc_test_vectors.h587
-rw-r--r--src/spdk/intel-ipsec-mb/Makefile690
-rw-r--r--src/spdk/intel-ipsec-mb/README478
-rw-r--r--src/spdk/intel-ipsec-mb/ReleaseNotes.txt575
-rw-r--r--src/spdk/intel-ipsec-mb/aes128_ecbenc_x3.asm346
-rw-r--r--src/spdk/intel-ipsec-mb/aes_cmac_subkey_gen.asm375
-rw-r--r--src/spdk/intel-ipsec-mb/aes_keyexp_128.asm523
-rw-r--r--src/spdk/intel-ipsec-mb/aes_keyexp_192.asm622
-rw-r--r--src/spdk/intel-ipsec-mb/aes_keyexp_256.asm677
-rw-r--r--src/spdk/intel-ipsec-mb/aes_xcbc_expand_key.c139
-rw-r--r--src/spdk/intel-ipsec-mb/alloc.c84
-rw-r--r--src/spdk/intel-ipsec-mb/asm.h212
-rw-r--r--src/spdk/intel-ipsec-mb/avx/aes128_cbc_dec_by8_avx.asm306
-rw-r--r--src/spdk/intel-ipsec-mb/avx/aes128_cbc_mac_x8.asm31
-rw-r--r--src/spdk/intel-ipsec-mb/avx/aes128_cntr_by8_avx.asm606
-rw-r--r--src/spdk/intel-ipsec-mb/avx/aes128_cntr_ccm_by8_avx.asm32
-rw-r--r--src/spdk/intel-ipsec-mb/avx/aes192_cbc_dec_by8_avx.asm328
-rw-r--r--src/spdk/intel-ipsec-mb/avx/aes192_cntr_by8_avx.asm504
-rw-r--r--src/spdk/intel-ipsec-mb/avx/aes256_cbc_dec_by8_avx.asm344
-rw-r--r--src/spdk/intel-ipsec-mb/avx/aes256_cntr_by8_avx.asm516
-rw-r--r--src/spdk/intel-ipsec-mb/avx/aes_cbc_enc_128_x8.asm494
-rw-r--r--src/spdk/intel-ipsec-mb/avx/aes_cbc_enc_192_x8.asm501
-rw-r--r--src/spdk/intel-ipsec-mb/avx/aes_cbc_enc_256_x8.asm536
-rw-r--r--src/spdk/intel-ipsec-mb/avx/aes_cfb_128_avx.asm165
-rw-r--r--src/spdk/intel-ipsec-mb/avx/aes_ecb_by4_avx.asm654
-rw-r--r--src/spdk/intel-ipsec-mb/avx/aes_xcbc_mac_128_x8.asm418
-rw-r--r--src/spdk/intel-ipsec-mb/avx/gcm128_avx_gen2.asm31
-rw-r--r--src/spdk/intel-ipsec-mb/avx/gcm192_avx_gen2.asm31
-rw-r--r--src/spdk/intel-ipsec-mb/avx/gcm256_avx_gen2.asm30
-rw-r--r--src/spdk/intel-ipsec-mb/avx/gcm_avx_gen2.asm2515
-rw-r--r--src/spdk/intel-ipsec-mb/avx/kasumi_avx.c386
-rw-r--r--src/spdk/intel-ipsec-mb/avx/mb_mgr_aes192_flush_avx.asm30
-rw-r--r--src/spdk/intel-ipsec-mb/avx/mb_mgr_aes192_submit_avx.asm30
-rw-r--r--src/spdk/intel-ipsec-mb/avx/mb_mgr_aes256_flush_avx.asm30
-rw-r--r--src/spdk/intel-ipsec-mb/avx/mb_mgr_aes256_submit_avx.asm30
-rw-r--r--src/spdk/intel-ipsec-mb/avx/mb_mgr_aes_ccm_auth_submit_flush_avx.asm537
-rw-r--r--src/spdk/intel-ipsec-mb/avx/mb_mgr_aes_cmac_submit_flush_avx.asm518
-rw-r--r--src/spdk/intel-ipsec-mb/avx/mb_mgr_aes_flush_avx.asm239
-rw-r--r--src/spdk/intel-ipsec-mb/avx/mb_mgr_aes_submit_avx.asm194
-rw-r--r--src/spdk/intel-ipsec-mb/avx/mb_mgr_aes_xcbc_flush_avx.asm264
-rw-r--r--src/spdk/intel-ipsec-mb/avx/mb_mgr_aes_xcbc_submit_avx.asm272
-rw-r--r--src/spdk/intel-ipsec-mb/avx/mb_mgr_avx.c733
-rw-r--r--src/spdk/intel-ipsec-mb/avx/mb_mgr_hmac_flush_avx.asm298
-rw-r--r--src/spdk/intel-ipsec-mb/avx/mb_mgr_hmac_md5_flush_avx.asm321
-rw-r--r--src/spdk/intel-ipsec-mb/avx/mb_mgr_hmac_md5_submit_avx.asm355
-rw-r--r--src/spdk/intel-ipsec-mb/avx/mb_mgr_hmac_sha_224_flush_avx.asm31
-rw-r--r--src/spdk/intel-ipsec-mb/avx/mb_mgr_hmac_sha_224_submit_avx.asm31
-rw-r--r--src/spdk/intel-ipsec-mb/avx/mb_mgr_hmac_sha_256_flush_avx.asm356
-rw-r--r--src/spdk/intel-ipsec-mb/avx/mb_mgr_hmac_sha_256_submit_avx.asm428
-rw-r--r--src/spdk/intel-ipsec-mb/avx/mb_mgr_hmac_sha_384_flush_avx.asm31
-rw-r--r--src/spdk/intel-ipsec-mb/avx/mb_mgr_hmac_sha_384_submit_avx.asm31
-rw-r--r--src/spdk/intel-ipsec-mb/avx/mb_mgr_hmac_sha_512_flush_avx.asm339
-rw-r--r--src/spdk/intel-ipsec-mb/avx/mb_mgr_hmac_sha_512_submit_avx.asm416
-rw-r--r--src/spdk/intel-ipsec-mb/avx/mb_mgr_hmac_submit_avx.asm358
-rw-r--r--src/spdk/intel-ipsec-mb/avx/md5_x4x2_avx.asm716
-rw-r--r--src/spdk/intel-ipsec-mb/avx/pon_avx.asm1170
-rw-r--r--src/spdk/intel-ipsec-mb/avx/sha1_mult_avx.asm434
-rw-r--r--src/spdk/intel-ipsec-mb/avx/sha1_one_block_avx.asm501
-rw-r--r--src/spdk/intel-ipsec-mb/avx/sha224_one_block_avx.asm33
-rw-r--r--src/spdk/intel-ipsec-mb/avx/sha256_one_block_avx.asm553
-rw-r--r--src/spdk/intel-ipsec-mb/avx/sha384_one_block_avx.asm33
-rw-r--r--src/spdk/intel-ipsec-mb/avx/sha512_one_block_avx.asm473
-rw-r--r--src/spdk/intel-ipsec-mb/avx/sha512_x2_avx.asm381
-rw-r--r--src/spdk/intel-ipsec-mb/avx/sha_256_mult_avx.asm391
-rw-r--r--src/spdk/intel-ipsec-mb/avx/snow3g_avx.c42
-rwxr-xr-xsrc/spdk/intel-ipsec-mb/avx/zuc_avx.asm1146
-rwxr-xr-xsrc/spdk/intel-ipsec-mb/avx/zuc_avx_top.c548
-rw-r--r--src/spdk/intel-ipsec-mb/avx2/gcm128_avx_gen4.asm31
-rw-r--r--src/spdk/intel-ipsec-mb/avx2/gcm192_avx_gen4.asm31
-rw-r--r--src/spdk/intel-ipsec-mb/avx2/gcm256_avx_gen4.asm31
-rw-r--r--src/spdk/intel-ipsec-mb/avx2/gcm_avx_gen4.asm3641
-rw-r--r--src/spdk/intel-ipsec-mb/avx2/mb_mgr_avx2.c676
-rw-r--r--src/spdk/intel-ipsec-mb/avx2/mb_mgr_hmac_flush_avx2.asm315
-rw-r--r--src/spdk/intel-ipsec-mb/avx2/mb_mgr_hmac_md5_flush_avx2.asm362
-rw-r--r--src/spdk/intel-ipsec-mb/avx2/mb_mgr_hmac_md5_submit_avx2.asm373
-rw-r--r--src/spdk/intel-ipsec-mb/avx2/mb_mgr_hmac_sha_224_flush_avx2.asm31
-rw-r--r--src/spdk/intel-ipsec-mb/avx2/mb_mgr_hmac_sha_224_submit_avx2.asm31
-rw-r--r--src/spdk/intel-ipsec-mb/avx2/mb_mgr_hmac_sha_256_flush_avx2.asm379
-rw-r--r--src/spdk/intel-ipsec-mb/avx2/mb_mgr_hmac_sha_256_submit_avx2.asm426
-rw-r--r--src/spdk/intel-ipsec-mb/avx2/mb_mgr_hmac_sha_384_flush_avx2.asm31
-rw-r--r--src/spdk/intel-ipsec-mb/avx2/mb_mgr_hmac_sha_384_submit_avx2.asm31
-rw-r--r--src/spdk/intel-ipsec-mb/avx2/mb_mgr_hmac_sha_512_flush_avx2.asm353
-rw-r--r--src/spdk/intel-ipsec-mb/avx2/mb_mgr_hmac_sha_512_submit_avx2.asm416
-rw-r--r--src/spdk/intel-ipsec-mb/avx2/mb_mgr_hmac_submit_avx2.asm369
-rw-r--r--src/spdk/intel-ipsec-mb/avx2/md5_x8x2_avx2.asm820
-rw-r--r--src/spdk/intel-ipsec-mb/avx2/sha1_x8_avx2.asm466
-rw-r--r--src/spdk/intel-ipsec-mb/avx2/sha256_oct_avx2.asm587
-rw-r--r--src/spdk/intel-ipsec-mb/avx2/sha512_x4_avx2.asm452
-rw-r--r--src/spdk/intel-ipsec-mb/avx2/snow3g_avx2.c49
-rw-r--r--src/spdk/intel-ipsec-mb/avx512/aes_cbc_dec_vaes_avx512.asm477
-rw-r--r--src/spdk/intel-ipsec-mb/avx512/aes_cbc_enc_vaes_avx512.asm727
-rw-r--r--src/spdk/intel-ipsec-mb/avx512/cntr_vaes_avx512.asm1524
-rw-r--r--src/spdk/intel-ipsec-mb/avx512/des_x16_avx512.asm2382
-rw-r--r--src/spdk/intel-ipsec-mb/avx512/gcm128_avx512.asm31
-rw-r--r--src/spdk/intel-ipsec-mb/avx512/gcm128_vaes_avx512.asm32
-rw-r--r--src/spdk/intel-ipsec-mb/avx512/gcm192_avx512.asm31
-rw-r--r--src/spdk/intel-ipsec-mb/avx512/gcm192_vaes_avx512.asm32
-rw-r--r--src/spdk/intel-ipsec-mb/avx512/gcm256_avx512.asm31
-rw-r--r--src/spdk/intel-ipsec-mb/avx512/gcm256_vaes_avx512.asm32
-rw-r--r--src/spdk/intel-ipsec-mb/avx512/gcm_avx512.asm3536
-rw-r--r--src/spdk/intel-ipsec-mb/avx512/gcm_vaes_avx512.asm4272
-rw-r--r--src/spdk/intel-ipsec-mb/avx512/mb_mgr_aes192_flush_avx512.asm31
-rw-r--r--src/spdk/intel-ipsec-mb/avx512/mb_mgr_aes192_submit_avx512.asm31
-rw-r--r--src/spdk/intel-ipsec-mb/avx512/mb_mgr_aes256_flush_avx512.asm31
-rw-r--r--src/spdk/intel-ipsec-mb/avx512/mb_mgr_aes256_submit_avx512.asm31
-rw-r--r--src/spdk/intel-ipsec-mb/avx512/mb_mgr_aes_flush_avx512.asm320
-rw-r--r--src/spdk/intel-ipsec-mb/avx512/mb_mgr_aes_submit_avx512.asm280
-rw-r--r--src/spdk/intel-ipsec-mb/avx512/mb_mgr_avx512.c1066
-rw-r--r--src/spdk/intel-ipsec-mb/avx512/mb_mgr_des_avx512.asm524
-rw-r--r--src/spdk/intel-ipsec-mb/avx512/mb_mgr_hmac_flush_avx512.asm367
-rw-r--r--src/spdk/intel-ipsec-mb/avx512/mb_mgr_hmac_sha_224_flush_avx512.asm28
-rw-r--r--src/spdk/intel-ipsec-mb/avx512/mb_mgr_hmac_sha_224_submit_avx512.asm28
-rw-r--r--src/spdk/intel-ipsec-mb/avx512/mb_mgr_hmac_sha_256_flush_avx512.asm433
-rw-r--r--src/spdk/intel-ipsec-mb/avx512/mb_mgr_hmac_sha_256_submit_avx512.asm445
-rw-r--r--src/spdk/intel-ipsec-mb/avx512/mb_mgr_hmac_sha_384_flush_avx512.asm29
-rw-r--r--src/spdk/intel-ipsec-mb/avx512/mb_mgr_hmac_sha_384_submit_avx512.asm29
-rw-r--r--src/spdk/intel-ipsec-mb/avx512/mb_mgr_hmac_sha_512_flush_avx512.asm384
-rw-r--r--src/spdk/intel-ipsec-mb/avx512/mb_mgr_hmac_sha_512_submit_avx512.asm413
-rw-r--r--src/spdk/intel-ipsec-mb/avx512/mb_mgr_hmac_submit_avx512.asm402
-rw-r--r--src/spdk/intel-ipsec-mb/avx512/sha1_x16_avx512.asm439
-rw-r--r--src/spdk/intel-ipsec-mb/avx512/sha256_x16_avx512.asm758
-rw-r--r--src/spdk/intel-ipsec-mb/avx512/sha512_x8_avx512.asm595
-rw-r--r--src/spdk/intel-ipsec-mb/const.asm89
-rw-r--r--src/spdk/intel-ipsec-mb/constants.asm66
-rw-r--r--src/spdk/intel-ipsec-mb/constants.h83
-rw-r--r--src/spdk/intel-ipsec-mb/cpu_feature.c230
-rw-r--r--src/spdk/intel-ipsec-mb/des.h111
-rw-r--r--src/spdk/intel-ipsec-mb/des_basic.c750
-rw-r--r--src/spdk/intel-ipsec-mb/des_key.c151
-rw-r--r--src/spdk/intel-ipsec-mb/gcm.c225
-rw-r--r--src/spdk/intel-ipsec-mb/include/aes_common.asm375
-rw-r--r--src/spdk/intel-ipsec-mb/include/aesni_emu.h120
-rw-r--r--src/spdk/intel-ipsec-mb/include/aesni_emu.inc247
-rw-r--r--src/spdk/intel-ipsec-mb/include/clear_regs.asm196
-rw-r--r--src/spdk/intel-ipsec-mb/include/clear_regs_mem.h53
-rw-r--r--src/spdk/intel-ipsec-mb/include/clear_regs_mem_fns.asm124
-rw-r--r--src/spdk/intel-ipsec-mb/include/const.inc163
-rw-r--r--src/spdk/intel-ipsec-mb/include/constant_lookup.asm561
-rw-r--r--src/spdk/intel-ipsec-mb/include/constant_lookup.h173
-rw-r--r--src/spdk/intel-ipsec-mb/include/cpu_feature.h52
-rw-r--r--src/spdk/intel-ipsec-mb/include/datastruct.asm235
-rw-r--r--src/spdk/intel-ipsec-mb/include/dbgprint.asm413
-rw-r--r--src/spdk/intel-ipsec-mb/include/des_utils.h134
-rw-r--r--src/spdk/intel-ipsec-mb/include/gcm.h428
-rw-r--r--src/spdk/intel-ipsec-mb/include/gcm_defines.asm272
-rw-r--r--src/spdk/intel-ipsec-mb/include/gcm_keys_avx2_avx512.asm52
-rw-r--r--src/spdk/intel-ipsec-mb/include/gcm_keys_sse_avx.asm73
-rw-r--r--src/spdk/intel-ipsec-mb/include/gcm_keys_vaes_avx512.asm231
-rwxr-xr-xsrc/spdk/intel-ipsec-mb/include/kasumi_internal.h1853
-rw-r--r--src/spdk/intel-ipsec-mb/include/memcpy.asm613
-rw-r--r--src/spdk/intel-ipsec-mb/include/noaesni.h65
-rw-r--r--src/spdk/intel-ipsec-mb/include/os.asm58
-rw-r--r--src/spdk/intel-ipsec-mb/include/reg_sizes.asm300
-rw-r--r--src/spdk/intel-ipsec-mb/include/save_xmms.asm132
-rw-r--r--src/spdk/intel-ipsec-mb/include/save_xmms.h39
-rw-r--r--src/spdk/intel-ipsec-mb/include/snow3g.h511
-rw-r--r--src/spdk/intel-ipsec-mb/include/snow3g_common.h2840
-rw-r--r--src/spdk/intel-ipsec-mb/include/snow3g_internal.h638
-rw-r--r--src/spdk/intel-ipsec-mb/include/transpose_avx2.asm218
-rw-r--r--src/spdk/intel-ipsec-mb/include/transpose_avx512.asm497
-rw-r--r--src/spdk/intel-ipsec-mb/include/wireless_common.asm128
-rw-r--r--src/spdk/intel-ipsec-mb/include/wireless_common.h216
-rw-r--r--src/spdk/intel-ipsec-mb/include/zuc_common.asm740
-rwxr-xr-xsrc/spdk/intel-ipsec-mb/include/zuc_internal.h432
-rw-r--r--src/spdk/intel-ipsec-mb/intel-ipsec-mb.h2409
-rw-r--r--src/spdk/intel-ipsec-mb/intel-ipsec-mb.spec110
-rw-r--r--src/spdk/intel-ipsec-mb/job_aes_hmac.asm144
-rw-r--r--src/spdk/intel-ipsec-mb/kasumi_iv.c79
-rw-r--r--src/spdk/intel-ipsec-mb/libIPSec_MB.def398
-rw-r--r--src/spdk/intel-ipsec-mb/libipsec-mb-dev.71
-rw-r--r--src/spdk/intel-ipsec-mb/libipsec-mb.7144
-rw-r--r--src/spdk/intel-ipsec-mb/mb_mgr_code.h1770
-rw-r--r--src/spdk/intel-ipsec-mb/mb_mgr_datastruct.asm330
-rw-r--r--src/spdk/intel-ipsec-mb/md5_one_block.c232
-rw-r--r--src/spdk/intel-ipsec-mb/no-aesni/aes128_cbc_dec_by4_sse_no_aesni.asm30
-rw-r--r--src/spdk/intel-ipsec-mb/no-aesni/aes128_cbc_mac_x4_no_aesni.asm33
-rw-r--r--src/spdk/intel-ipsec-mb/no-aesni/aes128_cntr_by4_sse_no_aesni.asm31
-rw-r--r--src/spdk/intel-ipsec-mb/no-aesni/aes128_cntr_ccm_by4_sse_no_aesni.asm30
-rw-r--r--src/spdk/intel-ipsec-mb/no-aesni/aes192_cbc_dec_by4_sse_no_aesni.asm30
-rw-r--r--src/spdk/intel-ipsec-mb/no-aesni/aes192_cntr_by4_sse_no_aesni.asm31
-rw-r--r--src/spdk/intel-ipsec-mb/no-aesni/aes256_cbc_dec_by4_sse_no_aesni.asm30
-rw-r--r--src/spdk/intel-ipsec-mb/no-aesni/aes256_cntr_by4_sse_no_aesni.asm31
-rw-r--r--src/spdk/intel-ipsec-mb/no-aesni/aes_cbc_enc_128_x4_no_aesni.asm30
-rw-r--r--src/spdk/intel-ipsec-mb/no-aesni/aes_cbc_enc_192_x4_no_aesni.asm30
-rw-r--r--src/spdk/intel-ipsec-mb/no-aesni/aes_cbc_enc_256_x4_no_aesni.asm30
-rw-r--r--src/spdk/intel-ipsec-mb/no-aesni/aes_cfb_128_sse_no_aesni.asm30
-rw-r--r--src/spdk/intel-ipsec-mb/no-aesni/aes_ecb_by4_sse_no_aesni.asm35
-rw-r--r--src/spdk/intel-ipsec-mb/no-aesni/aes_xcbc_mac_128_x4_no_aesni.asm30
-rw-r--r--src/spdk/intel-ipsec-mb/no-aesni/aesni_emu.c375
-rw-r--r--src/spdk/intel-ipsec-mb/no-aesni/gcm128_sse_no_aesni.asm33
-rw-r--r--src/spdk/intel-ipsec-mb/no-aesni/gcm192_sse_no_aesni.asm33
-rw-r--r--src/spdk/intel-ipsec-mb/no-aesni/gcm256_sse_no_aesni.asm33
-rw-r--r--src/spdk/intel-ipsec-mb/no-aesni/mb_mgr_aes192_flush_sse_no_aesni.asm30
-rw-r--r--src/spdk/intel-ipsec-mb/no-aesni/mb_mgr_aes192_submit_sse_no_aesni.asm30
-rw-r--r--src/spdk/intel-ipsec-mb/no-aesni/mb_mgr_aes256_flush_sse_no_aesni.asm30
-rw-r--r--src/spdk/intel-ipsec-mb/no-aesni/mb_mgr_aes256_submit_sse_no_aesni.asm30
-rw-r--r--src/spdk/intel-ipsec-mb/no-aesni/mb_mgr_aes_ccm_auth_submit_flush_sse_no_aesni.asm32
-rw-r--r--src/spdk/intel-ipsec-mb/no-aesni/mb_mgr_aes_cmac_submit_flush_sse_no_aesni.asm31
-rw-r--r--src/spdk/intel-ipsec-mb/no-aesni/mb_mgr_aes_flush_sse_no_aesni.asm30
-rw-r--r--src/spdk/intel-ipsec-mb/no-aesni/mb_mgr_aes_submit_sse_no_aesni.asm30
-rw-r--r--src/spdk/intel-ipsec-mb/no-aesni/mb_mgr_aes_xcbc_flush_sse_no_aesni.asm30
-rw-r--r--src/spdk/intel-ipsec-mb/no-aesni/mb_mgr_aes_xcbc_submit_sse_no_aesni.asm30
-rw-r--r--src/spdk/intel-ipsec-mb/no-aesni/mb_mgr_sse_no_aesni.c734
-rw-r--r--src/spdk/intel-ipsec-mb/no-aesni/pon_sse_no_aesni.asm33
-rw-r--r--src/spdk/intel-ipsec-mb/no-aesni/snow3g_sse_no_aesni.c43
-rw-r--r--src/spdk/intel-ipsec-mb/sha_one_block.c575
-rw-r--r--src/spdk/intel-ipsec-mb/snow3g_iv.c97
-rw-r--r--src/spdk/intel-ipsec-mb/snow3g_tables.c757
-rw-r--r--src/spdk/intel-ipsec-mb/sse/aes128_cbc_dec_by4_sse.asm532
-rw-r--r--src/spdk/intel-ipsec-mb/sse/aes128_cbc_mac_x4.asm31
-rw-r--r--src/spdk/intel-ipsec-mb/sse/aes128_cntr_by4_sse.asm545
-rw-r--r--src/spdk/intel-ipsec-mb/sse/aes128_cntr_ccm_by4_sse.asm32
-rw-r--r--src/spdk/intel-ipsec-mb/sse/aes192_cbc_dec_by4_sse.asm590
-rw-r--r--src/spdk/intel-ipsec-mb/sse/aes192_cntr_by4_sse.asm470
-rw-r--r--src/spdk/intel-ipsec-mb/sse/aes256_cbc_dec_by4_sse.asm634
-rw-r--r--src/spdk/intel-ipsec-mb/sse/aes256_cntr_by4_sse.asm483
-rw-r--r--src/spdk/intel-ipsec-mb/sse/aes_cbc_enc_128_x4.asm380
-rw-r--r--src/spdk/intel-ipsec-mb/sse/aes_cbc_enc_192_x4.asm349
-rw-r--r--src/spdk/intel-ipsec-mb/sse/aes_cbc_enc_256_x4.asm368
-rw-r--r--src/spdk/intel-ipsec-mb/sse/aes_cfb_128_sse.asm167
-rw-r--r--src/spdk/intel-ipsec-mb/sse/aes_ecb_by4_sse.asm654
-rw-r--r--src/spdk/intel-ipsec-mb/sse/aes_xcbc_mac_128_x4.asm303
-rw-r--r--src/spdk/intel-ipsec-mb/sse/gcm128_sse.asm30
-rw-r--r--src/spdk/intel-ipsec-mb/sse/gcm192_sse.asm31
-rw-r--r--src/spdk/intel-ipsec-mb/sse/gcm256_sse.asm31
-rw-r--r--src/spdk/intel-ipsec-mb/sse/gcm_sse.asm2586
-rw-r--r--src/spdk/intel-ipsec-mb/sse/kasumi_sse.c385
-rw-r--r--src/spdk/intel-ipsec-mb/sse/mb_mgr_aes192_flush_sse.asm30
-rw-r--r--src/spdk/intel-ipsec-mb/sse/mb_mgr_aes192_submit_sse.asm30
-rw-r--r--src/spdk/intel-ipsec-mb/sse/mb_mgr_aes256_flush_sse.asm30
-rw-r--r--src/spdk/intel-ipsec-mb/sse/mb_mgr_aes256_submit_sse.asm30
-rw-r--r--src/spdk/intel-ipsec-mb/sse/mb_mgr_aes_ccm_auth_submit_flush_sse.asm518
-rw-r--r--src/spdk/intel-ipsec-mb/sse/mb_mgr_aes_cmac_submit_flush_sse.asm502
-rw-r--r--src/spdk/intel-ipsec-mb/sse/mb_mgr_aes_flush_sse.asm217
-rw-r--r--src/spdk/intel-ipsec-mb/sse/mb_mgr_aes_submit_sse.asm187
-rw-r--r--src/spdk/intel-ipsec-mb/sse/mb_mgr_aes_xcbc_flush_sse.asm242
-rw-r--r--src/spdk/intel-ipsec-mb/sse/mb_mgr_aes_xcbc_submit_sse.asm263
-rw-r--r--src/spdk/intel-ipsec-mb/sse/mb_mgr_hmac_flush_ni_sse.asm305
-rw-r--r--src/spdk/intel-ipsec-mb/sse/mb_mgr_hmac_flush_sse.asm302
-rw-r--r--src/spdk/intel-ipsec-mb/sse/mb_mgr_hmac_md5_flush_sse.asm318
-rw-r--r--src/spdk/intel-ipsec-mb/sse/mb_mgr_hmac_md5_submit_sse.asm356
-rw-r--r--src/spdk/intel-ipsec-mb/sse/mb_mgr_hmac_sha_224_flush_ni_sse.asm28
-rw-r--r--src/spdk/intel-ipsec-mb/sse/mb_mgr_hmac_sha_224_flush_sse.asm31
-rw-r--r--src/spdk/intel-ipsec-mb/sse/mb_mgr_hmac_sha_224_submit_ni_sse.asm28
-rw-r--r--src/spdk/intel-ipsec-mb/sse/mb_mgr_hmac_sha_224_submit_sse.asm31
-rw-r--r--src/spdk/intel-ipsec-mb/sse/mb_mgr_hmac_sha_256_flush_ni_sse.asm333
-rw-r--r--src/spdk/intel-ipsec-mb/sse/mb_mgr_hmac_sha_256_flush_sse.asm356
-rw-r--r--src/spdk/intel-ipsec-mb/sse/mb_mgr_hmac_sha_256_submit_ni_sse.asm401
-rw-r--r--src/spdk/intel-ipsec-mb/sse/mb_mgr_hmac_sha_256_submit_sse.asm427
-rw-r--r--src/spdk/intel-ipsec-mb/sse/mb_mgr_hmac_sha_384_flush_sse.asm31
-rw-r--r--src/spdk/intel-ipsec-mb/sse/mb_mgr_hmac_sha_384_submit_sse.asm31
-rw-r--r--src/spdk/intel-ipsec-mb/sse/mb_mgr_hmac_sha_512_flush_sse.asm331
-rw-r--r--src/spdk/intel-ipsec-mb/sse/mb_mgr_hmac_sha_512_submit_sse.asm412
-rw-r--r--src/spdk/intel-ipsec-mb/sse/mb_mgr_hmac_submit_ni_sse.asm370
-rw-r--r--src/spdk/intel-ipsec-mb/sse/mb_mgr_hmac_submit_sse.asm364
-rw-r--r--src/spdk/intel-ipsec-mb/sse/mb_mgr_sse.c809
-rw-r--r--src/spdk/intel-ipsec-mb/sse/md5_x4x2_sse.asm787
-rw-r--r--src/spdk/intel-ipsec-mb/sse/pon_sse.asm875
-rw-r--r--src/spdk/intel-ipsec-mb/sse/sha1_mult_sse.asm435
-rw-r--r--src/spdk/intel-ipsec-mb/sse/sha1_ni_x2_sse.asm493
-rw-r--r--src/spdk/intel-ipsec-mb/sse/sha1_one_block_sse.asm512
-rw-r--r--src/spdk/intel-ipsec-mb/sse/sha224_one_block_sse.asm33
-rw-r--r--src/spdk/intel-ipsec-mb/sse/sha256_ni_x2_sse.asm614
-rw-r--r--src/spdk/intel-ipsec-mb/sse/sha256_one_block_sse.asm512
-rw-r--r--src/spdk/intel-ipsec-mb/sse/sha384_one_block_sse.asm33
-rw-r--r--src/spdk/intel-ipsec-mb/sse/sha512_one_block_sse.asm480
-rw-r--r--src/spdk/intel-ipsec-mb/sse/sha512_x2_sse.asm449
-rw-r--r--src/spdk/intel-ipsec-mb/sse/sha_256_mult_sse.asm457
-rw-r--r--src/spdk/intel-ipsec-mb/sse/snow3g_sse.c42
-rwxr-xr-xsrc/spdk/intel-ipsec-mb/sse/zuc_sse.asm1152
-rwxr-xr-xsrc/spdk/intel-ipsec-mb/sse/zuc_sse_top.c554
-rw-r--r--src/spdk/intel-ipsec-mb/version.c41
-rw-r--r--src/spdk/intel-ipsec-mb/win_x64.mak485
-rw-r--r--src/spdk/intel-ipsec-mb/zuc_iv.c103
318 files changed, 132045 insertions, 0 deletions
diff --git a/src/spdk/intel-ipsec-mb/.github/PULL_REQUEST_TEMPLATE.md b/src/spdk/intel-ipsec-mb/.github/PULL_REQUEST_TEMPLATE.md
new file mode 100755
index 000000000..48180bff3
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/.github/PULL_REQUEST_TEMPLATE.md
@@ -0,0 +1,34 @@
+<!--- Provide a general summary of your changes in the Title above -->
+
+## Description
+<!--- Describe your changes in detail -->
+
+## Affected parts
+<!--- Go over all the following points, and put an `x` in all the boxes that apply. -->
+<!--- If you're unsure about any of these, don't hesitate to ask. We're here to help! -->
+- [ ] Library
+- [ ] LibTestApp
+- [ ] LibPerfApp
+- [ ] Other: (please specify)
+
+## Motivation and Context
+<!--- Why is this change required? What problem does it solve? -->
+<!--- If it fixes an open issue, please link to the issue here. -->
+
+## How Has This Been Tested?
+<!--- Please describe in detail how you tested your changes. -->
+<!--- Include details of your testing environment, tests ran to see how -->
+<!--- your change affects other areas of the code, etc. -->
+
+## Types of changes
+<!--- What types of changes does your code introduce? Put an `x` in all the boxes that apply: -->
+- [ ] Bug fix (non-breaking change which fixes an issue)
+- [ ] New feature (non-breaking change which adds functionality)
+- [ ] Breaking change (fix or feature that would cause existing functionality to not work as expected)
+
+## Checklist:
+<!--- Go over all the following points, and put an `x` in all the boxes that apply. -->
+<!--- If you're unsure about any of these, don't hesitate to ask. We're here to help! -->
+- [ ] My code follows the code style of this project.
+- [ ] My change requires a change to the documentation.
+- [ ] I have updated the documentation accordingly.
diff --git a/src/spdk/intel-ipsec-mb/.gitignore b/src/spdk/intel-ipsec-mb/.gitignore
new file mode 100644
index 000000000..bd20caf86
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/.gitignore
@@ -0,0 +1,6 @@
+*.o
+*.so
+*.a
+*~
+ipsec_MB_testapp
+ipsec_perf
diff --git a/src/spdk/intel-ipsec-mb/CONTRIBUTING b/src/spdk/intel-ipsec-mb/CONTRIBUTING
new file mode 100755
index 000000000..4190b04aa
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/CONTRIBUTING
@@ -0,0 +1,319 @@
+Contributing to intel-ipsec-mb
+==============================
+
+As an open source project, we welcome contributions of any kind.
+These can range from bug reports, code reviews and code development,
+to significant code or documentation features.
+
+Note:
+There is just one branch used in the project. All development is done on the
+master branch. Code taken from the tip of the master branch should not be
+considered fit for production.
+Refer to the releases tab for stable code versions:
+https://github.com/intel/intel-ipsec-mb/releases
+
+
+How can I contribute?
+=====================
+
+This document specifies contributing guidelines to the intel-ipsec-mb source
+tree. It covers some general guidelines, the preferred style and formatting
+for source files, and additional requirements like documentation and the
+development workflow. The goal of this document is to walk you through the
+concepts and specifics that should be understood while contributing to
+intel-ipsec-mb.
+
+
+Reporting Bugs
+==============
+
+Bugs should be reported via GitHub issues. The description should include
+a platform name, OS and kernel version, library version and detailed
+information on how to reproduce the bug.
+
+
+Suggesting Enhancements
+=======================
+
+Improvements should be reported via GitHub issues or pull requests.
+
+
+Creating Pull Requests
+======================
+
+Pull requests should be created using the standard procedure available on
+GitHub. It is important to fill in all required information in the template.
+For major modifications (e.g. adding a new feature, refactoring), it is
+recommended to share a high-level design document with the core development
+team via a GitHub issue first, so that questions can be raised early and
+potential conflicts with existing development can be spotted.
+
+
+Coding Style Guides
+===================
+
+General Guidelines
+==================
+
+The rules and guidelines given in this document cannot cover every situation,
+so the following general guidelines should be used as a fallback:
+
+The code style should be consistent within each individual file.
+When creating new files, the style should be consistent within each file
+in a given directory or module. The primary reason for coding standards
+is to increase code readability and comprehensibility, so always choose
+whatever option makes the code easier to read. Line length is recommended
+not to exceed 80 characters, including comments.
+
+
+C
+=
+
+Formatting using checkpatch.pl
+==============================
+
+To format your code please use checkpatch.pl script (version 0.32) from
+Linux kernel
+(https://github.com/torvalds/linux/blob/master/scripts/checkpatch.pl).
+
+The top level Makefile contains a target "style" that can be used to check
+formatting. Please ensure the checkpatch.pl script has been added to your PATH.
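+
+For example, assuming checkpatch.pl is available in your PATH, the formatting
+check can be run from the top level directory with:
+
+make style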
+
+
+Indentation
+===========
+
+Tabs are 8 characters and thus indentations are also 8 characters.
+Indentation should be consistent within each part of the code. When adding
+a new file, spaces should be used instead of tabs.
+
+
+C Comment Style
+===============
+
+Usual Comments
+==============
+
+These comments should be used in normal cases. To document a public API,
+a doxygen-like format must be used: refer to Doxygen Guidelines
+(http://www.doxygen.nl/manual/docblocks.html).
+
+/*
+ * VERY important single-line comments look like this.
+ */
+
+/* Most single-line comments look like this. */
+
+/*
+ * Multi-line comments look like this. Make them real sentences. Fill
+ * them so they look like real paragraphs.
+ */
+
+
+License Header
+==============
+
+Each file should begin with a special comment containing the appropriate
+copyright and license for the file. After any copyright header, a blank line
+should be left before any other contents, e.g. include statements in a C file.
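+
+A minimal sketch of the expected layout for a C file (use the actual copyright
+years and the full BSD license text from the LICENSE file):
+
+/*
+ * Copyright (c) 20xx, Intel Corporation
+ *
+ * [full BSD license text]
+ */
+
+#include <stdio.h>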
+
+
+Preprocessor Directives (Header Includes)
+=========================================
+
+Include files from the local application directory are included using quotes,
+while includes from other paths are included using angle brackets: "<>".
+
+Example:
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "intel-ipsec-mb.h"
+#include "asm.h"
+
+
+Header File Guards
+==================
+
+Headers should be protected against multiple inclusion with the usual:
+
+#ifndef _FILE_H_
+#define _FILE_H_
+
+/* Code */
+
+#endif /* _FILE_H_ */
+
+
+Macros
+======
+
+Macros are defined in C using the #define preprocessor directive.
+
+For example:
+
+/**
+ * ---------------------------------------
+ * Local macros
+ * ---------------------------------------
+ */
+
+/*
+ * Custom ASSERT and DIM macros
+ */
+#ifdef DEBUG
+#include <assert.h>
+#define IMB_ASSERT(x) assert(x)
+#else
+#define IMB_ASSERT(x)
+#endif
+
+#ifndef IMB_DIM
+#define IMB_DIM(x) (sizeof(x) / sizeof(x[0]))
+#endif
+
+
+ASM
+===
+
+Syntax
+======
+
+Intel syntax should always be used.
+
+
+Register Naming
+===============
+
+Virtual registers with meaningful names should be used over direct register
+names when possible.
+
+
+Indentation
+===========
+
+Tabs are 8 characters and thus indentations are also 8 characters.
+To improve readability, instructions should be preceded by a single indent
+and followed by one or more indents in order to align the first operand.
+Spaces should be used over tabs.
+
+Example:
+ vmovdqu %%T5, [%%GDATA + HashKey_6]
+ vpshufd %%T2, %%XMM2, 01001110b
+ vpshufd %%T3, %%T5, 01001110b
+ vpclmulqdq %%T4, %%XMM2, %%T5, 0x11
+
+
+Comment Style
+=============
+
+Two semicolons should be used for comment lines and a single semicolon
+for end of line comments.
+
+Example:
+ ;; first phase of the reduction
+ vmovdqu %%T3, [rel POLY2]
+
+ vpclmulqdq %%T2, %%T3, %%T7, 0x01
+ vpslldq %%T2, %%T2, 8 ; shift-L xmm2 2 DWs
+
+
+Macros
+======
+
+Macros should be used where possible to reduce code duplication. All macros
+should be properly documented, specifying input/output parameters and
+their types.
+
+Example:
+%macro AESROUND_1_TO_16_BLOCKS 5
+%define %%L0B0_3 %1 ; [in/out] ZMM; blocks 0 to 3
+%define %%L0B4_7 %2 ; [in/out] ZMM; blocks 4 to 7
+%define %%KEY %3 ; [in] ZMM containing round key
+%define %%ROUND %4 ; [in] numerical value containing round number
+%define %%IA0 %5 ; [clobbered] temp GP register
+
+Macros should be located within or before the .text section in the file.
+
+
+License Header
+==============
+
+Each file should begin with a special comment containing the appropriate
+copyright and license for the file. After any copyright header, a blank line
+should be left before any other contents.
+
+
+File and Code Structure
+=======================
+
+New files should be structured in the following layout:
+ 1. License header
+ 2. .data section
+ 3. .text section
+
+Please see avx512/cntr_vaes_avx512.asm for an example.
+All new modules should compile to produce relocatable code.
+
+
+Public APIs in the library
+==========================
+
+All functions that are exposed by the library must have their prototypes
+defined in intel-ipsec-mb.h and symbols added to libIPSec_MB.def.
+
+
+Documentation
+=============
+
+Please make sure to update documentation when necessary. If not possible
+(e.g. not allowed to edit wiki), propose necessary changes.
+
+
+Git Commit Messages
+===================
+
+Git commit messages should start with a short summary (50 characters or less)
+in a single paragraph. Ideally, the summary should start with a short prefix
+followed by a colon, describing which part of the code it modifies,
+e.g. "LibTestApp: extended AES-CBC tests".
+
+
+Development Workflow
+====================
+
+Clone a repository in the usual way, for example:
+
+git clone https://github.com/intel/intel-ipsec-mb
+
+Once your local repository is set up as above, you must use
+the following workflow.
+
+Make sure you have the latest upstream changes:
+
+git remote update
+git checkout master
+git pull origin master
+
+
+Committing a Change
+===================
+
+Make your changes, commit them, and submit them for review:
+
+git commit -a
+
+To see how to create pull requests on GitHub, please refer to "About pull
+requests" help page (https://help.github.com/articles/about-pull-requests/).
+
+Note: Please ensure that you have your username and email address set correctly
+to let other developers know about your contribution:
+
+git config --global user.name "Firstname Lastname"
+git config --global user.email "your_email@youremail.com"
+
+
+Licenses
+========
+
+The code in this repository is licensed under BSD license (see LICENSE file).
diff --git a/src/spdk/intel-ipsec-mb/LICENSE b/src/spdk/intel-ipsec-mb/LICENSE
new file mode 100644
index 000000000..e4b394446
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/LICENSE
@@ -0,0 +1,24 @@
+Copyright (c) 2012-2018, Intel Corporation
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/src/spdk/intel-ipsec-mb/LibPerfApp/Makefile b/src/spdk/intel-ipsec-mb/LibPerfApp/Makefile
new file mode 100644
index 000000000..0880d5e29
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/LibPerfApp/Makefile
@@ -0,0 +1,87 @@
+# Copyright (c) 2017-2018, Intel Corporation
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+# * Neither the name of Intel Corporation nor the names of its contributors
+# may be used to endorse or promote products derived from this software
+# without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+EXE=ipsec_perf
+INSTPATH ?= /usr/include/intel-ipsec-mb.h
+
+CFLAGS = -DLINUX -D_GNU_SOURCE $(INCLUDES) \
+ -W -Wall -Wextra -Wmissing-declarations -Wpointer-arith \
+ -Wcast-qual -Wundef -Wwrite-strings \
+ -Wformat -Wformat-security \
+ -Wunreachable-code -Wmissing-noreturn -Wsign-compare -Wno-endif-labels \
+ -Wstrict-prototypes -Wmissing-prototypes -Wold-style-definition \
+ -pthread -fno-strict-overflow -fno-delete-null-pointer-checks -fwrapv
+
+LDFLAGS = -fPIE -z noexecstack -z relro -z now -pthread
+LDLIBS = -lIPSec_MB
+
+ifeq ("$(shell test -e $(INSTPATH) && echo -n yes)","yes")
+# library installed
+CFLAGS +=
+else
+# library not installed
+CFLAGS += -I../include -I../
+LDFLAGS += -L../
+endif
+
+ifeq ($(DEBUG),y)
+CFLAGS += -g -DDEBUG -O0
+LDFLAGS += -g
+else
+CFLAGS += -O3 -fPIE -fstack-protector -D_FORTIFY_SOURCE=2
+endif
+
+ifeq ($(GCM_BIG_DATA),y)
+CFLAGS += -DGCM_BIG_DATA
+endif
+
+SOURCES := ipsec_perf.c msr.c
+OBJECTS := $(SOURCES:%.c=%.o)
+
+CHECKPATCH ?= checkpatch.pl
+CPPCHECK ?= cppcheck
+
+.PHONY: all clean style cppcheck
+
+all: $(EXE)
+
+$(EXE): $(OBJECTS)
+ $(CC) $(LDFLAGS) $^ $(LDLIBS) -o $@
+
+ipsec_perf.o: $(SOURCES)
+
+.PHONY: clean
+clean:
+ -rm -f $(OBJECTS)
+ -rm -f $(EXE)
+
+SOURCES_STYLE := $(foreach infile,$(SOURCES),-f $(infile))
+CHECKPATCH?=checkpatch.pl
+.PHONY: style
+style:
+ $(CHECKPATCH) --no-tree --no-signoff --emacs --no-color \
+--ignore CODE_INDENT,INITIALISED_STATIC,LEADING_SPACE,SPLIT_STRING,\
+UNSPECIFIED_INT,ARRAY_SIZE,BLOCK_COMMENT_STYLE,GLOBAL_INITIALISERS,\
+COMPLEX_MACRO,SPACING,STORAGE_CLASS $(SOURCES_STYLE)
diff --git a/src/spdk/intel-ipsec-mb/LibPerfApp/README b/src/spdk/intel-ipsec-mb/LibPerfApp/README
new file mode 100644
index 000000000..dad423d9a
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/LibPerfApp/README
@@ -0,0 +1,82 @@
+========================================================================
+README for Intel(R) Multi-Buffer Crypto for IPsec Library API
+performance measurement tool
+
+February 2017
+========================================================================
+
+
+Contents
+========
+
+- Overview
+- Files
+- Compilation
+- Usage
+- Legal Disclaimer
+
+
+Overview
+========
+This test tool performs multiple executions of functions included in the
+Intel Multi-Buffer Crypto for IPsec Library.
+
+Files
+=====
+
+ipsec_perf.c - Tool which produces text-formatted output representing
+               average execution times of ipsec_mb functions.
+ipsec_diff_tool.py - Tool which analyzes and compares the text data produced
+                     by ipsec_perf.
+
+Compilation
+===========
+
+Required tools:
+- GNU make
+- gcc (GCC) 4.8.3 (or newer)
+
+Simply run "make" to compile the tool.
+To clean the build please run "make clean".
+
+You can point to another directory containing the IPSec MB library by setting
+LIB_LOC, for example:
+ LIB_LOC=../ipsec_mb_lib make
+
+In order to perform static code analysis or style check you can do:
+ make cppcheck
+or
+ make style
+
+Be aware that you need to have the cppcheck tool installed and the
+checkpatch.pl script copied into one of the directories listed in $PATH.
+You can also set the CPPCHECK and/or CHECKPATCH variables if you want to give
+paths to these tools when they are placed in different directories, for
+example:
+ CPPCHECK=~/tools/cppcheck make cppcheck
+ CHECKPATCH=~/scripts/checkpatch.pl make style
+
+Usage
+=====
+
+You can simply check the list of arguments by typing:
+ ./ipsec_perf -h
+
+Usage example:
+ ./ipsec_perf -c --no-avx512 --no-gcm -o 24
+
+Later you can pass the output to ipsec_diff_tool.py for data
+analysis:
+ ./ipsec_diff_tool.py out1.txt out2.txt 5
+
+Run ipsec_diff_tool.py -h to see the help page.
+
+Legal Disclaimer
+================
+
+THIS SOFTWARE IS PROVIDED BY INTEL "AS IS". NO LICENSE, EXPRESS OR
+IMPLIED, BY ESTOPPEL OR OTHERWISE, TO ANY INTELLECTUAL PROPERTY RIGHTS
+ARE GRANTED THROUGH USE. EXCEPT AS PROVIDED IN INTEL'S TERMS AND
+CONDITIONS OF SALE, INTEL ASSUMES NO LIABILITY WHATSOEVER AND INTEL
+DISCLAIMS ANY EXPRESS OR IMPLIED WARRANTY, RELATING TO SALE AND/OR
+USE OF INTEL PRODUCTS INCLUDING LIABILITY OR WARRANTIES RELATING TO
+FITNESS FOR A PARTICULAR PURPOSE, MERCHANTABILITY, OR INFRINGEMENT
+OF ANY PATENT, COPYRIGHT OR OTHER INTELLECTUAL PROPERTY RIGHT.
diff --git a/src/spdk/intel-ipsec-mb/LibPerfApp/ipsec_diff_tool.py b/src/spdk/intel-ipsec-mb/LibPerfApp/ipsec_diff_tool.py
new file mode 100755
index 000000000..1e8219f53
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/LibPerfApp/ipsec_diff_tool.py
@@ -0,0 +1,308 @@
+#!/usr/bin/env python
+
+"""
+**********************************************************************
+ Copyright(c) 2017-2018, Intel Corporation All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+**********************************************************************
+"""
+
+import sys
+
+# Number of parameters (ARCH, CIPHER_MODE, DIR, HASH_ALG, KEY_SIZE)
+PAR_NUM = 5
+
+class Variant(object):
+ """
+ Class representing one test including chosen parameters and
+ results of average execution times
+ """
+ def __init__(self, **args):
+ self.params = (args['arch'], args['cipher'], args['dir'], args['alg'],
+ args['keysize'])
+
+ self.avg_times = []
+ self.slope = None
+ self.intercept = None
+
+ def set_times(self, avg_times):
+ """
+ Fills test execution time list
+ """
+ self.avg_times = avg_times
+
+ def lin_reg(self, sizes):
+ """
+ Computes linear regression of set of coordinates (x,y)
+ """
+
+ n = len(sizes)
+
+ if n != len(self.avg_times):
+ print "Error!"
+ return None
+
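+        # Ordinary least-squares fit of avg_times (y) against sizes (x):
+        #   slope = (n*sum(xy) - sum(x)*sum(y)) / (n*sum(x^2) - sum(x)^2)
+        #   intercept = (sum(y) - slope*sum(x)) / n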
+ sumx = sum(sizes)
+ sumy = sum(self.avg_times)
+ sumxy = sum([x * y for x, y in zip(sizes, self.avg_times)])
+ sumsqrx = sum([pow(x, 2) for x in sizes])
+ self.slope = (n * sumxy - sumx * sumy) / float(n * sumsqrx - pow(sumx, 2))
+ self.intercept = (sumy - self.slope * sumx) / float(n)
+
+ def get_params_str(self):
+ """
+ Returns all parameters concatenated into one string
+ """
+ return "\t".join(i for i in self.params)
+
+ def get_lin_func_str(self):
+ """
+ Returns string having linear coefficients
+ """
+ slope = "{:.5f}".format(self.slope)
+ intercept = "{:.5f}".format(self.intercept)
+ return "{}\t{}".format(slope, intercept)
+
+class VarList(list):
+ """
+ Class used to store all test variants as a list of objects
+ """
+
+ def find_obj(self, params):
+ """
+        Finds first occurrence of object containing given parameters
+ """
+ ret_val = None
+ matches = (obj for obj in self if obj.params == params)
+ try:
+ ret_val = next(matches)
+ except StopIteration:
+ pass
+ return ret_val
+
+ def compare(self, list_b, tolerance):
+ """
+        Finds variants from the two data sets which match and compares
+        their linear regression coefficients.
+        Compares list_b against this list (the one the method is called on).
+ """
+
+ if tolerance is None:
+ tolerance = 5.0
+ if tolerance < 0.0:
+ print "Bad argument: Tolerance must not be less than 0%"
+ exit(1)
+ print "TOLERANCE: {:.2f}%".format(tolerance)
+
+ warning = False
+ print "NO\tARCH\tCIPHER\tDIR\tHASH\tKEYSZ\tSLOPE A\tINTERCEPT A\tSLOPE B\tINTERCEPT B"
+ for i, obj_a in enumerate(self):
+ obj_b = list_b.find_obj(obj_a.params)
+ if obj_b != None:
+ if obj_a.slope < 0.0:
+ obj_a.slope = 0
+ if obj_b.slope < 0.0:
+ obj_b.slope = 0
+ slope_bv = 0.01 * tolerance * obj_a.slope # border value
+ intercept_bv = 0.01 * tolerance * obj_a.intercept
+ diff_slope = obj_b.slope - obj_a.slope
+ diff_intercept = obj_b.intercept - obj_a.intercept
+ if (obj_a.slope > 0.001 and obj_b.slope > 0.001 and
+ diff_slope > slope_bv) or diff_intercept > intercept_bv:
+ warning = True
+ print "{}\t{}\t{}\t{}".format(i + 1,
+ obj_b.get_params_str(),
+ obj_a.get_lin_func_str(),
+ obj_b.get_lin_func_str())
+ if not warning:
+ print "No differences found."
+ return warning
+
+ def printout(self):
+ """
+ Prints out readable representation of the list
+ """
+
+ print "NO\tARCH\tCIPHER\tDIR\tHASH\tKEYSZ\tSLOPE \tINTERCEPT"
+ for i, obj in enumerate(self):
+ print "{}\t{}\t{}".format(i + 1,
+ obj.get_params_str(),
+ obj.get_lin_func_str())
+
+
+
+class Parser(object):
+ """
+    Class used to parse a text file containing performance data
+ """
+
+ def __init__(self, fname, verbose):
+ self.fname = fname
+ self.verbose = verbose
+
+ @staticmethod
+ def convert2int(in_tuple):
+ """
+ Converts a tuple of strings into a list of integers
+ """
+
+ result = list(in_tuple) # Converting to list
+ result = [int(i) for i in result] # Converting str to int
+ return result
+
+ def load(self):
+ """
+ Reads a text file by columns, stores data in objects
+        for further comparison of performance
+ """
+
+ v_list = VarList()
+ # Reading by columns, results in list of tuples
+ # Each tuple is representing a column from a text file
+ try:
+ f = open(self.fname, 'r')
+ except IOError:
+ print "Error reading {} file.".format(self.fname)
+ exit(1)
+ else:
+ with f:
+ cols = zip(*(line.strip().split('\t') for line in f))
+
+            # Reading first column with payload sizes, omitting first 5 rows
+ sizes = self.convert2int(cols[0][PAR_NUM:])
+ if self.verbose:
+ print "Available buffer sizes:\n"
+ print sizes
+ print "========================================================"
+ print "\n\nVariants:\n"
+
+            # Reading remaining columns containing performance data
+ for row in cols[1:]:
+ # First rows are run options
+ arch, c_mode, c_dir, h_alg, key_size = row[:PAR_NUM]
+ if self.verbose:
+ print arch, c_mode, c_dir, h_alg, key_size
+
+ # Getting average times
+ avg_times = self.convert2int(row[PAR_NUM:])
+ if self.verbose:
+ print avg_times
+ print "------"
+
+ # Putting new object to the result list
+ v_list.append(Variant(arch=arch, cipher=c_mode, dir=c_dir,
+ alg=h_alg, keysize=key_size))
+ v_list[-1].set_times(avg_times)
+ # Finding linear function representation of data set
+ v_list[-1].lin_reg(sizes)
+ if self.verbose:
+ print "({}, {})".format(v_list[-1].slope, v_list[-1].intercept)
+ print "============\n"
+ return v_list, sizes
+
+class DiffTool(object):
+ """
+ Main class
+ """
+
+ def __init__(self):
+ self.fname_a = None
+ self.fname_b = None
+ self.tolerance = None
+ self.verbose = False
+ self.analyze = False
+
+ @staticmethod
+ def usage():
+ """
+ Prints usage
+ """
+ print "This tool compares file_b against file_a printing out differences."
+ print "Usage:"
+ print "\tipsec_diff_tool.py [-v] [-a] file_a file_b [tol]\n"
+ print "\t-v - verbose"
+ print "\t-a - takes only one argument: name of the file to analyze"
+ print "\tfile_a, file_b - text files containing output from ipsec_perf tool"
+ print "\ttol - tolerance [%], must be >= 0, default 5\n"
+ print "Examples:"
+ print "\tipsec_diff_tool.py file01.txt file02.txt 10"
+ print "\tipsec_diff_tool.py -a file02.txt"
+ print "\tipsec_diff_tool.py -v -a file01.txt"
+
+
+ def parse_args(self):
+ """
+ Get commandline arguments
+ """
+ if len(sys.argv) < 3 or sys.argv[1] == "-h":
+ self.usage()
+ exit(1)
+ if sys.argv[1] == "-a":
+ self.analyze = True
+ self.fname_a = sys.argv[2]
+ elif sys.argv[2] == "-a":
+ if sys.argv[1] == "-v":
+ self.verbose = True
+ self.analyze = True
+ self.fname_a = sys.argv[3]
+ elif sys.argv[1] == "-v":
+ self.verbose = True
+ self.fname_a = sys.argv[2]
+ self.fname_b = sys.argv[3]
+ if len(sys.argv) >= 5:
+ self.tolerance = float(sys.argv[4])
+
+ else:
+ self.fname_a = sys.argv[1]
+ self.fname_b = sys.argv[2]
+ if len(sys.argv) >= 4:
+ self.tolerance = float(sys.argv[3])
+
+ def run(self):
+ """
+ Main method
+ """
+ self.parse_args()
+
+ parser_a = Parser(self.fname_a, self.verbose)
+ list_a, sizes_a = parser_a.load()
+
+ if not self.analyze:
+ parser_b = Parser(self.fname_b, self.verbose)
+ list_b, sizes_b = parser_b.load()
+ if sizes_a != sizes_b:
+ print "Error. Buffer size lists in two compared " \
+ "data sets differ! Aborting.\n"
+ exit(1)
+ warning = list_a.compare(list_b, self.tolerance) # Compares list_b against list_a
+ if warning:
+ exit(2)
+ else:
+ list_a.printout() # Takes only one file and prints it out
+
+if __name__ == '__main__':
+ DiffTool().run()
diff --git a/src/spdk/intel-ipsec-mb/LibPerfApp/ipsec_perf.c b/src/spdk/intel-ipsec-mb/LibPerfApp/ipsec_perf.c
new file mode 100644
index 000000000..af81cdd72
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/LibPerfApp/ipsec_perf.c
@@ -0,0 +1,2581 @@
+/**********************************************************************
+ Copyright(c) 2017-2019, Intel Corporation All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+**********************************************************************/
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <inttypes.h>
+#include <string.h>
+#include <errno.h>
+#include <malloc.h> /* memalign() or _aligned_malloc()/aligned_free() */
+
+#ifdef _WIN32
+#include <windows.h>
+#include <process.h>
+#include <intrin.h>
+#define strdup _strdup
+#define __forceinline static __forceinline
+#else
+#include <x86intrin.h>
+#define __forceinline static inline __attribute__((always_inline))
+#include <unistd.h>
+#include <pthread.h>
+#include <sched.h>
+#endif
+
+#include <intel-ipsec-mb.h>
+
+#include "msr.h"
+
+/* memory size for test buffers */
+#define BUFSIZE (512 * 1024 * 1024)
+/* maximum size of a test buffer */
+#define JOB_SIZE_TOP (16 * 1024)
+/* min size of a buffer when testing range of buffers */
+#define DEFAULT_JOB_SIZE_MIN 16
+/* max size of a buffer when testing range of buffers */
+#define DEFAULT_JOB_SIZE_MAX (2 * 1024)
+/* number of bytes to increase buffer size when testing range of buffers */
+#define DEFAULT_JOB_SIZE_STEP 16
+/* max offset applied to a buffer - this is to avoid collisions in L1 */
+#define MAX_BUFFER_OFFSET 4096
+/* max value of sha_size_incr */
+#define MAX_SHA_SIZE_INCR 128
+/* region size for one buffer rounded up to 4K page size */
+#define REGION_SIZE (((JOB_SIZE_TOP + (MAX_BUFFER_OFFSET + \
+ MAX_SHA_SIZE_INCR)) + 4095) & (~4095))
+/* number of test buffers */
+#define NUM_OFFSETS (BUFSIZE / REGION_SIZE)
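+/* With the values above, REGION_SIZE = ((16384 + (4096 + 128)) + 4095) &
+ * ~4095 = 24576 bytes (6 x 4K pages) and
+ * NUM_OFFSETS = (512 * 1024 * 1024) / 24576 = 21845 test buffers.
+ */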
+#define NUM_RUNS 16
+/* maximum number of 128-bit expanded keys */
+#define KEYS_PER_JOB 15
+
+#define AAD_SIZE_MAX JOB_SIZE_TOP
+#define CCM_AAD_SIZE_MAX 46
+#define DEFAULT_GCM_AAD_SIZE 12
+#define DEFAULT_CCM_AAD_SIZE 8
+
+#define ITER_SCALE_SMOKE 2048
+#define ITER_SCALE_SHORT 200000
+#define ITER_SCALE_LONG 2000000
+
+#define BITS(x) (sizeof(x) * 8)
+#define DIM(x) (sizeof(x)/sizeof(x[0]))
+
+#define MAX_NUM_THREADS 16 /* Maximum number of threads that can be created */
+
+#define CIPHER_MODES_AES 7 /* CBC, CNTR, CNTR+8, CNTR_BITLEN,
+ CNTR_BITLEN-4, ECB, NULL_CIPHER */
+#define CIPHER_MODES_DOCSIS 4 /* AES DOCSIS, AES DOCSIS+8, DES DOCSIS,
+ DES DOCSIS+8 */
+#define CIPHER_MODES_DES 1 /* DES */
+#define CIPHER_MODES_GCM 1 /* GCM */
+#define CIPHER_MODES_CCM 1 /* CCM */
+#define CIPHER_MODES_3DES 1 /* 3DES */
+#define CIPHER_MODES_PON 2 /* PON, NO_CTR PON */
+#define DIRECTIONS 2 /* ENC, DEC */
+#define HASH_ALGS_AES 10 /* SHA1, SHA256, SHA224, SHA384, SHA512, XCBC,
+ MD5, NULL_HASH, CMAC, CMAC_BITLEN */
+#define HASH_ALGS_DOCSIS 1 /* NULL_HASH */
+#define HASH_ALGS_GCM 1 /* GCM */
+#define HASH_ALGS_CCM 1 /* CCM */
+#define HASH_ALGS_DES 1 /* NULL_HASH for DES */
+#define HASH_ALGS_3DES 1 /* NULL_HASH for 3DES */
+#define HASH_ALGS_PON 1 /* CRC32/BIP for PON */
+#define KEY_SIZES_AES 3 /* 16, 24, 32 */
+#define KEY_SIZES_DOCSIS 1 /* 16 or 8 */
+#define KEY_SIZES_GCM 3 /* 16, 24, 32 */
+#define KEY_SIZES_CCM 1 /* 16 */
+#define KEY_SIZES_DES 1 /* 8 */
+#define KEY_SIZES_3DES 1 /* 8 x 3 */
+#define KEY_SIZES_PON 1 /* 16 */
+
+#define IA32_MSR_FIXED_CTR_CTRL 0x38D
+#define IA32_MSR_PERF_GLOBAL_CTR 0x38F
+#define IA32_MSR_CPU_UNHALTED_THREAD 0x30A
+
+/* These defines tell how many different test cases are to be performed.
+ * They have to be multiplied by the number of chosen architectures.
+ */
+#define VARIANTS_PER_ARCH_AES (CIPHER_MODES_AES * DIRECTIONS * \
+ HASH_ALGS_AES * KEY_SIZES_AES)
+#define VARIANTS_PER_ARCH_DOCSIS (CIPHER_MODES_DOCSIS * DIRECTIONS * \
+ HASH_ALGS_DOCSIS * KEY_SIZES_DOCSIS)
+#define VARIANTS_PER_ARCH_GCM (CIPHER_MODES_GCM * DIRECTIONS * \
+ HASH_ALGS_GCM * KEY_SIZES_GCM)
+#define VARIANTS_PER_ARCH_CCM (CIPHER_MODES_CCM * DIRECTIONS * \
+ HASH_ALGS_CCM * KEY_SIZES_CCM)
+#define VARIANTS_PER_ARCH_DES (CIPHER_MODES_DES * DIRECTIONS * \
+ HASH_ALGS_DES * KEY_SIZES_DES)
+#define VARIANTS_PER_ARCH_3DES (CIPHER_MODES_3DES * DIRECTIONS * \
+ HASH_ALGS_3DES * KEY_SIZES_3DES)
+#define VARIANTS_PER_ARCH_PON (CIPHER_MODES_PON * DIRECTIONS * \
+ HASH_ALGS_PON * KEY_SIZES_PON)
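+/* For example, VARIANTS_PER_ARCH_AES = 7 * 2 * 10 * 3 = 420 test cases
+ * per architecture.
+ */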
+
+enum arch_type_e {
+ ARCH_SSE = 0,
+ ARCH_AVX,
+ ARCH_AVX2,
+ ARCH_AVX512,
+ NUM_ARCHS
+};
+
+enum test_type_e {
+ TTYPE_AES_HMAC,
+ TTYPE_AES_DOCSIS,
+ TTYPE_AES_GCM,
+ TTYPE_AES_CCM,
+ TTYPE_AES_DES,
+ TTYPE_AES_3DES,
+ TTYPE_PON,
+ TTYPE_CUSTOM,
+ NUM_TTYPES
+};
+
+/* This enum will be mostly translated to JOB_CIPHER_MODE
+ * (make sure to update c_mode_names list in print_times function) */
+enum test_cipher_mode_e {
+ TEST_CBC = 1,
+ TEST_CNTR,
+ TEST_CNTR8, /* CNTR with increased buffer by 8 */
+ TEST_CNTR_BITLEN, /* CNTR-BITLEN */
+ TEST_CNTR_BITLEN4, /* CNTR-BITLEN with 4 less bits in the last byte */
+ TEST_ECB,
+ TEST_NULL_CIPHER,
+ TEST_AESDOCSIS,
+ TEST_AESDOCSIS8, /* AES DOCSIS with increased buffer size by 8 */
+ TEST_DESDOCSIS,
+ TEST_DESDOCSIS4, /* DES DOCSIS with increased buffer size by 4 */
+ TEST_GCM, /* Additional field used by GCM, not translated */
+ TEST_CCM,
+ TEST_DES,
+ TEST_3DES,
+ TEST_PON_CNTR,
+ TEST_PON_NO_CNTR,
+ TEST_NUM_CIPHER_TESTS
+};
+
+/* This enum will be mostly translated to JOB_HASH_ALG
+ * (make sure to update h_alg_names list in print_times function) */
+enum test_hash_alg_e {
+ TEST_SHA1 = 1,
+ TEST_SHA_224,
+ TEST_SHA_256,
+ TEST_SHA_384,
+ TEST_SHA_512,
+ TEST_XCBC,
+ TEST_MD5,
+ TEST_HASH_CMAC, /* added here to be included in AES tests */
+ TEST_HASH_CMAC_BITLEN,
+ TEST_NULL_HASH,
+ TEST_HASH_GCM, /* Additional field used by GCM, not translated */
+ TEST_CUSTOM_HASH, /* unused */
+ TEST_HASH_CCM,
+ TEST_PON_CRC_BIP,
+ TEST_NUM_HASH_TESTS
+};
+
+/* Struct storing cipher parameters */
+struct params_s {
+ JOB_CIPHER_DIRECTION cipher_dir;
+ enum test_type_e test_type; /* AES, DOCSIS, GCM */
+ enum test_cipher_mode_e cipher_mode;
+ enum test_hash_alg_e hash_alg;
+ uint32_t aes_key_size;
+ uint32_t size_aes;
+ uint64_t aad_size;
+ uint32_t num_sizes;
+ uint32_t num_variants;
+ uint32_t core;
+};
+
+struct custom_job_params {
+ enum test_cipher_mode_e cipher_mode;
+ enum test_hash_alg_e hash_alg;
+ uint32_t aes_key_size;
+ JOB_CIPHER_DIRECTION cipher_dir;
+};
+
+union params {
+ enum arch_type_e arch_type;
+ struct custom_job_params job_params;
+};
+
+struct str_value_mapping {
+ const char *name;
+ union params values;
+};
+
+struct str_value_mapping arch_str_map[] = {
+ {.name = "SSE", .values.arch_type = ARCH_SSE },
+ {.name = "AVX", .values.arch_type = ARCH_AVX },
+ {.name = "AVX2", .values.arch_type = ARCH_AVX2 },
+ {.name = "AVX512", .values.arch_type = ARCH_AVX512 }
+};
+
+struct str_value_mapping cipher_algo_str_map[] = {
+ {
+ .name = "aes-cbc-128",
+ .values.job_params = {
+ .cipher_mode = TEST_CBC,
+ .aes_key_size = AES_128_BYTES
+ }
+ },
+ {
+ .name = "aes-cbc-192",
+ .values.job_params = {
+ .cipher_mode = TEST_CBC,
+ .aes_key_size = AES_192_BYTES
+ }
+ },
+ {
+ .name = "aes-cbc-256",
+ .values.job_params = {
+ .cipher_mode = TEST_CBC,
+ .aes_key_size = AES_256_BYTES
+ }
+ },
+ {
+ .name = "aes-ctr-128",
+ .values.job_params = {
+ .cipher_mode = TEST_CNTR,
+ .aes_key_size = AES_128_BYTES
+ }
+ },
+ {
+ .name = "aes-ctr-192",
+ .values.job_params = {
+ .cipher_mode = TEST_CNTR,
+ .aes_key_size = AES_192_BYTES
+ }
+ },
+ {
+ .name = "aes-ctr-256",
+ .values.job_params = {
+ .cipher_mode = TEST_CNTR,
+ .aes_key_size = AES_256_BYTES
+ }
+ },
+ {
+ .name = "aes-ctr8-128",
+ .values.job_params = {
+ .cipher_mode = TEST_CNTR8,
+ .aes_key_size = AES_128_BYTES
+ }
+ },
+ {
+ .name = "aes-ctr8-192",
+ .values.job_params = {
+ .cipher_mode = TEST_CNTR8,
+ .aes_key_size = AES_192_BYTES
+ }
+ },
+ {
+ .name = "aes-ctr8-256",
+ .values.job_params = {
+ .cipher_mode = TEST_CNTR8,
+ .aes_key_size = AES_256_BYTES
+ }
+ },
+ {
+ .name = "aes-ctr-bit-128",
+ .values.job_params = {
+ .cipher_mode = TEST_CNTR_BITLEN,
+ .aes_key_size = AES_128_BYTES
+ }
+ },
+ {
+ .name = "aes-ctr-bit-192",
+ .values.job_params = {
+ .cipher_mode = TEST_CNTR_BITLEN,
+ .aes_key_size = AES_192_BYTES
+ }
+ },
+ {
+ .name = "aes-ctr-bit-256",
+ .values.job_params = {
+ .cipher_mode = TEST_CNTR_BITLEN,
+ .aes_key_size = AES_256_BYTES
+ }
+ },
+ {
+ .name = "aes-ctr-bit4-128",
+ .values.job_params = {
+ .cipher_mode = TEST_CNTR_BITLEN4,
+ .aes_key_size = AES_128_BYTES
+ }
+ },
+ {
+ .name = "aes-ctr-bit4-192",
+ .values.job_params = {
+ .cipher_mode = TEST_CNTR_BITLEN4,
+ .aes_key_size = AES_192_BYTES
+ }
+ },
+ {
+ .name = "aes-ctr-bit4-256",
+ .values.job_params = {
+ .cipher_mode = TEST_CNTR_BITLEN4,
+ .aes_key_size = AES_256_BYTES
+ }
+ },
+ {
+ .name = "aes-ecb-128",
+ .values.job_params = {
+ .cipher_mode = TEST_ECB,
+ .aes_key_size = AES_128_BYTES
+ }
+ },
+ {
+ .name = "aes-ecb-192",
+ .values.job_params = {
+ .cipher_mode = TEST_ECB,
+ .aes_key_size = AES_192_BYTES
+ }
+ },
+ {
+ .name = "aes-ecb-256",
+ .values.job_params = {
+ .cipher_mode = TEST_ECB,
+ .aes_key_size = AES_256_BYTES
+ }
+ },
+ {
+ .name = "aes-docsis",
+ .values.job_params = {
+ .cipher_mode = TEST_AESDOCSIS,
+ .aes_key_size = AES_128_BYTES
+ }
+ },
+ {
+ .name = "aes-docsis8",
+ .values.job_params = {
+ .cipher_mode = TEST_AESDOCSIS8,
+ .aes_key_size = AES_128_BYTES
+ }
+ },
+ {
+ .name = "des-docsis",
+ .values.job_params = {
+ .cipher_mode = TEST_DESDOCSIS,
+ .aes_key_size = 8
+ }
+ },
+ {
+ .name = "des-docsis4",
+ .values.job_params = {
+ .cipher_mode = TEST_DESDOCSIS4,
+ .aes_key_size = 8
+ }
+ },
+ {
+ .name = "des-cbc",
+ .values.job_params = {
+ .cipher_mode = TEST_DES,
+ .aes_key_size = 8
+ }
+ },
+ {
+ .name = "3des-cbc",
+ .values.job_params = {
+ .cipher_mode = TEST_3DES,
+ .aes_key_size = 8
+ }
+ },
+ {
+ .name = "null",
+ .values.job_params = {
+ .cipher_mode = TEST_NULL_CIPHER,
+ .aes_key_size = 0
+ }
+ }
+};
+
+struct str_value_mapping hash_algo_str_map[] = {
+ {
+ .name = "sha1-hmac",
+ .values.job_params = {
+ .hash_alg = TEST_SHA1
+ }
+ },
+ {
+ .name = "sha224-hmac",
+ .values.job_params = {
+ .hash_alg = TEST_SHA_224
+ }
+ },
+ {
+ .name = "sha256-hmac",
+ .values.job_params = {
+ .hash_alg = TEST_SHA_256
+ }
+ },
+ {
+ .name = "sha384-hmac",
+ .values.job_params = {
+ .hash_alg = TEST_SHA_384
+ }
+ },
+ {
+ .name = "sha512-hmac",
+ .values.job_params = {
+ .hash_alg = TEST_SHA_512
+ }
+ },
+ {
+ .name = "aes-xcbc",
+ .values.job_params = {
+ .hash_alg = TEST_XCBC
+ }
+ },
+ {
+ .name = "md5-hmac",
+ .values.job_params = {
+ .hash_alg = TEST_MD5
+ }
+ },
+ {
+ .name = "aes-cmac",
+ .values.job_params = {
+ .hash_alg = TEST_HASH_CMAC
+ }
+ },
+ {
+ .name = "null",
+ .values.job_params = {
+ .hash_alg = TEST_NULL_HASH
+ }
+ },
+ {
+ .name = "aes-cmac-bitlen",
+ .values.job_params = {
+ .hash_alg = TEST_HASH_CMAC_BITLEN
+ }
+ },
+};
+
+struct str_value_mapping aead_algo_str_map[] = {
+ {
+ .name = "aes-gcm-128",
+ .values.job_params = {
+ .cipher_mode = TEST_GCM,
+ .hash_alg = TEST_HASH_GCM,
+ .aes_key_size = AES_128_BYTES
+ }
+ },
+ {
+ .name = "aes-gcm-192",
+ .values.job_params = {
+ .cipher_mode = TEST_GCM,
+ .hash_alg = TEST_HASH_GCM,
+ .aes_key_size = AES_192_BYTES
+ }
+ },
+ {
+ .name = "aes-gcm-256",
+ .values.job_params = {
+ .cipher_mode = TEST_GCM,
+ .hash_alg = TEST_HASH_GCM,
+ .aes_key_size = AES_256_BYTES
+ }
+ },
+ {
+ .name = "aes-ccm-128",
+ .values.job_params = {
+ .cipher_mode = TEST_CCM,
+ .hash_alg = TEST_HASH_CCM,
+ .aes_key_size = AES_128_BYTES
+ }
+ },
+ {
+ .name = "pon-128",
+ .values.job_params = {
+ .cipher_mode = TEST_PON_CNTR,
+ .hash_alg = TEST_PON_CRC_BIP,
+ .aes_key_size = AES_128_BYTES
+ }
+ },
+ {
+ .name = "pon-128-no-ctr",
+ .values.job_params = {
+ .cipher_mode = TEST_PON_NO_CNTR,
+ .hash_alg = TEST_PON_CRC_BIP,
+ .aes_key_size = 0
+ }
+ },
+};
+
+struct str_value_mapping cipher_dir_str_map[] = {
+ {.name = "encrypt", .values.job_params.cipher_dir = ENCRYPT},
+ {.name = "decrypt", .values.job_params.cipher_dir = DECRYPT}
+};
+
+/* This struct stores all information about a performed test case */
+struct variant_s {
+ uint32_t arch;
+ struct params_s params;
+ uint64_t *avg_times;
+};
+
+/* Struct storing information to be passed to threads */
+struct thread_info {
+ int print_info;
+ int core;
+ MB_MGR *p_mgr;
+} t_info[MAX_NUM_THREADS];
+
+enum cache_type_e {
+ WARM = 0,
+ COLD = 1
+};
+
+enum cache_type_e cache_type = WARM;
+
+const uint32_t auth_tag_length_bytes[19] = {
+ 12, /* SHA1 */
+ 14, /* SHA_224 */
+ 16, /* SHA_256 */
+ 24, /* SHA_384 */
+ 32, /* SHA_512 */
+ 12, /* AES_XCBC */
+ 12, /* MD5 */
+ 0, /* NULL_HASH */
+#ifndef NO_GCM
+ 16, /* AES_GMAC */
+#endif
+ 0, /* CUSTOM HASH */
+ 0, /* AES_CCM */
+ 16, /* AES_CMAC */
+ 20, /* PLAIN_SHA1 */
+ 28, /* PLAIN_SHA_224 */
+ 32, /* PLAIN_SHA_256 */
+ 48, /* PLAIN_SHA_384 */
+ 64, /* PLAIN_SHA_512 */
+ 4, /* AES_CMAC_BITLEN (3GPP) */
+ 8, /* PON */
+};
+uint32_t index_limit;
+uint32_t key_idxs[NUM_OFFSETS];
+uint32_t offsets[NUM_OFFSETS];
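+/* number of extra bytes hashed before the start of the cipher region */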
+uint32_t sha_size_incr = 24;
+
+enum range {
+ RANGE_MIN = 0,
+ RANGE_STEP,
+ RANGE_MAX,
+ NUM_RANGE
+};
+
+uint32_t job_sizes[NUM_RANGE] = {DEFAULT_JOB_SIZE_MIN,
+ DEFAULT_JOB_SIZE_STEP,
+ DEFAULT_JOB_SIZE_MAX};
+uint32_t job_iter = 0;
+uint64_t gcm_aad_size = DEFAULT_GCM_AAD_SIZE;
+uint64_t ccm_aad_size = DEFAULT_CCM_AAD_SIZE;
+
+struct custom_job_params custom_job_params = {
+ .cipher_mode = TEST_NULL_CIPHER,
+ .hash_alg = TEST_NULL_HASH,
+ .aes_key_size = 0,
+ .cipher_dir = ENCRYPT
+};
+
+uint8_t archs[NUM_ARCHS] = {1, 1, 1, 1}; /* uses all function sets */
+/* AES, DOCSIS, GCM, CCM, DES, 3DES, PON, CUSTOM */
+uint8_t test_types[NUM_TTYPES] = {1, 1, 1, 1, 1, 1, 1, 0};
+
+int use_gcm_job_api = 0;
+int use_unhalted_cycles = 0; /* read unhalted cycles instead of tsc */
+uint64_t rd_cycles_cost = 0; /* cost of reading unhalted cycles */
+uint64_t core_mask = 0; /* bitmap of selected cores */
+
+uint64_t flags = 0; /* flags passed to alloc_mb_mgr() */
+
+uint32_t iter_scale = ITER_SCALE_LONG;
+
+#define PB_INIT_SIZE 50
+#define PB_INIT_IDX 2 /* after \r and [ */
+static uint32_t PB_SIZE = PB_INIT_SIZE;
+static uint32_t PB_FINAL_IDX = (PB_INIT_SIZE + (PB_INIT_IDX - 1));
+static char prog_bar[PB_INIT_SIZE + 4]; /* 50 + 4 for \r, [, ], \0 */
+static uint32_t pb_idx = PB_INIT_IDX;
+static uint32_t pb_mod = 0;
+
+static int silent_progress_bar = 0;
+
+static void prog_bar_init(const uint32_t total_num)
+{
+ if (silent_progress_bar)
+ return;
+
+ if (total_num < PB_SIZE) {
+ PB_SIZE = total_num;
+ PB_FINAL_IDX = (PB_SIZE + (PB_INIT_IDX - 1));
+ }
+ pb_idx = PB_INIT_IDX;
+ pb_mod = total_num / PB_SIZE;
+
+ /* 32 dec == ascii ' ' char */
+ memset(prog_bar, 32, sizeof(prog_bar));
+ prog_bar[0] = '\r';
+ prog_bar[1] = '[';
+ prog_bar[PB_FINAL_IDX + 1] = ']';
+ prog_bar[PB_FINAL_IDX + 2] = '\0';
+
+ fputs(prog_bar, stderr);
+}
+
+static void prog_bar_fini(void)
+{
+ if (silent_progress_bar)
+ return;
+
+ prog_bar[PB_FINAL_IDX] = 'X'; /* set final X */
+ fputs(prog_bar, stderr);
+}
+
+static void prog_bar_update(const uint32_t num)
+{
+ if (silent_progress_bar)
+ return;
+
+ if ((pb_mod == 0) || num % pb_mod == 0) {
+ /* print 'X' roughly every (total / PB_SIZE)th variant */
+ prog_bar[pb_idx] = 'X';
+ fputs(prog_bar, stderr);
+
+ /* don't overrun final idx */
+ if (pb_idx < (PB_SIZE + 1))
+ pb_idx++;
+ } else {
+ const char pb_inter_chars[] = {'|', '/', '-', '\\'};
+ /* print intermediate chars */
+ prog_bar[pb_idx] = pb_inter_chars[num % DIM(pb_inter_chars)];
+ fputs(prog_bar, stderr);
+ }
+}
+
+/* Read unhalted cycles */
+__forceinline uint64_t read_cycles(const uint32_t core)
+{
+ uint64_t val = 0;
+
+ if (msr_read(core, IA32_MSR_CPU_UNHALTED_THREAD,
+ &val) != MACHINE_RETVAL_OK) {
+ fprintf(stderr, "Error reading cycles "
+ "counter on core %u!\n", core);
+ exit(EXIT_FAILURE);
+ }
+
+ return val;
+}
+
+/* Comparison method used by qsort; comparing explicitly avoids the
+ * truncation that casting a 64-bit difference to int could cause */
+static int compare_uint64_t(const void *a, const void *b)
+{
+ const uint64_t va = *(const uint64_t *)a;
+ const uint64_t vb = *(const uint64_t *)b;
+
+ return (va > vb) - (va < vb);
+}
+
+/* Get number of bits set in value */
+static unsigned bitcount(const uint64_t val)
+{
+ unsigned i, bits = 0;
+
+ for (i = 0; i < BITS(val); i++)
+ if (val & (1ULL << i))
+ bits++;
+
+ return bits;
+}
+
+/* Get the next core in core mask
+   Set last_core to negative to start from the beginning of core_mask */
+static int next_core(const uint64_t core_mask,
+ const int last_core)
+{
+ int core = 0;
+
+ if (last_core >= 0)
+ core = last_core;
+
+ while (((core_mask >> core) & 1) == 0) {
+ core++;
+
+ if (core >= (int)BITS(core_mask))
+ return -1;
+ }
+
+ return core;
+}
+
+/* Set CPU affinity for current thread */
+static int set_affinity(const int cpu)
+{
+ int ret = 0;
+ int num_cpus = 0;
+
+ /* Get number of cpus in the system */
+#ifdef _WIN32
+ GROUP_AFFINITY NewGroupAffinity;
+
+ memset(&NewGroupAffinity, 0, sizeof(GROUP_AFFINITY));
+ num_cpus = GetActiveProcessorCount(ALL_PROCESSOR_GROUPS);
+#else
+ num_cpus = sysconf(_SC_NPROCESSORS_CONF);
+#endif
+ if (num_cpus == 0) {
+ fprintf(stderr, "Zero processors in the system!");
+ return 1;
+ }
+
+ /* Check if selected core is valid */
+ if (cpu < 0 || cpu >= num_cpus) {
+ fprintf(stderr, "Invalid CPU selected! "
+ "Max valid CPU is %u\n", num_cpus - 1);
+ return 1;
+ }
+
+#ifdef _WIN32
+ NewGroupAffinity.Mask = 1ULL << cpu;
+ ret = !SetThreadGroupAffinity(GetCurrentThread(),
+ &NewGroupAffinity, NULL);
+#else
+ cpu_set_t cpuset;
+
+ CPU_ZERO(&cpuset);
+ CPU_SET(cpu, &cpuset);
+
+ /* Set affinity of current process to cpu */
+ ret = sched_setaffinity(0, sizeof(cpuset), &cpuset);
+#endif /* _WIN32 */
+
+ return ret;
+}
+
+/* Start counting unhalted cycles */
+static int start_cycles_ctr(const uint32_t core)
+{
+ int ret;
+
+ if (core >= BITS(core_mask))
+ return 1;
+
+ /* Disable cycles counter */
+ ret = msr_write(core, IA32_MSR_PERF_GLOBAL_CTR, 0);
+ if (ret != MACHINE_RETVAL_OK)
+ return ret;
+
+ /* Zero cycles counter */
+ ret = msr_write(core, IA32_MSR_CPU_UNHALTED_THREAD, 0);
+ if (ret != MACHINE_RETVAL_OK)
+ return ret;
+
+ /* Enable OS and user tracking in FixedCtr1 */
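+ /* 0x30 sets the OS and USR enable bits (bits 4 and 5) of the
+ * FixedCtr1 control nibble in IA32_FIXED_CTR_CTRL */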
+ ret = msr_write(core, IA32_MSR_FIXED_CTR_CTRL, 0x30);
+ if (ret != MACHINE_RETVAL_OK)
+ return ret;
+
+ /* Enable cycles counter */
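+ /* bit 33 of IA32_PERF_GLOBAL_CTRL enables fixed counter 1 */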
+ return msr_write(core, IA32_MSR_PERF_GLOBAL_CTR, (1ULL << 33));
+}
+
+/* Init MSR module */
+static int init_msr_mod(void)
+{
+ unsigned max_core_count = 0;
+#ifdef _WIN32
+ max_core_count = GetActiveProcessorCount(ALL_PROCESSOR_GROUPS);
+#else
+ max_core_count = sysconf(_SC_NPROCESSORS_CONF);
+#endif
+ if (max_core_count == 0) {
+ fprintf(stderr, "Zero processors in the system!");
+ return MACHINE_RETVAL_ERROR;
+ }
+
+ return machine_init(max_core_count);
+}
+
+/* Set the cost of reading unhalted cycles using RDMSR */
+static int set_unhalted_cycle_cost(const int core, uint64_t *value)
+{
+ uint64_t time1, time2;
+
+ if (value == NULL || core < 0)
+ return 1;
+
+ time1 = read_cycles(core);
+ time2 = read_cycles(core);
+
+ /* Calculate delta */
+ *value = (time2 - time1);
+
+ return 0;
+}
+
+/* Calculate the general cost of reading unhalted cycles (median) */
+static int set_avg_unhalted_cycle_cost(const int core, uint64_t *value)
+{
+ unsigned i;
+ uint64_t cycles[10];
+
+ if (value == NULL || core_mask == 0 || core < 0)
+ return 1;
+
+ /* Fill cycles table with read cost values */
+ for (i = 0; i < DIM(cycles); i++)
+ if (set_unhalted_cycle_cost(core, &cycles[i]) != 0)
+ return 1;
+
+ /* sort array */
+ qsort(cycles, DIM(cycles), sizeof(uint64_t), compare_uint64_t);
+
+ /* set median cost */
+ *value = cycles[DIM(cycles)/2];
+
+ return 0;
+}
+
+/* Freeing allocated memory */
+static void free_mem(uint8_t **p_buffer, uint128_t **p_keys)
+{
+ uint128_t *keys = NULL;
+ uint8_t *buf = NULL;
+
+ if (p_keys != NULL) {
+ keys = *p_keys;
+ *p_keys = NULL;
+ }
+
+ if (p_buffer != NULL) {
+ buf = *p_buffer;
+ *p_buffer = NULL;
+ }
+
+#ifdef LINUX
+ if (keys != NULL)
+ free(keys);
+
+ if (buf != NULL)
+ free(buf);
+#else
+ if (keys != NULL)
+ _aligned_free(keys);
+
+ if (buf != NULL)
+ _aligned_free(buf);
+#endif
+}
+
+static const void *
+get_key_pointer(const uint32_t index, const uint128_t *p_keys)
+{
+ return (const void *) &p_keys[key_idxs[index]];
+}
+
+static uint8_t *get_src_buffer(const uint32_t index, uint8_t *p_buffer)
+{
+ return &p_buffer[offsets[index]];
+}
+
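+/* dst points sha_size_incr bytes into the buffer, matching
+ * cipher_start_src_offset_in_bytes so ciphering is done in place */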
+static uint8_t *get_dst_buffer(const uint32_t index, uint8_t *p_buffer)
+{
+ return &p_buffer[offsets[index] + sha_size_incr];
+}
+
+static uint32_t get_next_index(uint32_t index)
+{
+ if (++index >= index_limit)
+ index = 0;
+ return index;
+}
+
+static void init_buf(void *pb, const size_t length)
+{
+ const size_t n = length / sizeof(uint64_t);
+ size_t i = 0;
+
+ if (pb == NULL)
+ return;
+
+ for (i = 0; i < n; i++)
+ ((uint64_t *)pb)[i] = (uint64_t) rand();
+}
+
+/*
+ * Packet and key memory allocation and initialization.
+ * init_offsets() needs to be called prior to this so that
+ * index_limit is set up according to the hot/cold selection.
+ */
+static void init_mem(uint8_t **p_buffer, uint128_t **p_keys)
+{
+ const size_t bufs_size = index_limit * REGION_SIZE;
+ const size_t keys_size = index_limit * KEYS_PER_JOB * sizeof(uint128_t);
+ const size_t alignment = 64;
+ uint8_t *buf = NULL;
+ uint128_t *keys = NULL;
+
+ if (p_keys == NULL || p_buffer == NULL) {
+ fprintf(stderr, "Internal buffer allocation error!\n");
+ exit(EXIT_FAILURE);
+ }
+
+#ifdef LINUX
+ buf = (uint8_t *) memalign(alignment, bufs_size);
+#else
+ buf = (uint8_t *) _aligned_malloc(bufs_size, alignment);
+#endif
+ if (!buf) {
+ fprintf(stderr, "Could not malloc buf\n");
+ exit(EXIT_FAILURE);
+ }
+
+#ifdef LINUX
+ keys = (uint128_t *) memalign(alignment, keys_size);
+#else
+ keys = (uint128_t *) _aligned_malloc(keys_size, alignment);
+#endif
+ if (!keys) {
+ fprintf(stderr, "Could not allocate memory for keys!\n");
+ free_mem(&buf, &keys);
+ exit(EXIT_FAILURE);
+ }
+
+ *p_keys = keys;
+ *p_buffer = buf;
+
+ init_buf(buf, bufs_size);
+ init_buf(keys, keys_size);
+}
+
+/*
+ * Initialize packet buffer and keys offsets from
+ * the start of the respective buffers
+ */
+static void init_offsets(const enum cache_type_e ctype)
+{
+ if (ctype == COLD) {
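+ /*
+ * COLD: place each buffer at a random cache-line-aligned offset
+ * within its region and shuffle the access order below, so data
+ * is unlikely to be resident in cache when it is reused.
+ */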
+ uint32_t i;
+
+ for (i = 0; i < NUM_OFFSETS; i++) {
+ offsets[i] = (i * REGION_SIZE) + (rand() & 0x3C0);
+ key_idxs[i] = i * KEYS_PER_JOB;
+ }
+
+ /* swap the entries at random */
+ for (i = 0; i < NUM_OFFSETS; i++) {
+ const uint32_t swap_idx = (rand() % NUM_OFFSETS);
+ const uint32_t tmp_offset = offsets[swap_idx];
+ const uint32_t tmp_keyidx = key_idxs[swap_idx];
+
+ offsets[swap_idx] = offsets[i];
+ key_idxs[swap_idx] = key_idxs[i];
+ offsets[i] = tmp_offset;
+ key_idxs[i] = tmp_keyidx;
+ }
+
+ index_limit = NUM_OFFSETS;
+ } else { /* WARM */
+ uint32_t i;
+
+ index_limit = 16;
+
+ for (i = 0; i < index_limit; i++) {
+ /*
+ * Each buffer starts at a different offset from
+ * the start of the page.
+ * The optimal difference between offsets was
+ * determined to be 4 cache lines.
+ */
+ const uint32_t offset_step = (4 * 64);
+ const uint32_t L1_way_size = 4096;
+
+ key_idxs[i] = i * KEYS_PER_JOB;
+ offsets[i] = i * REGION_SIZE +
+ ((i * offset_step) & (L1_way_size - 1));
+ }
+ }
+}
+
+/*
+ * This function translates enum test_cipher_mode_e into the
+ * JOB_CIPHER_MODE used by the ipsec_mb library
+ */
+static JOB_CIPHER_MODE
+translate_cipher_mode(const enum test_cipher_mode_e test_mode)
+{
+ JOB_CIPHER_MODE c_mode = NULL_CIPHER;
+
+ switch (test_mode) {
+ case TEST_CBC:
+ c_mode = CBC;
+ break;
+ case TEST_CNTR:
+ case TEST_CNTR8:
+ c_mode = CNTR;
+ break;
+ case TEST_CNTR_BITLEN:
+ case TEST_CNTR_BITLEN4:
+ c_mode = CNTR_BITLEN;
+ break;
+ case TEST_ECB:
+ c_mode = ECB;
+ break;
+ case TEST_NULL_CIPHER:
+ c_mode = NULL_CIPHER;
+ break;
+ case TEST_AESDOCSIS:
+ case TEST_AESDOCSIS8:
+ c_mode = DOCSIS_SEC_BPI;
+ break;
+ case TEST_DESDOCSIS:
+ case TEST_DESDOCSIS4:
+ c_mode = DOCSIS_DES;
+ break;
+ case TEST_GCM:
+ c_mode = GCM;
+ break;
+ case TEST_CCM:
+ c_mode = CCM;
+ break;
+ case TEST_DES:
+ c_mode = DES;
+ break;
+ case TEST_3DES:
+ c_mode = DES3;
+ break;
+ case TEST_PON_CNTR:
+ case TEST_PON_NO_CNTR:
+ c_mode = PON_AES_CNTR;
+ break;
+ default:
+ break;
+ }
+ return c_mode;
+}
+
+/* Performs test using AES_HMAC or DOCSIS */
+static uint64_t
+do_test(MB_MGR *mb_mgr, struct params_s *params,
+ const uint32_t num_iter, uint8_t *p_buffer, uint128_t *p_keys)
+{
+ JOB_AES_HMAC *job;
+ JOB_AES_HMAC job_template;
+ uint32_t i;
+ static uint32_t index = 0;
+ static DECLARE_ALIGNED(uint128_t iv, 16);
+ static uint32_t ipad[5], opad[5], digest[3];
+ static DECLARE_ALIGNED(uint32_t k1_expanded[11 * 4], 16);
+ static DECLARE_ALIGNED(uint8_t k2[16], 16);
+ static DECLARE_ALIGNED(uint8_t k3[16], 16);
+ static DECLARE_ALIGNED(struct gcm_key_data gdata_key, 512);
+ uint64_t xgem_hdr = 0;
+ uint32_t size_aes;
+ uint64_t time = 0;
+ uint32_t aux;
+
+ if ((params->cipher_mode == TEST_AESDOCSIS8) ||
+ (params->cipher_mode == TEST_CNTR8))
+ size_aes = params->size_aes + 8;
+ else if (params->cipher_mode == TEST_DESDOCSIS4)
+ size_aes = params->size_aes + 4;
+ else
+ size_aes = params->size_aes;
+
+ if (params->cipher_mode == TEST_CNTR_BITLEN)
+ job_template.msg_len_to_cipher_in_bits = size_aes * 8;
+ else if (params->cipher_mode == TEST_CNTR_BITLEN4)
+ job_template.msg_len_to_cipher_in_bits = size_aes * 8 - 4;
+ else
+ job_template.msg_len_to_cipher_in_bytes = size_aes;
+
+ job_template.msg_len_to_hash_in_bytes = size_aes + sha_size_incr;
+ job_template.hash_start_src_offset_in_bytes = 0;
+ job_template.cipher_start_src_offset_in_bytes = sha_size_incr;
+ job_template.iv = (uint8_t *) &iv;
+ job_template.iv_len_in_bytes = 16;
+
+ job_template.auth_tag_output = (uint8_t *) digest;
+
+ switch (params->hash_alg) {
+ case TEST_XCBC:
+ job_template.u.XCBC._k1_expanded = k1_expanded;
+ job_template.u.XCBC._k2 = k2;
+ job_template.u.XCBC._k3 = k3;
+ job_template.hash_alg = AES_XCBC;
+ break;
+ case TEST_HASH_CCM:
+ job_template.hash_alg = AES_CCM;
+ break;
+ case TEST_HASH_GCM:
+ job_template.hash_alg = AES_GMAC;
+ break;
+ case TEST_NULL_HASH:
+ job_template.hash_alg = NULL_HASH;
+ break;
+ case TEST_HASH_CMAC:
+ job_template.u.CMAC._key_expanded = k1_expanded;
+ job_template.u.CMAC._skey1 = k2;
+ job_template.u.CMAC._skey2 = k3;
+ job_template.hash_alg = AES_CMAC;
+ break;
+ case TEST_HASH_CMAC_BITLEN:
+ job_template.u.CMAC._key_expanded = k1_expanded;
+ job_template.u.CMAC._skey1 = k2;
+ job_template.u.CMAC._skey2 = k3;
+ /*
+ * The bit-level CMAC version works on bit lengths (the byte
+ * length is converted to bits and decreased by 4 bits
+ * to force the CMAC bitlen path)
+ */
+ job_template.msg_len_to_hash_in_bits =
+ (job_template.msg_len_to_hash_in_bytes * 8) - 4;
+ job_template.hash_alg = AES_CMAC_BITLEN;
+ break;
+ case TEST_PON_CRC_BIP:
+ job_template.hash_alg = PON_CRC_BIP;
+ job_template.msg_len_to_hash_in_bytes = size_aes + 8;
+ job_template.cipher_start_src_offset_in_bytes = 8;
+ if (params->cipher_mode == TEST_PON_NO_CNTR)
+ job_template.msg_len_to_cipher_in_bytes = 0;
+ break;
+ default:
+ /* HMAC hash algorithms: SHA-1/224/256/384/512 or MD5 */
+ job_template.u.HMAC._hashed_auth_key_xor_ipad =
+ (uint8_t *) ipad;
+ job_template.u.HMAC._hashed_auth_key_xor_opad =
+ (uint8_t *) opad;
+ job_template.hash_alg = (JOB_HASH_ALG) params->hash_alg;
+ break;
+ }
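+ /* the tag length table is indexed with hash_alg - 1 since
+ * JOB_HASH_ALG values start at 1 */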
+ job_template.auth_tag_output_len_in_bytes =
+ (uint64_t) auth_tag_length_bytes[job_template.hash_alg - 1];
+
+ job_template.cipher_direction = params->cipher_dir;
+
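+ /*
+ * Set the chain order: encryption normally runs cipher first and
+ * then hash, decryption the reverse; CCM is the opposite because
+ * the digest is computed over the plaintext.
+ */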
+ if (params->cipher_mode == TEST_NULL_CIPHER) {
+ job_template.chain_order = HASH_CIPHER;
+ } else if (params->cipher_mode == TEST_CCM) {
+ if (job_template.cipher_direction == ENCRYPT)
+ job_template.chain_order = HASH_CIPHER;
+ else
+ job_template.chain_order = CIPHER_HASH;
+ } else {
+ if (job_template.cipher_direction == ENCRYPT)
+ job_template.chain_order = CIPHER_HASH;
+ else
+ job_template.chain_order = HASH_CIPHER;
+ }
+
+ /* Translating enum to the API's one */
+ job_template.cipher_mode = translate_cipher_mode(params->cipher_mode);
+ job_template.aes_key_len_in_bytes = params->aes_key_size;
+ if (job_template.cipher_mode == GCM) {
+ uint8_t key[32];
+
+ switch (params->aes_key_size) {
+ case AES_128_BYTES:
+ IMB_AES128_GCM_PRE(mb_mgr, key, &gdata_key);
+ break;
+ case AES_192_BYTES:
+ IMB_AES192_GCM_PRE(mb_mgr, key, &gdata_key);
+ break;
+ case AES_256_BYTES:
+ default:
+ IMB_AES256_GCM_PRE(mb_mgr, key, &gdata_key);
+ break;
+ }
+ job_template.aes_enc_key_expanded = &gdata_key;
+ job_template.aes_dec_key_expanded = &gdata_key;
+ job_template.u.GCM.aad_len_in_bytes = params->aad_size;
+ job_template.iv_len_in_bytes = 12;
+ } else if (job_template.cipher_mode == CCM) {
+ job_template.msg_len_to_cipher_in_bytes = size_aes;
+ job_template.msg_len_to_hash_in_bytes = size_aes;
+ job_template.hash_start_src_offset_in_bytes = 0;
+ job_template.cipher_start_src_offset_in_bytes = 0;
+ job_template.u.CCM.aad_len_in_bytes = params->aad_size;
+ job_template.iv_len_in_bytes = 13;
+ } else if (job_template.cipher_mode == DES ||
+ job_template.cipher_mode == DOCSIS_DES) {
+ job_template.aes_key_len_in_bytes = 8;
+ job_template.iv_len_in_bytes = 8;
+ } else if (job_template.cipher_mode == DES3) {
+ job_template.aes_key_len_in_bytes = 24;
+ job_template.iv_len_in_bytes = 8;
+ }
+
+
+ if (job_template.hash_alg == PON_CRC_BIP) {
+ /* create XGEM header template */
+ const uint64_t pli =
+ (job_template.msg_len_to_cipher_in_bytes << 2) & 0xffff;
+
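+ /* byte-swap the 16-bit PLI field so it lands in network byte
+ * order at the start of the buffer */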
+ xgem_hdr = ((pli >> 8) & 0xff) | ((pli & 0xff) << 8);
+ }
+
+#ifndef _WIN32
+ if (use_unhalted_cycles)
+ time = read_cycles(params->core);
+ else
+#endif
+ time = __rdtscp(&aux);
+
+ for (i = 0; i < num_iter; i++) {
+ job = IMB_GET_NEXT_JOB(mb_mgr);
+ *job = job_template;
+
+ if (job->hash_alg == PON_CRC_BIP) {
+ uint64_t *p_src =
+ (uint64_t *) get_src_buffer(index, p_buffer);
+
+ job->src = (const uint8_t *)p_src;
+ p_src[0] = xgem_hdr;
+ } else {
+ job->src = get_src_buffer(index, p_buffer);
+ }
+ job->dst = get_dst_buffer(index, p_buffer);
+ if (job->cipher_mode == GCM) {
+ job->u.GCM.aad = job->src;
+ } else if (job->cipher_mode == CCM) {
+ job->u.CCM.aad = job->src;
+ job->aes_enc_key_expanded = job->aes_dec_key_expanded =
+ (const uint32_t *) get_key_pointer(index,
+ p_keys);
+ } else if (job->cipher_mode == DES3) {
+ static const void *ks_ptr[3];
+
+ ks_ptr[0] = ks_ptr[1] = ks_ptr[2] =
+ get_key_pointer(index, p_keys);
+ job->aes_enc_key_expanded =
+ job->aes_dec_key_expanded = ks_ptr;
+ } else {
+ job->aes_enc_key_expanded = job->aes_dec_key_expanded =
+ (const uint32_t *) get_key_pointer(index,
+ p_keys);
+ }
+
+ index = get_next_index(index);
+#ifdef DEBUG
+ job = IMB_SUBMIT_JOB(mb_mgr);
+#else
+ job = IMB_SUBMIT_JOB_NOCHECK(mb_mgr);
+#endif
+ while (job) {
+#ifdef DEBUG
+ if (job->status != STS_COMPLETED)
+ fprintf(stderr, "failed job, status:%d\n",
+ job->status);
+#endif
+ job = IMB_GET_COMPLETED_JOB(mb_mgr);
+ }
+ }
+
+ while ((job = IMB_FLUSH_JOB(mb_mgr))) {
+#ifdef DEBUG
+ if (job->status != STS_COMPLETED)
+ fprintf(stderr, "failed job, status:%d\n", job->status);
+#endif
+ }
+
+#ifndef _WIN32
+ if (use_unhalted_cycles)
+ time = (read_cycles(params->core) - rd_cycles_cost) - time;
+ else
+#endif
+ time = __rdtscp(&aux) - time;
+
+ return time / num_iter;
+}
+
+/* Performs test using GCM */
+static uint64_t
+do_test_gcm(struct params_s *params,
+ const uint32_t num_iter, MB_MGR *mb_mgr,
+ uint8_t *p_buffer, uint128_t *p_keys)
+{
+ static DECLARE_ALIGNED(struct gcm_key_data gdata_key, 512);
+ static DECLARE_ALIGNED(struct gcm_context_data gdata_ctx, 64);
+ uint8_t *key;
+ static uint32_t index = 0;
+ uint32_t size_aes = params->size_aes;
+ uint32_t i;
+ uint8_t *aad = NULL;
+ uint8_t auth_tag[12];
+ DECLARE_ALIGNED(uint8_t iv[16], 16);
+ uint64_t time = 0;
+ uint32_t aux;
+
+ key = (uint8_t *) malloc(sizeof(uint8_t) * params->aes_key_size);
+ if (!key) {
+ fprintf(stderr, "Could not malloc key\n");
+ free_mem(&p_buffer, &p_keys);
+ exit(EXIT_FAILURE);
+ }
+
+ aad = (uint8_t *) malloc(sizeof(uint8_t) * params->aad_size);
+ if (!aad) {
+ free(key);
+ fprintf(stderr, "Could not malloc AAD\n");
+ free_mem(&p_buffer, &p_keys);
+ exit(EXIT_FAILURE);
+ }
+
+ switch (params->aes_key_size) {
+ case AES_128_BYTES:
+ IMB_AES128_GCM_PRE(mb_mgr, key, &gdata_key);
+ break;
+ case AES_192_BYTES:
+ IMB_AES192_GCM_PRE(mb_mgr, key, &gdata_key);
+ break;
+ case AES_256_BYTES:
+ default:
+ IMB_AES256_GCM_PRE(mb_mgr, key, &gdata_key);
+ break;
+ }
+
+ if (params->cipher_dir == ENCRYPT) {
+#ifndef _WIN32
+ if (use_unhalted_cycles)
+ time = read_cycles(params->core);
+ else
+#endif
+ time = __rdtscp(&aux);
+
+ if (params->aes_key_size == AES_128_BYTES) {
+ for (i = 0; i < num_iter; i++) {
+ uint8_t *pb = get_dst_buffer(index, p_buffer);
+
+ IMB_AES128_GCM_ENC(mb_mgr, &gdata_key,
+ &gdata_ctx,
+ pb,
+ pb,
+ size_aes, iv,
+ aad, params->aad_size,
+ auth_tag, sizeof(auth_tag));
+ index = get_next_index(index);
+ }
+ } else if (params->aes_key_size == AES_192_BYTES) {
+ for (i = 0; i < num_iter; i++) {
+ uint8_t *pb = get_dst_buffer(index, p_buffer);
+
+ IMB_AES192_GCM_ENC(mb_mgr, &gdata_key,
+ &gdata_ctx,
+ pb,
+ pb,
+ size_aes, iv,
+ aad, params->aad_size,
+ auth_tag, sizeof(auth_tag));
+ index = get_next_index(index);
+ }
+ } else { /* 256 */
+ for (i = 0; i < num_iter; i++) {
+ uint8_t *pb = get_dst_buffer(index, p_buffer);
+
+ IMB_AES256_GCM_ENC(mb_mgr, &gdata_key,
+ &gdata_ctx,
+ pb,
+ pb,
+ size_aes, iv,
+ aad, params->aad_size,
+ auth_tag, sizeof(auth_tag));
+ index = get_next_index(index);
+ }
+ }
+#ifndef _WIN32
+ if (use_unhalted_cycles)
+ time = (read_cycles(params->core) -
+ rd_cycles_cost) - time;
+ else
+#endif
+ time = __rdtscp(&aux) - time;
+ } else { /*DECRYPT*/
+#ifndef _WIN32
+ if (use_unhalted_cycles)
+ time = read_cycles(params->core);
+ else
+#endif
+ time = __rdtscp(&aux);
+
+ if (params->aes_key_size == AES_128_BYTES) {
+ for (i = 0; i < num_iter; i++) {
+ uint8_t *pb = get_dst_buffer(index, p_buffer);
+
+ IMB_AES128_GCM_DEC(mb_mgr, &gdata_key,
+ &gdata_ctx,
+ pb,
+ pb,
+ size_aes, iv,
+ aad, params->aad_size,
+ auth_tag, sizeof(auth_tag));
+ index = get_next_index(index);
+ }
+ } else if (params->aes_key_size == AES_192_BYTES) {
+ for (i = 0; i < num_iter; i++) {
+ uint8_t *pb = get_dst_buffer(index, p_buffer);
+
+ IMB_AES192_GCM_DEC(mb_mgr, &gdata_key,
+ &gdata_ctx,
+ pb,
+ pb,
+ size_aes, iv,
+ aad, params->aad_size,
+ auth_tag, sizeof(auth_tag));
+ index = get_next_index(index);
+ }
+ } else { /* 256 */
+ for (i = 0; i < num_iter; i++) {
+ uint8_t *pb = get_dst_buffer(index, p_buffer);
+
+ IMB_AES256_GCM_DEC(mb_mgr, &gdata_key,
+ &gdata_ctx,
+ pb,
+ pb,
+ size_aes, iv,
+ aad, params->aad_size,
+ auth_tag, sizeof(auth_tag));
+ index = get_next_index(index);
+ }
+ }
+#ifndef _WIN32
+ if (use_unhalted_cycles)
+ time = (read_cycles(params->core) -
+ rd_cycles_cost) - time;
+ else
+#endif
+ time = __rdtscp(&aux) - time;
+ }
+
+ free(key);
+ free(aad);
+ return time / num_iter;
+}
+
+/* Computes the mean of a set of times after dropping the bottom and top quarters */
+static uint64_t
+mean_median(uint64_t *array, uint32_t size,
+ uint8_t *p_buffer, uint128_t *p_keys)
+{
+ const uint32_t quarter = size / 4;
+ uint32_t i;
+ uint64_t sum;
+
+ /* these are single-threaded runs, so we skip the
+ * hardware-thread-related skew clipping, i.e. we do not
+ * apply the "ignore first and last eighth" rule
+ */
+
+ /* ignore lowest and highest quarter */
+ qsort(array, size, sizeof(uint64_t), compare_uint64_t);
+
+ /* dropping the bottom and top quarters
+ * after sorting to remove noise/variations
+ */
+ array += quarter;
+ size -= quarter * 2;
+
+
+ if ((size == 0) || (size & 0x80000000)) {
+ fprintf(stderr, "Not enough data points!\n");
+ free_mem(&p_buffer, &p_keys);
+ exit(EXIT_FAILURE);
+ }
+ sum = 0;
+ for (i = 0; i < size; i++)
+ sum += array[i];
+
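+ /* integer division rounded to the nearest value */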
+ sum = (sum + size / 2) / size;
+ return sum;
+}
+
+/* Runs test for each buffer size and stores averaged execution time */
+static void
+process_variant(MB_MGR *mgr, const uint32_t arch, struct params_s *params,
+ struct variant_s *variant_ptr, const uint32_t run,
+ uint8_t *p_buffer, uint128_t *p_keys)
+{
+ const uint32_t sizes = params->num_sizes;
+ uint64_t *times = &variant_ptr->avg_times[run];
+ uint32_t sz;
+
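+ /* avg_times is laid out size-major: avg_times[sz * NUM_RUNS + run] */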
+ for (sz = 0; sz < sizes; sz++) {
+ const uint32_t size_aes = job_sizes[RANGE_MIN] +
+ (sz * job_sizes[RANGE_STEP]);
+ uint32_t num_iter;
+
+ params->aad_size = 0;
+ if (params->cipher_mode == TEST_GCM)
+ params->aad_size = gcm_aad_size;
+
+ if (params->cipher_mode == TEST_CCM)
+ params->aad_size = ccm_aad_size;
+
+ /*
+ * If job size == 0, check AAD size
+ * (only allowed for GCM/CCM)
+ */
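+ /* scale the iteration count inversely with size so that roughly
+ * iter_scale bytes are processed per data point */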
+ if (size_aes == 0 && params->aad_size != 0)
+ num_iter = (iter_scale >= (uint32_t)params->aad_size) ?
+ (iter_scale / (uint32_t)params->aad_size) :
+ 1;
+ else if (size_aes != 0)
+ num_iter = (iter_scale >= size_aes) ?
+ (iter_scale / size_aes) : 1;
+ else
+ num_iter = iter_scale;
+
+ params->size_aes = size_aes;
+ if (params->cipher_mode == TEST_GCM && (!use_gcm_job_api)) {
+ if (job_iter == 0)
+ *times = do_test_gcm(params, 2 * num_iter, mgr,
+ p_buffer, p_keys);
+ else
+ *times = do_test_gcm(params, job_iter, mgr,
+ p_buffer, p_keys);
+ } else {
+ if (job_iter == 0)
+ *times = do_test(mgr, params, num_iter,
+ p_buffer, p_keys);
+ else
+ *times = do_test(mgr, params, job_iter,
+ p_buffer, p_keys);
+ }
+ times += NUM_RUNS;
+ }
+
+ variant_ptr->params = *params;
+ variant_ptr->arch = arch;
+}
+
+/* Sets cipher mode, hash algorithm */
+static void
+do_variants(MB_MGR *mgr, const uint32_t arch, struct params_s *params,
+ const uint32_t run, struct variant_s **variant_ptr,
+ uint32_t *variant, uint8_t *p_buffer, uint128_t *p_keys,
+ const int print_info)
+{
+ uint32_t hash_alg;
+ uint32_t h_start = TEST_SHA1;
+ uint32_t h_end = TEST_NULL_HASH;
+ uint32_t c_mode;
+ uint32_t c_start = TEST_CBC;
+ uint32_t c_end = TEST_NULL_CIPHER;
+
+ switch (params->test_type) {
+ case TTYPE_AES_DOCSIS:
+ h_start = TEST_NULL_HASH;
+ c_start = TEST_AESDOCSIS;
+ c_end = TEST_DESDOCSIS4;
+ break;
+ case TTYPE_AES_GCM:
+ h_start = TEST_HASH_GCM;
+ h_end = TEST_HASH_GCM;
+ c_start = TEST_GCM;
+ c_end = TEST_GCM;
+ break;
+ case TTYPE_AES_CCM:
+ h_start = TEST_HASH_CCM;
+ h_end = TEST_HASH_CCM;
+ c_start = TEST_CCM;
+ c_end = TEST_CCM;
+ break;
+ case TTYPE_AES_DES:
+ h_start = TEST_NULL_HASH;
+ h_end = TEST_NULL_HASH;
+ c_start = TEST_DES;
+ c_end = TEST_DES;
+ break;
+ case TTYPE_AES_3DES:
+ h_start = TEST_NULL_HASH;
+ h_end = TEST_NULL_HASH;
+ c_start = TEST_3DES;
+ c_end = TEST_3DES;
+ break;
+ case TTYPE_PON:
+ h_start = TEST_PON_CRC_BIP;
+ h_end = TEST_PON_CRC_BIP;
+ c_start = TEST_PON_CNTR;
+ c_end = TEST_PON_NO_CNTR;
+ break;
+ case TTYPE_CUSTOM:
+ h_start = params->hash_alg;
+ h_end = params->hash_alg;
+ c_start = params->cipher_mode;
+ c_end = params->cipher_mode;
+ break;
+ default:
+ break;
+ }
+
+ for (c_mode = c_start; c_mode <= c_end; c_mode++) {
+ params->cipher_mode = (enum test_cipher_mode_e) c_mode;
+ for (hash_alg = h_start; hash_alg <= h_end; hash_alg++) {
+ params->hash_alg = (enum test_hash_alg_e) hash_alg;
+ process_variant(mgr, arch, params, *variant_ptr, run,
+ p_buffer, p_keys);
+ /* update and print progress bar */
+ if (print_info)
+ prog_bar_update(*variant);
+ (*variant)++;
+ (*variant_ptr)++;
+ }
+ }
+}
+
+/* Sets cipher direction and key size */
+static void
+run_dir_test(MB_MGR *mgr, const uint32_t arch, struct params_s *params,
+ const uint32_t run, struct variant_s **variant_ptr,
+ uint32_t *variant, uint8_t *p_buffer, uint128_t *p_keys,
+ const int print_info)
+{
+ uint32_t dir;
+ uint32_t k; /* Key size */
+ uint32_t limit = AES_256_BYTES; /* Key size value limit */
+
+ if (params->test_type == TTYPE_AES_DOCSIS ||
+ params->test_type == TTYPE_AES_DES ||
+ params->test_type == TTYPE_AES_3DES ||
+ params->test_type == TTYPE_PON ||
+ params->test_type == TTYPE_AES_CCM)
+ limit = AES_128_BYTES;
+
+ switch (arch) {
+ case 0:
+ init_mb_mgr_sse(mgr);
+ break;
+ case 1:
+ init_mb_mgr_avx(mgr);
+ break;
+ case 2:
+ init_mb_mgr_avx2(mgr);
+ break;
+ default:
+ case 3:
+ init_mb_mgr_avx512(mgr);
+ break;
+ }
+
+ if (params->test_type == TTYPE_CUSTOM) {
+ params->cipher_dir = custom_job_params.cipher_dir;
+ params->aes_key_size = custom_job_params.aes_key_size;
+ params->cipher_mode = custom_job_params.cipher_mode;
+ params->hash_alg = custom_job_params.hash_alg;
+ do_variants(mgr, arch, params, run, variant_ptr,
+ variant, p_buffer, p_keys, print_info);
+ return;
+ }
+
+ for (dir = ENCRYPT; dir <= DECRYPT; dir++) {
+ params->cipher_dir = (JOB_CIPHER_DIRECTION) dir;
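+ /* key size steps by 8 bytes: 16 (AES-128), 24 (AES-192),
+ * 32 (AES-256), up to the per-test limit */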
+ for (k = AES_128_BYTES; k <= limit; k += 8) {
+ params->aes_key_size = k;
+ do_variants(mgr, arch, params, run, variant_ptr,
+ variant, p_buffer, p_keys, print_info);
+ }
+ }
+}
+
+/* Generates output containing averaged times for each test variant */
+static void
+print_times(struct variant_s *variant_list, struct params_s *params,
+ const uint32_t total_variants, uint8_t *p_buffer, uint128_t *p_keys)
+{
+ const uint32_t sizes = params->num_sizes;
+ uint32_t col;
+ uint32_t sz;
+
+ /* Temporary variables */
+ struct params_s par;
+ uint8_t c_mode;
+ uint8_t c_dir;
+ uint8_t h_alg;
+ const char *func_names[4] = {
+ "SSE", "AVX", "AVX2", "AVX512"
+ };
+ const char *c_mode_names[TEST_NUM_CIPHER_TESTS - 1] = {
+ "CBC", "CNTR", "CNTR+8", "CNTR_BITLEN", "CNTR_BITLEN4", "ECB",
+ "NULL_CIPHER", "DOCAES", "DOCAES+8", "DOCDES", "DOCDES+4",
+ "GCM", "CCM", "DES", "3DES", "PON", "PON_NO_CTR"
+ };
+ const char *c_dir_names[2] = {
+ "ENCRYPT", "DECRYPT"
+ };
+ const char *h_alg_names[TEST_NUM_HASH_TESTS - 1] = {
+ "SHA1", "SHA_224", "SHA_256", "SHA_384", "SHA_512", "XCBC",
+ "MD5", "CMAC", "CMAC_BITLEN", "NULL_HASH", "GCM", "CUSTOM",
+ "CCM", "BIP-CRC32"
+ };
+ printf("ARCH");
+ for (col = 0; col < total_variants; col++)
+ printf("\t%s", func_names[variant_list[col].arch]);
+ printf("\n");
+ printf("CIPHER");
+ for (col = 0; col < total_variants; col++) {
+ par = variant_list[col].params;
+ c_mode = par.cipher_mode - TEST_CBC;
+ printf("\t%s", c_mode_names[c_mode]);
+ }
+ printf("\n");
+ printf("DIR");
+ for (col = 0; col < total_variants; col++) {
+ par = variant_list[col].params;
+ c_dir = par.cipher_dir - ENCRYPT;
+ printf("\t%s", c_dir_names[c_dir]);
+ }
+ printf("\n");
+ printf("HASH_ALG");
+ for (col = 0; col < total_variants; col++) {
+ par = variant_list[col].params;
+ h_alg = par.hash_alg - TEST_SHA1;
+ printf("\t%s", h_alg_names[h_alg]);
+ }
+ printf("\n");
+ printf("KEY_SIZE");
+ for (col = 0; col < total_variants; col++) {
+ par = variant_list[col].params;
+ printf("\tAES-%u", par.aes_key_size * 8);
+ }
+ printf("\n");
+ for (sz = 0; sz < sizes; sz++) {
+ printf("%d", job_sizes[RANGE_MIN] +
+ (sz * job_sizes[RANGE_STEP]));
+ for (col = 0; col < total_variants; col++) {
+ uint64_t *time_ptr =
+ &variant_list[col].avg_times[sz * NUM_RUNS];
+ const unsigned long long val =
+ mean_median(time_ptr, NUM_RUNS,
+ p_buffer, p_keys);
+
+ printf("\t%llu", val);
+ }
+ printf("\n");
+ }
+}
+
+/* Prepares the data structure for test variant storage and sets the test configuration */
+#ifdef _WIN32
+static void
+#else
+static void *
+#endif
+run_tests(void *arg)
+{
+ uint32_t i;
+ struct thread_info *info = (struct thread_info *)arg;
+ MB_MGR *p_mgr = NULL;
+ struct params_s params;
+ uint32_t num_variants[NUM_TTYPES] = {0};
+ uint32_t type, at_size, run, arch;
+ uint32_t variants_per_arch, max_arch;
+ uint32_t variant;
+ uint32_t total_variants = 0;
+ struct variant_s *variant_ptr = NULL;
+ struct variant_s *variant_list = NULL;
+ const uint32_t min_size = job_sizes[RANGE_MIN];
+ const uint32_t max_size = job_sizes[RANGE_MAX];
+ const uint32_t step_size = job_sizes[RANGE_STEP];
+ uint8_t *buf = NULL;
+ uint128_t *keys = NULL;
+
+ p_mgr = info->p_mgr;
+
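+ /* number of buffer sizes to sweep from min to max in step increments */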
+ params.num_sizes = ((max_size - min_size) / step_size) + 1;
+
+ params.core = (uint32_t)info->core;
+
+ /* if cores selected then set affinity */
+ if (core_mask)
+ if (set_affinity(info->core) != 0) {
+ fprintf(stderr, "Failed to set cpu "
+ "affinity on core %d\n", info->core);
+ goto exit_failure;
+ }
+
+ /* If unhalted cycles selected and this is
+ the primary thread then start counter */
+ if (use_unhalted_cycles && info->print_info) {
+ int ret;
+
+ ret = start_cycles_ctr(params.core);
+ if (ret != 0) {
+ fprintf(stderr, "Failed to start cycles "
+ "counter on core %u\n", params.core);
+ goto exit_failure;
+ }
+ /* Get average cost of reading counter */
+ ret = set_avg_unhalted_cycle_cost(params.core, &rd_cycles_cost);
+ if (ret != 0 || rd_cycles_cost == 0) {
+ fprintf(stderr, "Error calculating unhalted "
+ "cycles read overhead!\n");
+ goto exit_failure;
+ } else
+ fprintf(stderr, "Started counting unhalted cycles on "
+ "core %d\nUnhalted cycles read cost = %lu "
+ "cycles\n", params.core,
+ (unsigned long)rd_cycles_cost);
+ }
+
+ init_mem(&buf, &keys);
+
+ for (type = TTYPE_AES_HMAC; type < NUM_TTYPES; type++) {
+ if (test_types[type] == 0)
+ continue;
+
+ switch (type) {
+ default:
+ case TTYPE_AES_HMAC:
+ variants_per_arch = VARIANTS_PER_ARCH_AES;
+ max_arch = NUM_ARCHS;
+ break;
+ case TTYPE_AES_DOCSIS:
+ variants_per_arch = VARIANTS_PER_ARCH_DOCSIS;
+ max_arch = NUM_ARCHS;
+ break;
+ case TTYPE_AES_GCM:
+ variants_per_arch = VARIANTS_PER_ARCH_GCM;
+ max_arch = NUM_ARCHS;
+ break;
+ case TTYPE_AES_CCM:
+ variants_per_arch = VARIANTS_PER_ARCH_CCM;
+ max_arch = NUM_ARCHS;
+ break;
+ case TTYPE_AES_DES:
+ variants_per_arch = VARIANTS_PER_ARCH_DES;
+ max_arch = NUM_ARCHS;
+ break;
+ case TTYPE_AES_3DES:
+ variants_per_arch = VARIANTS_PER_ARCH_3DES;
+ max_arch = NUM_ARCHS;
+ break;
+ case TTYPE_PON:
+ variants_per_arch = VARIANTS_PER_ARCH_PON;
+ max_arch = NUM_ARCHS;
+ break;
+ case TTYPE_CUSTOM:
+ variants_per_arch = 1;
+ max_arch = NUM_ARCHS;
+ break;
+ }
+
+ /* Calculate the total number of variants */
+ for (arch = 0; arch < max_arch; arch++) {
+ if (archs[arch] == 0)
+ continue;
+ num_variants[type] += variants_per_arch;
+ }
+ total_variants += num_variants[type];
+ }
+
+ if (total_variants == 0) {
+ fprintf(stderr, "No tests to be run\n");
+ goto exit;
+ }
+
+ if (info->print_info && !silent_progress_bar)
+ fprintf(stderr, "Total number of combinations (algos, "
+ "key sizes, cipher directions) to test = %u\n",
+ total_variants);
+
+ variant_list = (struct variant_s *)
+ malloc(total_variants * sizeof(struct variant_s));
+ if (variant_list == NULL) {
+ fprintf(stderr, "Cannot allocate memory\n");
+ goto exit_failure;
+ }
+
+ at_size = NUM_RUNS * params.num_sizes * sizeof(uint64_t);
+ for (variant = 0, variant_ptr = variant_list;
+ variant < total_variants;
+ variant++, variant_ptr++) {
+ variant_ptr->avg_times = (uint64_t *) malloc(at_size);
+ if (!variant_ptr->avg_times) {
+ fprintf(stderr, "Cannot allocate memory\n");
+ goto exit_failure;
+ }
+ }
+
+ for (run = 0; run < NUM_RUNS; run++) {
+ if (info->print_info)
+ fprintf(stderr, "\nStarting run %d of %d%c",
+ run+1, NUM_RUNS,
+ silent_progress_bar ? '\r' : '\n' );
+
+ variant = 0;
+ variant_ptr = variant_list;
+
+ if (iter_scale == ITER_SCALE_SMOKE && run != 0)
+ continue;
+
+ if (info->print_info)
+ prog_bar_init(total_variants);
+
+ for (type = TTYPE_AES_HMAC; type < NUM_TTYPES; type++) {
+ if (test_types[type] == 0)
+ continue;
+
+ max_arch = NUM_ARCHS;
+
+ params.num_variants = num_variants[type];
+ params.test_type = type;
+ /* Performing tests for each selected architecture */
+ for (arch = 0; arch < max_arch; arch++) {
+ if (archs[arch] == 0)
+ continue;
+ run_dir_test(p_mgr, arch, &params, run,
+ &variant_ptr, &variant, buf,
+ keys, info->print_info);
+ }
+ } /* end for type */
+ if (info->print_info)
+ prog_bar_fini();
+
+ } /* end for run */
+ if (info->print_info == 1 && iter_scale != ITER_SCALE_SMOKE) {
+ fprintf(stderr, "\n");
+ print_times(variant_list, &params, total_variants, buf, keys);
+ }
+
+exit:
+ if (variant_list != NULL) {
+ /* Freeing variants list */
+ for (i = 0; i < total_variants; i++)
+ free(variant_list[i].avg_times);
+ free(variant_list);
+ }
+ free_mem(&buf, &keys);
+ free_mb_mgr(p_mgr);
+#ifndef _WIN32
+ return NULL;
+
+#else
+ return;
+#endif
+exit_failure:
+ if (variant_list != NULL)
+ free(variant_list);
+ free_mem(&buf, &keys);
+ free_mb_mgr(p_mgr);
+ exit(EXIT_FAILURE);
+}
+
+static void usage(void)
+{
+ fprintf(stderr, "Usage: ipsec_perf [args], "
+ "where args are zero or more\n"
+ "-h: print this message\n"
+ "-c: Use cold cache, it uses warm as default\n"
+ "-w: Use warm cache\n"
+ "--arch: run only tests on specified architecture (SSE/AVX/AVX2/AVX512)\n"
+ "--cipher-algo: Select cipher algorithm to run on the custom test\n"
+ "--cipher-dir: Select cipher direction to run on the custom test "
+ "(encrypt/decrypt) (default = encrypt)\n"
+ "--hash-algo: Select hash algorithm to run on the custom test\n"
+ "--aead-algo: Select AEAD algorithm to run on the custom test\n"
+ "--no-avx512: Don't do AVX512\n"
+ "--no-avx2: Don't do AVX2\n"
+ "--no-avx: Don't do AVX\n"
+ "--no-sse: Don't do SSE\n"
+ "-o val: Use <val> for the SHA size increment, default is 24\n"
+ "--shani-on: use SHA extensions, default: auto-detect\n"
+ "--shani-off: don't use SHA extensions\n"
+ "--no-gcm: do not run GCM perf tests\n"
+ "--no-aes: do not run standard AES + HMAC perf tests\n"
+ "--no-docsis: do not run DOCSIS cipher perf tests\n"
+ "--no-ccm: do not run CCM cipher perf tests\n"
+ "--no-des: do not run DES cipher perf tests\n"
+ "--no-3des: do not run 3DES cipher perf tests\n"
+ "--no-pon: do not run PON cipher perf tests\n"
+ "--gcm-job-api: use JOB API for GCM perf tests"
+ " (raw GCM API is default)\n"
+ "--threads num: <num> for the number of threads to run"
+ " Max: %d\n"
+ "--cores mask: <mask> CPU's to run threads\n"
+ "--unhalted-cycles: measure using unhalted cycles (requires root).\n"
+ " Note: RDTSC is used by default.\n"
+ "--quick: reduces number of test iterations by x10\n"
+ " (less precise but quicker)\n"
+ "--smoke: very quick, unprecise and without print out\n"
+ " (for validation only)\n"
+ "--job-size: size of the cipher & MAC job in bytes. It can be:\n"
+ " - single value: test single size\n"
+ " - range: test multiple sizes with following format"
+ " min:step:max (e.g. 16:16:256)\n"
+ " (-o still applies for MAC)\n"
+ "--aad-size: size of AAD for AEAD algorithms\n"
+ "--job-iter: number of tests iterations for each job size\n"
+ "--no-progress-bar: Don't display progress bar\n",
+ MAX_NUM_THREADS + 1);
+}
+
+static int
+get_next_num_arg(const char * const *argv, const int index, const int argc,
+ void *dst, const size_t dst_size)
+{
+ char *endptr = NULL;
+ uint64_t val;
+
+ if (dst == NULL || argv == NULL || index < 0 || argc < 0) {
+ fprintf(stderr, "%s() internal error!\n", __func__);
+ exit(EXIT_FAILURE);
+ }
+
+ if (index >= (argc - 1)) {
+ fprintf(stderr, "'%s' requires an argument!\n", argv[index]);
+ exit(EXIT_FAILURE);
+ }
+
+#ifdef _WIN32
+ val = _strtoui64(argv[index + 1], &endptr, 0);
+#else
+ val = strtoull(argv[index + 1], &endptr, 0);
+#endif
+ if (endptr == argv[index + 1] || (endptr != NULL && *endptr != '\0')) {
+ fprintf(stderr, "Error converting '%s' as value for '%s'!\n",
+ argv[index + 1], argv[index]);
+ exit(EXIT_FAILURE);
+ }
+
+ switch (dst_size) {
+ case (sizeof(uint8_t)):
+ *((uint8_t *)dst) = (uint8_t) val;
+ break;
+ case (sizeof(uint16_t)):
+ *((uint16_t *)dst) = (uint16_t) val;
+ break;
+ case (sizeof(uint32_t)):
+ *((uint32_t *)dst) = (uint32_t) val;
+ break;
+ case (sizeof(uint64_t)):
+ *((uint64_t *)dst) = val;
+ break;
+ default:
+ fprintf(stderr, "%s() invalid dst_size %u!\n",
+ __func__, (unsigned) dst_size);
+ exit(EXIT_FAILURE);
+ break;
+ }
+
+ return index + 1;
+}
+
+static int
+detect_arch(unsigned int arch_support[NUM_ARCHS])
+{
+ const uint64_t detect_sse =
+ IMB_FEATURE_SSE4_2 | IMB_FEATURE_CMOV | IMB_FEATURE_AESNI;
+ const uint64_t detect_avx =
+ IMB_FEATURE_AVX | IMB_FEATURE_CMOV | IMB_FEATURE_AESNI;
+ const uint64_t detect_avx2 = IMB_FEATURE_AVX2 | detect_avx;
+ const uint64_t detect_avx512 = IMB_FEATURE_AVX512_SKX | detect_avx2;
+ MB_MGR *p_mgr = NULL;
+ enum arch_type_e arch_id;
+
+ if (arch_support == NULL) {
+ fprintf(stderr, "Array not passed correctly\n");
+ return -1;
+ }
+
+ for (arch_id = ARCH_SSE; arch_id < NUM_ARCHS; arch_id++)
+ arch_support[arch_id] = 1;
+
+ p_mgr = alloc_mb_mgr(0);
+ if (p_mgr == NULL) {
+ fprintf(stderr, "Architecture detect error!\n");
+ return -1;
+ }
+
+ if ((p_mgr->features & detect_avx512) != detect_avx512)
+ arch_support[ARCH_AVX512] = 0;
+
+ if ((p_mgr->features & detect_avx2) != detect_avx2)
+ arch_support[ARCH_AVX2] = 0;
+
+ if ((p_mgr->features & detect_avx) != detect_avx)
+ arch_support[ARCH_AVX] = 0;
+
+ if ((p_mgr->features & detect_sse) != detect_sse)
+ arch_support[ARCH_SSE] = 0;
+
+ free_mb_mgr(p_mgr);
+
+ return 0;
+}
+
+/*
+ * Check that a string argument is supported and, if it is, return the
+ * values associated with it.
+ */
+static const union params *
+check_string_arg(const char *param, const char *arg,
+ const struct str_value_mapping *map,
+ const unsigned int num_avail_opts)
+{
+ unsigned int i;
+
+ if (arg == NULL) {
+ fprintf(stderr, "%s requires an argument\n", param);
+ goto exit;
+ }
+
+ for (i = 0; i < num_avail_opts; i++)
+ if (strcmp(arg, map[i].name) == 0)
+ return &(map[i].values);
+
+ /* Argument is not listed in the available options */
+ fprintf(stderr, "Invalid argument for %s\n", param);
+exit:
+ fprintf(stderr, "Accepted arguments: ");
+ for (i = 0; i < num_avail_opts; i++)
+ fprintf(stderr, "%s ", map[i].name);
+ fprintf(stderr, "\n");
+
+ return NULL;
+}
+
+static int
+parse_range(const char * const *argv, const int index, const int argc,
+ uint32_t range_values[NUM_RANGE])
+{
+ char *token;
+ uint32_t number;
+ unsigned int i;
+
+
+ if (range_values == NULL || argv == NULL || index < 0 || argc < 0) {
+ fprintf(stderr, "%s() internal error!\n", __func__);
+ exit(EXIT_FAILURE);
+ }
+
+ if (index >= (argc - 1)) {
+ fprintf(stderr, "'%s' requires an argument!\n", argv[index]);
+ exit(EXIT_FAILURE);
+ }
+
+ char *copy_arg = strdup(argv[index + 1]);
+
+ if (copy_arg == NULL) {
+ fprintf(stderr, "%s() internal error!\n", __func__);
+ exit(EXIT_FAILURE);
+ }
+
+ errno = 0;
+ token = strtok(copy_arg, ":");
+
+ /* Try parsing range (minimum, step and maximum values) */
+ for (i = 0; i < NUM_RANGE; i++) {
+ if (token == NULL)
+ goto no_range;
+
+ number = strtoul(token, NULL, 10);
+
+ if (errno != 0)
+ goto no_range;
+
+ range_values[i] = number;
+ token = strtok(NULL, ":");
+ }
+
+ if (token != NULL)
+ goto no_range;
+
+ if (range_values[RANGE_MAX] < range_values[RANGE_MIN]) {
+ fprintf(stderr, "Maximum value of range cannot be lower "
+ "than minimum value\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (range_values[RANGE_STEP] == 0) {
+ fprintf(stderr, "Step value in range cannot be 0\n");
+ exit(EXIT_FAILURE);
+ }
+
+ goto end_range;
+no_range:
+ /* Try parsing as single value */
+ get_next_num_arg(argv, index, argc, &job_sizes[RANGE_MIN],
+ sizeof(job_sizes[RANGE_MIN]));
+
+ job_sizes[RANGE_MAX] = job_sizes[RANGE_MIN];
+
+end_range:
+ free(copy_arg);
+ return (index + 1);
+
+}
+
+int main(int argc, char *argv[])
+{
+ uint32_t num_t = 0;
+ int i, core = 0;
+ struct thread_info *thread_info_p = t_info;
+ unsigned int arch_id;
+ unsigned int arch_support[NUM_ARCHS];
+ const union params *values;
+ unsigned int cipher_algo_set = 0;
+ unsigned int hash_algo_set = 0;
+ unsigned int aead_algo_set = 0;
+ unsigned int cipher_dir_set = 0;
+#ifdef _WIN32
+ HANDLE threads[MAX_NUM_THREADS];
+#else
+ pthread_t tids[MAX_NUM_THREADS];
+#endif
+
+ for (i = 1; i < argc; i++)
+ if (strcmp(argv[i], "-h") == 0) {
+ usage();
+ return EXIT_SUCCESS;
+ } else if (strcmp(argv[i], "-c") == 0) {
+ cache_type = COLD;
+ fprintf(stderr, "Cold cache, ");
+ } else if (strcmp(argv[i], "-w") == 0) {
+ cache_type = WARM;
+ fprintf(stderr, "Warm cache, ");
+ } else if (strcmp(argv[i], "--no-avx512") == 0) {
+ archs[ARCH_AVX512] = 0;
+ } else if (strcmp(argv[i], "--no-avx2") == 0) {
+ archs[ARCH_AVX2] = 0;
+ } else if (strcmp(argv[i], "--no-avx") == 0) {
+ archs[ARCH_AVX] = 0;
+ } else if (strcmp(argv[i], "--no-sse") == 0) {
+ archs[ARCH_SSE] = 0;
+ } else if (strcmp(argv[i], "--shani-on") == 0) {
+ flags &= (~IMB_FLAG_SHANI_OFF);
+ } else if (strcmp(argv[i], "--shani-off") == 0) {
+ flags |= IMB_FLAG_SHANI_OFF;
+ } else if (strcmp(argv[i], "--no-gcm") == 0) {
+ test_types[TTYPE_AES_GCM] = 0;
+ } else if (strcmp(argv[i], "--no-aes") == 0) {
+ test_types[TTYPE_AES_HMAC] = 0;
+ } else if (strcmp(argv[i], "--no-docsis") == 0) {
+ test_types[TTYPE_AES_DOCSIS] = 0;
+ } else if (strcmp(argv[i], "--no-ccm") == 0) {
+ test_types[TTYPE_AES_CCM] = 0;
+ } else if (strcmp(argv[i], "--no-des") == 0) {
+ test_types[TTYPE_AES_DES] = 0;
+ } else if (strcmp(argv[i], "--no-3des") == 0) {
+ test_types[TTYPE_AES_3DES] = 0;
+ } else if (strcmp(argv[i], "--no-pon") == 0) {
+ test_types[TTYPE_PON] = 0;
+ } else if (strcmp(argv[i], "--gcm-job-api") == 0) {
+ use_gcm_job_api = 1;
+ } else if (strcmp(argv[i], "--quick") == 0) {
+ iter_scale = ITER_SCALE_SHORT;
+ } else if (strcmp(argv[i], "--smoke") == 0) {
+ iter_scale = ITER_SCALE_SMOKE;
+ } else if (strcmp(argv[i], "--arch") == 0) {
+ values = check_string_arg(argv[i], argv[i+1],
+ arch_str_map,
+ DIM(arch_str_map));
+ if (values == NULL)
+ return EXIT_FAILURE;
+
+ /*
+ * Disable all the other architectures
+ * and enable only the specified
+ */
+ memset(archs, 0, sizeof(archs));
+ archs[values->arch_type] = 1;
+ i++;
+ } else if (strcmp(argv[i], "--cipher-algo") == 0) {
+ values = check_string_arg(argv[i], argv[i+1],
+ cipher_algo_str_map,
+ DIM(cipher_algo_str_map));
+ if (values == NULL)
+ return EXIT_FAILURE;
+
+ custom_job_params.cipher_mode =
+ values->job_params.cipher_mode;
+ custom_job_params.aes_key_size =
+ values->job_params.aes_key_size;
+ test_types[TTYPE_CUSTOM] = 1;
+ cipher_algo_set = 1;
+ i++;
+ } else if (strcmp(argv[i], "--cipher-dir") == 0) {
+ values = check_string_arg(argv[i], argv[i+1],
+ cipher_dir_str_map,
+ DIM(cipher_dir_str_map));
+ if (values == NULL)
+ return EXIT_FAILURE;
+
+ custom_job_params.cipher_dir =
+ values->job_params.cipher_dir;
+ cipher_dir_set = 1;
+ i++;
+ } else if (strcmp(argv[i], "--hash-algo") == 0) {
+ values = check_string_arg(argv[i], argv[i+1],
+ hash_algo_str_map,
+ DIM(hash_algo_str_map));
+ if (values == NULL)
+ return EXIT_FAILURE;
+
+ custom_job_params.hash_alg =
+ values->job_params.hash_alg;
+ test_types[TTYPE_CUSTOM] = 1;
+ hash_algo_set = 1;
+ i++;
+ } else if (strcmp(argv[i], "--aead-algo") == 0) {
+ values = check_string_arg(argv[i], argv[i+1],
+ aead_algo_str_map,
+ DIM(aead_algo_str_map));
+ if (values == NULL)
+ return EXIT_FAILURE;
+
+ custom_job_params.cipher_mode =
+ values->job_params.cipher_mode;
+ custom_job_params.aes_key_size =
+ values->job_params.aes_key_size;
+ custom_job_params.hash_alg =
+ values->job_params.hash_alg;
+ test_types[TTYPE_CUSTOM] = 1;
+ aead_algo_set = 1;
+ i++;
+ } else if (strcmp(argv[i], "-o") == 0) {
+ i = get_next_num_arg((const char * const *)argv, i,
+ argc, &sha_size_incr,
+ sizeof(sha_size_incr));
+ } else if (strcmp(argv[i], "--job-size") == 0) {
+ /* Try parsing the argument as a range first */
+ i = parse_range((const char * const *)argv, i, argc,
+ job_sizes);
+ if (job_sizes[RANGE_MAX] > JOB_SIZE_TOP) {
+ fprintf(stderr,
+ "Invalid job size %u (max %u)\n",
+ (unsigned) job_sizes[RANGE_MAX],
+ JOB_SIZE_TOP);
+ return EXIT_FAILURE;
+ }
+ } else if (strcmp(argv[i], "--aad-size") == 0) {
+ /* Get AAD size for both GCM and CCM */
+ i = get_next_num_arg((const char * const *)argv, i,
+ argc, &gcm_aad_size,
+ sizeof(gcm_aad_size));
+ if (gcm_aad_size > AAD_SIZE_MAX) {
+ fprintf(stderr,
+ "Invalid AAD size %u (max %u)!\n",
+ (unsigned) gcm_aad_size,
+ AAD_SIZE_MAX);
+ return EXIT_FAILURE;
+ }
+ ccm_aad_size = gcm_aad_size;
+ } else if (strcmp(argv[i], "--job-iter") == 0) {
+ i = get_next_num_arg((const char * const *)argv, i,
+ argc, &job_iter, sizeof(job_iter));
+ } else if (strcmp(argv[i], "--threads") == 0) {
+ i = get_next_num_arg((const char * const *)argv, i,
+ argc, &num_t, sizeof(num_t));
+ if (num_t > (MAX_NUM_THREADS + 1)) {
+ fprintf(stderr, "Invalid number of threads!\n");
+ return EXIT_FAILURE;
+ }
+ } else if (strcmp(argv[i], "--cores") == 0) {
+ i = get_next_num_arg((const char * const *)argv, i,
+ argc, &core_mask,
+ sizeof(core_mask));
+ } else if (strcmp(argv[i], "--unhalted-cycles") == 0) {
+ use_unhalted_cycles = 1;
+ } else if (strcmp(argv[i], "--no-progress-bar") == 0) {
+ silent_progress_bar = 1;
+ } else {
+ usage();
+ return EXIT_FAILURE;
+ }
+
+ if (test_types[TTYPE_CUSTOM]) {
+ /* Disable all other tests when custom test is selected */
+ memset(test_types, 0, sizeof(test_types));
+ test_types[TTYPE_CUSTOM] = 1;
+ if (aead_algo_set && (cipher_algo_set || hash_algo_set)) {
+ fprintf(stderr, "AEAD algorithm cannot be used "
+ "combined with another cipher/hash "
+ "algorithm\n");
+ return EXIT_FAILURE;
+ }
+ }
+
+ if (cipher_algo_set == 0 && aead_algo_set == 0 && cipher_dir_set) {
+ fprintf(stderr, "--cipher-dir can only be used with "
+ "--cipher-algo or --aead-algo\n");
+ return EXIT_FAILURE;
+ }
+
+ if (test_types[TTYPE_AES_CCM] ||
+ custom_job_params.cipher_mode == TEST_CCM) {
+ if (ccm_aad_size > CCM_AAD_SIZE_MAX) {
+ fprintf(stderr, "AAD cannot be higher than %u in CCM\n",
+ CCM_AAD_SIZE_MAX);
+ return EXIT_FAILURE;
+ }
+ }
+
+ if (job_sizes[RANGE_MIN] == 0) {
+ if (test_types[TTYPE_AES_HMAC] ||
+ test_types[TTYPE_AES_DOCSIS] ||
+ test_types[TTYPE_AES_DES] ||
+ test_types[TTYPE_AES_3DES] ||
+ (test_types[TTYPE_CUSTOM] &&
+ aead_algo_set == 0)) {
+ fprintf(stderr, "Buffer size cannot be 0 unless only "
+ "an AEAD algorithm is tested\n");
+ return EXIT_FAILURE;
+ }
+ }
+
+ /* Check num cores >= number of threads */
+ if ((core_mask != 0 && num_t != 0) && (num_t > bitcount(core_mask))) {
+ fprintf(stderr, "Insufficient number of cores in "
+ "core mask (0x%lx) to run %d threads!\n",
+ (unsigned long) core_mask, num_t);
+ return EXIT_FAILURE;
+ }
+
+ /* if cycles selected then init MSR module */
+ if (use_unhalted_cycles) {
+ if (core_mask == 0) {
+ fprintf(stderr, "Must specify core mask "
+ "when reading unhalted cycles!\n");
+ return EXIT_FAILURE;
+ }
+
+ if (init_msr_mod() != 0) {
+ fprintf(stderr, "Error initializing MSR module!\n");
+ return EXIT_FAILURE;
+ }
+ }
+
+ if (detect_arch(arch_support) < 0)
+ return EXIT_FAILURE;
+
+ /* disable tests depending on instruction sets supported */
+ for (arch_id = 0; arch_id < NUM_ARCHS; arch_id++) {
+ if (archs[arch_id] == 1 && arch_support[arch_id] == 0) {
+ archs[arch_id] = 0;
+ fprintf(stderr,
+ "%s not supported. Disabling %s tests\n",
+ arch_str_map[arch_id].name,
+ arch_str_map[arch_id].name);
+ }
+ }
+
+ fprintf(stderr, "SHA size incr = %d\n", sha_size_incr);
+
+ if (test_types[TTYPE_AES_GCM] ||
+ (custom_job_params.cipher_mode == TEST_GCM))
+ fprintf(stderr, "GCM AAD = %"PRIu64"\n", gcm_aad_size);
+
+ if (test_types[TTYPE_AES_CCM] ||
+ (custom_job_params.cipher_mode == TEST_CCM))
+ fprintf(stderr, "CCM AAD = %"PRIu64"\n", ccm_aad_size);
+
+ if (archs[ARCH_SSE]) {
+ MB_MGR *p_mgr = alloc_mb_mgr(flags);
+
+ if (p_mgr == NULL) {
+ fprintf(stderr, "Error allocating MB_MGR structure!\n");
+ return EXIT_FAILURE;
+ }
+ init_mb_mgr_sse(p_mgr);
+ fprintf(stderr, "%s SHA extensions (shani) for SSE arch\n",
+ (p_mgr->features & IMB_FEATURE_SHANI) ?
+ "Using" : "Not using");
+ free_mb_mgr(p_mgr);
+ }
+
+ memset(t_info, 0, sizeof(t_info));
+ init_offsets(cache_type);
+
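+ /* constant seed keeps pseudo-random buffer contents reproducible
+ * across runs */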
+ srand(ITER_SCALE_LONG + ITER_SCALE_SHORT + ITER_SCALE_SMOKE);
+
+ if (num_t > 1) {
+ uint32_t n;
+
+ for (n = 0; n < (num_t - 1); n++, thread_info_p++) {
+ /* Set core if selected */
+ if (core_mask) {
+ core = next_core(core_mask, core);
+ thread_info_p->core = core++;
+ }
+
+ /* Allocate MB manager for each thread */
+ thread_info_p->p_mgr = alloc_mb_mgr(flags);
+ if (thread_info_p->p_mgr == NULL) {
+ fprintf(stderr, "Failed to allocate MB_MGR "
+ "structure for thread %u!\n",
+ (unsigned)(n + 1));
+ exit(EXIT_FAILURE);
+ }
+#ifdef _WIN32
+ threads[n] = (HANDLE)
+ _beginthread(&run_tests, 0,
+ (void *)thread_info_p);
+#else
+ pthread_attr_t attr;
+
+ pthread_attr_init(&attr);
+ pthread_create(&tids[n], &attr, run_tests,
+ (void *)thread_info_p);
+#endif
+ }
+ }
+
+ thread_info_p->print_info = 1;
+ thread_info_p->p_mgr = alloc_mb_mgr(flags);
+ if (thread_info_p->p_mgr == NULL) {
+ fprintf(stderr, "Failed to allocate MB_MGR "
+ "structure for main thread!\n");
+ exit(EXIT_FAILURE);
+ }
+ if (core_mask) {
+ core = next_core(core_mask, core);
+ thread_info_p->core = core;
+ }
+
+ run_tests((void *)thread_info_p);
+ if (num_t > 1) {
+ uint32_t n;
+
+#ifdef _WIN32
+		WaitForMultipleObjects(num_t - 1, threads, TRUE, INFINITE);
+#endif
+ for (n = 0; n < (num_t - 1); n++) {
+ fprintf(stderr, "Waiting on thread %u to finish...\n",
+ (unsigned)(n + 2));
+#ifdef _WIN32
+ CloseHandle(threads[n]);
+#else
+ pthread_join(tids[n], NULL);
+#endif
+ }
+ }
+
+ if (use_unhalted_cycles)
+ machine_fini();
+
+ return EXIT_SUCCESS;
+}
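Note that main() above hands each worker its own multi-buffer manager: one
alloc_mb_mgr() call per spawned thread plus one for the main thread, each
followed by the architecture-specific init call. The sketch below shows that
per-thread pattern in isolation; the thread count, the empty worker body and
the use of init_mb_mgr_sse() are placeholders for illustration only.

    #include <pthread.h>
    #include <stdlib.h>
    #include <intel-ipsec-mb.h>

    #define NUM_WORKERS 4                   /* placeholder thread count */

    static void *worker(void *arg)
    {
            MB_MGR *mgr = (MB_MGR *) arg;

            /* submit/flush jobs through mgr here; each thread owns its manager */
            (void) mgr;
            return NULL;
    }

    static int run_workers(void)
    {
            pthread_t tids[NUM_WORKERS];
            MB_MGR *mgrs[NUM_WORKERS];
            unsigned i;

            /* allocate and initialize one multi-buffer manager per thread */
            for (i = 0; i < NUM_WORKERS; i++) {
                    mgrs[i] = alloc_mb_mgr(0);
                    if (mgrs[i] == NULL) {
                            while (i-- > 0)
                                    free_mb_mgr(mgrs[i]);
                            return EXIT_FAILURE;
                    }
                    init_mb_mgr_sse(mgrs[i]);
            }

            for (i = 0; i < NUM_WORKERS; i++)
                    pthread_create(&tids[i], NULL, worker, mgrs[i]);

            for (i = 0; i < NUM_WORKERS; i++) {
                    pthread_join(tids[i], NULL);
                    free_mb_mgr(mgrs[i]);
            }
            return EXIT_SUCCESS;
    }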
diff --git a/src/spdk/intel-ipsec-mb/LibPerfApp/msr.c b/src/spdk/intel-ipsec-mb/LibPerfApp/msr.c
new file mode 100644
index 000000000..1382d031b
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/LibPerfApp/msr.c
@@ -0,0 +1,304 @@
+/**********************************************************************
+ Copyright(c) 2018 Intel Corporation All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+**********************************************************************/
+
+/**
+ * @brief Provides access to MSR read & write operations
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#ifndef _WIN32
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#else
+#include <windows.h>
+#ifdef WIN_MSR
+#include "OlsDef.h"
+#include "OlsApiInitExt.h"
+#include "OlsApiInit.h"
+#endif /* WIN_MSR */
+#endif
+
+#include "msr.h"
+
+static int *m_msr_fd = NULL; /**< MSR driver file descriptors table */
+static unsigned m_maxcores = 0; /**< max number of cores (also the size
+                                   of the table above) */
+#ifdef WIN_MSR
+union msr_data {
+ uint64_t ui64;
+ struct {
+ uint32_t low;
+ uint32_t high;
+ } ui32;
+};
+
+HMODULE hOpenLibSys = NULL;
+
+/**
+ * @brief Initialize WinRing0 driver
+ *
+ * @return Operation status
+ * @retval MACHINE_RETVAL_OK on success
+ */
+static int
+initMSRdriver(void)
+{
+ const BOOL result = InitOpenLibSys(&hOpenLibSys);
+
+ if (result == 0) {
+ hOpenLibSys = NULL;
+ fprintf(stderr, "Failed to load WinRing0 driver!\n");
+ return MACHINE_RETVAL_ERROR;
+ }
+
+ return MACHINE_RETVAL_OK;
+}
+
+/**
+ * @brief Shutdown WinRing0 driver
+ */
+static void
+deInitMSRdriver(void)
+{
+ const BOOL result = DeinitOpenLibSys(&hOpenLibSys);
+
+ if (result == 0)
+ fprintf(stderr, "Error shutting down WinRing0 driver!\n");
+
+ hOpenLibSys = NULL;
+}
+#endif /* WIN_MSR */
+
+int
+machine_init(const unsigned max_core_id)
+{
+ unsigned i;
+
+ if (max_core_id == 0)
+ return MACHINE_RETVAL_PARAM;
+#ifdef _WIN32
+#ifdef WIN_MSR
+ if (initMSRdriver() != MACHINE_RETVAL_OK)
+ return MACHINE_RETVAL_ERROR;
+#else
+ fprintf(stderr, "WinRing0 driver not available!\n");
+ return MACHINE_RETVAL_ERROR;
+#endif /* WIN_MSR */
+#endif /* _WIN32 */
+
+ m_maxcores = max_core_id + 1;
+
+ /**
+         * Allocate a table to hold MSR driver file descriptors.
+         * Each file descriptor is for a different core; the core id
+         * is the index into the table.
+ */
+ m_msr_fd = (int *)malloc(m_maxcores * sizeof(m_msr_fd[0]));
+ if (m_msr_fd == NULL) {
+ m_maxcores = 0;
+ return MACHINE_RETVAL_ERROR;
+ }
+
+ for (i = 0; i < m_maxcores; i++)
+ m_msr_fd[i] = -1;
+
+ return MACHINE_RETVAL_OK;
+}
+
+int
+machine_fini(void)
+{
+ ASSERT(m_msr_fd != NULL);
+ if (m_msr_fd == NULL)
+ return MACHINE_RETVAL_ERROR;
+#ifdef _WIN32
+#ifdef WIN_MSR
+ deInitMSRdriver();
+#endif
+#else
+ unsigned i;
+
+ /**
+ * Close open file descriptors and free up table memory.
+ */
+ for (i = 0; i < m_maxcores; i++)
+ if (m_msr_fd[i] != -1) {
+ close(m_msr_fd[i]);
+ m_msr_fd[i] = -1;
+ }
+#endif /* _WIN32 */
+ free(m_msr_fd);
+ m_msr_fd = NULL;
+ m_maxcores = 0;
+
+ return MACHINE_RETVAL_OK;
+}
+
+#ifndef _WIN32
+/**
+ * @brief Returns MSR driver file descriptor for given core id
+ *
+ * The file descriptor may have been opened previously and cached in the
+ * m_msr_fd table, or it is opened (and cached) during this call.
+ *
+ * @param lcore logical core id
+ *
+ * @return MSR driver file descriptor corresponding to \a lcore
+ */
+static int
+msr_file_open(const unsigned lcore)
+{
+ ASSERT(lcore < m_maxcores);
+ ASSERT(m_msr_fd != NULL);
+
+ int fd = m_msr_fd[lcore];
+
+ if (fd < 0) {
+ char fname[32];
+
+ memset(fname, 0, sizeof(fname));
+ snprintf(fname, sizeof(fname)-1,
+ "/dev/cpu/%u/msr", lcore);
+ fd = open(fname, O_RDWR);
+ if (fd < 0)
+ fprintf(stderr, "Error opening file '%s'!\n", fname);
+ else
+ m_msr_fd[lcore] = fd;
+ }
+
+ return fd;
+}
+#endif /* _WIN32 */
+
+int
+msr_read(const unsigned lcore,
+ const uint32_t reg,
+ uint64_t *value)
+{
+ int ret = MACHINE_RETVAL_OK;
+#ifdef _WIN32
+#ifdef WIN_MSR
+ union msr_data msr;
+ BOOL status;
+#endif
+#endif
+ ASSERT(value != NULL);
+ if (value == NULL)
+ return MACHINE_RETVAL_PARAM;
+
+ ASSERT(lcore < m_maxcores);
+ if (lcore >= m_maxcores)
+ return MACHINE_RETVAL_PARAM;
+
+ ASSERT(m_msr_fd != NULL);
+ if (m_msr_fd == NULL)
+ return MACHINE_RETVAL_ERROR;
+#ifdef _WIN32
+#ifdef WIN_MSR
+ msr.ui64 = 0;
+ status = RdmsrTx((DWORD)reg, &(msr.ui32.low),
+ &(msr.ui32.high), (1ULL << lcore));
+ if (status)
+ *value = msr.ui64;
+ else
+ ret = MACHINE_RETVAL_ERROR;
+#endif /* WIN_MSR */
+#else
+ int fd = -1;
+ ssize_t read_ret = 0;
+
+ fd = msr_file_open(lcore);
+ if (fd < 0)
+ return MACHINE_RETVAL_ERROR;
+
+ read_ret = pread(fd, value, sizeof(value[0]), (off_t)reg);
+
+ if (read_ret != sizeof(value[0]))
+ ret = MACHINE_RETVAL_ERROR;
+#endif /* _WIN32 */
+ if (ret != MACHINE_RETVAL_OK)
+ fprintf(stderr, "RDMSR failed for reg[0x%x] on lcore %u\n",
+ (unsigned)reg, lcore);
+
+ return ret;
+}
+
+int
+msr_write(const unsigned lcore,
+ const uint32_t reg,
+ const uint64_t value)
+{
+ int ret = MACHINE_RETVAL_OK;
+#ifdef _WIN32
+#ifdef WIN_MSR
+ union msr_data msr;
+ BOOL status;
+#endif
+#endif
+ ASSERT(lcore < m_maxcores);
+ if (lcore >= m_maxcores)
+ return MACHINE_RETVAL_PARAM;
+
+ ASSERT(m_msr_fd != NULL);
+ if (m_msr_fd == NULL)
+ return MACHINE_RETVAL_ERROR;
+
+#ifdef _WIN32
+#ifdef WIN_MSR
+ msr.ui64 = value;
+ status = WrmsrTx((DWORD)reg, msr.ui32.low,
+ msr.ui32.high, (1ULL << lcore));
+ if (!status)
+ ret = MACHINE_RETVAL_ERROR;
+#endif /* WIN_MSR */
+#else
+ int fd = -1;
+ ssize_t write_ret = 0;
+
+ fd = msr_file_open(lcore);
+ if (fd < 0)
+ return MACHINE_RETVAL_ERROR;
+
+ write_ret = pwrite(fd, &value, sizeof(value), (off_t)reg);
+
+ if (write_ret != sizeof(value))
+ ret = MACHINE_RETVAL_ERROR;
+#endif /* _WIN32 */
+ if (ret != MACHINE_RETVAL_OK)
+ fprintf(stderr, "WRMSR failed for reg[0x%x] "
+ "<- value[0x%llx] on lcore %u\n",
+ (unsigned)reg, (unsigned long long)value, lcore);
+
+ return ret;
+}
diff --git a/src/spdk/intel-ipsec-mb/LibPerfApp/msr.h b/src/spdk/intel-ipsec-mb/LibPerfApp/msr.h
new file mode 100644
index 000000000..afa8795c4
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/LibPerfApp/msr.h
@@ -0,0 +1,114 @@
+/**********************************************************************
+ Copyright(c) 2018 Intel Corporation All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+**********************************************************************/
+
+/**
+ * @brief Provides access to MSR read & write operations
+ */
+
+#ifndef __MSR_H__
+#define __MSR_H__
+
+#include <stdint.h>
+#include <stdlib.h>
+#ifdef DEBUG
+#include <assert.h>
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+#ifdef DEBUG
+#define ASSERT assert
+#else
+#define ASSERT(x)
+#endif
+
+#define MACHINE_DEFAULT_MAX_COREID 255 /**< max core id */
+
+#define MACHINE_RETVAL_OK 0 /**< everything OK */
+#define MACHINE_RETVAL_ERROR 1 /**< generic error */
+#define MACHINE_RETVAL_PARAM 2 /**< parameter error */
+
+/**
+ * @brief Initializes machine module
+ *
+ * @param [in] max_core_id maximum logical core id to be handled by machine
+ *        module. Must be non-zero; a value of zero makes the call fail
+ *        with \a MACHINE_RETVAL_PARAM
+ *
+ * @return Operation status
+ * @retval MACHINE_RETVAL_OK on success
+ */
+int machine_init(const unsigned max_core_id);
+
+/**
+ * @brief Shuts down machine module
+ *
+ * @return Operation status
+ * @retval MACHINE_RETVAL_OK on success
+ */
+int machine_fini(void);
+
+/**
+ * @brief Executes RDMSR on \a lcore logical core
+ *
+ * @param [in] lcore logical core id
+ * @param [in] reg MSR to read from
+ * @param [out] value place to store MSR value at
+ *
+ * @return Operation status
+ * @retval MACHINE_RETVAL_OK on success
+ */
+int
+msr_read(const unsigned lcore,
+ const uint32_t reg,
+ uint64_t *value);
+
+/**
+ * @brief Executes WRMSR on \a lcore logical core
+ *
+ * @param [in] lcore logical core id
+ * @param [in] reg MSR to write to
+ * @param [in] value to be written into \a reg
+ *
+ * @return Operation status
+ * @retval MACHINE_RETVAL_OK on success
+ */
+int
+msr_write(const unsigned lcore,
+ const uint32_t reg,
+ const uint64_t value);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MSR_H__ */
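The header above is the whole surface of the MSR helper: machine_init() sizes
the per-core file-descriptor table, msr_read()/msr_write() issue RDMSR/WRMSR
on a chosen logical core, and machine_fini() releases the descriptors. A
minimal sketch of a read on logical core 0 follows; MSR 0x10
(IA32_TIME_STAMP_COUNTER) is chosen purely for illustration, and on Linux this
path assumes the msr driver (/dev/cpu/*/msr) is loaded and the caller has
sufficient privileges.

    #include <inttypes.h>
    #include <stdio.h>
    #include "msr.h"

    int main(void)
    {
            uint64_t tsc = 0;

            /* allow core ids 0..MACHINE_DEFAULT_MAX_COREID */
            if (machine_init(MACHINE_DEFAULT_MAX_COREID) != MACHINE_RETVAL_OK)
                    return 1;

            /* RDMSR of IA32_TIME_STAMP_COUNTER (0x10) on logical core 0 */
            if (msr_read(0, 0x10, &tsc) == MACHINE_RETVAL_OK)
                    printf("TSC on core 0: %" PRIu64 "\n", tsc);

            machine_fini();
            return 0;
    }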
diff --git a/src/spdk/intel-ipsec-mb/LibPerfApp/win_x64.mak b/src/spdk/intel-ipsec-mb/LibPerfApp/win_x64.mak
new file mode 100644
index 000000000..c30e46571
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/LibPerfApp/win_x64.mak
@@ -0,0 +1,81 @@
+#
+# Copyright (c) 2017-2018, Intel Corporation
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+# * Neither the name of Intel Corporation nor the names of its contributors
+# may be used to endorse or promote products derived from this software
+# without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+APP = ipsec_perf
+INSTNAME = intel-ipsec-mb
+
+!if !defined(PREFIX)
+PREFIX = C:\Program Files
+!endif
+
+!if exist("$(PREFIX)\$(INSTNAME)\libIPSec_MB.lib")
+IPSECLIB = "$(PREFIX)\$(INSTNAME)\libIPSec_MB.lib"
+INCDIR = -I"$(PREFIX)\$(INSTNAME)"
+!else
+IPSECLIB = ..\libIPSec_MB.lib
+INCDIR = -I..\ -I..\include
+!endif
+
+!ifdef WINRING0_DIR
+EXTRA_CFLAGS = $(EXTRA_CFLAGS) /DWIN_MSR
+INCDIR = $(INCDIR) -I$(WINRING0_DIR)
+!endif
+
+!ifdef DEBUG
+DCFLAGS = /Od /DDEBUG /Z7
+DLFLAGS = /debug
+!else
+DCFLAGS = /O2 /Oi
+DLFLAGS =
+!endif
+
+!if "$(GCM_BIG_DATA)" == "y"
+GCM_CFLAGS = /DGCM_BIG_DATA
+!else
+GCM_CFLAGS =
+!endif
+
+CC = cl
+# _CRT_SECURE_NO_WARNINGS disables warning C4996 about insecure strtok() being used
+CFLAGS = /nologo /D_CRT_SECURE_NO_WARNINGS $(DCFLAGS) /Y- /W3 /WX- /Gm- /fp:precise /EHsc $(EXTRA_CFLAGS) $(GCM_CFLAGS) $(INCDIR)
+
+LNK = link
+LFLAGS = /out:$(APP).exe $(DLFLAGS)
+
+all: $(APP).exe
+
+$(APP).exe: ipsec_perf.obj msr.obj $(IPSECLIB)
+ $(LNK) $(LFLAGS) ipsec_perf.obj msr.obj $(IPSECLIB)
+
+ipsec_perf.obj: ipsec_perf.c
+ $(CC) /c $(CFLAGS) ipsec_perf.c
+
+msr.obj: msr.c
+ $(CC) /c $(CFLAGS) msr.c
+
+clean:
+ del /q ipsec_perf.obj msr.obj $(APP).exe $(APP).pdb $(APP).ilk
diff --git a/src/spdk/intel-ipsec-mb/LibTestApp/Makefile b/src/spdk/intel-ipsec-mb/LibTestApp/Makefile
new file mode 100644
index 000000000..98383fe31
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/LibTestApp/Makefile
@@ -0,0 +1,131 @@
+#
+# Copyright (c) 2012-2019, Intel Corporation
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+# * Neither the name of Intel Corporation nor the names of its contributors
+# may be used to endorse or promote products derived from this software
+# without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+TEST_APP := ipsec_MB_testapp
+XVALID_APP := ipsec_xvalid_test
+INSTPATH ?= /usr/include/intel-ipsec-mb.h
+
+USE_YASM ?= n
+YASM ?= yasm
+NASM ?= nasm
+
+CFLAGS = -g -DLINUX -D_GNU_SOURCE \
+ -W -Wall -Wextra -Wmissing-declarations -Wpointer-arith \
+ -Wcast-qual -Wundef -Wwrite-strings \
+ -Wformat -Wformat-security \
+ -Wunreachable-code -Wmissing-noreturn -Wsign-compare -Wno-endif-labels \
+ -Wstrict-prototypes -Wmissing-prototypes -Wold-style-definition \
+ -fno-strict-overflow -fno-delete-null-pointer-checks -fwrapv
+
+YASM_FLAGS := -f x64 -f elf64 -X gnu -g dwarf2 -DLINUX -D__linux__
+NASM_FLAGS := -felf64 -Xgnu -gdwarf -DLINUX -D__linux__
+
+LDFLAGS = -fPIE -z noexecstack -z relro -z now
+LDLIBS = -lIPSec_MB
+
+ifeq ("$(shell test -r $(INSTPATH) && echo -n yes)","yes")
+# library installed
+CFLAGS +=
+else
+# library not installed
+CFLAGS += -I../include -I../
+LDFLAGS += -L../
+endif
+
+ifeq ($(DEBUG),y)
+CFLAGS += -O0 -DDEBUG
+LDFLAGS += -g
+else
+CFLAGS += -O3
+endif
+
+ifeq ($(GCM_BIG_DATA),y)
+CFLAGS += -DGCM_BIG_DATA
+endif
+
+SOURCES := main.c gcm_test.c ctr_test.c customop_test.c des_test.c ccm_test.c \
+ cmac_test.c utils.c hmac_sha1_test.c hmac_sha256_sha512_test.c \
+ hmac_md5_test.c aes_test.c sha_test.c chained_test.c api_test.c pon_test.c \
+ ecb_test.c zuc_test.c kasumi_test.c snow3g_test.c direct_api_test.c
+
+%.o:%.asm
+ifeq ($(USE_YASM),y)
+ $(YASM) $(YASM_FLAGS) $< -o $@
+else
+ $(NASM) -MD $(@:.o=.d) -MT $@ -o $@ $(NASM_FLAGS) $<
+endif
+
+ASM_OBJECTS := misc.o
+
+OBJECTS := $(SOURCES:%.c=%.o)
+
+XVALID_SOURCES := ipsec_xvalid.c
+XVALID_OBJECTS := $(XVALID_SOURCES:%.c=%.o) $(ASM_OBJECTS)
+
+all: $(TEST_APP) $(XVALID_APP)
+
+$(TEST_APP): $(OBJECTS)
+ $(CC) $(LDFLAGS) $^ $(LDLIBS) -o $@
+
+$(XVALID_APP): $(XVALID_OBJECTS)
+ $(CC) $(LDFLAGS) $^ $(LDLIBS) -o $@
+
+main.o: main.c do_test.h
+gcm_test.o: gcm_test.c gcm_ctr_vectors_test.h
+ctr_test.o: ctr_test.c gcm_ctr_vectors_test.h
+pon_test.o: pon_test.c gcm_ctr_vectors_test.h
+des_test.o: des_test.c
+ccm_test.o: ccm_test.c utils.h
+cmac_test.o: cmac_test.c utils.h
+hmac_sha1_test.o: hmac_sha1_test.c utils.h
+hmac_md5_test.o: hmac_md5_test.c utils.h
+hmac_sha256_sha512_test.o: hmac_sha256_sha512_test.c utils.h
+aes_test.o: aes_test.c gcm_ctr_vectors_test.h utils.h
+ecb_test.o: ecb_test.c gcm_ctr_vectors_test.h utils.h
+customop_test.o: customop_test.c customop_test.h
+utils.o: utils.c utils.h
+sha_test.o: sha_test.c utils.h
+chained_test.o: chained_test.c utils.h
+api_test.o: api_test.c gcm_ctr_vectors_test.h
+zuc_test.o: zuc_test.c zuc_test_vectors.h
+kasumi_test.o: kasumi_test.c kasumi_test_vectors.h
+snow3g_test.o: snow3g_test.c snow3g_test_vectors.h
+ipsec_xvalid.o: ipsec_xvalid.c misc.h
+direct_api_test.o: direct_api_test.c
+
+.PHONY: clean
+clean:
+ -rm -f $(OBJECTS) $(TEST_APP) $(XVALID_OBJECTS) $(XVALID_APP) $(ASM_OBJECTS)
+
+SOURCES_STYLE := $(foreach infile,$(SOURCES),-f $(infile))
+CHECKPATCH?=checkpatch.pl
+.PHONY: style
+style:
+ $(CHECKPATCH) --no-tree --no-signoff --emacs --no-color \
+--ignore CODE_INDENT,INITIALISED_STATIC,LEADING_SPACE,SPLIT_STRING,\
+UNSPECIFIED_INT,ARRAY_SIZE,BLOCK_COMMENT_STYLE,GLOBAL_INITIALISERS,\
+AVOID_EXTERNS,COMPLEX_MACRO,USE_FUNC,CONSTANT_COMPARISON,MISSING_SPACE $(SOURCES_STYLE)
diff --git a/src/spdk/intel-ipsec-mb/LibTestApp/aes_test.c b/src/spdk/intel-ipsec-mb/LibTestApp/aes_test.c
new file mode 100644
index 000000000..b3ac21c8c
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/LibTestApp/aes_test.c
@@ -0,0 +1,1117 @@
+/*****************************************************************************
+ Copyright (c) 2018, Intel Corporation
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*****************************************************************************/
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+
+#include <intel-ipsec-mb.h>
+
+#include "gcm_ctr_vectors_test.h"
+#include "utils.h"
+
+int aes_test(const enum arch_type arch, struct MB_MGR *mb_mgr);
+
+struct aes_vector {
+ const uint8_t *K; /* key */
+ const uint8_t *IV; /* initialization vector */
+ const uint8_t *P; /* plain text */
+ uint64_t Plen; /* plain text length */
+ const uint8_t *C; /* cipher text - same length as plain text */
+ uint32_t Klen; /* key length */
+};
+
+/*
+ * AES Test vectors from
+ * http://csrc.nist.gov/publications/fips/fips197/fips-197.pdf
+ */
+
+/* 128-bit */
+static const uint8_t K1[] = {
+ 0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6,
+ 0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c
+};
+static const uint8_t IV1[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f
+};
+static const uint8_t P1[] = {
+ 0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f, 0x96,
+ 0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a,
+ 0xae, 0x2d, 0x8a, 0x57, 0x1e, 0x03, 0xac, 0x9c,
+ 0x9e, 0xb7, 0x6f, 0xac, 0x45, 0xaf, 0x8e, 0x51,
+ 0x30, 0xc8, 0x1c, 0x46, 0xa3, 0x5c, 0xe4, 0x11,
+ 0xe5, 0xfb, 0xc1, 0x19, 0x1a, 0x0a, 0x52, 0xef,
+ 0xf6, 0x9f, 0x24, 0x45, 0xdf, 0x4f, 0x9b, 0x17,
+ 0xad, 0x2b, 0x41, 0x7b, 0xe6, 0x6c, 0x37, 0x10
+};
+static const uint8_t C1[] = {
+ 0x76, 0x49, 0xab, 0xac, 0x81, 0x19, 0xb2, 0x46,
+ 0xce, 0xe9, 0x8e, 0x9b, 0x12, 0xe9, 0x19, 0x7d,
+ 0x50, 0x86, 0xcb, 0x9b, 0x50, 0x72, 0x19, 0xee,
+ 0x95, 0xdb, 0x11, 0x3a, 0x91, 0x76, 0x78, 0xb2,
+ 0x73, 0xbe, 0xd6, 0xb8, 0xe3, 0xc1, 0x74, 0x3b,
+ 0x71, 0x16, 0xe6, 0x9e, 0x22, 0x22, 0x95, 0x16,
+ 0x3f, 0xf1, 0xca, 0xa1, 0x68, 0x1f, 0xac, 0x09,
+ 0x12, 0x0e, 0xca, 0x30, 0x75, 0x86, 0xe1, 0xa7
+};
+
+/* 192-bit */
+static const uint8_t K2[] = {
+ 0x8e, 0x73, 0xb0, 0xf7, 0xda, 0x0e, 0x64, 0x52,
+ 0xc8, 0x10, 0xf3, 0x2b, 0x80, 0x90, 0x79, 0xe5,
+ 0x62, 0xf8, 0xea, 0xd2, 0x52, 0x2c, 0x6b, 0x7b
+};
+static const uint8_t IV2[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f
+};
+static const uint8_t P2[] = {
+ 0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f, 0x96,
+ 0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a,
+ 0xae, 0x2d, 0x8a, 0x57, 0x1e, 0x03, 0xac, 0x9c,
+ 0x9e, 0xb7, 0x6f, 0xac, 0x45, 0xaf, 0x8e, 0x51,
+ 0x30, 0xc8, 0x1c, 0x46, 0xa3, 0x5c, 0xe4, 0x11,
+ 0xe5, 0xfb, 0xc1, 0x19, 0x1a, 0x0a, 0x52, 0xef,
+ 0xf6, 0x9f, 0x24, 0x45, 0xdf, 0x4f, 0x9b, 0x17,
+ 0xad, 0x2b, 0x41, 0x7b, 0xe6, 0x6c, 0x37, 0x10
+};
+static const uint8_t C2[] = {
+ 0x4f, 0x02, 0x1d, 0xb2, 0x43, 0xbc, 0x63, 0x3d,
+ 0x71, 0x78, 0x18, 0x3a, 0x9f, 0xa0, 0x71, 0xe8,
+ 0xb4, 0xd9, 0xad, 0xa9, 0xad, 0x7d, 0xed, 0xf4,
+ 0xe5, 0xe7, 0x38, 0x76, 0x3f, 0x69, 0x14, 0x5a,
+ 0x57, 0x1b, 0x24, 0x20, 0x12, 0xfb, 0x7a, 0xe0,
+ 0x7f, 0xa9, 0xba, 0xac, 0x3d, 0xf1, 0x02, 0xe0,
+ 0x08, 0xb0, 0xe2, 0x79, 0x88, 0x59, 0x88, 0x81,
+ 0xd9, 0x20, 0xa9, 0xe6, 0x4f, 0x56, 0x15, 0xcd
+};
+
+/* 256-bit */
+static const uint8_t K3[] = {
+ 0x60, 0x3d, 0xeb, 0x10, 0x15, 0xca, 0x71, 0xbe,
+ 0x2b, 0x73, 0xae, 0xf0, 0x85, 0x7d, 0x77, 0x81,
+ 0x1f, 0x35, 0x2c, 0x07, 0x3b, 0x61, 0x08, 0xd7,
+ 0x2d, 0x98, 0x10, 0xa3, 0x09, 0x14, 0xdf, 0xf4
+};
+static const uint8_t IV3[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f
+};
+static const uint8_t P3[] = {
+ 0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f, 0x96,
+ 0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a,
+ 0xae, 0x2d, 0x8a, 0x57, 0x1e, 0x03, 0xac, 0x9c,
+ 0x9e, 0xb7, 0x6f, 0xac, 0x45, 0xaf, 0x8e, 0x51,
+ 0x30, 0xc8, 0x1c, 0x46, 0xa3, 0x5c, 0xe4, 0x11,
+ 0xe5, 0xfb, 0xc1, 0x19, 0x1a, 0x0a, 0x52, 0xef,
+ 0xf6, 0x9f, 0x24, 0x45, 0xdf, 0x4f, 0x9b, 0x17,
+ 0xad, 0x2b, 0x41, 0x7b, 0xe6, 0x6c, 0x37, 0x10
+};
+static const uint8_t C3[] = {
+ 0xf5, 0x8c, 0x4c, 0x04, 0xd6, 0xe5, 0xf1, 0xba,
+ 0x77, 0x9e, 0xab, 0xfb, 0x5f, 0x7b, 0xfb, 0xd6,
+ 0x9c, 0xfc, 0x4e, 0x96, 0x7e, 0xdb, 0x80, 0x8d,
+ 0x67, 0x9f, 0x77, 0x7b, 0xc6, 0x70, 0x2c, 0x7d,
+ 0x39, 0xf2, 0x33, 0x69, 0xa9, 0xd9, 0xba, 0xcf,
+ 0xa5, 0x30, 0xe2, 0x63, 0x04, 0x23, 0x14, 0x61,
+ 0xb2, 0xeb, 0x05, 0xe2, 0xc3, 0x9b, 0xe9, 0xfc,
+ 0xda, 0x6c, 0x19, 0x07, 0x8c, 0x6a, 0x9d, 0x1b
+};
+
+/* Extra AES test vectors */
+
+/* 128-bit */
+static const uint8_t K4[] = {
+ 0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6,
+ 0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c
+};
+static const uint8_t IV4[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f
+};
+static const uint8_t P4[] = {
+ 0xf7, 0xcd, 0x12, 0xfb, 0x4f, 0x8e, 0x50, 0xab,
+ 0x35, 0x8e, 0x56, 0xf9, 0x83, 0x53, 0x9a, 0x1a,
+ 0xfc, 0x47, 0x3c, 0x96, 0x01, 0xfe, 0x01, 0x87,
+ 0xd5, 0xde, 0x46, 0x24, 0x5c, 0x62, 0x8f, 0xba,
+ 0xba, 0x91, 0x17, 0x8d, 0xba, 0x5a, 0x79, 0xb1,
+ 0x57, 0x05, 0x4d, 0x08, 0xba, 0x1f, 0x30, 0xd3,
+ 0x80, 0x40, 0xe9, 0x37, 0xb0, 0xd6, 0x34, 0x87,
+ 0x33, 0xdd, 0xc0, 0x5b, 0x2d, 0x58, 0x1d, 0x2a,
+ 0x7b, 0xb6, 0xe3, 0xd0, 0xc8, 0xa0, 0x7a, 0x69,
+ 0xc8, 0x5d, 0x10, 0xa2, 0xc3, 0x39, 0xca, 0xaf,
+ 0x40, 0xdc, 0xc7, 0xcb, 0xff, 0x18, 0x7d, 0x51,
+ 0x06, 0x28, 0x28, 0x1f, 0x3a, 0x9c, 0x18, 0x7d,
+ 0x5b, 0xb5, 0xe9, 0x20, 0xc2, 0xae, 0x17, 0x7f,
+ 0xd1, 0x65, 0x7a, 0x75, 0xcf, 0x21, 0xa0, 0x1e,
+ 0x17, 0x1b, 0xf7, 0xe8, 0x62, 0x5f, 0xaf, 0x34,
+ 0x7f, 0xd8, 0x18, 0x4a, 0x94, 0xf2, 0x33, 0x90
+};
+static const uint8_t C4[] = {
+ 0xf0, 0x8f, 0x91, 0x13, 0x11, 0x01, 0xdc, 0xbb,
+ 0xcd, 0xf9, 0x95, 0x92, 0xda, 0xbf, 0x2a, 0x86,
+ 0xea, 0x8d, 0xa6, 0x08, 0xc8, 0xb5, 0x65, 0x82,
+ 0x93, 0x43, 0xb7, 0x0e, 0x14, 0x36, 0xb4, 0xcf,
+ 0xd8, 0x11, 0xab, 0x21, 0x5b, 0x64, 0xb8, 0xc5,
+ 0xee, 0x27, 0x93, 0x66, 0x59, 0xd9, 0x1d, 0xc9,
+ 0x84, 0x9d, 0x03, 0xbd, 0xab, 0xce, 0x6a, 0x14,
+ 0x76, 0x73, 0x17, 0xe3, 0xb3, 0xe5, 0x70, 0xe8,
+ 0xa2, 0xa8, 0xce, 0xb0, 0xf6, 0xc4, 0xc5, 0xb5,
+ 0x8e, 0x22, 0xef, 0x33, 0xdf, 0x18, 0x42, 0x40,
+ 0x56, 0xc4, 0xb9, 0x7f, 0x60, 0x9e, 0x8b, 0x45,
+ 0xc1, 0xbf, 0xa7, 0xfa, 0x1b, 0x3e, 0x02, 0x5d,
+ 0xb3, 0x04, 0x93, 0x30, 0xf5, 0xff, 0x8e, 0xb6,
+ 0x0a, 0xfb, 0x41, 0xfe, 0x09, 0xa5, 0x90, 0xc7,
+ 0x22, 0xab, 0xaa, 0x22, 0x89, 0xd8, 0x3c, 0x4e,
+ 0x46, 0x18, 0x93, 0xbf, 0x1a, 0xce, 0x77, 0x59
+};
+
+/* 192-bit */
+static const uint8_t K5[] = {
+ 0x8e, 0x73, 0xb0, 0xf7, 0xda, 0x0e, 0x64, 0x52,
+ 0xc8, 0x10, 0xf3, 0x2b, 0x80, 0x90, 0x79, 0xe5,
+ 0x62, 0xf8, 0xea, 0xd2, 0x52, 0x2c, 0x6b, 0x7b
+};
+static const uint8_t IV5[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f
+};
+static const uint8_t P5[] = {
+ 0x19, 0x08, 0xa3, 0x58, 0x17, 0x14, 0x70, 0x5a,
+ 0xb8, 0xab, 0x4f, 0x5f, 0xa4, 0x25, 0x2b, 0xec,
+ 0xb6, 0x74, 0x0b, 0x9d, 0x56, 0x3b, 0xaf, 0xa3,
+ 0xa4, 0x2d, 0x3e, 0x1f, 0x18, 0x84, 0x3b, 0x4f,
+ 0x48, 0xd9, 0xa3, 0xfe, 0x59, 0x1e, 0x80, 0x67,
+ 0x44, 0x35, 0x26, 0x00, 0x78, 0xda, 0x68, 0xfa,
+ 0x61, 0x9c, 0xd8, 0x8e, 0x5c, 0xc1, 0xff, 0xeb,
+ 0x9c, 0x7d, 0xe7, 0xa9, 0x38, 0xeb, 0x66, 0xf8,
+ 0x6a, 0x46, 0x71, 0x51, 0x02, 0xba, 0x8d, 0x70,
+ 0x55, 0x5b, 0x60, 0xc6, 0x4c, 0xae, 0xda, 0x2e,
+ 0x17, 0xbb, 0x65, 0xef, 0x60, 0x85, 0x9e, 0x77,
+ 0xe5, 0x83, 0xef, 0x30, 0x08, 0x3a, 0xba, 0x80,
+ 0x28, 0xc0, 0xa1, 0x93, 0x4c, 0x2a, 0x0b, 0xe1,
+ 0xcb, 0xd0, 0xac, 0x72, 0x72, 0x1d, 0x96, 0x76,
+ 0x0e, 0xc0, 0xec, 0x7d, 0x84, 0xfd, 0xee, 0x08,
+ 0xa1, 0x11, 0x20, 0x0d, 0x59, 0x5c, 0x06, 0x3f,
+ 0xa3, 0xf1, 0xd7, 0xa3, 0x1d, 0x29, 0xc3, 0xaa,
+ 0x05, 0x2b, 0x74, 0x8c, 0x73, 0x60, 0x65, 0x43,
+ 0x76, 0xd4, 0xd7, 0x7b, 0x5f, 0x40, 0xf4, 0x77,
+ 0xe1, 0xcc, 0x85, 0x37, 0x1c, 0xd8, 0xda, 0x91,
+ 0xf0, 0x40, 0xb2, 0x43, 0x2d, 0x87, 0x51, 0xd0,
+ 0xce, 0x27, 0xa6, 0x60, 0xac, 0x67, 0xea, 0x8b,
+ 0xae, 0x46, 0x2e, 0x78, 0x06, 0x09, 0x8a, 0x82,
+ 0xb0, 0x0d, 0x57, 0x56, 0x82, 0xfe, 0x89, 0xd2
+};
+static const uint8_t C5[] = {
+ 0xfa, 0x88, 0xb3, 0x4e, 0x7f, 0x3e, 0x78, 0x4d,
+ 0xfd, 0xb3, 0x38, 0xee, 0xb0, 0xdd, 0x0d, 0xf5,
+ 0xeb, 0x24, 0xe6, 0x70, 0xd8, 0xac, 0xd7, 0xfa,
+ 0x41, 0x67, 0x2e, 0x2d, 0x7e, 0x9b, 0x26, 0xac,
+ 0xf1, 0x0f, 0x1f, 0x47, 0x6d, 0xff, 0x46, 0xd1,
+ 0x1a, 0xeb, 0xe9, 0x3c, 0x1b, 0x9d, 0x55, 0x86,
+ 0xde, 0xee, 0x3d, 0xd8, 0x12, 0x05, 0x12, 0x9d,
+ 0xff, 0x23, 0x97, 0x57, 0xb0, 0xdc, 0x7b, 0x7a,
+ 0xdf, 0xba, 0x7f, 0x69, 0x85, 0xdf, 0xa9, 0xfd,
+ 0x3e, 0xa7, 0x36, 0x26, 0x30, 0xdd, 0x07, 0x0f,
+ 0x89, 0x0b, 0x27, 0x9c, 0x23, 0xa1, 0xfa, 0x7d,
+ 0x4e, 0x64, 0x50, 0x07, 0x86, 0x13, 0x98, 0xee,
+ 0x05, 0xc6, 0x6c, 0xd9, 0xd1, 0xe8, 0xb2, 0x6b,
+ 0xe6, 0x73, 0x06, 0x39, 0xbb, 0x72, 0x74, 0xa3,
+ 0xc2, 0x1a, 0x40, 0xcd, 0xec, 0x40, 0x8f, 0x44,
+ 0xf8, 0x86, 0xff, 0x7e, 0xb7, 0xea, 0xda, 0xb0,
+ 0x5c, 0x25, 0xdf, 0x3f, 0x54, 0xda, 0xca, 0xea,
+ 0x76, 0xe5, 0xec, 0xbb, 0x21, 0xd3, 0x86, 0x8d,
+ 0x8a, 0x57, 0xf0, 0x31, 0x9f, 0x56, 0xa3, 0x1b,
+ 0xf9, 0x55, 0xe6, 0xa6, 0xde, 0xb7, 0x74, 0xcc,
+ 0x2b, 0x17, 0x9a, 0xe3, 0x1b, 0x74, 0x0d, 0x2b,
+ 0x99, 0xcd, 0x64, 0xe1, 0x7b, 0x7e, 0x1c, 0xcd,
+ 0x9b, 0x23, 0x02, 0x7d, 0x86, 0x52, 0xfd, 0x14,
+ 0x2d, 0xbb, 0x75, 0x3d, 0xa3, 0x3b, 0xc1, 0xe0
+};
+
+/* 256-bit */
+static const uint8_t K6[] = {
+ 0x60, 0x3d, 0xeb, 0x10, 0x15, 0xca, 0x71, 0xbe,
+ 0x2b, 0x73, 0xae, 0xf0, 0x85, 0x7d, 0x77, 0x81,
+ 0x1f, 0x35, 0x2c, 0x07, 0x3b, 0x61, 0x08, 0xd7,
+ 0x2d, 0x98, 0x10, 0xa3, 0x09, 0x14, 0xdf, 0xf4
+};
+static const uint8_t IV6[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f
+};
+static const uint8_t P6[] = {
+ 0x0b, 0xe5, 0x48, 0xa6, 0xa1, 0xbc, 0xac, 0x81,
+ 0x80, 0x06, 0x5f, 0xae, 0x1e, 0x3f, 0x55, 0x73,
+ 0x6d, 0x36, 0x7f, 0x57, 0x3d, 0xa4, 0x4a, 0x6b,
+ 0xb6, 0x65, 0x2f, 0xb7, 0xe8, 0x85, 0x47, 0xe2,
+ 0x41, 0x42, 0xc2, 0x4e, 0x58, 0xf1, 0xde, 0x42,
+ 0x9f, 0x15, 0x4c, 0xaf, 0xea, 0x04, 0x20, 0xd0,
+ 0x1a, 0x19, 0x36, 0x74, 0x71, 0x12, 0x72, 0x1b,
+ 0xdb, 0x18, 0xf9, 0x0b, 0xb3, 0xf3, 0x63, 0xd4,
+ 0x62, 0x52, 0x8b, 0x63, 0x0f, 0x6b, 0x4d, 0xb9,
+ 0x70, 0xd6, 0x91, 0xa0, 0x43, 0x3f, 0x46, 0xfe,
+ 0x43, 0xbb, 0xb8, 0xdc, 0x5e, 0xdb, 0xd4, 0x1f,
+ 0xf0, 0x17, 0x94, 0x25, 0xee, 0x55, 0x67, 0xbf,
+ 0x4d, 0xda, 0x9d, 0xe7, 0x4b, 0xc6, 0x7a, 0xcf,
+ 0x8f, 0xd7, 0xbb, 0x29, 0x6e, 0x26, 0xd4, 0xc3,
+ 0x08, 0x9b, 0x67, 0x15, 0xe9, 0x2d, 0x9f, 0x2d,
+ 0x3c, 0x76, 0x26, 0xd3, 0xda, 0xfe, 0x6e, 0x73,
+ 0x9d, 0x09, 0x60, 0x4b, 0x35, 0x60, 0xdb, 0x77,
+ 0xb6, 0xc0, 0x45, 0x91, 0xf9, 0x14, 0x8a, 0x7a,
+ 0xdd, 0xe2, 0xf1, 0xdf, 0x8f, 0x12, 0x4f, 0xd7,
+ 0x75, 0xd6, 0x9a, 0x17, 0xda, 0x76, 0x88, 0xf0,
+ 0xfa, 0x44, 0x27, 0xbe, 0x61, 0xaf, 0x55, 0x9f,
+ 0xc7, 0xf0, 0x76, 0x77, 0xde, 0xca, 0xd1, 0x47,
+ 0x51, 0x55, 0xb1, 0xbf, 0xfa, 0x1e, 0xca, 0x28,
+ 0x17, 0x70, 0xf3, 0xb5, 0xd4, 0x32, 0x47, 0x04,
+ 0xe0, 0x92, 0xd8, 0xa5, 0x03, 0x69, 0x46, 0x99,
+ 0x7f, 0x1e, 0x3f, 0xb2, 0x93, 0x36, 0xa3, 0x88,
+ 0x75, 0x07, 0x68, 0xb8, 0x33, 0xce, 0x17, 0x3f,
+ 0x5c, 0xb7, 0x1e, 0x93, 0x38, 0xc5, 0x1d, 0x79,
+ 0x86, 0x7c, 0x9d, 0x9e, 0x2f, 0x69, 0x38, 0x0f,
+ 0x97, 0x5c, 0x67, 0xbf, 0xa0, 0x8d, 0x37, 0x0b,
+ 0xd3, 0xb1, 0x04, 0x87, 0x1d, 0x74, 0xfe, 0x30,
+ 0xfb, 0xd0, 0x22, 0x92, 0xf9, 0xf3, 0x23, 0xc9
+};
+static const uint8_t C6[] = {
+ 0x16, 0x60, 0x36, 0xd9, 0xcf, 0xe8, 0xd6, 0x07,
+ 0x81, 0xdf, 0x28, 0x0a, 0x40, 0x44, 0x61, 0x45,
+ 0x83, 0x28, 0xd5, 0x1b, 0xf7, 0x55, 0x54, 0x35,
+ 0xd3, 0x43, 0x73, 0x0e, 0x7a, 0xc3, 0x83, 0xb1,
+ 0xc9, 0xbd, 0x22, 0x70, 0xf0, 0xde, 0x8f, 0x92,
+ 0x5e, 0xe1, 0x56, 0xd3, 0x4d, 0x01, 0x64, 0xfa,
+ 0xe9, 0x83, 0x35, 0x60, 0x80, 0x70, 0xf5, 0xb5,
+ 0x13, 0x76, 0xd3, 0x88, 0xbb, 0x7f, 0x2d, 0x0a,
+ 0x31, 0x04, 0xb4, 0x77, 0x47, 0x91, 0x3f, 0xe4,
+ 0xa9, 0x9a, 0x19, 0xbe, 0xfb, 0xd6, 0x70, 0xae,
+ 0xb1, 0xea, 0xd5, 0x03, 0xd6, 0xb5, 0xca, 0x76,
+ 0x5e, 0x0d, 0x21, 0x31, 0x87, 0xf3, 0xb2, 0x2e,
+ 0xe2, 0xbc, 0x71, 0xb5, 0x8b, 0x7e, 0xa6, 0x09,
+ 0x78, 0x6e, 0x76, 0xe6, 0x61, 0xdf, 0x86, 0xe6,
+ 0x8d, 0x2f, 0x12, 0x43, 0x99, 0xf9, 0xf1, 0x86,
+ 0xf1, 0x55, 0xfd, 0x35, 0xcd, 0xe8, 0x92, 0x4e,
+ 0x87, 0x33, 0x77, 0x62, 0x64, 0xaa, 0x60, 0x07,
+ 0x33, 0x08, 0x45, 0xf5, 0xd6, 0xb0, 0x9c, 0xf4,
+ 0xba, 0xda, 0x17, 0x74, 0x74, 0x23, 0x54, 0x9c,
+ 0x7e, 0x86, 0x57, 0x83, 0x3d, 0xda, 0xc3, 0xe1,
+ 0x02, 0x90, 0xe3, 0x69, 0x80, 0x7a, 0x5b, 0x47,
+ 0xf5, 0xea, 0x83, 0x1a, 0xc6, 0x1a, 0xaa, 0x53,
+ 0x66, 0xfe, 0xe6, 0xbd, 0x72, 0x9b, 0x8b, 0x96,
+ 0xdb, 0x94, 0xa9, 0x5b, 0xc3, 0x40, 0x6a, 0xcd,
+ 0xf4, 0x78, 0x14, 0x29, 0x7b, 0x8f, 0x26, 0xb0,
+ 0x89, 0xbd, 0x03, 0x55, 0x33, 0x46, 0x4c, 0x96,
+ 0x2a, 0x58, 0x69, 0x7c, 0x9b, 0xdf, 0xba, 0xb8,
+ 0x75, 0x5b, 0xbc, 0x4b, 0x19, 0xd3, 0x9d, 0xee,
+ 0xfd, 0x17, 0x2f, 0x14, 0xea, 0xd9, 0x32, 0xd2,
+ 0xaa, 0xaf, 0x09, 0xce, 0x81, 0xca, 0x7f, 0xc1,
+ 0x50, 0x5d, 0x13, 0x3a, 0x91, 0x27, 0x16, 0x97,
+ 0x57, 0x1f, 0x5d, 0xc5, 0x2e, 0x56, 0xc2, 0xca
+};
+
+/* 128-bit */
+static const uint8_t K7[] = {
+ 0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6,
+ 0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c
+};
+static const uint8_t IV7[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f
+};
+static const uint8_t P7[] = {
+ 0xdd, 0x14, 0xde, 0x30, 0xe0, 0xfd, 0x7b, 0x2a,
+ 0x94, 0x8e, 0x28, 0xa0, 0xf6, 0x93, 0x6e, 0xf5,
+ 0x92, 0x65, 0x1d, 0x5e, 0x78, 0x2a, 0x9d, 0x39,
+ 0xfc, 0xb8, 0x6d, 0x8b, 0xa5, 0xf4, 0x4b, 0x21,
+ 0xdd, 0x4e, 0xe9, 0xeb, 0xd7, 0xa7, 0xa1, 0x59,
+ 0xdc, 0x4c, 0x5e, 0xcc, 0x83, 0xab, 0xd3, 0x45,
+ 0xfe, 0x2c, 0x73, 0x23, 0xea, 0x45, 0xcb, 0x0c,
+ 0x12, 0x67, 0x28, 0xcd, 0xef, 0x4e, 0xca, 0xe2,
+ 0x1d, 0x92, 0x82, 0xd8, 0x0f, 0xa9, 0x36, 0x23,
+ 0x6d, 0x38, 0x68, 0xac, 0xa0, 0xeb, 0xdc, 0xcc,
+ 0xdf, 0xb8, 0x3a, 0x53, 0x04, 0x1a, 0x55, 0x27,
+ 0x8e, 0x22, 0x86, 0x8c, 0xbd, 0xdc, 0x6b, 0x12,
+ 0x9c, 0x69, 0xd2, 0x7a, 0x4b, 0x52, 0x5d, 0x76,
+ 0x34, 0xb9, 0x5e, 0x30, 0x0a, 0x8d, 0x1e, 0xf1,
+ 0x27, 0xda, 0x5b, 0xb9, 0x5e, 0xbf, 0x65, 0x34,
+ 0x00, 0xb6, 0xd2, 0xb0, 0x89, 0x12, 0xb6, 0x35,
+ 0xae, 0x27, 0x7f, 0x11, 0xe9, 0xf9, 0x1c, 0x71,
+ 0xc9, 0x50, 0xfe, 0xd4, 0x76, 0x50, 0x95, 0xf7,
+ 0xe1, 0x1c, 0x14, 0xcd, 0x67, 0x0f, 0xf0, 0x6d,
+ 0xa2, 0x93, 0x7b, 0x2c, 0x8d, 0x83, 0x5c, 0xff,
+ 0xe4, 0x95, 0xf3, 0xa1, 0xfd, 0x00, 0x77, 0x68,
+ 0x41, 0xb4, 0xfb, 0x81, 0xf4, 0x61, 0x1a, 0x84,
+ 0x5a, 0x53, 0xc3, 0xdc, 0xba, 0x0d, 0x67, 0x2e,
+ 0xcf, 0xf2, 0x30, 0xf5, 0x1d, 0xe9, 0xc4, 0x2c,
+ 0xac, 0x1f, 0xa7, 0x9c, 0x64, 0xfd, 0x45, 0x30,
+ 0x1b, 0xa1, 0x3b, 0x3d, 0xc7, 0xf5, 0xf9, 0xbb,
+ 0xba, 0x99, 0xa4, 0x12, 0x6e, 0x4e, 0xea, 0x0b,
+ 0x29, 0x7f, 0xcd, 0x84, 0x64, 0x50, 0x40, 0xb7,
+ 0x6a, 0x24, 0x29, 0xa4, 0xa7, 0xa1, 0xef, 0xa9,
+ 0xcf, 0xdf, 0x09, 0xff, 0xaa, 0x17, 0x5d, 0x82,
+ 0x74, 0xf5, 0xae, 0xd0, 0xe9, 0xec, 0xad, 0x5e,
+ 0xa7, 0x84, 0xda, 0xe7, 0x33, 0x58, 0x7e, 0x00,
+ 0x45, 0x5f, 0xbb, 0x15, 0xa3, 0x65, 0x0e, 0xf5,
+ 0x7e, 0x27, 0xe7, 0x04, 0x52, 0x58, 0x81, 0xd0,
+ 0xee, 0x8f, 0xaf, 0xe2, 0x3c, 0xbe, 0x08, 0x97,
+ 0x8a, 0x97, 0x12, 0xb0, 0x09, 0xfe, 0xa5, 0xeb,
+ 0xd1, 0x9c, 0x30, 0xe8, 0x9a, 0x3f, 0xe0, 0x38,
+ 0x34, 0x2b, 0xad, 0xb7, 0xc4, 0xda, 0x54, 0xab,
+ 0x97, 0x9c, 0x46, 0x2b, 0x2c, 0x0b, 0xb3, 0x49,
+ 0xcd, 0x9d, 0x32, 0x38, 0x3c, 0x1a, 0x49, 0xdc,
+ 0x2f, 0xe7, 0xcd, 0x8a, 0xb0, 0x76, 0xcf, 0x30,
+ 0xea, 0x0b, 0xb0, 0xb7, 0x63, 0xed, 0xb2, 0x8c,
+ 0xc9, 0x2c, 0xb7, 0x75, 0xa8, 0xf6, 0x63, 0xb6,
+ 0xcd, 0xb5, 0x63, 0xfb, 0x5f, 0x89, 0xae, 0x3d,
+ 0x33, 0x73, 0xaf, 0xde, 0xcb, 0x37, 0x0a, 0x50,
+ 0x6f, 0xae, 0xf3, 0xa6, 0x79, 0x85, 0xdd, 0xc5,
+ 0x24, 0xc5, 0x29, 0x23, 0x64, 0xef, 0x43, 0xd7,
+ 0xc4, 0xab, 0xd8, 0xb0, 0x84, 0x26, 0x6b, 0xe8,
+ 0xb1, 0x5d, 0xb5, 0x69, 0xfb, 0x97, 0x0e, 0x20,
+ 0xb3, 0xc1, 0x60, 0xad, 0x1a, 0xd2, 0xd6, 0x3a,
+ 0x73, 0x08, 0xf0, 0x47, 0x5f, 0xcf, 0x15, 0xf7,
+ 0x7b, 0xf3, 0x69, 0x08, 0x5a, 0x6b, 0x9f, 0xc7,
+ 0x12, 0xa1, 0xf0, 0xfb, 0x91, 0xc9, 0x07, 0x61,
+ 0x21, 0xa0, 0x30, 0x4c, 0x16, 0x81, 0xcd, 0x3c,
+ 0x61, 0xe8, 0x96, 0x91, 0x30, 0xdd, 0x0c, 0x0e,
+ 0x0b, 0xa1, 0x33, 0x95, 0x67, 0x99, 0xd6, 0x1e,
+ 0x1a, 0xb3, 0x12, 0xfd, 0xad, 0x46, 0x48, 0x87,
+ 0x5e, 0xe8, 0xd4, 0xf5, 0xac, 0xdf, 0xa7, 0x37,
+ 0xb8, 0xa1, 0x62, 0x8c, 0xb8, 0xb6, 0xb0, 0x69,
+ 0x63, 0x29, 0x60, 0x64, 0x26, 0xc3, 0xf8, 0x18,
+ 0x8e, 0x46, 0xa0, 0xc5, 0x45, 0x5c, 0x08, 0x2a,
+ 0xed, 0x29, 0x84, 0x11, 0xea, 0x59, 0xc0, 0x16,
+ 0xe2, 0x04, 0x30, 0x63, 0x22, 0x87, 0xb6, 0xc7,
+ 0x81, 0xa6, 0x58, 0xc0, 0xb2, 0xb0, 0x7d, 0xbc,
+ 0x16, 0x44, 0x6e, 0x5d, 0x6d, 0xce, 0x2a, 0xe0,
+ 0x20, 0x69, 0x35, 0xa1, 0x5d, 0x17, 0x48, 0x55,
+ 0x88, 0xfe, 0xde, 0x34, 0xe7, 0x18, 0xbf, 0x7e,
+ 0x0a, 0x1c, 0x32, 0x88, 0xab, 0xde, 0xe1, 0x02,
+ 0x61, 0x09, 0x58, 0x96, 0xef, 0x16, 0x73, 0xac,
+ 0xc0, 0x5c, 0x15, 0xca, 0x9b, 0xea, 0x0e, 0x05,
+ 0x97, 0x88, 0x09, 0xc5, 0xd0, 0x95, 0x90, 0xae,
+ 0xa5, 0xb5, 0x28, 0xc6, 0x5a, 0x7b, 0xb3, 0xcc,
+ 0xae, 0x57, 0x71, 0x83, 0x56, 0x57, 0xca, 0xe8,
+ 0x8b, 0x21, 0x0c, 0x37, 0x1d, 0xde, 0x85, 0xe2,
+ 0x1b, 0xa2, 0x38, 0xa0, 0xc5, 0xc7, 0x98, 0x7b,
+ 0xf9, 0x5e, 0x6a, 0x68, 0xb3, 0xed, 0x49, 0x5e,
+ 0x46, 0xb9, 0xc9, 0xf6, 0x34, 0xa6, 0x0e, 0xac,
+ 0x90, 0x72, 0xcf, 0xf8, 0x5b, 0x48, 0x13, 0x40,
+ 0x7a, 0xce, 0xfd, 0x3c, 0x16, 0xff, 0xb5, 0xea,
+ 0xb2, 0x56, 0x47, 0xcc, 0x9f, 0xbc, 0xae, 0x4a,
+ 0xc8, 0xa5, 0x59, 0x57, 0x01, 0xd7, 0x9f, 0xd7,
+ 0xbf, 0x13, 0xb1, 0xbf, 0xb7, 0x9a, 0xa0, 0xa1,
+ 0xc6, 0x66, 0x61, 0x96, 0xf2, 0xcd, 0x8c, 0xcb,
+ 0x3c, 0x67, 0xb5, 0xed, 0xb7, 0xa2, 0x54, 0x84,
+ 0x3c, 0xcb, 0x7e, 0xb3, 0x97, 0x05, 0xcb, 0x8f,
+ 0xa9, 0xc6, 0x3c, 0xa2, 0xbd, 0xbf, 0x3a, 0xb8,
+ 0x92, 0x08, 0x01, 0xea, 0xfd, 0x55, 0x2f, 0x27,
+ 0x2a, 0x82, 0x38, 0x26, 0x1d, 0x81, 0x19, 0x33,
+ 0x75, 0x3c, 0xa2, 0x13, 0x1e, 0x58, 0x9f, 0x0b,
+ 0x08, 0x5d, 0x7a, 0x2c, 0x9a, 0xd1, 0xa5, 0x4c,
+ 0x41, 0xb4, 0x1d, 0xf8, 0x42, 0x08, 0x87, 0xdd,
+ 0x8e, 0xc9, 0x05, 0xd2, 0x8c, 0xba, 0x93, 0x28,
+ 0xbe, 0x4a, 0x14, 0x13, 0x2a, 0x58, 0xf0, 0x1c,
+ 0xac, 0xc1, 0xc4, 0x49, 0xbc, 0xe1, 0xda, 0xb6,
+ 0x2d, 0x06, 0x98, 0x32, 0xea, 0xa3, 0x89, 0x11,
+ 0xca, 0x5f, 0x3e, 0xda, 0x24, 0xe2, 0xdb, 0x1e,
+ 0xca, 0xf3, 0xc0, 0xc7, 0x64, 0xee, 0x4b, 0x3d,
+ 0xa2, 0xee, 0x69, 0xb0, 0x3f, 0x2c, 0xd5, 0x49,
+ 0xba, 0x2d, 0x45, 0x7d, 0xdd, 0xb0, 0x0d, 0xc5,
+ 0xe0, 0x57, 0x95, 0xbe, 0xf8, 0x4a, 0x11, 0x46,
+ 0x4c, 0xbb, 0xdf, 0xa8, 0x5a, 0xf9, 0xff, 0x0e,
+ 0x31, 0xa9, 0x50, 0x5d, 0xc4, 0xb3, 0x3d, 0x09,
+ 0x46, 0x33, 0x39, 0x31, 0xd5, 0xb3, 0xe5, 0x91,
+ 0xcf, 0xca, 0x8a, 0xe0, 0xc2, 0x8e, 0xea, 0xbe,
+ 0x54, 0x64, 0x78, 0x0c, 0x25, 0x1c, 0x17, 0xbc,
+ 0x49, 0xf9, 0xc0, 0x30, 0x5f, 0x08, 0x04, 0x9d,
+ 0xb5, 0xe4, 0xeb, 0x9e, 0xe5, 0x1e, 0x6d, 0xbc,
+ 0x7b, 0xe7, 0xf0, 0xd1, 0xa0, 0x01, 0x18, 0x51,
+ 0x4f, 0x64, 0xc3, 0x9c, 0x70, 0x25, 0x4f, 0xed,
+ 0xc7, 0xbc, 0x19, 0x00, 0x09, 0x22, 0x97, 0x5d,
+ 0x6f, 0xe4, 0x47, 0x98, 0x05, 0xcd, 0xcc, 0xde,
+ 0xd5, 0xe3, 0xaf, 0xa3, 0xde, 0x69, 0x99, 0x2a,
+ 0xd1, 0x28, 0x4d, 0x7c, 0x89, 0xa0, 0xdb, 0xae,
+ 0xf9, 0xf1, 0x4a, 0x46, 0xdf, 0xbe, 0x1d, 0x37,
+ 0xf2, 0xd5, 0x36, 0x4a, 0x54, 0xe8, 0xc4, 0xfb,
+ 0x57, 0x77, 0x09, 0x05, 0x31, 0x99, 0xaf, 0x9a,
+ 0x17, 0xd1, 0x20, 0x93, 0x31, 0x89, 0xff, 0xed,
+ 0x0f, 0xf8, 0xed, 0xb3, 0xcf, 0x4c, 0x9a, 0x74,
+ 0xbb, 0x00, 0x36, 0x41, 0xd1, 0x13, 0x68, 0x73,
+ 0x78, 0x63, 0x42, 0xdd, 0x99, 0x15, 0x9a, 0xf4,
+ 0xe1, 0xad, 0x6d, 0xf6, 0x5e, 0xca, 0x20, 0x24,
+ 0xd7, 0x9d, 0x2f, 0x58, 0x97, 0xf7, 0xde, 0x31,
+ 0x51, 0xa3, 0x1c, 0xe2, 0x66, 0x24, 0x4b, 0xa1,
+ 0x56, 0x02, 0x32, 0xf4, 0x89, 0xf3, 0x86, 0x9a,
+ 0x85, 0xda, 0x95, 0xa8, 0x7f, 0x6a, 0x77, 0x02,
+ 0x3a, 0xba, 0xe0, 0xbe, 0x34, 0x5c, 0x9a, 0x1a
+};
+static const uint8_t C7[] = {
+ 0xfb, 0x04, 0xe9, 0x1c, 0xc3, 0x56, 0x9c, 0xb0,
+ 0xba, 0xc4, 0x66, 0xa3, 0xba, 0x45, 0xac, 0xb8,
+ 0xd6, 0xd8, 0x95, 0x6c, 0x28, 0xd1, 0x51, 0x6d,
+ 0xaa, 0x8c, 0x2e, 0xf1, 0x34, 0xab, 0xeb, 0x66,
+ 0xf9, 0x4e, 0x24, 0x61, 0x1d, 0x16, 0x99, 0xd5,
+ 0x10, 0x30, 0x42, 0x31, 0x68, 0x98, 0xc5, 0xdb,
+ 0x0c, 0x9f, 0x0a, 0x1a, 0x65, 0x7d, 0x03, 0x50,
+ 0xb8, 0x00, 0x0c, 0x40, 0x93, 0x6b, 0xa9, 0x1f,
+ 0x28, 0x87, 0x01, 0x3c, 0xe9, 0xeb, 0x0e, 0x10,
+ 0x0f, 0x35, 0xbe, 0x9c, 0x6a, 0xfa, 0x00, 0xac,
+ 0x25, 0x77, 0x5d, 0x49, 0xde, 0xdc, 0xa1, 0x62,
+ 0xa7, 0xb7, 0x30, 0x75, 0x36, 0x32, 0x31, 0xab,
+ 0x40, 0xbb, 0x96, 0xba, 0x46, 0x32, 0x53, 0x8c,
+ 0x35, 0x7d, 0xa4, 0x21, 0xfa, 0x6a, 0xeb, 0x68,
+ 0xe4, 0xa4, 0xbf, 0xac, 0x24, 0xbf, 0x59, 0x8e,
+ 0x98, 0xa6, 0x53, 0xca, 0xe3, 0x69, 0xdd, 0x47,
+ 0x6e, 0x18, 0x94, 0xf0, 0x40, 0x03, 0x59, 0x93,
+ 0x96, 0xde, 0x57, 0x96, 0x00, 0xaf, 0x56, 0x88,
+ 0xb5, 0x0d, 0x55, 0xbc, 0x24, 0xac, 0x11, 0xff,
+ 0x4d, 0x72, 0x82, 0xda, 0xf2, 0xee, 0xbc, 0x56,
+ 0x8a, 0x17, 0x24, 0x6b, 0x88, 0x7e, 0x9c, 0xdb,
+ 0x07, 0xdd, 0xd4, 0x12, 0x15, 0x4d, 0x9e, 0x1a,
+ 0x57, 0x12, 0x8d, 0x84, 0xdb, 0x17, 0x1a, 0x2f,
+ 0x7a, 0x3d, 0x4c, 0xbb, 0xc2, 0xb8, 0x73, 0xad,
+ 0x39, 0x13, 0xf8, 0x2e, 0xfc, 0xf9, 0x3b, 0x64,
+ 0x06, 0x9e, 0x78, 0x73, 0xff, 0x2b, 0x8c, 0x1b,
+ 0x4e, 0x21, 0x3e, 0x05, 0x4d, 0xee, 0x9d, 0x39,
+ 0x7c, 0x61, 0xe1, 0x18, 0x98, 0xe3, 0x50, 0x25,
+ 0xf9, 0x48, 0x5e, 0x66, 0x9d, 0x41, 0xa2, 0x08,
+ 0x3f, 0x88, 0x28, 0x03, 0x68, 0x8a, 0xfc, 0xf4,
+ 0x7a, 0xf5, 0xcb, 0x7d, 0xeb, 0x9e, 0xb2, 0x22,
+ 0xbc, 0x1a, 0x94, 0x51, 0xa4, 0x7b, 0x9a, 0x2c,
+ 0xb3, 0x67, 0x60, 0x94, 0x06, 0x31, 0x80, 0xa0,
+ 0xf7, 0x7f, 0xe8, 0x47, 0x00, 0xab, 0x0b, 0x56,
+ 0x09, 0xa6, 0xa4, 0x77, 0x18, 0xa5, 0x30, 0x81,
+ 0xd9, 0x7e, 0x2d, 0x6a, 0x77, 0x34, 0x4e, 0xca,
+ 0x72, 0x0d, 0xb3, 0x31, 0x87, 0x9c, 0x98, 0xc9,
+ 0x48, 0x4c, 0xa0, 0x8d, 0xed, 0x9d, 0x7b, 0x9e,
+ 0xb4, 0xfe, 0x05, 0x7f, 0x93, 0x56, 0xa8, 0x2b,
+ 0x07, 0x0b, 0xc5, 0x52, 0x96, 0xd5, 0x6a, 0xe4,
+ 0xf6, 0x38, 0x79, 0x67, 0xd6, 0xfe, 0x8c, 0x0b,
+ 0x33, 0xe0, 0xe8, 0x15, 0xe7, 0x70, 0x3e, 0xca,
+ 0xa7, 0x6a, 0xbb, 0x81, 0xf7, 0x94, 0x7f, 0x17,
+ 0xd6, 0x66, 0x96, 0xbf, 0x1c, 0x8f, 0x71, 0xb6,
+ 0x9c, 0x5c, 0xe2, 0x61, 0x47, 0x7b, 0x6e, 0xa2,
+ 0x87, 0x17, 0x55, 0x08, 0x1d, 0x10, 0xb1, 0x34,
+ 0x3c, 0x21, 0x16, 0x70, 0x3d, 0x0d, 0x93, 0x68,
+ 0x5e, 0x46, 0x22, 0x45, 0x00, 0xdb, 0xf0, 0x9b,
+ 0xa1, 0x1f, 0xc7, 0x5b, 0x17, 0xe1, 0x95, 0x07,
+ 0x57, 0xe5, 0xae, 0x5a, 0x6d, 0x10, 0x83, 0xc4,
+ 0x1c, 0x0d, 0xf5, 0x73, 0xd3, 0xeb, 0x52, 0x29,
+ 0x33, 0x4f, 0xb0, 0xe7, 0x5c, 0xf6, 0xdb, 0xb5,
+ 0x21, 0x6f, 0x35, 0x9a, 0x43, 0x9c, 0x86, 0xeb,
+ 0x11, 0x95, 0x91, 0x10, 0xa3, 0xbd, 0xe2, 0xe4,
+ 0x69, 0xac, 0xb1, 0x50, 0xd4, 0xf1, 0x68, 0xe6,
+ 0x65, 0xb1, 0x96, 0xda, 0xfb, 0xf0, 0x13, 0x06,
+ 0xa4, 0x63, 0xb6, 0xdb, 0x79, 0x2b, 0x3a, 0xc9,
+ 0x98, 0x7a, 0x2c, 0x37, 0xf9, 0x4f, 0xa6, 0x93,
+ 0x9d, 0x3b, 0xb3, 0x06, 0x63, 0xe2, 0xf6, 0x92,
+ 0x07, 0xe2, 0x82, 0xfd, 0xb5, 0x08, 0x9b, 0x79,
+ 0x79, 0x78, 0x3b, 0xee, 0x28, 0x54, 0x81, 0x5d,
+ 0x7a, 0xa3, 0x81, 0x93, 0xa9, 0xc2, 0x59, 0x3f,
+ 0xb3, 0xc5, 0xcd, 0x89, 0xa2, 0x31, 0xc2, 0xf0,
+ 0x84, 0x8c, 0x2e, 0x0a, 0xa4, 0x2f, 0x9c, 0xf2,
+ 0x54, 0x56, 0xec, 0x75, 0x39, 0xd7, 0x92, 0x53,
+ 0x60, 0x58, 0xf8, 0x81, 0x84, 0x0c, 0x99, 0xc4,
+ 0x6f, 0x88, 0xf8, 0x6e, 0x6d, 0xd6, 0x08, 0x47,
+ 0x6a, 0xa4, 0x79, 0xbc, 0xeb, 0x1e, 0x67, 0xd7,
+ 0xdf, 0x0c, 0x52, 0xdc, 0x74, 0x40, 0x39, 0x17,
+ 0xdc, 0xd9, 0x13, 0x72, 0x58, 0xc5, 0x30, 0xda,
+ 0xad, 0x76, 0xa9, 0x9a, 0xad, 0xed, 0xfb, 0x4b,
+ 0x4e, 0x60, 0xde, 0xc9, 0x18, 0xa0, 0x77, 0x50,
+ 0x54, 0xfa, 0x00, 0xd6, 0xa9, 0x52, 0xfe, 0x67,
+ 0x3e, 0xe9, 0xdf, 0x46, 0x14, 0x6c, 0xfb, 0x50,
+ 0xd6, 0x21, 0xf6, 0xe5, 0xf7, 0x99, 0x38, 0xad,
+ 0x65, 0xa5, 0x6c, 0x4e, 0x21, 0x31, 0x77, 0x7a,
+ 0xdc, 0x6f, 0x5d, 0xb5, 0x7f, 0x63, 0xf4, 0xa8,
+ 0xee, 0x0d, 0x68, 0x10, 0xde, 0x5b, 0x45, 0x4b,
+ 0x03, 0xd8, 0x55, 0x04, 0x15, 0x6e, 0xc6, 0xb7,
+ 0xc1, 0x30, 0x29, 0x6a, 0x6c, 0x26, 0xe8, 0x41,
+ 0x53, 0xb9, 0x82, 0x67, 0x5b, 0xfe, 0xa9, 0x5f,
+ 0x0b, 0xf8, 0x38, 0xf8, 0xbe, 0x3c, 0x26, 0xf2,
+ 0x83, 0x94, 0xd6, 0x45, 0x64, 0x1f, 0x17, 0x20,
+ 0x4d, 0xae, 0x4a, 0x15, 0x27, 0x7d, 0x7f, 0x3b,
+ 0x71, 0x3c, 0x3a, 0xc3, 0x56, 0x1b, 0xe5, 0xbd,
+ 0x34, 0x4b, 0x3f, 0x88, 0x3e, 0xcc, 0x98, 0xb5,
+ 0x5e, 0x8b, 0xab, 0x18, 0x98, 0xf0, 0xef, 0x1b,
+ 0x78, 0x15, 0xb7, 0x4a, 0x1f, 0xe3, 0x45, 0xc7,
+ 0x31, 0x34, 0x5a, 0x7b, 0x6e, 0xb8, 0xea, 0xfe,
+ 0xaf, 0x34, 0x32, 0x45, 0xfa, 0x3e, 0x75, 0x8a,
+ 0x30, 0x3f, 0xed, 0xe5, 0xfe, 0x66, 0x15, 0xc7,
+ 0xbe, 0xd9, 0xc7, 0x27, 0x3c, 0x26, 0x66, 0x2d,
+ 0xa1, 0x0b, 0xb9, 0x1e, 0x17, 0x44, 0xd3, 0x4b,
+ 0xe6, 0x30, 0x85, 0x9e, 0x29, 0x3d, 0xa9, 0x35,
+ 0xca, 0x61, 0xea, 0x22, 0x76, 0xdb, 0xce, 0x82,
+ 0xfe, 0x8b, 0xac, 0xd3, 0x09, 0x90, 0xad, 0xf2,
+ 0x42, 0x45, 0x8b, 0xbd, 0xad, 0x34, 0x56, 0x67,
+ 0x3a, 0x81, 0x3d, 0x95, 0x37, 0x72, 0xe6, 0xcc,
+ 0x20, 0xe7, 0x09, 0x84, 0x99, 0x8b, 0x1a, 0x68,
+ 0x5f, 0x4e, 0x00, 0x14, 0x3e, 0x94, 0xa7, 0x15,
+ 0xab, 0xdd, 0x01, 0x2f, 0x9d, 0x57, 0xce, 0x24,
+ 0x40, 0x97, 0x5e, 0x62, 0x7c, 0x4f, 0xe7, 0x1d,
+ 0x53, 0x79, 0x05, 0x52, 0x5d, 0xc9, 0xc6, 0xe0,
+ 0x47, 0xc1, 0xb5, 0x7f, 0x47, 0x28, 0x7d, 0x0b,
+ 0xa8, 0x51, 0x27, 0xb9, 0x21, 0x97, 0x2d, 0x5b,
+ 0x03, 0x94, 0x30, 0x63, 0xa5, 0x02, 0x04, 0xf0,
+ 0x53, 0x53, 0x23, 0xfa, 0x81, 0xd1, 0x31, 0x3b,
+ 0x63, 0x5d, 0x61, 0x3b, 0x44, 0x19, 0xb2, 0x24,
+ 0x15, 0x79, 0x54, 0xb0, 0x57, 0x8c, 0x17, 0x0d,
+ 0x36, 0xad, 0xa3, 0x08, 0x71, 0x60, 0x85, 0xc9,
+ 0x5e, 0x7b, 0x55, 0x85, 0x8a, 0x90, 0x0b, 0x2c,
+ 0x2b, 0x9a, 0x5d, 0xb6, 0x0e, 0xb6, 0xa6, 0x1d,
+ 0xb1, 0xf5, 0xe1, 0xae, 0xf9, 0x94, 0xb6, 0x3d,
+ 0xd0, 0xad, 0x5b, 0xa7, 0x3a, 0x66, 0xd0, 0x31,
+ 0x45, 0xcc, 0xb7, 0x7f, 0xce, 0x0f, 0x07, 0x2e,
+ 0x64, 0x11, 0xe0, 0xcd, 0xac, 0xdb, 0x75, 0xb1,
+ 0x5a, 0x4a, 0x5b, 0x15, 0x6a, 0xe2, 0x28, 0x8c,
+ 0x6d, 0xe5, 0x5a, 0x82, 0x62, 0xeb, 0xfc, 0xf5,
+ 0x9b, 0x67, 0xa0, 0x79, 0x75, 0x24, 0x2e, 0xd4,
+ 0x3b, 0x53, 0xd4, 0xec, 0x6b, 0x0f, 0x43, 0x22,
+ 0xe3, 0xc3, 0x75, 0x83, 0x2d, 0x64, 0x5f, 0x8a,
+ 0x79, 0x49, 0x5f, 0x1a, 0x81, 0xeb, 0xd5, 0x47,
+ 0xc9, 0xe7, 0xa8, 0x14, 0xd9, 0xcc, 0xb0, 0xa4,
+ 0xea, 0xfc, 0x12, 0x23, 0xb3, 0x1b, 0x7a, 0xac,
+ 0x0f, 0x6a, 0x86, 0x4c, 0x4b, 0x91, 0x13, 0xa3,
+ 0x52, 0x51, 0x69, 0xc8, 0xff, 0x52, 0x8f, 0x44
+};
+
+static const struct aes_vector aes_vectors[] = {
+ {K1, IV1, P1, sizeof(P1), C1, sizeof(K1)},
+ {K2, IV2, P2, sizeof(P2), C2, sizeof(K2)},
+ {K3, IV3, P3, sizeof(P3), C3, sizeof(K3)},
+ {K4, IV4, P4, sizeof(P4), C4, sizeof(K4)},
+ {K5, IV5, P5, sizeof(P5), C5, sizeof(K5)},
+ {K6, IV6, P6, sizeof(P6), C6, sizeof(K6)},
+ {K7, IV7, P7, sizeof(P7), C7, sizeof(K7)},
+};
+
+/* DOCSIS: AES CFB */
+static const uint8_t DK1[] = {
+ 0xe6, 0x60, 0x0f, 0xd8, 0x85, 0x2e, 0xf5, 0xab,
+ 0xe6, 0x60, 0x0f, 0xd8, 0x85, 0x2e, 0xf5, 0xab
+};
+static const uint8_t DIV1[] = {
+ 0x81, 0x0e, 0x52, 0x8e, 0x1c, 0x5f, 0xda, 0x1a,
+ 0x81, 0x0e, 0x52, 0x8e, 0x1c, 0x5f, 0xda, 0x1a
+};
+static const uint8_t DP1[] = {
+ 0x00, 0x01, 0x02, 0x88, 0xee, 0x59, 0x7e
+};
+static const uint8_t DC1[] = {
+ 0xfc, 0x68, 0xa3, 0x55, 0x60, 0x37, 0xdc
+};
+
+/* DOCSIS: AES CBC + CFB */
+static const uint8_t DK2[] = {
+ 0xe6, 0x60, 0x0f, 0xd8, 0x85, 0x2e, 0xf5, 0xab,
+ 0xe6, 0x60, 0x0f, 0xd8, 0x85, 0x2e, 0xf5, 0xab
+};
+static const uint8_t DIV2[] = {
+ 0x81, 0x0e, 0x52, 0x8e, 0x1c, 0x5f, 0xda, 0x1a,
+ 0x81, 0x0e, 0x52, 0x8e, 0x1c, 0x5f, 0xda, 0x1a
+};
+static const uint8_t DP2[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x91,
+ 0xd2, 0xd1, 0x9f
+};
+static const uint8_t DC2[] = {
+ 0x9d, 0xd1, 0x67, 0x4b, 0xba, 0x61, 0x10, 0x1b,
+ 0x56, 0x75, 0x64, 0x74, 0x36, 0x4f, 0x10, 0x1d,
+ 0x44, 0xd4, 0x73
+};
+
+/* DOCSIS: AES CBC */
+static const uint8_t DK3[] = {
+ 0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6,
+ 0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c
+};
+static const uint8_t DIV3[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f
+};
+static const uint8_t DP3[] = {
+ 0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f, 0x96,
+ 0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a,
+ 0xae, 0x2d, 0x8a, 0x57, 0x1e, 0x03, 0xac, 0x9c,
+ 0x9e, 0xb7, 0x6f, 0xac, 0x45, 0xaf, 0x8e, 0x51,
+ 0x30, 0xc8, 0x1c, 0x46, 0xa3, 0x5c, 0xe4, 0x11,
+ 0xe5, 0xfb, 0xc1, 0x19, 0x1a, 0x0a, 0x52, 0xef,
+ 0xf6, 0x9f, 0x24, 0x45, 0xdf, 0x4f, 0x9b, 0x17,
+ 0xad, 0x2b, 0x41, 0x7b, 0xe6, 0x6c, 0x37, 0x10
+};
+static const uint8_t DC3[] = {
+ 0x76, 0x49, 0xab, 0xac, 0x81, 0x19, 0xb2, 0x46,
+ 0xce, 0xe9, 0x8e, 0x9b, 0x12, 0xe9, 0x19, 0x7d,
+ 0x50, 0x86, 0xcb, 0x9b, 0x50, 0x72, 0x19, 0xee,
+ 0x95, 0xdb, 0x11, 0x3a, 0x91, 0x76, 0x78, 0xb2,
+ 0x73, 0xbe, 0xd6, 0xb8, 0xe3, 0xc1, 0x74, 0x3b,
+ 0x71, 0x16, 0xe6, 0x9e, 0x22, 0x22, 0x95, 0x16,
+ 0x3f, 0xf1, 0xca, 0xa1, 0x68, 0x1f, 0xac, 0x09,
+ 0x12, 0x0e, 0xca, 0x30, 0x75, 0x86, 0xe1, 0xa7
+};
+
+static const struct aes_vector docsis_vectors[] = {
+ {DK1, DIV1, DP1, sizeof(DP1), DC1, sizeof(DK1)},
+ {DK2, DIV2, DP2, sizeof(DP2), DC2, sizeof(DK2)},
+ {DK3, DIV3, DP3, sizeof(DP3), DC3, sizeof(DK3)},
+};
+
+/* Test vectors from CM-SP-SECv3.1-I06-160602 section I.10.2 */
+static const uint8_t CFBK1[] = {
+ 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef,
+ 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef
+};
+static const uint8_t CFBIV1[] = {
+ 0x12, 0x34, 0x56, 0x78, 0x90, 0xab, 0xcd, 0xef,
+ 0x12, 0x34, 0x56, 0x78, 0x90, 0xab, 0xcd, 0xef
+};
+static const uint8_t CFBP1[] = {
+ 0x4e, 0x6f, 0x77, 0x20, 0x69, 0x73, 0x20, 0x74,
+ 0x68, 0x65, 0x20, 0x74, 0x69, 0x6d, 0x65, 0x20
+};
+static const uint8_t CFBC1[] = {
+ 0x43, 0xbc, 0x0a, 0xd0, 0xfc, 0x8d, 0x93, 0xff,
+ 0x80, 0xe0, 0xbf, 0xf1, 0x41, 0xfc, 0x67, 0x08
+};
+
+static const uint8_t CFBK2[] = {
+ 0xe6, 0x60, 0x0f, 0xd8, 0x85, 0x2e, 0xf5, 0xab,
+ 0xe6, 0x60, 0x0f, 0xd8, 0x85, 0x2e, 0xf5, 0xab
+};
+static const uint8_t CFBIV2[] = {
+ 0x9d, 0xd1, 0x67, 0x4b, 0xba, 0x61, 0x10, 0x1b,
+ 0x56, 0x75, 0x64, 0x74, 0x36, 0x4f, 0x10, 0x1d
+};
+static const uint8_t CFBP2[] = {
+ 0xd2, 0xd1, 0x9f, /* 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 */
+};
+static const uint8_t CFBC2[] = {
+ 0x44, 0xd4, 0x73, /* 0xdd, 0x83, 0x9c, 0xee, 0x46,
+ 0x4c, 0xff, 0x83, 0xb7, 0x27, 0x96, 0xd6, 0x55 */
+};
+
+/*
+ * Test vectors from
+ * https://nvlpubs.nist.gov/nistpubs/Legacy/SP/nistspecialpublication800-38a.pdf
+ */
+static const uint8_t CFBK3[] = {
+ 0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6,
+ 0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c
+};
+static const uint8_t CFBIV3[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f
+};
+static const uint8_t CFBP3[] = {
+ 0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f, 0x96,
+ 0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a
+};
+static const uint8_t CFBC3[] = {
+ 0x3b, 0x3f, 0xd9, 0x2e, 0xb7, 0x2d, 0xad, 0x20,
+ 0x33, 0x34, 0x49, 0xf8, 0xe8, 0x3c, 0xfb, 0x4a
+};
+
+static const uint8_t CFBK4[] = {
+ 0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6,
+ 0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c
+};
+static const uint8_t CFBIV4[] = {
+ 0x3b, 0x3f, 0xd9, 0x2e, 0xb7, 0x2d, 0xad, 0x20,
+ 0x33, 0x34, 0x49, 0xf8, 0xe8, 0x3c, 0xfb, 0x4a
+};
+static const uint8_t CFBP4[] = {
+ 0xae, 0x2d, 0x8a, 0x57, 0x1e, 0x03, 0xac, 0x9c,
+ 0x9e, 0xb7, 0x6f, 0xac, 0x45, 0xaf, 0x8e, 0x51
+};
+static const uint8_t CFBC4[] = {
+ 0xc8, 0xa6, 0x45, 0x37, 0xa0, 0xb3, 0xa9, 0x3f,
+ 0xcd, 0xe3, 0xcd, 0xad, 0x9f, 0x1c, 0xe5, 0x8b
+};
+
+static const uint8_t CFBK5[] = {
+ 0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6,
+ 0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c
+};
+static const uint8_t CFBIV5[] = {
+ 0xc8, 0xa6, 0x45, 0x37, 0xa0, 0xb3, 0xa9, 0x3f,
+ 0xcd, 0xe3, 0xcd, 0xad, 0x9f, 0x1c, 0xe5, 0x8b
+};
+static const uint8_t CFBP5[] = {
+ 0x30, 0xc8, 0x1c, 0x46, 0xa3, 0x5c, 0xe4, 0x11,
+ 0xe5, 0xfb, 0xc1, 0x19, 0x1a, 0x0a, 0x52, 0xef
+};
+static const uint8_t CFBC5[] = {
+ 0x26, 0x75, 0x1f, 0x67, 0xa3, 0xcb, 0xb1, 0x40,
+ 0xb1, 0x80, 0x8c, 0xf1, 0x87, 0xa4, 0xf4, 0xdf
+};
+
+static const uint8_t CFBK6[] = {
+ 0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6,
+ 0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c
+};
+static const uint8_t CFBIV6[] = {
+ 0x26, 0x75, 0x1f, 0x67, 0xa3, 0xcb, 0xb1, 0x40,
+ 0xb1, 0x80, 0x8c, 0xf1, 0x87, 0xa4, 0xf4, 0xdf
+};
+static const uint8_t CFBP6[] = {
+ 0xf6, 0x9f, 0x24, 0x45, 0xdf, 0x4f, 0x9b, 0x17,
+ 0xad, 0x2b, 0x41, 0x7b, 0xe6, 0x6c, 0x37, 0x10
+};
+static const uint8_t CFBC6[] = {
+ 0xc0, 0x4b, 0x05, 0x35, 0x7c, 0x5d, 0x1c, 0x0e,
+ 0xea, 0xc4, 0xc6, 0x6f, 0x9f, 0xf7, 0xf2, 0xe6
+};
+static struct aes_vector aes_cfb_128_tab[] = {
+ {CFBK1, CFBIV1, CFBP1, sizeof(CFBP1), CFBC1, sizeof(CFBK1)},
+ {CFBK2, CFBIV2, CFBP2, sizeof(CFBP2), CFBC2, sizeof(CFBK2)},
+ {CFBK3, CFBIV3, CFBP3, sizeof(CFBP3), CFBC3, sizeof(CFBK3)},
+ {CFBK4, CFBIV4, CFBP4, sizeof(CFBP4), CFBC4, sizeof(CFBK4)},
+ {CFBK5, CFBIV5, CFBP5, sizeof(CFBP5), CFBC5, sizeof(CFBK5)},
+ {CFBK6, CFBIV6, CFBP6, sizeof(CFBP6), CFBC6, sizeof(CFBK6)},
+};
+
+static int
+aes_job_ok(const struct JOB_AES_HMAC *job,
+ const uint8_t *out_text,
+ const uint8_t *target,
+ const uint8_t *padding,
+ const size_t sizeof_padding,
+ const unsigned text_len)
+{
+ const int num = (const int)((uint64_t)job->user_data2);
+
+ if (job->status != STS_COMPLETED) {
+		printf("%d error status:%d, job %d\n",
+ __LINE__, job->status, num);
+ return 0;
+ }
+ if (memcmp(out_text, target + sizeof_padding,
+ text_len)) {
+ printf("%d mismatched\n", num);
+ return 0;
+ }
+ if (memcmp(padding, target, sizeof_padding)) {
+ printf("%d overwrite head\n", num);
+ return 0;
+ }
+ if (memcmp(padding,
+ target + sizeof_padding + text_len,
+ sizeof_padding)) {
+ printf("%d overwrite tail\n", num);
+ return 0;
+ }
+ return 1;
+}
+
+static int
+test_aes_many(struct MB_MGR *mb_mgr,
+ void *enc_keys,
+ void *dec_keys,
+ const void *iv,
+ const uint8_t *in_text,
+ const uint8_t *out_text,
+ unsigned text_len,
+ int dir,
+ int order,
+ JOB_CIPHER_MODE cipher,
+ const int in_place,
+ const int key_len,
+ const int num_jobs)
+{
+ struct JOB_AES_HMAC *job;
+ uint8_t padding[16];
+ uint8_t **targets = malloc(num_jobs * sizeof(void *));
+ int i, jobs_rx = 0, ret = -1;
+
+ assert(targets != NULL);
+
+ memset(padding, -1, sizeof(padding));
+
+ for (i = 0; i < num_jobs; i++) {
+ targets[i] = malloc(text_len + (sizeof(padding) * 2));
+ memset(targets[i], -1, text_len + (sizeof(padding) * 2));
+ if (in_place) {
+ /* copy input text to the allocated buffer */
+ memcpy(targets[i] + sizeof(padding), in_text, text_len);
+ }
+ }
+
+ /* flush the scheduler */
+ while ((job = IMB_FLUSH_JOB(mb_mgr)) != NULL)
+ ;
+
+ for (i = 0; i < num_jobs; i++) {
+ job = IMB_GET_NEXT_JOB(mb_mgr);
+ job->cipher_direction = dir;
+ job->chain_order = order;
+ if (!in_place) {
+ job->dst = targets[i] + sizeof(padding);
+ job->src = in_text;
+ } else {
+ job->dst = targets[i] + sizeof(padding);
+ job->src = targets[i] + sizeof(padding);
+ }
+ job->cipher_mode = cipher;
+ job->aes_enc_key_expanded = enc_keys;
+ job->aes_dec_key_expanded = dec_keys;
+ job->aes_key_len_in_bytes = key_len;
+
+ job->iv = iv;
+ job->iv_len_in_bytes = 16;
+ job->cipher_start_src_offset_in_bytes = 0;
+ job->msg_len_to_cipher_in_bytes = text_len;
+ job->user_data = targets[i];
+ job->user_data2 = (void *)((uint64_t)i);
+
+ job->hash_alg = NULL_HASH;
+
+ job = IMB_SUBMIT_JOB(mb_mgr);
+ if (job != NULL) {
+ jobs_rx++;
+ if (!aes_job_ok(job, out_text, job->user_data, padding,
+ sizeof(padding), text_len))
+ goto end;
+ }
+ }
+
+ while ((job = IMB_FLUSH_JOB(mb_mgr)) != NULL) {
+ jobs_rx++;
+ if (!aes_job_ok(job, out_text, job->user_data, padding,
+ sizeof(padding), text_len))
+ goto end;
+ }
+
+ if (jobs_rx != num_jobs) {
+ printf("Expected %d jobs, received %d\n", num_jobs, jobs_rx);
+ goto end;
+ }
+ ret = 0;
+
+ end:
+ while ((job = IMB_FLUSH_JOB(mb_mgr)) != NULL)
+ ;
+
+ for (i = 0; i < num_jobs; i++)
+ free(targets[i]);
+ free(targets);
+ return ret;
+}
+
+static int
+test_aes_vectors(struct MB_MGR *mb_mgr, const int vec_cnt,
+ const struct aes_vector *vec_tab, const char *banner,
+ const JOB_CIPHER_MODE cipher, const int num_jobs)
+{
+ int vect, errors = 0;
+ DECLARE_ALIGNED(uint32_t enc_keys[15*4], 16);
+ DECLARE_ALIGNED(uint32_t dec_keys[15*4], 16);
+
+ printf("%s (N jobs = %d):\n", banner, num_jobs);
+ for (vect = 0; vect < vec_cnt; vect++) {
+#ifdef DEBUG
+ printf("[%d/%d] Standard vector key_len:%d\n",
+ vect + 1, vec_cnt,
+ (int) vec_tab[vect].Klen);
+#else
+ printf(".");
+#endif
+ switch (vec_tab[vect].Klen) {
+ case 16:
+ IMB_AES_KEYEXP_128(mb_mgr, vec_tab[vect].K, enc_keys,
+ dec_keys);
+ break;
+ case 24:
+ IMB_AES_KEYEXP_192(mb_mgr, vec_tab[vect].K, enc_keys,
+ dec_keys);
+ break;
+ case 32:
+ default:
+ IMB_AES_KEYEXP_256(mb_mgr, vec_tab[vect].K, enc_keys,
+ dec_keys);
+ break;
+ }
+
+ if (test_aes_many(mb_mgr, enc_keys, dec_keys,
+ vec_tab[vect].IV,
+ vec_tab[vect].P, vec_tab[vect].C,
+ (unsigned) vec_tab[vect].Plen,
+ ENCRYPT, CIPHER_HASH, cipher, 0,
+ vec_tab[vect].Klen, num_jobs)) {
+ printf("error #%d encrypt\n", vect + 1);
+ errors++;
+ }
+
+ if (test_aes_many(mb_mgr, enc_keys, dec_keys,
+ vec_tab[vect].IV,
+ vec_tab[vect].C, vec_tab[vect].P,
+ (unsigned) vec_tab[vect].Plen,
+ DECRYPT, HASH_CIPHER, cipher, 0,
+ vec_tab[vect].Klen, num_jobs)) {
+ printf("error #%d decrypt\n", vect + 1);
+ errors++;
+ }
+
+ if (test_aes_many(mb_mgr, enc_keys, dec_keys,
+ vec_tab[vect].IV,
+ vec_tab[vect].P, vec_tab[vect].C,
+ (unsigned) vec_tab[vect].Plen,
+ ENCRYPT, CIPHER_HASH, cipher, 1,
+ vec_tab[vect].Klen, num_jobs)) {
+ printf("error #%d encrypt in-place\n", vect + 1);
+ errors++;
+ }
+
+ if (test_aes_many(mb_mgr, enc_keys, dec_keys,
+ vec_tab[vect].IV,
+ vec_tab[vect].C, vec_tab[vect].P,
+ (unsigned) vec_tab[vect].Plen,
+ DECRYPT, HASH_CIPHER, cipher, 1,
+ vec_tab[vect].Klen, num_jobs)) {
+ printf("error #%d decrypt in-place\n", vect + 1);
+ errors++;
+ }
+ }
+ printf("\n");
+ return errors;
+}
+
+static int
+cfb128_validate_ok(const uint8_t *output, const uint8_t *in_text,
+ const size_t plen, const unsigned i, const unsigned is_enc,
+ const int in_place)
+{
+ if (memcmp(output, in_text, plen) != 0) {
+ printf("\nAES-CFB128 standard test vector %d %s (%s): fail\n",
+ i + 1, (is_enc) ? "encrypt" : "decrypt",
+ (in_place) ? "in-place" : "out-of-place");
+ return 0;
+ }
+#ifdef DEBUG
+ printf("Standard test vector %u %s %s\n", i + 1,
+ (in_place) ? "in-place" : "out-of-place",
+ (is_enc) ? "encrypt" : "decrypt");
+#else
+ printf(".");
+#endif
+
+ return 1;
+}
+
+static int
+cfb128_validate(struct MB_MGR *mb_mgr)
+{
+ unsigned i;
+
+ printf("AES-CFB128 standard test vectors:\n");
+ for (i = 0; i < DIM(aes_cfb_128_tab); i++) {
+ uint8_t output1[16];
+ uint8_t output2[16];
+ DECLARE_ALIGNED(uint32_t key[4], 16);
+ DECLARE_ALIGNED(uint32_t keys_enc[11*4], 16);
+ DECLARE_ALIGNED(uint32_t keys_dec[11*4], 16);
+
+ memcpy(key, aes_cfb_128_tab[i].K, aes_cfb_128_tab[i].Klen);
+ IMB_AES_KEYEXP_128(mb_mgr, key, keys_enc, keys_dec);
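+ /* CFB uses the AES encryption key schedule for both directions,
+ * so only keys_enc is passed below */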
+
+ /* Out of place */
+
+ /* encrypt test */
+ IMB_AES128_CFB_ONE(mb_mgr, output1, aes_cfb_128_tab[i].P,
+ aes_cfb_128_tab[i].IV, keys_enc,
+ aes_cfb_128_tab[i].Plen);
+ if (!cfb128_validate_ok(output1, aes_cfb_128_tab[i].C,
+ aes_cfb_128_tab[i].Plen, i, 1, 0))
+ return 0;
+
+ /* decrypt test */
+ IMB_AES128_CFB_ONE(mb_mgr, output2, output1,
+ aes_cfb_128_tab[i].IV, keys_enc,
+ aes_cfb_128_tab[i].Plen);
+ if (!cfb128_validate_ok(output2, aes_cfb_128_tab[i].P,
+ aes_cfb_128_tab[i].Plen, i, 0, 0))
+ return 0;
+
+ /* In place */
+
+ /* encrypt test */
+ memcpy(output1, aes_cfb_128_tab[i].P, aes_cfb_128_tab[i].Plen);
+ IMB_AES128_CFB_ONE(mb_mgr, output1, output1,
+ aes_cfb_128_tab[i].IV, keys_enc,
+ aes_cfb_128_tab[i].Plen);
+ if (!cfb128_validate_ok(output1, aes_cfb_128_tab[i].C,
+ aes_cfb_128_tab[i].Plen, i, 1, 1))
+ return 0;
+
+ /* decrypt test */
+ memcpy(output1, aes_cfb_128_tab[i].C, aes_cfb_128_tab[i].Plen);
+ IMB_AES128_CFB_ONE(mb_mgr, output1, output1,
+ aes_cfb_128_tab[i].IV, keys_enc,
+ aes_cfb_128_tab[i].Plen);
+ if (!cfb128_validate_ok(output1, aes_cfb_128_tab[i].P,
+ aes_cfb_128_tab[i].Plen, i, 0, 1))
+ return 0;
+ }
+ printf("\n");
+ return 1;
+}
+
+int
+aes_test(const enum arch_type arch,
+ struct MB_MGR *mb_mgr)
+{
+ const int num_jobs_tab[] = {
+ 1, 3, 4, 5, 7, 8, 9, 15, 16, 17
+ };
+ unsigned i;
+ int errors = 0;
+
+ (void) arch; /* unused */
+
+ for (i = 0; i < DIM(num_jobs_tab); i++)
+ errors += test_aes_vectors(mb_mgr, DIM(aes_vectors),
+ aes_vectors,
+ "AES-CBC standard test vectors", CBC,
+ num_jobs_tab[i]);
+ for (i = 0; i < DIM(num_jobs_tab); i++)
+ errors += test_aes_vectors(mb_mgr, DIM(docsis_vectors),
+ docsis_vectors,
+ "AES-DOCSIS standard test vectors",
+ DOCSIS_SEC_BPI, num_jobs_tab[i]);
+ if (!cfb128_validate(mb_mgr))
+ errors++;
+ if (0 == errors)
+ printf("...Pass\n");
+ else
+ printf("...Fail\n");
+
+ return errors;
+}
diff --git a/src/spdk/intel-ipsec-mb/LibTestApp/api_test.c b/src/spdk/intel-ipsec-mb/LibTestApp/api_test.c
new file mode 100644
index 000000000..ce5c20d23
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/LibTestApp/api_test.c
@@ -0,0 +1,612 @@
+/*****************************************************************************
+ Copyright (c) 2018-2019, Intel Corporation
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*****************************************************************************/
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <intel-ipsec-mb.h>
+#include "gcm_ctr_vectors_test.h"
+
+int api_test(const enum arch_type arch, struct MB_MGR *mb_mgr);
+
+/*
+ * @brief Performs JOB API behavior tests
+ */
+static int
+test_job_api(struct MB_MGR *mb_mgr)
+{
+ struct JOB_AES_HMAC *job, *job_next;
+
+ printf("JOB API behavior test:\n");
+
+ /* ======== test 1 */
+ job = IMB_GET_NEXT_JOB(mb_mgr);
+ if (job == NULL) {
+ printf("%s: test 1, unexpected job = NULL\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ /* ======== test 2 : invalid cipher and mac */
+ memset(job, 0, sizeof(*job));
+ job_next = IMB_SUBMIT_JOB(mb_mgr);
+ if (job != job_next) {
+ /* Invalid job should be returned straight away */
+ printf("%s: test 2, unexpected job != job_next\n", __func__);
+ return 1;
+ }
+ printf(".");
+ if (job_next->status != STS_INVALID_ARGS) {
+ /* Invalid job is returned, and status should be INVALID_ARGS */
+ printf("%s: test 2, unexpected job->status != "
+ "STS_INVALID_ARGS\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ job_next = IMB_GET_NEXT_JOB(mb_mgr);
+ if (job == job_next) {
+ /* get next job should point to a new job slot */
+ printf("%s: test 2, unexpected job == get_next_job()\n",
+ __func__);
+ return 1;
+ }
+ printf(".");
+
+ job = IMB_GET_COMPLETED_JOB(mb_mgr);
+ if (job) {
+ /* there should not be any completed jobs left */
+ printf("%s: test 2, unexpected completed job\n",
+ __func__);
+ return 1;
+ }
+ printf(".");
+
+ /* clean up */
+ while ((job = IMB_FLUSH_JOB(mb_mgr)) != NULL)
+ ;
+
+ printf("\n");
+ return 0;
+}
+
+/*
+ * @brief Dummy function for custom hash and cipher modes
+ */
+static int dummy_cipher_hash_func(struct JOB_AES_HMAC *job)
+{
+ (void) job;
+ return 0;
+}
+
+/*
+ * @brief Fills in job structure with valid settings
+ */
+static void
+fill_in_job(struct JOB_AES_HMAC *job,
+ const JOB_CIPHER_MODE cipher_mode,
+ const JOB_CIPHER_DIRECTION cipher_direction,
+ const JOB_HASH_ALG hash_alg,
+ const JOB_CHAIN_ORDER chain_order)
+{
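+ /* auth tag lengths in bytes, indexed by JOB_HASH_ALG value */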
+ const uint64_t tag_len_tab[] = {
+ 0, /* INVALID selection */
+ 12, /* SHA1 */
+ 14, /* SHA_224 */
+ 16, /* SHA_256 */
+ 24, /* SHA_384 */
+ 32, /* SHA_512 */
+ 12, /* AES_XCBC */
+ 12, /* MD5 */
+ 0, /* NULL_HASH */
+ 16, /* AES_GMAC */
+ 0, /* CUSTOM HASH */
+ 16, /* AES_CCM */
+ 16, /* AES_CMAC */
+ 20, /* PLAIN_SHA1 */
+ 28, /* PLAIN_SHA_224 */
+ 32, /* PLAIN_SHA_256 */
+ 48, /* PLAIN_SHA_384 */
+ 64, /* PLAIN_SHA_512 */
+ };
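+ /* dust_bin serves as dummy key, IV and tag output storage;
+ * dust_keys is the three-pointer key array used for DES3 */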
+ static DECLARE_ALIGNED(uint8_t dust_bin[2048], 64);
+ static void *dust_keys[3] = {dust_bin, dust_bin, dust_bin};
+ const uint64_t msg_len_to_cipher = 32;
+ const uint64_t msg_len_to_hash = 48;
+
+ if (job == NULL)
+ return;
+
+ memset(job, 0, sizeof(*job));
+ job->chain_order = chain_order;
+ job->hash_alg = hash_alg;
+ job->cipher_mode = cipher_mode;
+ job->cipher_direction = cipher_direction;
+
+ switch (job->cipher_mode) {
+ case CBC:
+ if (job->cipher_direction == ENCRYPT)
+ job->aes_enc_key_expanded = dust_bin;
+ else
+ job->aes_dec_key_expanded = dust_bin;
+ job->aes_key_len_in_bytes = UINT64_C(16);
+ job->msg_len_to_cipher_in_bytes = msg_len_to_cipher;
+ job->iv = dust_bin;
+ job->iv_len_in_bytes = UINT64_C(16);
+ break;
+ case CNTR:
+ job->aes_enc_key_expanded = dust_bin;
+ job->aes_key_len_in_bytes = UINT64_C(16);
+ job->msg_len_to_cipher_in_bytes = msg_len_to_cipher;
+ job->iv = dust_bin;
+ job->iv_len_in_bytes = UINT64_C(16);
+ break;
+ case NULL_CIPHER:
+ break;
+ case DOCSIS_SEC_BPI:
+ /* it has to be set regardless of direction (AES-CFB) */
+ job->aes_enc_key_expanded = dust_bin;
+ if (job->cipher_direction == DECRYPT)
+ job->aes_dec_key_expanded = dust_bin;
+ job->aes_key_len_in_bytes = UINT64_C(16);
+ job->msg_len_to_cipher_in_bytes = msg_len_to_cipher;
+ job->iv = dust_bin;
+ job->iv_len_in_bytes = UINT64_C(16);
+ break;
+ case GCM:
+ if (job->cipher_direction == ENCRYPT)
+ job->aes_enc_key_expanded = dust_bin;
+ else
+ job->aes_dec_key_expanded = dust_bin;
+ job->aes_key_len_in_bytes = UINT64_C(16);
+ job->msg_len_to_cipher_in_bytes = msg_len_to_cipher;
+ job->iv = dust_bin;
+ job->iv_len_in_bytes = UINT64_C(12);
+ break;
+ case CUSTOM_CIPHER:
+ job->cipher_func = dummy_cipher_hash_func;
+ break;
+ case DES:
+ if (job->cipher_direction == ENCRYPT)
+ job->aes_enc_key_expanded = dust_bin;
+ else
+ job->aes_dec_key_expanded = dust_bin;
+ job->aes_key_len_in_bytes = UINT64_C(8);
+ job->msg_len_to_cipher_in_bytes = msg_len_to_cipher;
+ job->iv = dust_bin;
+ job->iv_len_in_bytes = UINT64_C(8);
+ break;
+ case DOCSIS_DES:
+ if (job->cipher_direction == ENCRYPT)
+ job->aes_enc_key_expanded = dust_bin;
+ else
+ job->aes_dec_key_expanded = dust_bin;
+ job->aes_key_len_in_bytes = UINT64_C(8);
+ job->msg_len_to_cipher_in_bytes = msg_len_to_cipher;
+ job->iv = dust_bin;
+ job->iv_len_in_bytes = UINT64_C(8);
+ break;
+ case CCM:
+ /* AES-CTR and CBC-MAC use only encryption keys */
+ job->aes_enc_key_expanded = dust_bin;
+ job->aes_key_len_in_bytes = UINT64_C(16);
+ job->iv = dust_bin;
+ job->iv_len_in_bytes = UINT64_C(13);
+ job->msg_len_to_cipher_in_bytes = msg_len_to_cipher;
+ break;
+ case DES3:
+ if (job->cipher_direction == ENCRYPT)
+ job->aes_enc_key_expanded = dust_keys;
+ else
+ job->aes_dec_key_expanded = dust_keys;
+ job->aes_key_len_in_bytes = UINT64_C(24);
+ job->msg_len_to_cipher_in_bytes = msg_len_to_cipher;
+ job->iv = dust_bin;
+ job->iv_len_in_bytes = UINT64_C(8);
+ break;
+ default:
+ break;
+ }
+
+ switch (job->hash_alg) {
+ case SHA1:
+ case AES_XCBC:
+ case MD5:
+ case SHA_224:
+ case SHA_256:
+ case SHA_384:
+ case SHA_512:
+ case PLAIN_SHA1:
+ case PLAIN_SHA_224:
+ case PLAIN_SHA_256:
+ case PLAIN_SHA_384:
+ case PLAIN_SHA_512:
+ job->msg_len_to_hash_in_bytes = msg_len_to_hash;
+ job->auth_tag_output = dust_bin;
+ job->auth_tag_output_len_in_bytes = tag_len_tab[job->hash_alg];
+ break;
+ case NULL_HASH:
+ break;
+ case CUSTOM_HASH:
+ job->hash_func = dummy_cipher_hash_func;
+ break;
+ case AES_GMAC:
+ job->msg_len_to_hash_in_bytes = msg_len_to_hash;
+ job->auth_tag_output = dust_bin;
+ job->auth_tag_output_len_in_bytes = tag_len_tab[job->hash_alg];
+ job->u.GCM.aad = dust_bin;
+ job->u.GCM.aad_len_in_bytes = 16;
+ break;
+ case AES_CCM:
+ job->u.CCM.aad = dust_bin;
+ job->u.CCM.aad_len_in_bytes = 16;
+ job->msg_len_to_hash_in_bytes = job->msg_len_to_cipher_in_bytes;
+ job->hash_start_src_offset_in_bytes =
+ job->cipher_start_src_offset_in_bytes;
+ job->auth_tag_output = dust_bin;
+ job->auth_tag_output_len_in_bytes = tag_len_tab[job->hash_alg];
+ break;
+ case AES_CMAC:
+ job->u.CMAC._key_expanded = dust_bin;
+ job->u.CMAC._skey1 = dust_bin;
+ job->u.CMAC._skey2 = dust_bin;
+ job->msg_len_to_hash_in_bytes = msg_len_to_hash;
+ job->auth_tag_output = dust_bin;
+ job->auth_tag_output_len_in_bytes = tag_len_tab[job->hash_alg];
+ break;
+ default:
+ break;
+ }
+}
+
+/*
+ * @brief Submits \a job to \a mb_mgr and verifies it failed with
+ * invalid arguments status.
+ */
+static int
+is_submit_invalid(struct MB_MGR *mb_mgr, const struct JOB_AES_HMAC *job,
+ const int test_num)
+{
+ struct JOB_AES_HMAC *mb_job = NULL, *job_ret = NULL;
+
+ /* get next available job slot */
+ mb_job = IMB_GET_NEXT_JOB(mb_mgr);
+ if (mb_job == NULL) {
+ printf("%s : test %d, hash_alg %d, chain_order %d, "
+ "cipher_dir %d, cipher_mode %d : "
+ "unexpected get_next_job() == NULL\n",
+ __func__, test_num, (int) job->hash_alg,
+ (int) job->chain_order, (int) job->cipher_direction,
+ (int) job->cipher_mode);
+ return 0;
+ }
+
+ /* copy template job into available slot */
+ *mb_job = *job;
+
+ /* submit the job for processing */
+ job_ret = IMB_SUBMIT_JOB(mb_mgr);
+
+ /*
+ * Returned job can be a previously submitted job or NULL
+ * (if MB_MGR was empty).
+ * Let's keep asking for completed jobs until we get the submitted job.
+ */
+ while (job_ret != mb_job) {
+ job_ret = IMB_GET_COMPLETED_JOB(mb_mgr);
+ if (job_ret == NULL) {
+ printf("%s : test %d, hash_alg %d, chain_order %d, "
+ "cipher_dir %d, cipher_mode %d : "
+ "unexpected job_ret == NULL "
+ "(most likely job passed checks and got "
+ "submitted)\n",
+ __func__, test_num, (int) job->hash_alg,
+ (int) job->chain_order,
+ (int) job->cipher_direction,
+ (int) job->cipher_mode);
+ return 0;
+ }
+ }
+
+ if (job_ret->status != STS_INVALID_ARGS) {
+ printf("%s : test %d, hash_alg %d, chain_order %d, "
+ "cipher_dir %d, cipher_mode %d : "
+ "unexpected job->status %d != STS_INVALID_ARGS\n",
+ __func__, test_num, (int) job_ret->hash_alg,
+ (int) job_ret->chain_order,
+ (int) job_ret->cipher_direction,
+ (int) job_ret->cipher_mode, (int) job_ret->status);
+ return 0;
+ }
+
+ return 1;
+}
+
+/*
+ * @brief Tests invalid settings for MAC modes
+ */
+static int
+test_job_invalid_mac_args(struct MB_MGR *mb_mgr)
+{
+ JOB_HASH_ALG hash;
+ JOB_CIPHER_DIRECTION dir;
+ const JOB_CIPHER_MODE cipher = NULL_CIPHER;
+ JOB_CHAIN_ORDER order;
+ struct JOB_AES_HMAC template_job;
+ struct JOB_AES_HMAC *job;
+
+ printf("Invalid JOB MAC arguments test:\n");
+
+ /* prep */
+ while ((job = IMB_FLUSH_JOB(mb_mgr)) != NULL)
+ ;
+
+ /* ======== test 100
+ * SRC = NULL
+ */
+ for (order = CIPHER_HASH; order <= HASH_CIPHER; order++)
+ for (dir = ENCRYPT; dir <= DECRYPT; dir++)
+ for (hash = SHA1; hash <= PLAIN_SHA_512; hash++) {
+ if (hash == NULL_HASH ||
+ hash == CUSTOM_HASH)
+ continue;
+
+ fill_in_job(&template_job, cipher, dir,
+ hash, order);
+ template_job.src = NULL;
+ if (!is_submit_invalid(mb_mgr, &template_job,
+ 100))
+ return 1;
+ printf(".");
+ }
+
+ /* ======== test 101
+ * AUTH_TAG_OUTPUT = NULL
+ */
+ for (order = CIPHER_HASH; order <= HASH_CIPHER; order++)
+ for (dir = ENCRYPT; dir <= DECRYPT; dir++)
+ for (hash = SHA1; hash <= PLAIN_SHA_512; hash++) {
+ if (hash == NULL_HASH ||
+ hash == CUSTOM_HASH)
+ continue;
+
+ fill_in_job(&template_job, cipher, dir,
+ hash, order);
+ template_job.auth_tag_output = NULL;
+ if (!is_submit_invalid(mb_mgr, &template_job,
+ 101))
+ return 1;
+ printf(".");
+ }
+
+ /* ======== test 102
+ * AUTH_TAG_OUTPUT_LEN = 0
+ */
+ for (order = CIPHER_HASH; order <= HASH_CIPHER; order++)
+ for (dir = ENCRYPT; dir <= DECRYPT; dir++)
+ for (hash = SHA1; hash <= PLAIN_SHA_512; hash++) {
+ if (hash == NULL_HASH ||
+ hash == CUSTOM_HASH)
+ continue;
+
+ fill_in_job(&template_job, cipher, dir,
+ hash, order);
+ template_job.auth_tag_output_len_in_bytes = 0;
+ if (!is_submit_invalid(mb_mgr, &template_job,
+ 102))
+ return 1;
+ printf(".");
+ }
+
+ /* clean up */
+ while ((job = IMB_FLUSH_JOB(mb_mgr)) != NULL)
+ ;
+
+ printf("\n");
+ return 0;
+}
+
+/*
+ * @brief Tests invalid settings for CIPHER modes
+ */
+static int
+test_job_invalid_cipher_args(struct MB_MGR *mb_mgr)
+{
+ const JOB_HASH_ALG hash = NULL_HASH;
+ JOB_CIPHER_DIRECTION dir;
+ JOB_CIPHER_MODE cipher;
+ JOB_CHAIN_ORDER order;
+ struct JOB_AES_HMAC template_job;
+ struct JOB_AES_HMAC *job;
+
+ printf("Invalid JOB CIPHER arguments test:\n");
+
+ /* prep */
+ while ((job = IMB_FLUSH_JOB(mb_mgr)) != NULL)
+ ;
+
+ /* ======== test 200
+ * SRC = NULL
+ */
+ for (order = CIPHER_HASH; order <= HASH_CIPHER; order++)
+ for (dir = ENCRYPT; dir <= DECRYPT; dir++)
+ for (cipher = CBC; cipher <= DES3; cipher++) {
+ if (cipher == NULL_CIPHER ||
+ cipher == CUSTOM_CIPHER)
+ continue;
+
+ fill_in_job(&template_job, cipher, dir,
+ hash, order);
+ template_job.src = NULL;
+ if (!is_submit_invalid(mb_mgr, &template_job,
+ 200))
+ return 1;
+ printf(".");
+ }
+
+ /* ======== test 201
+ * DST = NULL
+ */
+ for (order = CIPHER_HASH; order <= HASH_CIPHER; order++)
+ for (dir = ENCRYPT; dir <= DECRYPT; dir++)
+ for (cipher = CBC; cipher <= DES3; cipher++) {
+ if (cipher == NULL_CIPHER ||
+ cipher == CUSTOM_CIPHER)
+ continue;
+
+ fill_in_job(&template_job, cipher, dir,
+ hash, order);
+ template_job.dst = NULL;
+ if (!is_submit_invalid(mb_mgr, &template_job,
+ 201))
+ return 1;
+ printf(".");
+ }
+
+ /* ======== test 202
+ * IV = NULL
+ */
+ for (order = CIPHER_HASH; order <= HASH_CIPHER; order++)
+ for (dir = ENCRYPT; dir <= DECRYPT; dir++)
+ for (cipher = CBC; cipher <= DES3; cipher++) {
+ if (cipher == NULL_CIPHER ||
+ cipher == CUSTOM_CIPHER)
+ continue;
+
+ fill_in_job(&template_job, cipher, dir,
+ hash, order);
+ template_job.iv = NULL;
+ if (!is_submit_invalid(mb_mgr, &template_job,
+ 202))
+ return 1;
+ printf(".");
+ }
+
+ /* ======== test 203 (encrypt)
+ * AES_ENC_KEY_EXPANDED = NULL
+ * AES_DEC_KEY_EXPANDED = NULL
+ */
+ for (order = CIPHER_HASH; order <= HASH_CIPHER; order++)
+ for (cipher = CBC; cipher <= DES3; cipher++) {
+ fill_in_job(&template_job, cipher, ENCRYPT,
+ hash, order);
+ switch (cipher) {
+ case CBC:
+ case CNTR:
+ case DOCSIS_SEC_BPI:
+ case GCM:
+ case DES:
+ case DOCSIS_DES:
+ case CCM:
+ case DES3:
+ template_job.aes_enc_key_expanded = NULL;
+ if (!is_submit_invalid(mb_mgr, &template_job,
+ 203))
+ return 1;
+ break;
+ case NULL_CIPHER:
+ case CUSTOM_CIPHER:
+ default:
+ break;
+ }
+ printf(".");
+ }
+
+ /* ======== test 204 (decrypt)
+ * AES_ENC_KEY_EXPANDED = NULL
+ * AES_DEC_KEY_EXPANDED = NULL
+ */
+ for (order = CIPHER_HASH; order <= HASH_CIPHER; order++)
+ for (cipher = CBC; cipher <= DES3; cipher++) {
+ fill_in_job(&template_job, cipher, DECRYPT,
+ hash, order);
+ switch (cipher) {
+ case GCM:
+ case CBC:
+ case DES:
+ case DES3:
+ case DOCSIS_DES:
+ template_job.aes_dec_key_expanded = NULL;
+ if (!is_submit_invalid(mb_mgr, &template_job,
+ 204))
+ return 1;
+ break;
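+ /* CNTR and CCM use only the encryption key schedule,
+ * even for decrypt (see fill_in_job()) */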
+ case CNTR:
+ case CCM:
+ template_job.aes_enc_key_expanded = NULL;
+ if (!is_submit_invalid(mb_mgr, &template_job,
+ 204))
+ return 1;
+ break;
+ case DOCSIS_SEC_BPI:
+ template_job.aes_enc_key_expanded = NULL;
+ if (!is_submit_invalid(mb_mgr, &template_job,
+ 204))
+ return 1;
+ template_job.aes_enc_key_expanded =
+ template_job.aes_dec_key_expanded;
+ template_job.aes_dec_key_expanded = NULL;
+ if (!is_submit_invalid(mb_mgr, &template_job,
+ 204))
+ return 1;
+ break;
+ case NULL_CIPHER:
+ case CUSTOM_CIPHER:
+ default:
+ break;
+ }
+ printf(".");
+ }
+
+ /* clean up */
+ while ((job = IMB_FLUSH_JOB(mb_mgr)) != NULL)
+ ;
+
+ printf("\n");
+ return 0;
+}
+
+int
+api_test(const enum arch_type arch, struct MB_MGR *mb_mgr)
+{
+ int errors = 0;
+
+ (void) arch; /* unused */
+
+ errors += test_job_api(mb_mgr);
+ errors += test_job_invalid_mac_args(mb_mgr);
+ errors += test_job_invalid_cipher_args(mb_mgr);
+
+ if (0 == errors)
+ printf("...Pass\n");
+ else
+ printf("...Fail\n");
+
+ return errors;
+}
diff --git a/src/spdk/intel-ipsec-mb/LibTestApp/ccm_test.c b/src/spdk/intel-ipsec-mb/LibTestApp/ccm_test.c
new file mode 100644
index 000000000..19ea25ea8
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/LibTestApp/ccm_test.c
@@ -0,0 +1,2092 @@
+/*****************************************************************************
+ Copyright (c) 2017-2018, Intel Corporation
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*****************************************************************************/
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <intel-ipsec-mb.h>
+#include "gcm_ctr_vectors_test.h"
+#include "utils.h"
+
+int ccm_test(const enum arch_type arch, struct MB_MGR *mb_mgr);
+
+/*
+ * Test vectors from https://tools.ietf.org/html/rfc3610
+ */
+
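+/*
+ * For each packet vector NN below, clear_len_NN is the number of cleartext
+ * (authenticated-only) header octets and auth_len_NN is the CBC-MAC tag
+ * length in bytes.
+ */
+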
+/*
+ * =============== Packet Vector #1 ==================
+ * AES Key = C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF
+ * Nonce = 00 00 00 03 02 01 00 A0 A1 A2 A3 A4 A5
+ * Total packet length = 31. [Input with 8 cleartext header octets]
+ * 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F
+ * 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E
+ * CBC IV in: 59 00 00 00 03 02 01 00 A0 A1 A2 A3 A4 A5 00 17
+ * CBC IV out:EB 9D 55 47 73 09 55 AB 23 1E 0A 2D FE 4B 90 D6
+ * After xor: EB 95 55 46 71 0A 51 AE 25 19 0A 2D FE 4B 90 D6 [hdr]
+ * After AES: CD B6 41 1E 3C DC 9B 4F 5D 92 58 B6 9E E7 F0 91
+ * After xor: C5 BF 4B 15 30 D1 95 40 4D 83 4A A5 8A F2 E6 86 [msg]
+ * After AES: 9C 38 40 5E A0 3C 1B C9 04 B5 8B 40 C7 6C A2 EB
+ * After xor: 84 21 5A 45 BC 21 05 C9 04 B5 8B 40 C7 6C A2 EB [msg]
+ * After AES: 2D C6 97 E4 11 CA 83 A8 60 C2 C4 06 CC AA 54 2F
+ * CBC-MAC : 2D C6 97 E4 11 CA 83 A8
+ * CTR Start: 01 00 00 00 03 02 01 00 A0 A1 A2 A3 A4 A5 00 01
+ * CTR[0001]: 50 85 9D 91 6D CB 6D DD E0 77 C2 D1 D4 EC 9F 97
+ * CTR[0002]: 75 46 71 7A C6 DE 9A FF 64 0C 9C 06 DE 6D 0D 8F
+ * CTR[MAC ]: 3A 2E 46 C8 EC 33 A5 48
+ * Total packet length = 39. [Authenticated and Encrypted Output]
+ * 00 01 02 03 04 05 06 07 58 8C 97 9A 61 C6 63 D2
+ * F0 66 D0 C2 C0 F9 89 80 6D 5F 6B 61 DA C3 84 17
+ * E8 D1 2C FD F9 26 E0
+ */
+static const uint8_t keys_01[] = {
+ 0xC0, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7,
+ 0xC8, 0xC9, 0xCA, 0xCB, 0xCC, 0xCD, 0xCE, 0xCF
+};
+static const uint8_t nonce_01[] = {
+ 0x00, 0x00, 0x00, 0x03, 0x02, 0x01, 0x00, 0xA0,
+ 0xA1, 0xA2, 0xA3, 0xA4, 0xA5
+};
+static const uint8_t packet_in_01[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E
+};
+static const uint8_t packet_out_01[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x58, 0x8C, 0x97, 0x9A, 0x61, 0xC6, 0x63, 0xD2,
+ 0xF0, 0x66, 0xD0, 0xC2, 0xC0, 0xF9, 0x89, 0x80,
+ 0x6D, 0x5F, 0x6B, 0x61, 0xDA, 0xC3, 0x84, 0x17,
+ 0xE8, 0xD1, 0x2C, 0xFD, 0xF9, 0x26, 0xE0
+};
+#define clear_len_01 8
+#define auth_len_01 8
+
+/*
+ * =============== Packet Vector #2 ==================
+ * AES Key = C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF
+ * Nonce = 00 00 00 04 03 02 01 A0 A1 A2 A3 A4 A5
+ * Total packet length = 32. [Input with 8 cleartext header octets]
+ * 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F
+ * 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F
+ * CBC IV in: 59 00 00 00 04 03 02 01 A0 A1 A2 A3 A4 A5 00 18
+ * CBC IV out:F0 C2 54 D3 CA 03 E2 39 70 BD 24 A8 4C 39 9E 77
+ * After xor: F0 CA 54 D2 C8 00 E6 3C 76 BA 24 A8 4C 39 9E 77 [hdr]
+ * After AES: 48 DE 8B 86 28 EA 4A 40 00 AA 42 C2 95 BF 4A 8C
+ * After xor: 40 D7 81 8D 24 E7 44 4F 10 BB 50 D1 81 AA 5C 9B [msg]
+ * After AES: 0F 89 FF BC A6 2B C2 4F 13 21 5F 16 87 96 AA 33
+ * After xor: 17 90 E5 A7 BA 36 DC 50 13 21 5F 16 87 96 AA 33 [msg]
+ * After AES: F7 B9 05 6A 86 92 6C F3 FB 16 3D C4 99 EF AA 11
+ * CBC-MAC : F7 B9 05 6A 86 92 6C F3
+ * CTR Start: 01 00 00 00 04 03 02 01 A0 A1 A2 A3 A4 A5 00 01
+ * CTR[0001]: 7A C0 10 3D ED 38 F6 C0 39 0D BA 87 1C 49 91 F4
+ * CTR[0002]: D4 0C DE 22 D5 F9 24 24 F7 BE 9A 56 9D A7 9F 51
+ * CTR[MAC ]: 57 28 D0 04 96 D2 65 E5
+ * Total packet length = 40. [Authenticated and Encrypted Output]
+ * 00 01 02 03 04 05 06 07 72 C9 1A 36 E1 35 F8 CF
+ * 29 1C A8 94 08 5C 87 E3 CC 15 C4 39 C9 E4 3A 3B
+ * A0 91 D5 6E 10 40 09 16
+ */
+static const uint8_t keys_02[] = {
+ 0xC0, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7,
+ 0xC8, 0xC9, 0xCA, 0xCB, 0xCC, 0xCD, 0xCE, 0xCF
+};
+static const uint8_t nonce_02[] = {
+ 0x00, 0x00, 0x00, 0x04, 0x03, 0x02, 0x01, 0xA0,
+ 0xA1, 0xA2, 0xA3, 0xA4, 0xA5
+};
+static const uint8_t packet_in_02[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F
+};
+static const uint8_t packet_out_02[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x72, 0xC9, 0x1A, 0x36, 0xE1, 0x35, 0xF8, 0xCF,
+ 0x29, 0x1C, 0xA8, 0x94, 0x08, 0x5C, 0x87, 0xE3,
+ 0xCC, 0x15, 0xC4, 0x39, 0xC9, 0xE4, 0x3A, 0x3B,
+ 0xA0, 0x91, 0xD5, 0x6E, 0x10, 0x40, 0x09, 0x16
+};
+#define clear_len_02 8
+#define auth_len_02 8
+
+/*
+ * =============== Packet Vector #3 ==================
+ * AES Key = C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF
+ * Nonce = 00 00 00 05 04 03 02 A0 A1 A2 A3 A4 A5
+ * Total packet length = 33. [Input with 8 cleartext header octets]
+ * 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F
+ * 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F
+ * 20
+ * CBC IV in: 59 00 00 00 05 04 03 02 A0 A1 A2 A3 A4 A5 00 19
+ * CBC IV out:6F 8A 12 F7 BF 8D 4D C5 A1 19 6E 95 DF F0 B4 27
+ * After xor: 6F 82 12 F6 BD 8E 49 C0 A7 1E 6E 95 DF F0 B4 27 [hdr]
+ * After AES: 37 E9 B7 8C C2 20 17 E7 33 80 43 0C BE F4 28 24
+ * After xor: 3F E0 BD 87 CE 2D 19 E8 23 91 51 1F AA E1 3E 33 [msg]
+ * After AES: 90 CA 05 13 9F 4D 4E CF 22 6F E9 81 C5 9E 2D 40
+ * After xor: 88 D3 1F 08 83 50 50 D0 02 6F E9 81 C5 9E 2D 40 [msg]
+ * After AES: 73 B4 67 75 C0 26 DE AA 41 03 97 D6 70 FE 5F B0
+ * CBC-MAC : 73 B4 67 75 C0 26 DE AA
+ * CTR Start: 01 00 00 00 05 04 03 02 A0 A1 A2 A3 A4 A5 00 01
+ * CTR[0001]: 59 B8 EF FF 46 14 73 12 B4 7A 1D 9D 39 3D 3C FF
+ * CTR[0002]: 69 F1 22 A0 78 C7 9B 89 77 89 4C 99 97 5C 23 78
+ * CTR[MAC ]: 39 6E C0 1A 7D B9 6E 6F
+ * Total packet length = 41. [Authenticated and Encrypted Output]
+ * 00 01 02 03 04 05 06 07 51 B1 E5 F4 4A 19 7D 1D
+ * A4 6B 0F 8E 2D 28 2A E8 71 E8 38 BB 64 DA 85 96
+ * 57 4A DA A7 6F BD 9F B0 C5
+ */
+static const uint8_t keys_03[] = {
+ 0xC0, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7,
+ 0xC8, 0xC9, 0xCA, 0xCB, 0xCC, 0xCD, 0xCE, 0xCF
+};
+static const uint8_t nonce_03[] = {
+ 0x00, 0x00, 0x00, 0x05, 0x04, 0x03, 0x02, 0xA0,
+ 0xA1, 0xA2, 0xA3, 0xA4, 0xA5
+};
+static const uint8_t packet_in_03[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F,
+ 0x20
+};
+static const uint8_t packet_out_03[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x51, 0xB1, 0xE5, 0xF4, 0x4A, 0x19, 0x7D, 0x1D,
+ 0xA4, 0x6B, 0x0F, 0x8E, 0x2D, 0x28, 0x2A, 0xE8,
+ 0x71, 0xE8, 0x38, 0xBB, 0x64, 0xDA, 0x85, 0x96,
+ 0x57, 0x4A, 0xDA, 0xA7, 0x6F, 0xBD, 0x9F, 0xB0,
+ 0xC5
+};
+#define clear_len_03 8
+#define auth_len_03 8
+
+/*
+ * =============== Packet Vector #4 ==================
+ * AES Key = C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF
+ * Nonce = 00 00 00 06 05 04 03 A0 A1 A2 A3 A4 A5
+ * Total packet length = 31. [Input with 12 cleartext header octets]
+ * 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F
+ * 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E
+ * CBC IV in: 59 00 00 00 06 05 04 03 A0 A1 A2 A3 A4 A5 00 13
+ * CBC IV out:06 65 2C 60 0E F5 89 63 CA C3 25 A9 CD 3E 2B E1
+ * After xor: 06 69 2C 61 0C F6 8D 66 CC C4 2D A0 C7 35 2B E1 [hdr]
+ * After AES: A0 75 09 AC 15 C2 58 86 04 2F 80 60 54 FE A6 86
+ * After xor: AC 78 07 A3 05 D3 4A 95 10 3A 96 77 4C E7 BC 9D [msg]
+ * After AES: 64 4C 09 90 D9 1B 83 E9 AB 4B 8E ED 06 6F F5 BF
+ * After xor: 78 51 17 90 D9 1B 83 E9 AB 4B 8E ED 06 6F F5 BF [msg]
+ * After AES: 4B 4F 4B 39 B5 93 E6 BF B0 B2 C2 B7 0F 29 CD 7A
+ * CBC-MAC : 4B 4F 4B 39 B5 93 E6 BF
+ * CTR Start: 01 00 00 00 06 05 04 03 A0 A1 A2 A3 A4 A5 00 01
+ * CTR[0001]: AE 81 66 6A 83 8B 88 6A EE BF 4A 5B 32 84 50 8A
+ * CTR[0002]: D1 B1 92 06 AC 93 9E 2F B6 DD CE 10 A7 74 FD 8D
+ * CTR[MAC ]: DD 87 2A 80 7C 75 F8 4E
+ * Total packet length = 39. [Authenticated and Encrypted Output]
+ * 00 01 02 03 04 05 06 07 08 09 0A 0B A2 8C 68 65
+ * 93 9A 9A 79 FA AA 5C 4C 2A 9D 4A 91 CD AC 8C 96
+ * C8 61 B9 C9 E6 1E F1
+ */
+static const uint8_t keys_04[] = {
+ 0xC0, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7,
+ 0xC8, 0xC9, 0xCA, 0xCB, 0xCC, 0xCD, 0xCE, 0xCF
+};
+static const uint8_t nonce_04[] = {
+ 0x00, 0x00, 0x00, 0x06, 0x05, 0x04, 0x03, 0xA0,
+ 0xA1, 0xA2, 0xA3, 0xA4, 0xA5
+};
+static const uint8_t packet_in_04[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E
+};
+static const uint8_t packet_out_04[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0xA2, 0x8C, 0x68, 0x65,
+ 0x93, 0x9A, 0x9A, 0x79, 0xFA, 0xAA, 0x5C, 0x4C,
+ 0x2A, 0x9D, 0x4A, 0x91, 0xCD, 0xAC, 0x8C, 0x96,
+ 0xC8, 0x61, 0xB9, 0xC9, 0xE6, 0x1E, 0xF1
+};
+#define clear_len_04 12
+#define auth_len_04 8
+
+/*
+ * =============== Packet Vector #5 ==================
+ * AES Key = C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF
+ * Nonce = 00 00 00 07 06 05 04 A0 A1 A2 A3 A4 A5
+ * Total packet length = 32. [Input with 12 cleartext header octets]
+ * 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F
+ * 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F
+ * CBC IV in: 59 00 00 00 07 06 05 04 A0 A1 A2 A3 A4 A5 00 14
+ * CBC IV out:00 4C 50 95 45 80 3C 48 51 CD E1 3B 56 C8 9A 85
+ * After xor: 00 40 50 94 47 83 38 4D 57 CA E9 32 5C C3 9A 85 [hdr]
+ * After AES: E2 B8 F7 CE 49 B2 21 72 84 A8 EA 84 FA AD 67 5C
+ * After xor: EE B5 F9 C1 59 A3 33 61 90 BD FC 93 E2 B4 7D 47 [msg]
+ * After AES: 3E FB 36 72 25 DB 11 01 D3 C2 2F 0E CA FF 44 F3
+ * After xor: 22 E6 28 6D 25 DB 11 01 D3 C2 2F 0E CA FF 44 F3 [msg]
+ * After AES: 48 B9 E8 82 55 05 4A B5 49 0A 95 F9 34 9B 4B 5E
+ * CBC-MAC : 48 B9 E8 82 55 05 4A B5
+ * CTR Start: 01 00 00 00 07 06 05 04 A0 A1 A2 A3 A4 A5 00 01
+ * CTR[0001]: D0 FC F5 74 4D 8F 31 E8 89 5B 05 05 4B 7C 90 C3
+ * CTR[0002]: 72 A0 D4 21 9F 0D E1 D4 04 83 BC 2D 3D 0C FC 2A
+ * CTR[MAC ]: 19 51 D7 85 28 99 67 26
+ * Total packet length = 40. [Authenticated and Encrypted Output]
+ * 00 01 02 03 04 05 06 07 08 09 0A 0B DC F1 FB 7B
+ * 5D 9E 23 FB 9D 4E 13 12 53 65 8A D8 6E BD CA 3E
+ * 51 E8 3F 07 7D 9C 2D 93
+ */
+static const uint8_t keys_05[] = {
+ 0xC0, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7,
+ 0xC8, 0xC9, 0xCA, 0xCB, 0xCC, 0xCD, 0xCE, 0xCF
+};
+static const uint8_t nonce_05[] = {
+ 0x00, 0x00, 0x00, 0x07, 0x06, 0x05, 0x04, 0xA0,
+ 0xA1, 0xA2, 0xA3, 0xA4, 0xA5
+};
+static const uint8_t packet_in_05[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F
+};
+static const uint8_t packet_out_05[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0xDC, 0xF1, 0xFB, 0x7B,
+ 0x5D, 0x9E, 0x23, 0xFB, 0x9D, 0x4E, 0x13, 0x12,
+ 0x53, 0x65, 0x8A, 0xD8, 0x6E, 0xBD, 0xCA, 0x3E,
+ 0x51, 0xE8, 0x3F, 0x07, 0x7D, 0x9C, 0x2D, 0x93
+};
+#define clear_len_05 12
+#define auth_len_05 8
+
+/*
+ * =============== Packet Vector #6 ==================
+ * AES Key = C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF
+ * Nonce = 00 00 00 08 07 06 05 A0 A1 A2 A3 A4 A5
+ * Total packet length = 33. [Input with 12 cleartext header octets]
+ * 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F
+ * 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F
+ * 20
+ * CBC IV in: 59 00 00 00 08 07 06 05 A0 A1 A2 A3 A4 A5 00 15
+ * CBC IV out:04 72 DA 4C 6F F6 0A 63 06 52 1A 06 04 80 CD E5
+ * After xor: 04 7E DA 4D 6D F5 0E 66 00 55 12 0F 0E 8B CD E5 [hdr]
+ * After AES: 64 4C 36 A5 A2 27 37 62 0B 89 F1 D7 BF F2 73 D4
+ * After xor: 68 41 38 AA B2 36 25 71 1F 9C E7 C0 A7 EB 69 CF [msg]
+ * After AES: 41 E1 19 CD 19 24 CE 77 F1 2F A6 60 C1 6E BB 4E
+ * After xor: 5D FC 07 D2 39 24 CE 77 F1 2F A6 60 C1 6E BB 4E [msg]
+ * After AES: A5 27 D8 15 6A C3 59 BF 1C B8 86 E6 2F 29 91 29
+ * CBC-MAC : A5 27 D8 15 6A C3 59 BF
+ * CTR Start: 01 00 00 00 08 07 06 05 A0 A1 A2 A3 A4 A5 00 01
+ * CTR[0001]: 63 CC BE 1E E0 17 44 98 45 64 B2 3A 8D 24 5C 80
+ * CTR[0002]: 39 6D BA A2 A7 D2 CB D4 B5 E1 7C 10 79 45 BB C0
+ * CTR[MAC ]: E5 7D DC 56 C6 52 92 2B
+ * Total packet length = 41. [Authenticated and Encrypted Output]
+ * 00 01 02 03 04 05 06 07 08 09 0A 0B 6F C1 B0 11
+ * F0 06 56 8B 51 71 A4 2D 95 3D 46 9B 25 70 A4 BD
+ * 87 40 5A 04 43 AC 91 CB 94
+ */
+static const uint8_t keys_06[] = {
+ 0xC0, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7,
+ 0xC8, 0xC9, 0xCA, 0xCB, 0xCC, 0xCD, 0xCE, 0xCF
+};
+static const uint8_t nonce_06[] = {
+ 0x00, 0x00, 0x00, 0x08, 0x07, 0x06, 0x05, 0xA0,
+ 0xA1, 0xA2, 0xA3, 0xA4, 0xA5
+};
+static const uint8_t packet_in_06[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F,
+ 0x20
+};
+static const uint8_t packet_out_06[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x6F, 0xC1, 0xB0, 0x11,
+ 0xF0, 0x06, 0x56, 0x8B, 0x51, 0x71, 0xA4, 0x2D,
+ 0x95, 0x3D, 0x46, 0x9B, 0x25, 0x70, 0xA4, 0xBD,
+ 0x87, 0x40, 0x5A, 0x04, 0x43, 0xAC, 0x91, 0xCB,
+ 0x94
+};
+#define clear_len_06 12
+#define auth_len_06 8
+
+/*
+ * =============== Packet Vector #7 ==================
+ * AES Key = C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF
+ * Nonce = 00 00 00 09 08 07 06 A0 A1 A2 A3 A4 A5
+ * Total packet length = 31. [Input with 8 cleartext header octets]
+ * 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F
+ * 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E
+ * CBC IV in: 61 00 00 00 09 08 07 06 A0 A1 A2 A3 A4 A5 00 17
+ * CBC IV out:60 06 C5 72 DA 23 9C BF A0 5B 0A DE D2 CD A8 1E
+ * After xor: 60 0E C5 73 D8 20 98 BA A6 5C 0A DE D2 CD A8 1E [hdr]
+ * After AES: 41 7D E2 AE 94 E2 EA D9 00 FC 44 FC D0 69 52 27
+ * After xor: 49 74 E8 A5 98 EF E4 D6 10 ED 56 EF C4 7C 44 30 [msg]
+ * After AES: 2A 6C 42 CA 49 D7 C7 01 C5 7D 59 FF 87 16 49 0E
+ * After xor: 32 75 58 D1 55 CA D9 01 C5 7D 59 FF 87 16 49 0E [msg]
+ * After AES: 89 8B D6 45 4E 27 20 BB D2 7E F3 15 7A 7C 90 B2
+ * CBC-MAC : 89 8B D6 45 4E 27 20 BB D2 7E
+ * CTR Start: 01 00 00 00 09 08 07 06 A0 A1 A2 A3 A4 A5 00 01
+ * CTR[0001]: 09 3C DB B9 C5 52 4F DA C1 C5 EC D2 91 C4 70 AF
+ * CTR[0002]: 11 57 83 86 E2 C4 72 B4 8E CC 8A AD AB 77 6F CB
+ * CTR[MAC ]: 8D 07 80 25 62 B0 8C 00 A6 EE
+ * Total packet length = 41. [Authenticated and Encrypted Output]
+ * 00 01 02 03 04 05 06 07 01 35 D1 B2 C9 5F 41 D5
+ * D1 D4 FE C1 85 D1 66 B8 09 4E 99 9D FE D9 6C 04
+ * 8C 56 60 2C 97 AC BB 74 90
+ */
+static const uint8_t keys_07[] = {
+ 0xC0, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7,
+ 0xC8, 0xC9, 0xCA, 0xCB, 0xCC, 0xCD, 0xCE, 0xCF
+};
+static const uint8_t nonce_07[] = {
+ 0x00, 0x00, 0x00, 0x09, 0x08, 0x07, 0x06, 0xA0,
+ 0xA1, 0xA2, 0xA3, 0xA4, 0xA5
+};
+static const uint8_t packet_in_07[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E
+};
+static const uint8_t packet_out_07[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x01, 0x35, 0xD1, 0xB2, 0xC9, 0x5F, 0x41, 0xD5,
+ 0xD1, 0xD4, 0xFE, 0xC1, 0x85, 0xD1, 0x66, 0xB8,
+ 0x09, 0x4E, 0x99, 0x9D, 0xFE, 0xD9, 0x6C, 0x04,
+ 0x8C, 0x56, 0x60, 0x2C, 0x97, 0xAC, 0xBB, 0x74,
+ 0x90
+};
+#define clear_len_07 8
+#define auth_len_07 10
+
+/*
+ * =============== Packet Vector #8 ==================
+ * AES Key = C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF
+ * Nonce = 00 00 00 0A 09 08 07 A0 A1 A2 A3 A4 A5
+ * Total packet length = 32. [Input with 8 cleartext header octets]
+ * 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F
+ * 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F
+ * CBC IV in: 61 00 00 00 0A 09 08 07 A0 A1 A2 A3 A4 A5 00 18
+ * CBC IV out:63 A3 FA E4 6C 79 F3 FA 78 38 B8 A2 80 36 B6 0B
+ * After xor: 63 AB FA E5 6E 7A F7 FF 7E 3F B8 A2 80 36 B6 0B [hdr]
+ * After AES: 1C 99 1A 3D B7 60 79 27 34 40 79 1F AD 8B 5B 02
+ * After xor: 14 90 10 36 BB 6D 77 28 24 51 6B 0C B9 9E 4D 15 [msg]
+ * After AES: 14 19 E8 E8 CB BE 75 58 E1 E3 BE 4B 6C 9F 82 E3
+ * After xor: 0C 00 F2 F3 D7 A3 6B 47 E1 E3 BE 4B 6C 9F 82 E3 [msg]
+ * After AES: E0 16 E8 1C 7F 7B 8A 38 A5 38 F2 CB 5B B6 C1 F2
+ * CBC-MAC : E0 16 E8 1C 7F 7B 8A 38 A5 38
+ * CTR Start: 01 00 00 00 0A 09 08 07 A0 A1 A2 A3 A4 A5 00 01
+ * CTR[0001]: 73 7C 33 91 CC 8E 13 DD E0 AA C5 4B 6D B7 EB 98
+ * CTR[0002]: 74 B7 71 77 C5 AA C5 3B 04 A4 F8 70 8E 92 EB 2B
+ * CTR[MAC ]: 21 6D AC 2F 8B 4F 1C 07 91 8C
+ * Total packet length = 42. [Authenticated and Encrypted Output]
+ * 00 01 02 03 04 05 06 07 7B 75 39 9A C0 83 1D D2
+ * F0 BB D7 58 79 A2 FD 8F 6C AE 6B 6C D9 B7 DB 24
+ * C1 7B 44 33 F4 34 96 3F 34 B4
+ */
+static const uint8_t keys_08[] = {
+ 0xC0, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7,
+ 0xC8, 0xC9, 0xCA, 0xCB, 0xCC, 0xCD, 0xCE, 0xCF
+};
+static const uint8_t nonce_08[] = {
+ 0x00, 0x00, 0x00, 0x0a, 0x09, 0x08, 0x07, 0xA0,
+ 0xA1, 0xA2, 0xA3, 0xA4, 0xA5
+};
+static const uint8_t packet_in_08[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F
+};
+static const uint8_t packet_out_08[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x7B, 0x75, 0x39, 0x9A, 0xC0, 0x83, 0x1D, 0xD2,
+ 0xF0, 0xBB, 0xD7, 0x58, 0x79, 0xA2, 0xFD, 0x8F,
+ 0x6C, 0xAE, 0x6B, 0x6C, 0xD9, 0xB7, 0xDB, 0x24,
+ 0xC1, 0x7B, 0x44, 0x33, 0xF4, 0x34, 0x96, 0x3F,
+ 0x34, 0xB4
+};
+#define clear_len_08 8
+#define auth_len_08 10
+
+/*
+ * =============== Packet Vector #9 ==================
+ * AES Key = C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF
+ * Nonce = 00 00 00 0B 0A 09 08 A0 A1 A2 A3 A4 A5
+ * Total packet length = 33. [Input with 8 cleartext header octets]
+ * 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F
+ * 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F
+ * 20
+ * CBC IV in: 61 00 00 00 0B 0A 09 08 A0 A1 A2 A3 A4 A5 00 19
+ * CBC IV out:4F 2C 86 11 1E 08 2A DD 6B 44 21 3A B5 13 13 16
+ * After xor: 4F 24 86 10 1C 0B 2E D8 6D 43 21 3A B5 13 13 16 [hdr]
+ * After AES: F6 EC 56 87 3C 57 12 DC 9C C5 3C A8 D4 D1 ED 0A
+ * After xor: FE E5 5C 8C 30 5A 1C D3 8C D4 2E BB C0 C4 FB 1D [msg]
+ * After AES: 17 C1 80 A5 31 53 D4 C3 03 85 0C 95 65 80 34 52
+ * After xor: 0F D8 9A BE 2D 4E CA DC 23 85 0C 95 65 80 34 52 [msg]
+ * After AES: 46 A1 F6 E2 B1 6E 75 F8 1C F5 6B 1A 80 04 44 1B
+ * CBC-MAC : 46 A1 F6 E2 B1 6E 75 F8 1C F5
+ * CTR Start: 01 00 00 00 0B 0A 09 08 A0 A1 A2 A3 A4 A5 00 01
+ * CTR[0001]: 8A 5A 10 6B C0 29 9A 55 5B 93 6B 0B 0E A0 DE 5A
+ * CTR[0002]: EA 05 FD E2 AB 22 5C FE B7 73 12 CB 88 D9 A5 4A
+ * CTR[MAC ]: AC 3D F1 07 DA 30 C4 86 43 BB
+ * Total packet length = 43. [Authenticated and Encrypted Output]
+ * 00 01 02 03 04 05 06 07 82 53 1A 60 CC 24 94 5A
+ * 4B 82 79 18 1A B5 C8 4D F2 1C E7 F9 B7 3F 42 E1
+ * 97 EA 9C 07 E5 6B 5E B1 7E 5F 4E
+ */
+static const uint8_t keys_09[] = {
+ 0xC0, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7,
+ 0xC8, 0xC9, 0xCA, 0xCB, 0xCC, 0xCD, 0xCE, 0xCF
+};
+static const uint8_t nonce_09[] = {
+ 0x00, 0x00, 0x00, 0x0b, 0x0a, 0x09, 0x08, 0xA0,
+ 0xA1, 0xA2, 0xA3, 0xA4, 0xA5
+};
+static const uint8_t packet_in_09[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F,
+ 0x20
+};
+static const uint8_t packet_out_09[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x82, 0x53, 0x1A, 0x60, 0xCC, 0x24, 0x94, 0x5A,
+ 0x4B, 0x82, 0x79, 0x18, 0x1A, 0xB5, 0xC8, 0x4D,
+ 0xF2, 0x1C, 0xE7, 0xF9, 0xB7, 0x3F, 0x42, 0xE1,
+ 0x97, 0xEA, 0x9C, 0x07, 0xE5, 0x6B, 0x5E, 0xB1,
+ 0x7E, 0x5F, 0x4E
+};
+#define clear_len_09 8
+#define auth_len_09 10
+
+/*
+ * =============== Packet Vector #10 ==================
+ * AES Key = C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF
+ * Nonce = 00 00 00 0C 0B 0A 09 A0 A1 A2 A3 A4 A5
+ * Total packet length = 31. [Input with 12 cleartext header octets]
+ * 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F
+ * 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E
+ * CBC IV in: 61 00 00 00 0C 0B 0A 09 A0 A1 A2 A3 A4 A5 00 13
+ * CBC IV out:7F B8 0A 32 E9 80 57 46 EC 31 6C 3A B2 A2 EB 5D
+ * After xor: 7F B4 0A 33 EB 83 53 43 EA 36 64 33 B8 A9 EB 5D [hdr]
+ * After AES: 7E 96 96 BF F1 56 D6 A8 6E AC F5 7B 7F 23 47 5A
+ * After xor: 72 9B 98 B0 E1 47 C4 BB 7A B9 E3 6C 67 3A 5D 41 [msg]
+ * After AES: 8B 4A EE 42 04 24 8A 59 FA CC 88 66 57 66 DD 72
+ * After xor: 97 57 F0 42 04 24 8A 59 FA CC 88 66 57 66 DD 72 [msg]
+ * After AES: 41 63 89 36 62 ED D7 EB CD 6E 15 C1 89 48 62 05
+ * CBC-MAC : 41 63 89 36 62 ED D7 EB CD 6E
+ * CTR Start: 01 00 00 00 0C 0B 0A 09 A0 A1 A2 A3 A4 A5 00 01
+ * CTR[0001]: 0B 39 2B 9B 05 66 97 06 3F 12 56 8F 2B 13 A1 0F
+ * CTR[0002]: 07 89 65 25 23 40 94 3B 9E 69 B2 56 CC 5E F7 31
+ * CTR[MAC ]: 17 09 20 76 09 A0 4E 72 45 B3
+ * Total packet length = 41. [Authenticated and Encrypted Output]
+ * 00 01 02 03 04 05 06 07 08 09 0A 0B 07 34 25 94
+ * 15 77 85 15 2B 07 40 98 33 0A BB 14 1B 94 7B 56
+ * 6A A9 40 6B 4D 99 99 88 DD
+ */
+static const uint8_t keys_10[] = {
+ 0xC0, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7,
+ 0xC8, 0xC9, 0xCA, 0xCB, 0xCC, 0xCD, 0xCE, 0xCF
+};
+static const uint8_t nonce_10[] = {
+ 0x00, 0x00, 0x00, 0x0c, 0x0b, 0x0a, 0x09, 0xA0,
+ 0xA1, 0xA2, 0xA3, 0xA4, 0xA5
+};
+static const uint8_t packet_in_10[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E
+};
+static const uint8_t packet_out_10[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x07, 0x34, 0x25, 0x94,
+ 0x15, 0x77, 0x85, 0x15, 0x2B, 0x07, 0x40, 0x98,
+ 0x33, 0x0A, 0xBB, 0x14, 0x1B, 0x94, 0x7B, 0x56,
+ 0x6A, 0xA9, 0x40, 0x6B, 0x4D, 0x99, 0x99, 0x88,
+ 0xDD
+};
+#define clear_len_10 12
+#define auth_len_10 10
+
+/*
+ * =============== Packet Vector #11 ==================
+ * AES Key = C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF
+ * Nonce = 00 00 00 0D 0C 0B 0A A0 A1 A2 A3 A4 A5
+ * Total packet length = 32. [Input with 12 cleartext header octets]
+ * 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F
+ * 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F
+ * CBC IV in: 61 00 00 00 0D 0C 0B 0A A0 A1 A2 A3 A4 A5 00 14
+ * CBC IV out:B0 84 85 79 51 D2 FA 42 76 EF 3A D7 14 B9 62 87
+ * After xor: B0 88 85 78 53 D1 FE 47 70 E8 32 DE 1E B2 62 87 [hdr]
+ * After AES: C9 B3 64 7E D8 79 2A 5C 65 B7 CE CC 19 0A 97 0A
+ * After xor: C5 BE 6A 71 C8 68 38 4F 71 A2 D8 DB 01 13 8D 11 [msg]
+ * After AES: 34 0F 69 17 FA B9 19 D6 1D AC D0 35 36 D6 55 8B
+ * After xor: 28 12 77 08 FA B9 19 D6 1D AC D0 35 36 D6 55 8B [msg]
+ * After AES: 6B 5E 24 34 12 CC C2 AD 6F 1B 11 C3 A1 A9 D8 BC
+ * CBC-MAC : 6B 5E 24 34 12 CC C2 AD 6F 1B
+ * CTR Start: 01 00 00 00 0D 0C 0B 0A A0 A1 A2 A3 A4 A5 00 01
+ * CTR[0001]: 6B 66 BC 0C 90 A1 F1 12 FC BE 6F 4E 12 20 77 BC
+ * CTR[0002]: 97 9E 57 2B BE 65 8A E5 CC 20 11 83 2A 9A 9B 5B
+ * CTR[MAC ]: 9E 64 86 DD 02 B6 49 C1 6D 37
+ * Total packet length = 42. [Authenticated and Encrypted Output]
+ * 00 01 02 03 04 05 06 07 08 09 0A 0B 67 6B B2 03
+ * 80 B0 E3 01 E8 AB 79 59 0A 39 6D A7 8B 83 49 34
+ * F5 3A A2 E9 10 7A 8B 6C 02 2C
+ */
+static const uint8_t keys_11[] = {
+ 0xC0, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7,
+ 0xC8, 0xC9, 0xCA, 0xCB, 0xCC, 0xCD, 0xCE, 0xCF
+};
+static const uint8_t nonce_11[] = {
+ 0x00, 0x00, 0x00, 0x0d, 0x0c, 0x0b, 0x0a, 0xA0,
+ 0xA1, 0xA2, 0xA3, 0xA4, 0xA5
+};
+static const uint8_t packet_in_11[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F
+};
+static const uint8_t packet_out_11[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x67, 0x6B, 0xB2, 0x03,
+ 0x80, 0xB0, 0xE3, 0x01, 0xE8, 0xAB, 0x79, 0x59,
+ 0x0A, 0x39, 0x6D, 0xA7, 0x8B, 0x83, 0x49, 0x34,
+ 0xF5, 0x3A, 0xA2, 0xE9, 0x10, 0x7A, 0x8B, 0x6C,
+ 0x02, 0x2C
+};
+#define clear_len_11 12
+#define auth_len_11 10
+
+/*
+ * =============== Packet Vector #12 ==================
+ * AES Key = C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF
+ * Nonce = 00 00 00 0E 0D 0C 0B A0 A1 A2 A3 A4 A5
+ * Total packet length = 33. [Input with 12 cleartext header octets]
+ * 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F
+ * 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F
+ * 20
+ * CBC IV in: 61 00 00 00 0E 0D 0C 0B A0 A1 A2 A3 A4 A5 00 15
+ * CBC IV out:5F 8E 8D 02 AD 95 7C 5A 36 14 CF 63 40 16 97 4F
+ * After xor: 5F 82 8D 03 AF 96 78 5F 30 13 C7 6A 4A 1D 97 4F [hdr]
+ * After AES: 63 FA BD 69 B9 55 65 FF 54 AA F4 60 88 7D EC 9F
+ * After xor: 6F F7 B3 66 A9 44 77 EC 40 BF E2 77 90 64 F6 84 [msg]
+ * After AES: 5A 76 5F 0B 93 CE 4F 6A B4 1D 91 30 18 57 6A D7
+ * After xor: 46 6B 41 14 B3 CE 4F 6A B4 1D 91 30 18 57 6A D7 [msg]
+ * After AES: 9D 66 92 41 01 08 D5 B6 A1 45 85 AC AF 86 32 E8
+ * CBC-MAC : 9D 66 92 41 01 08 D5 B6 A1 45
+ * CTR Start: 01 00 00 00 0E 0D 0C 0B A0 A1 A2 A3 A4 A5 00 01
+ * CTR[0001]: CC F2 AE D9 E0 4A C9 74 E6 58 55 B3 2B 94 30 BF
+ * CTR[0002]: A2 CA AC 11 63 F4 07 E5 E5 F6 E3 B3 79 0F 79 F8
+ * CTR[MAC ]: 50 7C 31 57 63 EF 78 D3 77 9E
+ * Total packet length = 43. [Authenticated and Encrypted Output]
+ * 00 01 02 03 04 05 06 07 08 09 0A 0B C0 FF A0 D6
+ * F0 5B DB 67 F2 4D 43 A4 33 8D 2A A4 BE D7 B2 0E
+ * 43 CD 1A A3 16 62 E7 AD 65 D6 DB
+ */
+static const uint8_t keys_12[] = {
+ 0xC0, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7,
+ 0xC8, 0xC9, 0xCA, 0xCB, 0xCC, 0xCD, 0xCE, 0xCF
+};
+static const uint8_t nonce_12[] = {
+ 0x00, 0x00, 0x00, 0x0e, 0x0d, 0x0c, 0x0b, 0xA0,
+ 0xA1, 0xA2, 0xA3, 0xA4, 0xA5
+};
+static const uint8_t packet_in_12[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F,
+ 0x20
+};
+static const uint8_t packet_out_12[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0xC0, 0xFF, 0xA0, 0xD6,
+ 0xF0, 0x5B, 0xDB, 0x67, 0xF2, 0x4D, 0x43, 0xA4,
+ 0x33, 0x8D, 0x2A, 0xA4, 0xBE, 0xD7, 0xB2, 0x0E,
+ 0x43, 0xCD, 0x1A, 0xA3, 0x16, 0x62, 0xE7, 0xAD,
+ 0x65, 0xD6, 0xDB
+};
+#define clear_len_12 12
+#define auth_len_12 10
+
+/*
+ * =============== Packet Vector #13 ==================
+ * AES Key = D7 82 8D 13 B2 B0 BD C3 25 A7 62 36 DF 93 CC 6B
+ * Nonce = 00 41 2B 4E A9 CD BE 3C 96 96 76 6C FA
+ * Total packet length = 31. [Input with 8 cleartext header octets]
+ * 0B E1 A8 8B AC E0 18 B1 08 E8 CF 97 D8 20 EA 25
+ * 84 60 E9 6A D9 CF 52 89 05 4D 89 5C EA C4 7C
+ * CBC IV in: 59 00 41 2B 4E A9 CD BE 3C 96 96 76 6C FA 00 17
+ * CBC IV out:33 AE C3 1A 1F B7 CC 35 E5 DA D2 BA C0 90 D9 A3
+ * After xor: 33 A6 C8 FB B7 3C 60 D5 FD 6B D2 BA C0 90 D9 A3 [hdr]
+ * After AES: B7 56 CA 1E 5B 42 C6 9C 58 E3 0A F5 2B F7 7C FD
+ * After xor: BF BE 05 89 83 62 2C B9 DC 83 E3 9F F2 38 2E 74 [msg]
+ * After AES: 33 3D 3A 3D 07 B5 3C 7B 22 0E 96 1A 18 A9 A1 9E
+ * After xor: 36 70 B3 61 ED 71 40 7B 22 0E 96 1A 18 A9 A1 9E [msg]
+ * After AES: 14 BD DB 6B F9 01 63 4D FB 56 51 83 BC 74 93 F7
+ * CBC-MAC : 14 BD DB 6B F9 01 63 4D
+ * CTR Start: 01 00 41 2B 4E A9 CD BE 3C 96 96 76 6C FA 00 01
+ * CTR[0001]: 44 51 B0 11 7A 84 82 BF 03 19 AE C1 59 5E BD DA
+ * CTR[0002]: 83 EB 76 E1 3A 44 84 7F 92 20 09 07 76 B8 25 C5
+ * CTR[MAC ]: F3 31 2C A0 F5 DC B4 FE
+ * Total packet length = 39. [Authenticated and Encrypted Output]
+ * 0B E1 A8 8B AC E0 18 B1 4C B9 7F 86 A2 A4 68 9A
+ * 87 79 47 AB 80 91 EF 53 86 A6 FF BD D0 80 F8 E7
+ * 8C F7 CB 0C DD D7 B3
+ */
+static const uint8_t keys_13[] = {
+ 0xD7, 0x82, 0x8D, 0x13, 0xB2, 0xB0, 0xBD, 0xC3,
+ 0x25, 0xA7, 0x62, 0x36, 0xDF, 0x93, 0xCC, 0x6B
+};
+static const uint8_t nonce_13[] = {
+ 0x00, 0x41, 0x2b, 0x4e, 0xa9, 0xcd, 0xbe, 0x3c,
+ 0x96, 0x96, 0x76, 0x6c, 0xfa
+};
+static const uint8_t packet_in_13[] = {
+ 0x0B, 0xE1, 0xA8, 0x8B, 0xAC, 0xE0, 0x18, 0xB1,
+ 0x08, 0xE8, 0xCF, 0x97, 0xD8, 0x20, 0xEA, 0x25,
+ 0x84, 0x60, 0xE9, 0x6A, 0xD9, 0xCF, 0x52, 0x89,
+ 0x05, 0x4D, 0x89, 0x5C, 0xEA, 0xC4, 0x7C
+};
+static const uint8_t packet_out_13[] = {
+ 0x0B, 0xE1, 0xA8, 0x8B, 0xAC, 0xE0, 0x18, 0xB1,
+ 0x4C, 0xB9, 0x7F, 0x86, 0xA2, 0xA4, 0x68, 0x9A,
+ 0x87, 0x79, 0x47, 0xAB, 0x80, 0x91, 0xEF, 0x53,
+ 0x86, 0xA6, 0xFF, 0xBD, 0xD0, 0x80, 0xF8, 0xE7,
+ 0x8C, 0xF7, 0xCB, 0x0C, 0xDD, 0xD7, 0xB3
+};
+#define clear_len_13 8
+#define auth_len_13 8
+
+/*
+ * =============== Packet Vector #14 ==================
+ * AES Key = D7 82 8D 13 B2 B0 BD C3 25 A7 62 36 DF 93 CC 6B
+ * Nonce = 00 33 56 8E F7 B2 63 3C 96 96 76 6C FA
+ * Total packet length = 32. [Input with 8 cleartext header octets]
+ * 63 01 8F 76 DC 8A 1B CB 90 20 EA 6F 91 BD D8 5A
+ * FA 00 39 BA 4B AF F9 BF B7 9C 70 28 94 9C D0 EC
+ * CBC IV in: 59 00 33 56 8E F7 B2 63 3C 96 96 76 6C FA 00 18
+ * CBC IV out:42 0D B1 50 BB 0C 44 DA 83 E4 52 09 55 99 67 E3
+ * After xor: 42 05 D2 51 34 7A 98 50 98 2F 52 09 55 99 67 E3 [hdr]
+ * After AES: EA D1 CA 56 02 02 09 5C E6 12 B0 D2 18 A0 DD 44
+ * After xor: 7A F1 20 39 93 BF D1 06 1C 12 89 68 53 0F 24 FB [msg]
+ * After AES: 51 77 41 69 C3 DE 6B 24 13 27 74 90 F5 FF C5 62
+ * After xor: E6 EB 31 41 57 42 BB C8 13 27 74 90 F5 FF C5 62 [msg]
+ * After AES: D4 CC 3B 82 DF 9F CC 56 7E E5 83 61 D7 8D FB 5E
+ * CBC-MAC : D4 CC 3B 82 DF 9F CC 56
+ * CTR Start: 01 00 33 56 8E F7 B2 63 3C 96 96 76 6C FA 00 01
+ * CTR[0001]: DC EB F4 13 38 3C 66 A0 5A 72 55 EF 98 D7 FF AD
+ * CTR[0002]: 2F 54 2C BA 15 D6 6C DF E1 EC 46 8F 0E 68 A1 24
+ * CTR[MAC ]: 11 E2 D3 9F A2 E8 0C DC
+ * Total packet length = 40. [Authenticated and Encrypted Output]
+ * 63 01 8F 76 DC 8A 1B CB 4C CB 1E 7C A9 81 BE FA
+ * A0 72 6C 55 D3 78 06 12 98 C8 5C 92 81 4A BC 33
+ * C5 2E E8 1D 7D 77 C0 8A
+ */
+static const uint8_t keys_14[] = {
+ 0xD7, 0x82, 0x8D, 0x13, 0xB2, 0xB0, 0xBD, 0xC3,
+ 0x25, 0xA7, 0x62, 0x36, 0xDF, 0x93, 0xCC, 0x6B
+};
+static const uint8_t nonce_14[] = {
+ 0x00, 0x33, 0x56, 0x8E, 0xF7, 0xB2, 0x63, 0x3C,
+ 0x96, 0x96, 0x76, 0x6C, 0xFA
+};
+static const uint8_t packet_in_14[] = {
+ 0x63, 0x01, 0x8F, 0x76, 0xDC, 0x8A, 0x1B, 0xCB,
+ 0x90, 0x20, 0xEA, 0x6F, 0x91, 0xBD, 0xD8, 0x5A,
+ 0xFA, 0x00, 0x39, 0xBA, 0x4B, 0xAF, 0xF9, 0xBF,
+ 0xB7, 0x9C, 0x70, 0x28, 0x94, 0x9C, 0xD0, 0xEC,
+};
+static const uint8_t packet_out_14[] = {
+ 0x63, 0x01, 0x8F, 0x76, 0xDC, 0x8A, 0x1B, 0xCB,
+ 0x4C, 0xCB, 0x1E, 0x7C, 0xA9, 0x81, 0xBE, 0xFA,
+ 0xA0, 0x72, 0x6C, 0x55, 0xD3, 0x78, 0x06, 0x12,
+ 0x98, 0xC8, 0x5C, 0x92, 0x81, 0x4A, 0xBC, 0x33,
+ 0xC5, 0x2E, 0xE8, 0x1D, 0x7D, 0x77, 0xC0, 0x8A
+};
+#define clear_len_14 8
+#define auth_len_14 8
+
+/*
+ * =============== Packet Vector #15 ==================
+ * AES Key = D7 82 8D 13 B2 B0 BD C3 25 A7 62 36 DF 93 CC 6B
+ * Nonce = 00 10 3F E4 13 36 71 3C 96 96 76 6C FA
+ * Total packet length = 33. [Input with 8 cleartext header octets]
+ * AA 6C FA 36 CA E8 6B 40 B9 16 E0 EA CC 1C 00 D7
+ * DC EC 68 EC 0B 3B BB 1A 02 DE 8A 2D 1A A3 46 13
+ * 2E
+ * CBC IV in: 59 00 10 3F E4 13 36 71 3C 96 96 76 6C FA 00 19
+ * CBC IV out:B3 26 49 FF D5 9F 56 0F 02 2D 11 E2 62 C5 BE EA
+ * After xor: B3 2E E3 93 2F A9 9C E7 69 6D 11 E2 62 C5 BE EA [hdr]
+ * After AES: 82 50 9E E5 B2 FF DB CA 9B D0 2E 20 6B 3F B7 AD
+ * After xor: 3B 46 7E 0F 7E E3 DB 1D 47 3C 46 CC 60 04 0C B7 [msg]
+ * After AES: 80 46 0E 4C 08 3A D0 3F B9 A9 13 BE E4 DE 2F 66
+ * After xor: 82 98 84 61 12 99 96 2C 97 A9 13 BE E4 DE 2F 66 [msg]
+ * After AES: 47 29 CB 00 31 F1 81 C1 92 68 4B 89 A4 71 50 E7
+ * CBC-MAC : 47 29 CB 00 31 F1 81 C1
+ * CTR Start: 01 00 10 3F E4 13 36 71 3C 96 96 76 6C FA 00 01
+ * CTR[0001]: 08 C4 DA C8 EC C1 C0 7B 4C E1 F2 4C 37 5A 47 EE
+ * CTR[0002]: A7 87 2E 6C 6D C4 4E 84 26 02 50 4C 3F A5 73 C5
+ * CTR[MAC ]: E0 5F B2 6E EA 83 B4 C7
+ * Total packet length = 41. [Authenticated and Encrypted Output]
+ * AA 6C FA 36 CA E8 6B 40 B1 D2 3A 22 20 DD C0 AC
+ * 90 0D 9A A0 3C 61 FC F4 A5 59 A4 41 77 67 08 97
+ * 08 A7 76 79 6E DB 72 35 06
+ */
+static const uint8_t keys_15[] = {
+ 0xD7, 0x82, 0x8D, 0x13, 0xB2, 0xB0, 0xBD, 0xC3,
+ 0x25, 0xA7, 0x62, 0x36, 0xDF, 0x93, 0xCC, 0x6B
+};
+static const uint8_t nonce_15[] = {
+ 0x00, 0x10, 0x3F, 0xE4, 0x13, 0x36, 0x71, 0x3C,
+ 0x96, 0x96, 0x76, 0x6C, 0xFA
+};
+static const uint8_t packet_in_15[] = {
+ 0xAA, 0x6C, 0xFA, 0x36, 0xCA, 0xE8, 0x6B, 0x40,
+ 0xB9, 0x16, 0xE0, 0xEA, 0xCC, 0x1C, 0x00, 0xD7,
+ 0xDC, 0xEC, 0x68, 0xEC, 0x0B, 0x3B, 0xBB, 0x1A,
+ 0x02, 0xDE, 0x8A, 0x2D, 0x1A, 0xA3, 0x46, 0x13,
+ 0x2E
+};
+static const uint8_t packet_out_15[] = {
+ 0xAA, 0x6C, 0xFA, 0x36, 0xCA, 0xE8, 0x6B, 0x40,
+ 0xB1, 0xD2, 0x3A, 0x22, 0x20, 0xDD, 0xC0, 0xAC,
+ 0x90, 0x0D, 0x9A, 0xA0, 0x3C, 0x61, 0xFC, 0xF4,
+ 0xA5, 0x59, 0xA4, 0x41, 0x77, 0x67, 0x08, 0x97,
+ 0x08, 0xA7, 0x76, 0x79, 0x6E, 0xDB, 0x72, 0x35,
+ 0x06
+};
+#define clear_len_15 8
+#define auth_len_15 8
+
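+/*
+ * Note on the traces above (derived from RFC 3610, for reference):
+ * the first octet of the "CBC IV in" block (B_0) is the flags byte
+ *     flags = 64*Adata + 8*((M - 2)/2) + (L - 1)
+ * With a 13-octet nonce L = 15 - 13 = 2, and with an 8-octet MAC
+ * (M = 8) as in vector #15 above, flags = 0x40 + 0x18 + 0x01 = 0x59.
+ * The last two octets of B_0 encode the message length, e.g.
+ * 0x0019 = 25 = 33 - 8 cleartext octets for vector #15.  The
+ * "CTR Start" block uses flags = L - 1 = 0x01, the same nonce and a
+ * 2-octet counter; counter 0 encrypts the CBC-MAC and counters 1..n
+ * encrypt the payload.
+ */
+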
+/*
+ * =============== Packet Vector #16 ==================
+ * AES Key = D7 82 8D 13 B2 B0 BD C3 25 A7 62 36 DF 93 CC 6B
+ * Nonce = 00 76 4C 63 B8 05 8E 3C 96 96 76 6C FA
+ * Total packet length = 31. [Input with 12 cleartext header octets]
+ * D0 D0 73 5C 53 1E 1B EC F0 49 C2 44 12 DA AC 56
+ * 30 EF A5 39 6F 77 0C E1 A6 6B 21 F7 B2 10 1C
+ * CBC IV in: 59 00 76 4C 63 B8 05 8E 3C 96 96 76 6C FA 00 13
+ * CBC IV out:AB DC 4E C9 AA 72 33 97 DF 2D AD 76 33 DE 3B 0D
+ * After xor: AB D0 9E 19 D9 2E 60 89 C4 C1 5D 3F F1 9A 3B 0D [hdr]
+ * After AES: 62 86 F6 2F 23 42 63 B0 1C FD 8C 37 40 74 81 EB
+ * After xor: 70 5C 5A 79 13 AD C6 89 73 8A 80 D6 E6 1F A0 1C [msg]
+ * After AES: 88 95 84 18 CF 79 CA BE EB C0 0C C4 86 E6 01 F7
+ * After xor: 3A 85 98 18 CF 79 CA BE EB C0 0C C4 86 E6 01 F7 [msg]
+ * After AES: C1 85 92 D9 84 CD 67 80 63 D1 D9 6D C1 DF A1 11
+ * CBC-MAC : C1 85 92 D9 84 CD 67 80
+ * CTR Start: 01 00 76 4C 63 B8 05 8E 3C 96 96 76 6C FA 00 01
+ * CTR[0001]: 06 08 FF 95 A6 94 D5 59 F4 0B B7 9D EF FA 41 DF
+ * CTR[0002]: 80 55 3A 75 78 38 04 A9 64 8B 68 DD 7F DC DD 7A
+ * CTR[MAC ]: 5B EA DB 4E DF 07 B9 2F
+ * Total packet length = 39. [Authenticated and Encrypted Output]
+ * D0 D0 73 5C 53 1E 1B EC F0 49 C2 44 14 D2 53 C3
+ * 96 7B 70 60 9B 7C BB 7C 49 91 60 28 32 45 26 9A
+ * 6F 49 97 5B CA DE AF
+ */
+static const uint8_t keys_16[] = {
+ 0xD7, 0x82, 0x8D, 0x13, 0xB2, 0xB0, 0xBD, 0xC3,
+ 0x25, 0xA7, 0x62, 0x36, 0xDF, 0x93, 0xCC, 0x6B
+};
+static const uint8_t nonce_16[] = {
+ 0x00, 0x76, 0x4C, 0x63, 0xB8, 0x05, 0x8E, 0x3C,
+ 0x96, 0x96, 0x76, 0x6C, 0xFA
+};
+static const uint8_t packet_in_16[] = {
+ 0xD0, 0xD0, 0x73, 0x5C, 0x53, 0x1E, 0x1B, 0xEC,
+ 0xF0, 0x49, 0xC2, 0x44, 0x12, 0xDA, 0xAC, 0x56,
+ 0x30, 0xEF, 0xA5, 0x39, 0x6F, 0x77, 0x0C, 0xE1,
+ 0xA6, 0x6B, 0x21, 0xF7, 0xB2, 0x10, 0x1C
+};
+static const uint8_t packet_out_16[] = {
+ 0xD0, 0xD0, 0x73, 0x5C, 0x53, 0x1E, 0x1B, 0xEC,
+ 0xF0, 0x49, 0xC2, 0x44, 0x14, 0xD2, 0x53, 0xC3,
+ 0x96, 0x7B, 0x70, 0x60, 0x9B, 0x7C, 0xBB, 0x7C,
+ 0x49, 0x91, 0x60, 0x28, 0x32, 0x45, 0x26, 0x9A,
+ 0x6F, 0x49, 0x97, 0x5B, 0xCA, 0xDE, 0xAF
+};
+#define clear_len_16 12
+#define auth_len_16 8
+
+/*
+ * =============== Packet Vector #17 ==================
+ * AES Key = D7 82 8D 13 B2 B0 BD C3 25 A7 62 36 DF 93 CC 6B
+ * Nonce = 00 F8 B6 78 09 4E 3B 3C 96 96 76 6C FA
+ * Total packet length = 32. [Input with 12 cleartext header octets]
+ * 77 B6 0F 01 1C 03 E1 52 58 99 BC AE E8 8B 6A 46
+ * C7 8D 63 E5 2E B8 C5 46 EF B5 DE 6F 75 E9 CC 0D
+ * CBC IV in: 59 00 F8 B6 78 09 4E 3B 3C 96 96 76 6C FA 00 14
+ * CBC IV out:F4 68 FE 5D B1 53 0B 7A 5A A5 FB 27 40 CF 6E 33
+ * After xor: F4 64 89 EB BE 52 17 79 BB F7 A3 BE FC 61 6E 33 [hdr]
+ * After AES: 23 29 0E 0B 33 45 9A 83 32 2D E4 06 86 67 10 04
+ * After xor: CB A2 64 4D F4 C8 F9 66 1C 95 21 40 69 D2 CE 6B [msg]
+ * After AES: 8F BE D4 0F 8B 89 B7 B8 20 D5 5F E0 3C E2 43 11
+ * After xor: FA 57 18 02 8B 89 B7 B8 20 D5 5F E0 3C E2 43 11 [msg]
+ * After AES: 6A DB 15 B6 71 81 B2 E2 2B E3 4A F2 B2 83 E2 29
+ * CBC-MAC : 6A DB 15 B6 71 81 B2 E2
+ * CTR Start: 01 00 F8 B6 78 09 4E 3B 3C 96 96 76 6C FA 00 01
+ * CTR[0001]: BD CE 95 5C CF D3 81 0A 91 EA 77 A6 A4 5B C0 4C
+ * CTR[0002]: 43 2E F2 32 AE 36 D8 92 22 BF 63 37 E6 B2 6C E8
+ * CTR[MAC ]: 1C F7 19 C1 35 7F CC DE
+ * Total packet length = 40. [Authenticated and Encrypted Output]
+ * 77 B6 0F 01 1C 03 E1 52 58 99 BC AE 55 45 FF 1A
+ * 08 5E E2 EF BF 52 B2 E0 4B EE 1E 23 36 C7 3E 3F
+ * 76 2C 0C 77 44 FE 7E 3C
+ */
+static const uint8_t keys_17[] = {
+ 0xD7, 0x82, 0x8D, 0x13, 0xB2, 0xB0, 0xBD, 0xC3,
+ 0x25, 0xA7, 0x62, 0x36, 0xDF, 0x93, 0xCC, 0x6B
+};
+static const uint8_t nonce_17[] = {
+ 0x00, 0xF8, 0xB6, 0x78, 0x09, 0x4E, 0x3B, 0x3C,
+ 0x96, 0x96, 0x76, 0x6C, 0xFA
+};
+static const uint8_t packet_in_17[] = {
+ 0x77, 0xB6, 0x0F, 0x01, 0x1C, 0x03, 0xE1, 0x52,
+ 0x58, 0x99, 0xBC, 0xAE, 0xE8, 0x8B, 0x6A, 0x46,
+ 0xC7, 0x8D, 0x63, 0xE5, 0x2E, 0xB8, 0xC5, 0x46,
+ 0xEF, 0xB5, 0xDE, 0x6F, 0x75, 0xE9, 0xCC, 0x0D
+};
+static const uint8_t packet_out_17[] = {
+ 0x77, 0xB6, 0x0F, 0x01, 0x1C, 0x03, 0xE1, 0x52,
+ 0x58, 0x99, 0xBC, 0xAE, 0x55, 0x45, 0xFF, 0x1A,
+ 0x08, 0x5E, 0xE2, 0xEF, 0xBF, 0x52, 0xB2, 0xE0,
+ 0x4B, 0xEE, 0x1E, 0x23, 0x36, 0xC7, 0x3E, 0x3F,
+ 0x76, 0x2C, 0x0C, 0x77, 0x44, 0xFE, 0x7E, 0x3C
+};
+#define clear_len_17 12
+#define auth_len_17 8
+
+/*
+ * =============== Packet Vector #18 ==================
+ * AES Key = D7 82 8D 13 B2 B0 BD C3 25 A7 62 36 DF 93 CC 6B
+ * Nonce = 00 D5 60 91 2D 3F 70 3C 96 96 76 6C FA
+ * Total packet length = 33. [Input with 12 cleartext header octets]
+ * CD 90 44 D2 B7 1F DB 81 20 EA 60 C0 64 35 AC BA
+ * FB 11 A8 2E 2F 07 1D 7C A4 A5 EB D9 3A 80 3B A8
+ * 7F
+ * CBC IV in: 59 00 D5 60 91 2D 3F 70 3C 96 96 76 6C FA 00 15
+ * CBC IV out:BA 37 74 54 D7 20 A4 59 25 97 F6 A3 D1 D6 BA 67
+ * After xor: BA 3B B9 C4 93 F2 13 46 FE 16 D6 49 B1 16 BA 67 [hdr]
+ * After AES: 81 6A 20 20 38 D0 A6 30 CB E0 B7 3C 39 BB CE 05
+ * After xor: E5 5F 8C 9A C3 C1 0E 1E E4 E7 AA 40 9D 1E 25 DC [msg]
+ * After AES: 6D 5C 15 FD 85 2D 5C 3C E3 03 3D 85 DA 57 BD AC
+ * After xor: 57 DC 2E 55 FA 2D 5C 3C E3 03 3D 85 DA 57 BD AC [msg]
+ * After AES: B0 4A 1C 23 BC 39 B6 51 76 FD 5B FF 9B C1 28 5E
+ * CBC-MAC : B0 4A 1C 23 BC 39 B6 51
+ * CTR Start: 01 00 D5 60 91 2D 3F 70 3C 96 96 76 6C FA 00 01
+ * CTR[0001]: 64 A2 C5 56 50 CE E0 4C 7A 93 D8 EE F5 43 E8 8E
+ * CTR[0002]: 18 E7 65 AC B7 B0 E9 AF 09 2B D0 20 6C A1 C8 3C
+ * CTR[MAC ]: F7 43 82 79 5C 49 F3 00
+ * Total packet length = 41. [Authenticated and Encrypted Output]
+ * CD 90 44 D2 B7 1F DB 81 20 EA 60 C0 00 97 69 EC
+ * AB DF 48 62 55 94 C5 92 51 E6 03 57 22 67 5E 04
+ * C8 47 09 9E 5A E0 70 45 51
+ */
+static const uint8_t keys_18[] = {
+ 0xD7, 0x82, 0x8D, 0x13, 0xB2, 0xB0, 0xBD, 0xC3,
+ 0x25, 0xA7, 0x62, 0x36, 0xDF, 0x93, 0xCC, 0x6B
+};
+static const uint8_t nonce_18[] = {
+ 0x00, 0xD5, 0x60, 0x91, 0x2D, 0x3F, 0x70, 0x3C,
+ 0x96, 0x96, 0x76, 0x6C, 0xFA
+};
+static const uint8_t packet_in_18[] = {
+ 0xCD, 0x90, 0x44, 0xD2, 0xB7, 0x1F, 0xDB, 0x81,
+ 0x20, 0xEA, 0x60, 0xC0, 0x64, 0x35, 0xAC, 0xBA,
+ 0xFB, 0x11, 0xA8, 0x2E, 0x2F, 0x07, 0x1D, 0x7C,
+ 0xA4, 0xA5, 0xEB, 0xD9, 0x3A, 0x80, 0x3B, 0xA8,
+ 0x7F
+};
+static const uint8_t packet_out_18[] = {
+ 0xCD, 0x90, 0x44, 0xD2, 0xB7, 0x1F, 0xDB, 0x81,
+ 0x20, 0xEA, 0x60, 0xC0, 0x00, 0x97, 0x69, 0xEC,
+ 0xAB, 0xDF, 0x48, 0x62, 0x55, 0x94, 0xC5, 0x92,
+ 0x51, 0xE6, 0x03, 0x57, 0x22, 0x67, 0x5E, 0x04,
+ 0xC8, 0x47, 0x09, 0x9E, 0x5A, 0xE0, 0x70, 0x45,
+ 0x51
+};
+#define clear_len_18 12
+#define auth_len_18 8
+
+/*
+ * =============== Packet Vector #19 ==================
+ * AES Key = D7 82 8D 13 B2 B0 BD C3 25 A7 62 36 DF 93 CC 6B
+ * Nonce = 00 42 FF F8 F1 95 1C 3C 96 96 76 6C FA
+ * Total packet length = 31. [Input with 8 cleartext header octets]
+ * D8 5B C7 E6 9F 94 4F B8 8A 19 B9 50 BC F7 1A 01
+ * 8E 5E 67 01 C9 17 87 65 98 09 D6 7D BE DD 18
+ * CBC IV in: 61 00 42 FF F8 F1 95 1C 3C 96 96 76 6C FA 00 17
+ * CBC IV out:44 F7 CC 9C 2B DD 2F 45 F6 38 25 6B 73 6E 1D 7A
+ * After xor: 44 FF 14 C7 EC 3B B0 D1 B9 80 25 6B 73 6E 1D 7A [hdr]
+ * After AES: 57 C3 73 F8 00 AA 5F CC 7B CF 1D 1B DD BB 4C 52
+ * After xor: DD DA CA A8 BC 5D 45 CD F5 91 7A 1A 14 AC CB 37 [msg]
+ * After AES: 42 4E 93 72 72 C8 79 B6 11 C7 A5 9F 47 8D 9F D8
+ * After xor: DA 47 45 0F CC 15 61 B6 11 C7 A5 9F 47 8D 9F D8 [msg]
+ * After AES: 9A CB 03 F8 B9 DB C8 D2 D2 D7 A4 B4 95 25 08 67
+ * CBC-MAC : 9A CB 03 F8 B9 DB C8 D2 D2 D7
+ * CTR Start: 01 00 42 FF F8 F1 95 1C 3C 96 96 76 6C FA 00 01
+ * CTR[0001]: 36 38 34 FA 28 83 3D B7 55 66 0D 98 65 0D 68 46
+ * CTR[0002]: 35 E9 63 54 87 16 72 56 3F 0C 08 AF 78 44 31 A9
+ * CTR[MAC ]: F9 B7 FA 46 7B 9B 40 45 14 6D
+ * Total packet length = 41. [Authenticated and Encrypted Output]
+ * D8 5B C7 E6 9F 94 4F B8 BC 21 8D AA 94 74 27 B6
+ * DB 38 6A 99 AC 1A EF 23 AD E0 B5 29 39 CB 6A 63
+ * 7C F9 BE C2 40 88 97 C6 BA
+ */
+static const uint8_t keys_19[] = {
+ 0xD7, 0x82, 0x8D, 0x13, 0xB2, 0xB0, 0xBD, 0xC3,
+ 0x25, 0xA7, 0x62, 0x36, 0xDF, 0x93, 0xCC, 0x6B
+};
+static const uint8_t nonce_19[] = {
+ 0x00, 0x42, 0xFF, 0xF8, 0xF1, 0x95, 0x1C, 0x3C,
+ 0x96, 0x96, 0x76, 0x6C, 0xFA
+};
+static const uint8_t packet_in_19[] = {
+ 0xD8, 0x5B, 0xC7, 0xE6, 0x9F, 0x94, 0x4F, 0xB8,
+ 0x8A, 0x19, 0xB9, 0x50, 0xBC, 0xF7, 0x1A, 0x01,
+ 0x8E, 0x5E, 0x67, 0x01, 0xC9, 0x17, 0x87, 0x65,
+ 0x98, 0x09, 0xD6, 0x7D, 0xBE, 0xDD, 0x18
+};
+static const uint8_t packet_out_19[] = {
+ 0xD8, 0x5B, 0xC7, 0xE6, 0x9F, 0x94, 0x4F, 0xB8,
+ 0xBC, 0x21, 0x8D, 0xAA, 0x94, 0x74, 0x27, 0xB6,
+ 0xDB, 0x38, 0x6A, 0x99, 0xAC, 0x1A, 0xEF, 0x23,
+ 0xAD, 0xE0, 0xB5, 0x29, 0x39, 0xCB, 0x6A, 0x63,
+ 0x7C, 0xF9, 0xBE, 0xC2, 0x40, 0x88, 0x97, 0xC6,
+ 0xBA
+};
+#define clear_len_19 8
+#define auth_len_19 10
+
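+/*
+ * Note: vectors #19 onwards use a 10-octet MAC (M = 10), so the B_0
+ * flags byte becomes 0x40 + 8*((10 - 2)/2) + (2 - 1) = 0x61, which is
+ * why the "CBC IV in" lines of these vectors start with 61 instead of 59.
+ */
+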
+/*
+ * ================= Packet Vector #20 ==================
+ * AES Key = D7 82 8D 13 B2 B0 BD C3 25 A7 62 36 DF 93 CC 6B
+ * Nonce = 00 92 0F 40 E5 6C DC 3C 96 96 76 6C FA
+ * Total packet length = 32. [Input with 8 cleartext header octets]
+ * 74 A0 EB C9 06 9F 5B 37 17 61 43 3C 37 C5 A3 5F
+ * C1 F3 9F 40 63 02 EB 90 7C 61 63 BE 38 C9 84 37
+ * CBC IV in: 61 00 92 0F 40 E5 6C DC 3C 96 96 76 6C FA 00 18
+ * CBC IV out:60 CB 21 CE 40 06 50 AE 2A D2 BE 52 9F 5F 0F C2
+ * After xor: 60 C3 55 6E AB CF 56 31 71 E5 BE 52 9F 5F 0F C2 [hdr]
+ * After AES: 03 20 64 14 35 32 5D 95 C8 A2 50 40 93 28 DA 9B
+ * After xor: 14 41 27 28 02 F7 FE CA 09 51 CF 00 F0 2A 31 0B [msg]
+ * After AES: B9 E8 87 95 ED F7 F0 08 15 15 F0 14 E2 FE 0E 48
+ * After xor: C5 89 E4 2B D5 3E 74 3F 15 15 F0 14 E2 FE 0E 48 [msg]
+ * After AES: 8F AD 0C 23 E9 63 7E 87 FA 21 45 51 1B 47 DE F1
+ * CBC-MAC : 8F AD 0C 23 E9 63 7E 87 FA 21
+ * CTR Start: 01 00 92 0F 40 E5 6C DC 3C 96 96 76 6C FA 00 01
+ * CTR[0001]: 4F 71 A5 C1 12 42 E3 7D 29 F0 FE E4 1B E1 02 5F
+ * CTR[0002]: 34 2B D3 F1 7C B7 7B C1 79 0B 05 05 61 59 27 2C
+ * CTR[MAC ]: 7F 09 7B EF C6 AA C1 D3 73 65
+ * Total packet length = 42. [Authenticated and Encrypted Output]
+ * 74 A0 EB C9 06 9F 5B 37 58 10 E6 FD 25 87 40 22
+ * E8 03 61 A4 78 E3 E9 CF 48 4A B0 4F 44 7E FF F6
+ * F0 A4 77 CC 2F C9 BF 54 89 44
+ */
+static const uint8_t keys_20[] = {
+ 0xD7, 0x82, 0x8D, 0x13, 0xB2, 0xB0, 0xBD, 0xC3,
+ 0x25, 0xA7, 0x62, 0x36, 0xDF, 0x93, 0xCC, 0x6B
+};
+static const uint8_t nonce_20[] = {
+ 0x00, 0x92, 0x0F, 0x40, 0xE5, 0x6C, 0xDC, 0x3C,
+ 0x96, 0x96, 0x76, 0x6C, 0xFA
+};
+static const uint8_t packet_in_20[] = {
+ 0x74, 0xA0, 0xEB, 0xC9, 0x06, 0x9F, 0x5B, 0x37,
+ 0x17, 0x61, 0x43, 0x3C, 0x37, 0xC5, 0xA3, 0x5F,
+ 0xC1, 0xF3, 0x9F, 0x40, 0x63, 0x02, 0xEB, 0x90,
+ 0x7C, 0x61, 0x63, 0xBE, 0x38, 0xC9, 0x84, 0x37
+};
+static const uint8_t packet_out_20[] = {
+ 0x74, 0xA0, 0xEB, 0xC9, 0x06, 0x9F, 0x5B, 0x37,
+ 0x58, 0x10, 0xE6, 0xFD, 0x25, 0x87, 0x40, 0x22,
+ 0xE8, 0x03, 0x61, 0xA4, 0x78, 0xE3, 0xE9, 0xCF,
+ 0x48, 0x4A, 0xB0, 0x4F, 0x44, 0x7E, 0xFF, 0xF6,
+ 0xF0, 0xA4, 0x77, 0xCC, 0x2F, 0xC9, 0xBF, 0x54,
+ 0x89, 0x44
+};
+#define clear_len_20 8
+#define auth_len_20 10
+
+/*
+ * =============== Packet Vector #21 ==================
+ * AES Key = D7 82 8D 13 B2 B0 BD C3 25 A7 62 36 DF 93 CC 6B
+ * Nonce = 00 27 CA 0C 71 20 BC 3C 96 96 76 6C FA
+ * Total packet length = 33. [Input with 8 cleartext header octets]
+ * 44 A3 AA 3A AE 64 75 CA A4 34 A8 E5 85 00 C6 E4
+ * 15 30 53 88 62 D6 86 EA 9E 81 30 1B 5A E4 22 6B
+ * FA
+ * CBC IV in: 61 00 27 CA 0C 71 20 BC 3C 96 96 76 6C FA 00 19
+ * CBC IV out:43 07 C0 73 A8 9E E1 D5 05 27 B2 9A 62 48 D6 D2
+ * After xor: 43 0F 84 D0 02 A4 4F B1 70 ED B2 9A 62 48 D6 D2 [hdr]
+ * After AES: B6 0B C6 F5 84 01 75 BC 01 27 70 F1 11 8D 75 10
+ * After xor: 12 3F 6E 10 01 01 B3 58 14 17 23 79 73 5B F3 FA [msg]
+ * After AES: 7D 5E 64 92 CE 2C B9 EA 7E 4C 4A 09 09 89 C8 FB
+ * After xor: E3 DF 54 89 94 C8 9B 81 84 4C 4A 09 09 89 C8 FB [msg]
+ * After AES: 68 5F 8D 79 D2 2B 9B 74 21 DF 4C 3E 87 BA 0A AF
+ * CBC-MAC : 68 5F 8D 79 D2 2B 9B 74 21 DF
+ * CTR Start: 01 00 27 CA 0C 71 20 BC 3C 96 96 76 6C FA 00 01
+ * CTR[0001]: 56 8A 45 9E 40 09 48 67 EB 85 E0 9E 6A 2E 64 76
+ * CTR[0002]: A6 00 AA 92 92 03 54 9A AE EF 2C CC 59 13 7A 57
+ * CTR[MAC ]: 25 1E DC DD 3F 11 10 F3 98 11
+ * Total packet length = 43. [Authenticated and Encrypted Output]
+ * 44 A3 AA 3A AE 64 75 CA F2 BE ED 7B C5 09 8E 83
+ * FE B5 B3 16 08 F8 E2 9C 38 81 9A 89 C8 E7 76 F1
+ * 54 4D 41 51 A4 ED 3A 8B 87 B9 CE
+ */
+static const uint8_t keys_21[] = {
+ 0xD7, 0x82, 0x8D, 0x13, 0xB2, 0xB0, 0xBD, 0xC3,
+ 0x25, 0xA7, 0x62, 0x36, 0xDF, 0x93, 0xCC, 0x6B
+};
+static const uint8_t nonce_21[] = {
+ 0x00, 0x27, 0xCA, 0x0C, 0x71, 0x20, 0xBC, 0x3C,
+ 0x96, 0x96, 0x76, 0x6C, 0xFA
+};
+static const uint8_t packet_in_21[] = {
+ 0x44, 0xA3, 0xAA, 0x3A, 0xAE, 0x64, 0x75, 0xCA,
+ 0xA4, 0x34, 0xA8, 0xE5, 0x85, 0x00, 0xC6, 0xE4,
+ 0x15, 0x30, 0x53, 0x88, 0x62, 0xD6, 0x86, 0xEA,
+ 0x9E, 0x81, 0x30, 0x1B, 0x5A, 0xE4, 0x22, 0x6B,
+ 0xFA
+};
+static const uint8_t packet_out_21[] = {
+ 0x44, 0xA3, 0xAA, 0x3A, 0xAE, 0x64, 0x75, 0xCA,
+ 0xF2, 0xBE, 0xED, 0x7B, 0xC5, 0x09, 0x8E, 0x83,
+ 0xFE, 0xB5, 0xB3, 0x16, 0x08, 0xF8, 0xE2, 0x9C,
+ 0x38, 0x81, 0x9A, 0x89, 0xC8, 0xE7, 0x76, 0xF1,
+ 0x54, 0x4D, 0x41, 0x51, 0xA4, 0xED, 0x3A, 0x8B,
+ 0x87, 0xB9, 0xCE
+};
+#define clear_len_21 8
+#define auth_len_21 10
+
+/*
+ * =============== Packet Vector #22 ==================
+ * AES Key = D7 82 8D 13 B2 B0 BD C3 25 A7 62 36 DF 93 CC 6B
+ * Nonce = 00 5B 8C CB CD 9A F8 3C 96 96 76 6C FA
+ * Total packet length = 31. [Input with 12 cleartext header octets]
+ * EC 46 BB 63 B0 25 20 C3 3C 49 FD 70 B9 6B 49 E2
+ * 1D 62 17 41 63 28 75 DB 7F 6C 92 43 D2 D7 C2
+ * CBC IV in: 61 00 5B 8C CB CD 9A F8 3C 96 96 76 6C FA 00 13
+ * CBC IV out:91 14 AD 06 B6 CC 02 35 76 9A B6 14 C4 82 95 03
+ * After xor: 91 18 41 40 0D AF B2 10 56 59 8A 5D 39 F2 95 03 [hdr]
+ * After AES: 29 BD 7C 27 83 E3 E8 D3 C3 5C 01 F4 4C EC BB FA
+ * After xor: 90 D6 35 C5 9E 81 FF 92 A0 74 74 2F 33 80 29 B9 [msg]
+ * After AES: 4E DA F4 0D 21 0B D4 5F FE 97 90 B9 AA EC 34 4C
+ * After xor: 9C 0D 36 0D 21 0B D4 5F FE 97 90 B9 AA EC 34 4C [msg]
+ * After AES: 21 9E F8 90 EA 64 C2 11 A5 37 88 83 E1 BA 22 0D
+ * CBC-MAC : 21 9E F8 90 EA 64 C2 11 A5 37
+ * CTR Start: 01 00 5B 8C CB CD 9A F8 3C 96 96 76 6C FA 00 01
+ * CTR[0001]: 88 BC 19 42 80 C1 FA 3E BE FC EF FB 4D C6 2D 54
+ * CTR[0002]: 3E 59 7D A5 AE 21 CC A4 00 9E 4C 0C 91 F6 22 49
+ * CTR[MAC ]: 5C BC 30 98 66 02 A9 F4 64 A0
+ * Total packet length = 41. [Authenticated and Encrypted Output]
+ * EC 46 BB 63 B0 25 20 C3 3C 49 FD 70 31 D7 50 A0
+ * 9D A3 ED 7F DD D4 9A 20 32 AA BF 17 EC 8E BF 7D
+ * 22 C8 08 8C 66 6B E5 C1 97
+ */
+static const uint8_t keys_22[] = {
+ 0xD7, 0x82, 0x8D, 0x13, 0xB2, 0xB0, 0xBD, 0xC3,
+ 0x25, 0xA7, 0x62, 0x36, 0xDF, 0x93, 0xCC, 0x6B
+};
+static const uint8_t nonce_22[] = {
+ 0x00, 0x5B, 0x8C, 0xCB, 0xCD, 0x9A, 0xF8, 0x3C,
+ 0x96, 0x96, 0x76, 0x6C, 0xFA
+};
+static const uint8_t packet_in_22[] = {
+ 0xEC, 0x46, 0xBB, 0x63, 0xB0, 0x25, 0x20, 0xC3,
+ 0x3C, 0x49, 0xFD, 0x70, 0xB9, 0x6B, 0x49, 0xE2,
+ 0x1D, 0x62, 0x17, 0x41, 0x63, 0x28, 0x75, 0xDB,
+ 0x7F, 0x6C, 0x92, 0x43, 0xD2, 0xD7, 0xC2
+};
+static const uint8_t packet_out_22[] = {
+ 0xEC, 0x46, 0xBB, 0x63, 0xB0, 0x25, 0x20, 0xC3,
+ 0x3C, 0x49, 0xFD, 0x70, 0x31, 0xD7, 0x50, 0xA0,
+ 0x9D, 0xA3, 0xED, 0x7F, 0xDD, 0xD4, 0x9A, 0x20,
+ 0x32, 0xAA, 0xBF, 0x17, 0xEC, 0x8E, 0xBF, 0x7D,
+ 0x22, 0xC8, 0x08, 0x8C, 0x66, 0x6B, 0xE5, 0xC1,
+ 0x97
+};
+#define clear_len_22 12
+#define auth_len_22 10
+
+
+/*
+ * =============== Packet Vector #23 ==================
+ * AES Key = D7 82 8D 13 B2 B0 BD C3 25 A7 62 36 DF 93 CC 6B
+ * Nonce = 00 3E BE 94 04 4B 9A 3C 96 96 76 6C FA
+ * Total packet length = 32. [Input with 12 cleartext header octets]
+ * 47 A6 5A C7 8B 3D 59 42 27 E8 5E 71 E2 FC FB B8
+ * 80 44 2C 73 1B F9 51 67 C8 FF D7 89 5E 33 70 76
+ * CBC IV in: 61 00 3E BE 94 04 4B 9A 3C 96 96 76 6C FA 00 14
+ * CBC IV out:0F 70 3F 5A 54 2C 44 6E 8B 74 A3 73 9B 48 B9 61
+ * After xor: 0F 7C 78 FC 0E EB CF 53 D2 36 84 9B C5 39 B9 61 [hdr]
+ * After AES: 40 5B ED 29 D0 98 AE 91 DB 68 78 F3 68 B8 73 85
+ * After xor: A2 A7 16 91 50 DC 82 E2 C0 91 29 94 A0 47 A4 0C [msg]
+ * After AES: 3D 03 29 3C FD 81 1B 37 01 51 FB C7 85 6B 7A 74
+ * After xor: 63 30 59 4A FD 81 1B 37 01 51 FB C7 85 6B 7A 74 [msg]
+ * After AES: 66 4F 27 16 3E 36 0F 72 62 0D 4E 67 7C E0 61 DE
+ * CBC-MAC : 66 4F 27 16 3E 36 0F 72 62 0D
+ * CTR Start: 01 00 3E BE 94 04 4B 9A 3C 96 96 76 6C FA 00 01
+ * CTR[0001]: 0A 7E 0A 63 53 C8 CF 9E BC 3B 6E 63 15 9A D0 97
+ * CTR[0002]: EA 20 32 DA 27 82 6E 13 9E 1E 72 5C 5B 0D 3E BF
+ * CTR[MAC ]: B9 31 27 CA F0 F1 A1 20 FA 70
+ * Total packet length = 42. [Authenticated and Encrypted Output]
+ * 47 A6 5A C7 8B 3D 59 42 27 E8 5E 71 E8 82 F1 DB
+ * D3 8C E3 ED A7 C2 3F 04 DD 65 07 1E B4 13 42 AC
+ * DF 7E 00 DC CE C7 AE 52 98 7D
+ */
+static const uint8_t keys_23[] = {
+ 0xD7, 0x82, 0x8D, 0x13, 0xB2, 0xB0, 0xBD, 0xC3,
+ 0x25, 0xA7, 0x62, 0x36, 0xDF, 0x93, 0xCC, 0x6B
+};
+static const uint8_t nonce_23[] = {
+ 0x00, 0x3E, 0xBE, 0x94, 0x04, 0x4B, 0x9A, 0x3C,
+ 0x96, 0x96, 0x76, 0x6C, 0xFA
+};
+static const uint8_t packet_in_23[] = {
+ 0x47, 0xA6, 0x5A, 0xC7, 0x8B, 0x3D, 0x59, 0x42,
+ 0x27, 0xE8, 0x5E, 0x71, 0xE2, 0xFC, 0xFB, 0xB8,
+ 0x80, 0x44, 0x2C, 0x73, 0x1B, 0xF9, 0x51, 0x67,
+ 0xC8, 0xFF, 0xD7, 0x89, 0x5E, 0x33, 0x70, 0x76
+};
+static const uint8_t packet_out_23[] = {
+ 0x47, 0xA6, 0x5A, 0xC7, 0x8B, 0x3D, 0x59, 0x42,
+ 0x27, 0xE8, 0x5E, 0x71, 0xE8, 0x82, 0xF1, 0xDB,
+ 0xD3, 0x8C, 0xE3, 0xED, 0xA7, 0xC2, 0x3F, 0x04,
+ 0xDD, 0x65, 0x07, 0x1E, 0xB4, 0x13, 0x42, 0xAC,
+ 0xDF, 0x7E, 0x00, 0xDC, 0xCE, 0xC7, 0xAE, 0x52,
+ 0x98, 0x7D
+};
+#define clear_len_23 12
+#define auth_len_23 10
+
+/*
+ * =============== Packet Vector #24 ==================
+ * AES Key = D7 82 8D 13 B2 B0 BD C3 25 A7 62 36 DF 93 CC 6B
+ * Nonce = 00 8D 49 3B 30 AE 8B 3C 96 96 76 6C FA
+ * Total packet length = 33. [Input with 12 cleartext header octets]
+ * 6E 37 A6 EF 54 6D 95 5D 34 AB 60 59 AB F2 1C 0B
+ * 02 FE B8 8F 85 6D F4 A3 73 81 BC E3 CC 12 85 17
+ * D4
+ * CBC IV in: 61 00 8D 49 3B 30 AE 8B 3C 96 96 76 6C FA 00 15
+ * CBC IV out:67 AC E4 E8 06 77 7A D3 27 1D 0B 93 4C 67 98 15
+ * After xor: 67 A0 8A DF A0 98 2E BE B2 40 3F 38 2C 3E 98 15 [hdr]
+ * After AES: 35 58 F8 7E CA C2 B4 39 B6 7E 75 BB F1 5E 69 08
+ * After xor: 9E AA E4 75 C8 3C 0C B6 33 13 81 18 82 DF D5 EB [msg]
+ * After AES: 54 E4 7B 62 22 F0 BB 87 17 D0 71 6A EB AF 19 9E
+ * After xor: 98 F6 FE 75 F6 F0 BB 87 17 D0 71 6A EB AF 19 9E [msg]
+ * After AES: 23 E3 30 50 BC 57 DC 2C 3D 3E 7C 94 77 D1 49 71
+ * CBC-MAC : 23 E3 30 50 BC 57 DC 2C 3D 3E
+ * CTR Start: 01 00 8D 49 3B 30 AE 8B 3C 96 96 76 6C FA 00 01
+ * CTR[0001]: 58 DB 19 B3 88 9A A3 8B 3C A4 0B 16 FF 42 2C 73
+ * CTR[0002]: C3 2F 24 3D 65 DC 7E 9F 4B 02 16 AB 7F B9 6B 4D
+ * CTR[MAC ]: 4E 2D AE D2 53 F6 B1 8A 1D 67
+ * Total packet length = 43. [Authenticated and Encrypted Output]
+ * 6E 37 A6 EF 54 6D 95 5D 34 AB 60 59 F3 29 05 B8
+ * 8A 64 1B 04 B9 C9 FF B5 8C C3 90 90 0F 3D A1 2A
+ * B1 6D CE 9E 82 EF A1 6D A6 20 59
+ */
+static const uint8_t keys_24[] = {
+ 0xD7, 0x82, 0x8D, 0x13, 0xB2, 0xB0, 0xBD, 0xC3,
+ 0x25, 0xA7, 0x62, 0x36, 0xDF, 0x93, 0xCC, 0x6B
+};
+static const uint8_t nonce_24[] = {
+ 0x00, 0x8D, 0x49, 0x3B, 0x30, 0xAE, 0x8B, 0x3C,
+ 0x96, 0x96, 0x76, 0x6C, 0xFA
+};
+static const uint8_t packet_in_24[] = {
+ 0x6E, 0x37, 0xA6, 0xEF, 0x54, 0x6D, 0x95, 0x5D,
+ 0x34, 0xAB, 0x60, 0x59, 0xAB, 0xF2, 0x1C, 0x0B,
+ 0x02, 0xFE, 0xB8, 0x8F, 0x85, 0x6D, 0xF4, 0xA3,
+ 0x73, 0x81, 0xBC, 0xE3, 0xCC, 0x12, 0x85, 0x17,
+ 0xD4
+};
+static const uint8_t packet_out_24[] = {
+ 0x6E, 0x37, 0xA6, 0xEF, 0x54, 0x6D, 0x95, 0x5D,
+ 0x34, 0xAB, 0x60, 0x59, 0xF3, 0x29, 0x05, 0xB8,
+ 0x8A, 0x64, 0x1B, 0x04, 0xB9, 0xC9, 0xFF, 0xB5,
+ 0x8C, 0xC3, 0x90, 0x90, 0x0F, 0x3D, 0xA1, 0x2A,
+ 0xB1, 0x6D, 0xCE, 0x9E, 0x82, 0xEF, 0xA1, 0x6D,
+ 0xA6, 0x20, 0x59
+};
+#define clear_len_24 12
+#define auth_len_24 10
+
+/*
+ * =============== Packet Vector #25 ==================
+ * AES Key = D7 82 8D 13 B2 B0 BD C3 25 A7 62 36 DF 93 CC 6B
+ * Nonce = 00 8D 49 3B 30 AE 8B 3C 96 96 76 6C FA
+ * Extended-length variant of Packet Vector #24 (not part of RFC 3610):
+ * same key, nonce and 12 cleartext header octets, but with a longer
+ * payload built largely from repetitions of the 16-octet block
+ *     02 FE B8 8F 85 6D F4 A3 73 81 BC E3 CC 12 85 17
+ * so that the CBC-MAC and CTR paths are exercised over many AES blocks.
+ * The full input and the authenticated and encrypted output (with a
+ * 10-octet MAC appended) are given by packet_in_25/packet_out_25 below.
+ */
+static const uint8_t keys_25[] = {
+ 0xD7, 0x82, 0x8D, 0x13, 0xB2, 0xB0, 0xBD, 0xC3,
+ 0x25, 0xA7, 0x62, 0x36, 0xDF, 0x93, 0xCC, 0x6B
+};
+static const uint8_t nonce_25[] = {
+ 0x00, 0x8D, 0x49, 0x3B, 0x30, 0xAE, 0x8B, 0x3C,
+ 0x96, 0x96, 0x76, 0x6C, 0xFA
+};
+static const uint8_t packet_in_25[] = {
+ 0x6E, 0x37, 0xA6, 0xEF, 0x54, 0x6D, 0x95, 0x5D,
+ 0x34, 0xAB, 0x60, 0x59, 0xAB, 0xF2, 0x1C, 0x0B,
+ 0x02, 0xFE, 0xB8, 0x8F, 0x85, 0x6D, 0xF4, 0xA3,
+ 0x73, 0x81, 0xBC, 0xE3, 0xCC, 0x12, 0x85, 0x17,
+ 0x02, 0xFE, 0xB8, 0x8F, 0x85, 0x6D, 0xF4, 0xA3,
+ 0x73, 0x81, 0xBC, 0xE3, 0xCC, 0x12, 0x85, 0x17,
+ 0x02, 0xFE, 0xB8, 0x8F, 0x85, 0x6D, 0xF4, 0xA3,
+ 0x73, 0x81, 0xBC, 0xE3, 0xCC, 0x12, 0x85, 0x17,
+ 0x02, 0xFE, 0xB8, 0x8F, 0x85, 0x6D, 0xF4, 0xA3,
+ 0x73, 0x81, 0xBC, 0xE3, 0xCC, 0x12, 0x85, 0x17,
+ 0x02, 0xFE, 0xB8, 0x8F, 0x85, 0x6D, 0xF4, 0xA3,
+ 0x73, 0x81, 0xBC, 0xE3, 0xCC, 0x12, 0x85, 0x17,
+ 0x02, 0xFE, 0xB8, 0x8F, 0x85, 0x6D, 0xF4, 0xA3,
+ 0x73, 0x81, 0xBC, 0xE3, 0xCC, 0x12, 0x85, 0x17,
+ 0x02, 0xFE, 0xB8, 0x8F, 0x85, 0x6D, 0xF4, 0xA3,
+ 0x73, 0x81, 0xBC, 0xE3, 0xCC, 0x12, 0x85, 0x17,
+ 0x02, 0xFE, 0xB8, 0x8F, 0x85, 0x6D, 0xF4, 0xA3,
+ 0x73, 0x81, 0xBC, 0xE3, 0xCC, 0x12, 0x85, 0x17,
+ 0x02, 0xFE, 0xB8, 0x8F, 0x85, 0x6D, 0xF4, 0xA3,
+ 0x73, 0x81, 0xBC, 0xE3, 0xCC, 0x12, 0x85, 0x17,
+ 0x02, 0xFE, 0xB8, 0x8F, 0x85, 0x6D, 0xF4, 0xA3,
+ 0x73, 0x81, 0xBC, 0xE3, 0xCC, 0x12, 0x85, 0x17,
+ 0x02, 0xFE, 0xB8, 0x8F, 0x85, 0x6D, 0xF4, 0xA3,
+ 0x73, 0x81, 0xBC, 0xE3, 0xCC, 0x12, 0x85, 0x17,
+ 0x02, 0xFE, 0xB8, 0x8F, 0x85, 0x6D, 0xF4, 0xA3,
+ 0x73, 0x81, 0xBC, 0xE3, 0xCC, 0x12, 0x85, 0x17,
+ 0x02, 0xFE, 0xB8, 0x8F, 0x85, 0x6D, 0xF4, 0xA3,
+ 0x73, 0x81, 0xBC, 0xE3, 0xCC, 0x12, 0x85, 0x17,
+ 0x02, 0xFE, 0xB8, 0x8F, 0x85, 0x6D, 0xF4, 0xA3,
+ 0x73, 0x81, 0xBC, 0xE3, 0xCC, 0x12, 0x85, 0x17,
+ 0x02, 0xFE, 0xB8, 0x8F, 0x85, 0x6D, 0xF4, 0xA3,
+ 0x73, 0x81, 0xBC, 0xE3, 0xCC, 0x12, 0x85, 0x17,
+ 0x02, 0xFE, 0xB8, 0x8F, 0x85, 0x6D, 0xF4, 0xA3,
+ 0x73, 0x81, 0xBC, 0xE3, 0xCC, 0x12, 0x85, 0x17,
+ 0x02, 0xFE, 0xB8, 0x8F, 0x85, 0x6D, 0xF4, 0xA3,
+ 0x73, 0x81, 0xBC, 0xE3, 0xCC, 0x12, 0x85, 0x17,
+ 0x02, 0xFE, 0xB8, 0x8F, 0x85, 0x6D, 0xF4, 0xA3,
+ 0x73, 0x81, 0xBC, 0xE3, 0xCC, 0x12, 0x85, 0x17,
+ 0x02, 0xFE, 0xB8, 0x8F, 0x85, 0x6D, 0xF4, 0xA3,
+};
+static const uint8_t packet_out_25[] = {
+ 0x6E, 0x37, 0xA6, 0xEF, 0x54, 0x6D, 0x95, 0x5D,
+ 0x34, 0xAB, 0x60, 0x59,
+ 0xF3, 0x29, 0x05, 0xB8, 0x8A, 0x64, 0x1B, 0x04,
+ 0xB9, 0xC9, 0xFF, 0xB5, 0x8C, 0xC3, 0x90, 0x90,
+ 0x0F, 0x3D, 0xA1, 0x2A, 0x67, 0x22, 0xC6, 0x10,
+ 0xCE, 0x6F, 0xE2, 0x08, 0x0C, 0x38, 0xD7, 0xAE,
+ 0x61, 0x82, 0x21, 0x3D, 0x59, 0x67, 0xF3, 0x26,
+ 0x72, 0x53, 0x78, 0x37, 0xE4, 0xBA, 0xA7, 0xED,
+ 0x92, 0x52, 0xD6, 0x01, 0xA5, 0x66, 0x52, 0x20,
+ 0x6C, 0x51, 0xDB, 0x15, 0x6C, 0xF8, 0x59, 0x38,
+ 0xAF, 0x28, 0x4D, 0xB7, 0x5F, 0x2A, 0xC8, 0xB0,
+ 0x6E, 0x37, 0x77, 0x89, 0xB0, 0x6D, 0xC3, 0xE5,
+ 0xF2, 0x2A, 0xA6, 0xF7, 0xDB, 0xCC, 0x37, 0x79,
+ 0x88, 0x0E, 0x8F, 0x05, 0xA9, 0xE1, 0x9E, 0x11,
+ 0x90, 0x3D, 0x2A, 0xB9, 0x70, 0x13, 0x9D, 0xC3,
+ 0x93, 0xAD, 0xE2, 0x5D, 0xFF, 0xDD, 0x19, 0x0F,
+ 0xB9, 0x9E, 0x74, 0x88, 0xAB, 0x82, 0x0C, 0xA0,
+ 0x2E, 0x71, 0xA6, 0x32, 0xEB, 0xA4, 0xBA, 0x10,
+ 0xF6, 0x61, 0x7B, 0x1B, 0x2A, 0x38, 0x80, 0xEB,
+ 0xE9, 0x09, 0x01, 0x33, 0x94, 0x8F, 0xB3, 0xD4,
+ 0xAD, 0x6C, 0xD8, 0x5E, 0x85, 0x98, 0xC5, 0x9C,
+ 0x11, 0x62, 0x2C, 0x60, 0x32, 0xAE, 0x70, 0xE1,
+ 0x66, 0x73, 0x09, 0x1E, 0x20, 0x55, 0xB7, 0x20,
+ 0x77, 0x86, 0x09, 0xC8, 0x1C, 0xFE, 0x86, 0xA7,
+ 0x08, 0x40, 0x43, 0xE7, 0xAD, 0xB2, 0x5B, 0x39,
+ 0x64, 0xCB, 0x13, 0x1F, 0x8D, 0xD2, 0x4F, 0xCC,
+ 0xC5, 0xAA, 0xF1, 0xD6, 0x31, 0xFC, 0x34, 0x9E,
+ 0x5F, 0x90, 0xC4, 0xB7, 0xE0, 0x07, 0x9C, 0xCD,
+ 0xFB, 0xEA, 0xE3, 0x75, 0xB5, 0x7B, 0x29, 0xD4,
+ 0x73, 0x81, 0xEF, 0x9C, 0x2E, 0xAC, 0xF9, 0xA7,
+ 0x39, 0x2A, 0xF8, 0xE2, 0xEA, 0x3A, 0x6A, 0xDF,
+ 0xD0, 0x3A, 0xCA, 0x29, 0xD3, 0x13, 0x13, 0x9A,
+ 0x2C, 0x70, 0xA4, 0xA9, 0x40, 0x1D, 0xEC, 0xC7,
+ 0xC9, 0x6B, 0xF7, 0x23, 0xD4, 0x53, 0x49, 0xD0,
+ 0x05, 0xCE, 0x15, 0x65, 0xCE, 0x1F, 0x89, 0xE2,
+ 0xBE, 0xE0, 0xFA, 0x3F, 0x59, 0x4A, 0x89, 0x99,
+ 0xE5, 0xDB, 0xA0, 0xE8, 0x54, 0x72, 0x42, 0x69,
+ 0x79, 0x63, 0x68, 0x91, 0xC9, 0x2C, 0xFC, 0x58,
+ 0xD4, 0x30, 0xFE, 0xE3, 0x62, 0x5F, 0xDC, 0x49,
+ 0xEF, 0x32, 0x58, 0x83, 0x27, 0xA9, 0xED, 0xEC,
+ 0xF3, 0x1D, 0xFB, 0xEA, 0x0A, 0x89
+};
+#define clear_len_25 12
+#define auth_len_25 10
+
+/** Additional AES-CCM-128 test vectors */
+static const uint8_t keys_90[] = {
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+ 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F
+};
+static const uint8_t nonce_90[] = {
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16
+};
+static const uint8_t packet_in_90[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x20, 0x21, 0x22, 0x23
+};
+static const uint8_t packet_out_90[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x71, 0x62, 0x01, 0x5B,
+ 0x4D, 0xAC, 0x25, 0x5D
+};
+#define clear_len_90 8
+#define auth_len_90 4
+
+static const uint8_t keys_91[] = {
+ 0xC9, 0x7C, 0x1F, 0x67, 0xCE, 0x37, 0x11, 0x85,
+ 0x51, 0x4A, 0x8A, 0x19, 0xF2, 0xBD, 0xD5, 0x2F
+};
+static const uint8_t nonce_91[] = {
+ 0x00, 0x50, 0x30, 0xF1, 0x84, 0x44, 0x08, 0xB5,
+ 0x03, 0x97, 0x76, 0xE7, 0x0C
+};
+static const uint8_t packet_in_91[] = {
+ 0x08, 0x40, 0x0F, 0xD2, 0xE1, 0x28, 0xA5, 0x7C,
+ 0x50, 0x30, 0xF1, 0x84, 0x44, 0x08, 0xAB, 0xAE,
+ 0xA5, 0xB8, 0xFC, 0xBA, 0x00, 0x00,
+ 0xF8, 0xBA, 0x1A, 0x55, 0xD0, 0x2F, 0x85, 0xAE,
+ 0x96, 0x7B, 0xB6, 0x2F, 0xB6, 0xCD, 0xA8, 0xEB,
+ 0x7E, 0x78, 0xA0, 0x50
+};
+static const uint8_t packet_out_91[] = {
+ 0x08, 0x40, 0x0F, 0xD2, 0xE1, 0x28, 0xA5, 0x7C,
+ 0x50, 0x30, 0xF1, 0x84, 0x44, 0x08, 0xAB, 0xAE,
+ 0xA5, 0xB8, 0xFC, 0xBA, 0x00, 0x00,
+ 0xF3, 0xD0, 0xA2, 0xFE, 0x9A, 0x3D, 0xBF, 0x23,
+ 0x42, 0xA6, 0x43, 0xE4, 0x32, 0x46, 0xE8, 0x0C,
+ 0x3C, 0x04, 0xD0, 0x19,
+ 0x78, 0x45, 0xCE, 0x0B, 0x16, 0xF9, 0x76, 0x23
+};
+#define clear_len_91 22
+#define auth_len_91 8
+
+
+static const uint8_t keys_92[] = {
+ 0xC9, 0x7C, 0x1F, 0x67, 0xCE, 0x37, 0x11, 0x85,
+ 0x51, 0x4A, 0x8A, 0x19, 0xF2, 0xBD, 0xD5, 0x2F
+};
+static const uint8_t nonce_92[] = {
+ 0x00, 0x50, 0x30, 0xF1, 0x84, 0x44, 0x08, 0xB5,
+ 0x03, 0x97, 0x76, 0xE7, 0x0C
+};
+static const uint8_t packet_in_92[] = {
+ 0xF8, 0xBA, 0x1A, 0x55, 0xD0, 0x2F, 0x85, 0xAE,
+ 0x96, 0x7B, 0xB6, 0x2F, 0xB6, 0xCD, 0xA8, 0xEB,
+ 0x7E, 0x78, 0xA0, 0x50
+};
+static const uint8_t packet_out_92[] = {
+ 0xF3, 0xD0, 0xA2, 0xFE, 0x9A, 0x3D, 0xBF, 0x23,
+ 0x42, 0xA6, 0x43, 0xE4, 0x32, 0x46, 0xE8, 0x0C,
+ 0x3C, 0x04, 0xD0, 0x19,
+ 0x41, 0x83, 0x21, 0x89, 0xA3, 0xD3, 0x1B, 0x43
+};
+#define clear_len_92 0
+#define auth_len_92 8
+
+static const uint8_t keys_100[] = {
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+ 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F
+};
+static const uint8_t nonce_100[] = {
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16
+};
+static const uint8_t packet_in_100[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x20, 0x21, 0x22, 0x23,
+};
+static const uint8_t packet_out_100[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x71, 0x62, 0x01, 0x5B,
+ 0xB0, 0xC9, 0x5E, 0x58, 0x03, 0x6E
+};
+#define clear_len_100 8
+#define auth_len_100 6
+
+static const uint8_t keys_101[] = {
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+ 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F
+};
+static const uint8_t nonce_101[] = {
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16
+};
+static const uint8_t packet_in_101[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x20, 0x21, 0x22, 0x23,
+};
+static const uint8_t packet_out_101[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x71, 0x62, 0x01, 0x5B,
+ 0xD0, 0xAD, 0x86, 0xFD, 0x33, 0xC2, 0x69, 0x86
+};
+#define clear_len_101 8
+#define auth_len_101 8
+
+static const uint8_t keys_102[] = {
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+ 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F
+};
+static const uint8_t nonce_102[] = {
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16
+};
+static const uint8_t packet_in_102[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x20, 0x21, 0x22, 0x23,
+};
+static const uint8_t packet_out_102[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x71, 0x62, 0x01, 0x5B,
+ 0x05, 0x12, 0xDA, 0xBF, 0xD9, 0x72, 0xA6, 0x68,
+ 0x53, 0xC1
+};
+#define clear_len_102 8
+#define auth_len_102 10
+
+static const uint8_t keys_103[] = {
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+ 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F
+};
+static const uint8_t nonce_103[] = {
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16
+};
+static const uint8_t packet_in_103[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x20, 0x21, 0x22, 0x23,
+};
+static const uint8_t packet_out_103[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x71, 0x62, 0x01, 0x5B,
+ 0xBA, 0x03, 0xBF, 0x8C, 0xE0, 0xD6, 0x00, 0xA4,
+ 0x48, 0x6F, 0xCC, 0xB3
+};
+#define clear_len_103 8
+#define auth_len_103 12
+
+static const uint8_t keys_104[] = {
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+ 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F
+};
+static const uint8_t nonce_104[] = {
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16
+};
+static const uint8_t packet_in_104[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x20, 0x21, 0x22, 0x23,
+};
+static const uint8_t packet_out_104[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x71, 0x62, 0x01, 0x5B,
+ 0x6B, 0x9B, 0xFB, 0xFE, 0xA8, 0x2C, 0x04, 0x77,
+ 0x8E, 0x67, 0xF5, 0x18, 0x46, 0xC6
+};
+#define clear_len_104 8
+#define auth_len_104 14
+
+static const uint8_t keys_105[] = {
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+ 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F
+};
+static const uint8_t nonce_105[] = {
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16
+};
+static const uint8_t packet_in_105[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x20, 0x21, 0x22, 0x23,
+};
+static const uint8_t packet_out_105[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x71, 0x62, 0x01, 0x5B,
+ 0x2B, 0xB5, 0x7C, 0x0A, 0xF4, 0x5E, 0x4D, 0x83,
+ 0x04, 0xF0, 0x5F, 0x45, 0x99, 0x3F, 0x15, 0x17
+};
+#define clear_len_105 8
+#define auth_len_105 16
+
+static const uint8_t keys_106[] = {
+ 0x4a, 0xe7, 0x01, 0x10, 0x3c, 0x63, 0xde, 0xca,
+ 0x5b, 0x5a, 0x39, 0x39, 0xd7, 0xd0, 0x59, 0x92
+};
+static const uint8_t nonce_106[] = {
+ 0x5a, 0x8a, 0xa4, 0x85, 0xc3, 0x16, 0xe9
+};
+static const uint8_t packet_out_106[] = {
+ 0x02, 0x20, 0x9f, 0x55
+};
+#define clear_len_106 0
+#define auth_len_106 4
+
+static const uint8_t keys_107[] = {
+ 0xC9, 0x7C, 0x1F, 0x67, 0xCE, 0x37, 0x11, 0x85,
+ 0x51, 0x4A, 0x8A, 0x19, 0xF2, 0xBD, 0xD5, 0x2F
+};
+static const uint8_t nonce_107[] = {
+ 0x00, 0x50, 0x30, 0xF1, 0x84, 0x44, 0x08, 0xB5,
+ 0x03, 0x97, 0x76, 0xE7, 0x0C
+};
+static const uint8_t packet_in_107[] = {
+ 0x08, 0x40, 0x0F, 0xD2, 0xE1, 0x28, 0xA5, 0x7C,
+ 0x50, 0x30, 0xF1, 0x84, 0x44, 0x08,
+ 0xF8, 0xBA, 0x1A, 0x55, 0xD0, 0x2F, 0x85, 0xAE,
+ 0x96, 0x7B, 0xB6, 0x2F, 0xB6, 0xCD, 0xA8, 0xEB,
+ 0x7E, 0x78, 0xA0, 0x50
+};
+static const uint8_t packet_out_107[] = {
+ 0x08, 0x40, 0x0F, 0xD2, 0xE1, 0x28, 0xA5, 0x7C,
+ 0x50, 0x30, 0xF1, 0x84, 0x44, 0x08,
+ 0xF3, 0xD0, 0xA2, 0xFE, 0x9A, 0x3D, 0xBF, 0x23,
+ 0x42, 0xA6, 0x43, 0xE4, 0x32, 0x46, 0xE8, 0x0C,
+ 0x3C, 0x04, 0xD0, 0x19,
+ 0x60, 0x76, 0xE8, 0xE2, 0x0C, 0x0A, 0xF6, 0xDF
+};
+#define clear_len_107 14
+#define auth_len_107 8
+
+static const uint8_t keys_108[] = {
+ 0xC9, 0x7C, 0x1F, 0x67, 0xCE, 0x37, 0x11, 0x85,
+ 0x51, 0x4A, 0x8A, 0x19, 0xF2, 0xBD, 0xD5, 0x2F
+};
+static const uint8_t nonce_108[] = {
+ 0x00, 0x50, 0x30, 0xF1, 0x84, 0x44, 0x08, 0xB5,
+ 0x03, 0x97, 0x76, 0xE7, 0x0C
+};
+static const uint8_t packet_in_108[] = {
+ 0x08, 0x40, 0x0F, 0xD2, 0xE1, 0x28, 0xA5, 0x7C,
+ 0x50, 0x30, 0xF1, 0x84, 0x44, 0x08, 0xAB, 0xAE,
+ 0xA5, 0xB8, 0xFC, 0xBA, 0x00, 0x00, 0x08, 0x40,
+ 0x0F, 0xD2, 0xE1, 0x28, 0xA5, 0x7C, 0x50, 0x30,
+ 0xF8, 0xBA, 0x1A, 0x55, 0xD0, 0x2F, 0x85, 0xAE,
+ 0x96, 0x7B, 0xB6, 0x2F, 0xB6, 0xCD, 0xA8, 0xEB,
+ 0x7E, 0x78, 0xA0, 0x50
+};
+static const uint8_t packet_out_108[] = {
+ 0x08, 0x40, 0x0F, 0xD2, 0xE1, 0x28, 0xA5, 0x7C,
+ 0x50, 0x30, 0xF1, 0x84, 0x44, 0x08, 0xAB, 0xAE,
+ 0xA5, 0xB8, 0xFC, 0xBA, 0x00, 0x00, 0x08, 0x40,
+ 0x0F, 0xD2, 0xE1, 0x28, 0xA5, 0x7C, 0x50, 0x30,
+ 0xF3, 0xD0, 0xA2, 0xFE, 0x9A, 0x3D, 0xBF, 0x23,
+ 0x42, 0xA6, 0x43, 0xE4, 0x32, 0x46, 0xE8, 0x0C,
+ 0x3C, 0x04, 0xD0, 0x19,
+ 0x35, 0x0D, 0xA5, 0xAA, 0x1E, 0x71, 0x82, 0x35
+};
+#define clear_len_108 32
+#define auth_len_108 8
+
+static const uint8_t keys_109[] = {
+ 0xC9, 0x7C, 0x1F, 0x67, 0xCE, 0x37, 0x11, 0x85,
+ 0x51, 0x4A, 0x8A, 0x19, 0xF2, 0xBD, 0xD5, 0x2F
+};
+static const uint8_t nonce_109[] = {
+ 0x00, 0x50, 0x30, 0xF1, 0x84, 0x44, 0x08, 0xB5,
+ 0x03, 0x97, 0x76, 0xE7, 0x0C
+};
+static const uint8_t packet_in_109[] = {
+ 0x08, 0x40, 0x0F, 0xD2, 0xE1, 0x28, 0xA5, 0x7C,
+ 0x50, 0x30, 0xF1, 0x84, 0x44, 0x08, 0xAB, 0xAE,
+ 0xA5, 0xB8, 0xFC, 0xBA, 0x00, 0x00, 0x08, 0x40,
+ 0x0F, 0xD2, 0xE1, 0x28, 0xA5, 0x7C, 0x50, 0x30,
+ 0x00, 0x01, 0x02, 0x03,
+ 0xF8, 0xBA, 0x1A, 0x55, 0xD0, 0x2F, 0x85, 0xAE,
+ 0x96, 0x7B, 0xB6, 0x2F, 0xB6, 0xCD, 0xA8, 0xEB,
+ 0x7E, 0x78, 0xA0, 0x50
+};
+static const uint8_t packet_out_109[] = {
+ 0x08, 0x40, 0x0F, 0xD2, 0xE1, 0x28, 0xA5, 0x7C,
+ 0x50, 0x30, 0xF1, 0x84, 0x44, 0x08, 0xAB, 0xAE,
+ 0xA5, 0xB8, 0xFC, 0xBA, 0x00, 0x00, 0x08, 0x40,
+ 0x0F, 0xD2, 0xE1, 0x28, 0xA5, 0x7C, 0x50, 0x30,
+ 0x00, 0x01, 0x02, 0x03,
+ 0xF3, 0xD0, 0xA2, 0xFE, 0x9A, 0x3D, 0xBF, 0x23,
+ 0x42, 0xA6, 0x43, 0xE4, 0x32, 0x46, 0xE8, 0x0C,
+ 0x3C, 0x04, 0xD0, 0x19,
+ 0x26, 0x5A, 0x04, 0xB1, 0x56, 0xFF, 0x9F, 0x0E
+};
+#define clear_len_109 36
+#define auth_len_109 8
+
+static const uint8_t keys_110[] = {
+ 0xC9, 0x7C, 0x1F, 0x67, 0xCE, 0x37, 0x11, 0x85,
+ 0x51, 0x4A, 0x8A, 0x19, 0xF2, 0xBD, 0xD5, 0x2F
+};
+static const uint8_t nonce_110[] = {
+ 0x00, 0x50, 0x30, 0xF1, 0x84, 0x44, 0x08, 0xB5,
+ 0x03, 0x97, 0x76, 0xE7, 0x0C
+};
+static const uint8_t packet_in_110[] = {
+ 0x08, 0x40, 0x0F, 0xD2, 0xE1, 0x28, 0xA5, 0x7C,
+ 0x50, 0x30, 0xF1, 0x84, 0x44, 0x08, 0xAB, 0xAE,
+ 0xA5, 0xB8, 0xFC, 0xBA, 0x00, 0x00, 0x08, 0x40,
+ 0x0F, 0xD2, 0xE1, 0x28, 0xA5, 0x7C, 0x50, 0x30,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
+ 0xF8, 0xBA, 0x1A, 0x55, 0xD0, 0x2F, 0x85, 0xAE,
+ 0x96, 0x7B, 0xB6, 0x2F, 0xB6, 0xCD, 0xA8, 0xEB,
+ 0x7E, 0x78, 0xA0, 0x50
+};
+static const uint8_t packet_out_110[] = {
+ 0x08, 0x40, 0x0F, 0xD2, 0xE1, 0x28, 0xA5, 0x7C,
+ 0x50, 0x30, 0xF1, 0x84, 0x44, 0x08, 0xAB, 0xAE,
+ 0xA5, 0xB8, 0xFC, 0xBA, 0x00, 0x00, 0x08, 0x40,
+ 0x0F, 0xD2, 0xE1, 0x28, 0xA5, 0x7C, 0x50, 0x30,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
+ 0xF3, 0xD0, 0xA2, 0xFE, 0x9A, 0x3D, 0xBF, 0x23,
+ 0x42, 0xA6, 0x43, 0xE4, 0x32, 0x46, 0xE8, 0x0C,
+ 0x3C, 0x04, 0xD0, 0x19,
+ 0x07, 0x55, 0x13, 0x40, 0x2B, 0x11, 0x6D, 0xD5
+};
+#define clear_len_110 46
+#define auth_len_110 8
+
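+/*
+ * Note: vectors 90 and 100-105 share one key/nonce/payload and only
+ * vary the MAC length (4, 6, 8, 10, 12, 14 and 16 octets), while
+ * vectors 91, 92 and 107-110 share another key/nonce and vary the
+ * cleartext header (AAD) length (22, 0, 14, 32, 36 and 46 octets),
+ * presumably to cover AAD handling across AES block boundaries.
+ * Vector 106 has no payload at all and checks MAC-only operation.
+ */
+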
+#define CCM_TEST_VEC(num) \
+ { keys_##num, nonce_##num, sizeof(nonce_##num), \
+ packet_in_##num, sizeof(packet_in_##num), \
+ clear_len_##num, packet_out_##num, \
+ auth_len_##num }
+#define CCM_TEST_VEC_2(num) \
+ { keys_##num, nonce_##num, sizeof(nonce_##num), \
+ NULL, 0, \
+ clear_len_##num, packet_out_##num, \
+ auth_len_##num }
+
+static const struct ccm_rfc3610_vector {
+ const uint8_t *keys;
+ const uint8_t *nonce;
+ const size_t nonce_len;
+ /* packet in = [ AAD | plain text ] */
+ const uint8_t *packet_in;
+ const size_t packet_len;
+ const size_t clear_len;
+ /* packet out = [ AAD | cipher text | authentication tag ] */
+ const uint8_t *packet_out;
+ const size_t auth_len;
+} ccm_vectors[] = {
+ CCM_TEST_VEC(01),
+ CCM_TEST_VEC(02),
+ CCM_TEST_VEC(03),
+ CCM_TEST_VEC(04),
+ CCM_TEST_VEC(05),
+ CCM_TEST_VEC(06),
+ CCM_TEST_VEC(07),
+ CCM_TEST_VEC(08),
+ CCM_TEST_VEC(09),
+ CCM_TEST_VEC(10),
+ CCM_TEST_VEC(11),
+ CCM_TEST_VEC(12),
+ CCM_TEST_VEC(13),
+ CCM_TEST_VEC(14),
+ CCM_TEST_VEC(15),
+ CCM_TEST_VEC(16),
+ CCM_TEST_VEC(17),
+ CCM_TEST_VEC(18),
+ CCM_TEST_VEC(19),
+ CCM_TEST_VEC(20),
+ CCM_TEST_VEC(21),
+ CCM_TEST_VEC(22),
+ CCM_TEST_VEC(23),
+ CCM_TEST_VEC(24),
+ CCM_TEST_VEC(25),
+ CCM_TEST_VEC(90),
+ CCM_TEST_VEC(91),
+ CCM_TEST_VEC(92),
+ CCM_TEST_VEC(100),
+ CCM_TEST_VEC(101),
+ CCM_TEST_VEC(102),
+ CCM_TEST_VEC(103),
+ CCM_TEST_VEC(104),
+ CCM_TEST_VEC(105),
+ CCM_TEST_VEC_2(106),
+ CCM_TEST_VEC(107),
+ CCM_TEST_VEC(108),
+ CCM_TEST_VEC(109),
+ CCM_TEST_VEC(110)
+};
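+
+/*
+ * Layout of a vector, using CCM_TEST_VEC(14) above as a worked example:
+ * packet_len = sizeof(packet_in_14) = 32, clear_len = 8, auth_len = 8.
+ * The first clear_len octets are the cleartext header (AAD), the
+ * remaining packet_len - clear_len = 24 octets are encrypted and
+ * authenticated, and packet_out_14 holds packet_len + auth_len = 40
+ * octets laid out as [ AAD | cipher text | 8-octet tag ].
+ * CCM_TEST_VEC_2() (vector 106) has no input payload at all
+ * (packet_in = NULL, packet_len = 0) and checks tag generation only.
+ */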
+
+static int
+ccm_job_ok(const struct ccm_rfc3610_vector *vec,
+ const struct JOB_AES_HMAC *job,
+ const uint8_t *target,
+ const uint8_t *padding,
+ const uint8_t *auth,
+ const size_t sizeof_padding,
+ const int dir,
+ const int in_place)
+{
+ if (job->status != STS_COMPLETED) {
+ printf("%d Error status:%d\n", __LINE__, job->status);
+ return 0;
+ }
+
+ /* cipher checks */
+ if (in_place) {
+ if (dir == ENCRYPT) {
+ if (memcmp(vec->packet_out, target + sizeof_padding,
+ vec->packet_len)) {
+ printf("cipher mismatched\n");
+ hexdump(stderr, "Received",
+ target + sizeof_padding,
+ vec->packet_len);
+ hexdump(stderr, "Expected",
+ vec->packet_out, vec->packet_len);
+ return 0;
+ }
+ } else {
+ if (memcmp(vec->packet_in, target + sizeof_padding,
+ vec->packet_len)) {
+ printf("cipher mismatched\n");
+ hexdump(stderr, "Received",
+ target + sizeof_padding,
+ vec->packet_len);
+ hexdump(stderr, "Expected", vec->packet_in,
+ vec->packet_len);
+ return 0;
+ }
+ }
+ } else { /* out-of-place */
+ if (dir == ENCRYPT) {
+ if (memcmp(vec->packet_out + vec->clear_len,
+ target + sizeof_padding,
+ vec->packet_len - vec->clear_len)) {
+ printf("cipher mismatched\n");
+ hexdump(stderr, "Received",
+ target + sizeof_padding,
+ vec->packet_len - vec->clear_len);
+ hexdump(stderr, "Expected",
+ vec->packet_out + vec->clear_len,
+ vec->packet_len - vec->clear_len);
+ return 0;
+ }
+ } else {
+ if (memcmp(vec->packet_in + vec->clear_len,
+ target + sizeof_padding,
+ vec->packet_len - vec->clear_len)) {
+ printf("cipher mismatched\n");
+ hexdump(stderr, "Received",
+ target + sizeof_padding,
+ vec->packet_len - vec->clear_len);
+ hexdump(stderr, "Expected",
+ vec->packet_in + vec->clear_len,
+ vec->packet_len - vec->clear_len);
+ return 0;
+ }
+ }
+ }
+
+ if (memcmp(padding, target, sizeof_padding)) {
+ printf("cipher overwrite head\n");
+ hexdump(stderr, "Target", target, sizeof_padding);
+ return 0;
+ }
+
+ if (in_place) {
+ if (memcmp(padding, target + sizeof_padding + vec->packet_len,
+ sizeof_padding)) {
+ printf("cipher overwrite tail\n");
+ hexdump(stderr, "Target",
+ target + sizeof_padding + vec->packet_len,
+ sizeof_padding);
+ return 0;
+ }
+ } else {
+ if (memcmp(padding, target + sizeof_padding + vec->packet_len -
+ vec->clear_len, sizeof_padding)) {
+ printf("cipher overwrite tail\n");
+ hexdump(stderr, "Target", target + sizeof_padding +
+ vec->packet_len - vec->clear_len,
+ sizeof_padding);
+ return 0;
+ }
+ }
+
+ /* hash checks */
+ if (memcmp(padding, &auth[sizeof_padding + vec->auth_len],
+ sizeof_padding)) {
+ printf("hash overwrite tail\n");
+ hexdump(stderr, "Target",
+ &auth[sizeof_padding + vec->auth_len], sizeof_padding);
+ return 0;
+ }
+
+ if (memcmp(padding, &auth[0], sizeof_padding)) {
+ printf("hash overwrite head\n");
+ hexdump(stderr, "Target", &auth[0], sizeof_padding);
+ return 0;
+ }
+
+ if (memcmp(vec->packet_out + vec->packet_len, &auth[sizeof_padding],
+ vec->auth_len)) {
+ printf("hash mismatched\n");
+ hexdump(stderr, "Received", &auth[sizeof_padding],
+ vec->auth_len);
+ hexdump(stderr, "Expected", vec->packet_out + vec->packet_len,
+ vec->auth_len);
+ return 0;
+ }
+ return 1;
+}
+
+static int
+test_ccm(struct MB_MGR *mb_mgr,
+ const struct ccm_rfc3610_vector *vec,
+ const int dir, const int in_place, const int num_jobs)
+{
+ DECLARE_ALIGNED(uint32_t expkey[4*15], 16);
+ DECLARE_ALIGNED(uint32_t dust[4*15], 16);
+ struct JOB_AES_HMAC *job;
+ uint8_t padding[16];
+ uint8_t **targets = malloc(num_jobs * sizeof(void *));
+ uint8_t **auths = malloc(num_jobs * sizeof(void *));
+ int i = 0, jobs_rx = 0, ret = -1;
+ const int order = (dir == ENCRYPT) ? HASH_CIPHER : CIPHER_HASH;
+
+ if (targets == NULL || auths == NULL) {
+ fprintf(stderr, "Can't allocate buffer memory\n");
+ goto end2;
+ }
+
+ memset(padding, -1, sizeof(padding));
+ memset(targets, 0, num_jobs * sizeof(void *));
+ memset(auths, 0, num_jobs * sizeof(void *));
+
+ for (i = 0; i < num_jobs; i++) {
+ targets[i] = malloc(vec->packet_len + (sizeof(padding) * 2));
+ auths[i] = malloc(16 + (sizeof(padding) * 2));
+ if (targets[i] == NULL || auths[i] == NULL) {
+ fprintf(stderr, "Can't allocate buffer memory\n");
+ goto end;
+ }
+
+ memset(targets[i], -1, vec->packet_len + (sizeof(padding) * 2));
+ memset(auths[i], -1, 16 + (sizeof(padding) * 2));
+
+ if (in_place) {
+ if (dir == ENCRYPT)
+ memcpy(targets[i] + sizeof(padding),
+ vec->packet_in, vec->packet_len);
+ else
+ memcpy(targets[i] + sizeof(padding),
+ vec->packet_out, vec->packet_len);
+ }
+ }
+
+ IMB_AES_KEYEXP_128(mb_mgr, vec->keys, expkey, dust);
+
+ while ((job = IMB_FLUSH_JOB(mb_mgr)) != NULL)
+ ;
+
+ for (i = 0; i < num_jobs; i++) {
+ job = IMB_GET_NEXT_JOB(mb_mgr);
+ job->cipher_direction = dir;
+ job->chain_order = order;
+ if (in_place) {
+ job->dst =
+ targets[i] + sizeof(padding) + vec->clear_len;
+ job->src = targets[i] + sizeof(padding);
+ } else {
+ if (dir == ENCRYPT) {
+ job->dst = targets[i] + sizeof(padding);
+ job->src = vec->packet_in;
+ } else {
+ job->dst = targets[i] + sizeof(padding);
+ job->src = vec->packet_out;
+ }
+ }
+ job->cipher_mode = CCM;
+ job->aes_enc_key_expanded = expkey;
+ job->aes_dec_key_expanded = expkey;
+ job->aes_key_len_in_bytes = 16; /* AES-CCM-128 for now */
+ job->iv = vec->nonce;
+ job->iv_len_in_bytes = vec->nonce_len;
+ job->cipher_start_src_offset_in_bytes = vec->clear_len;
+ job->msg_len_to_cipher_in_bytes =
+ vec->packet_len - vec->clear_len;
+
+ job->hash_alg = AES_CCM;
+ job->hash_start_src_offset_in_bytes = vec->clear_len;
+ job->msg_len_to_hash_in_bytes =
+ vec->packet_len - vec->clear_len;
+ job->auth_tag_output = auths[i] + sizeof(padding);
+ job->auth_tag_output_len_in_bytes = vec->auth_len;
+
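+ /*
+ * In these vectors the clear_len leading octets double as the AAD:
+ * cipher and hash start right after them (offsets above) and the
+ * same region is passed to the CCM code as additional
+ * authenticated data.
+ */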
+ job->u.CCM.aad_len_in_bytes = vec->clear_len;
+ job->u.CCM.aad = job->src;
+
+ job->user_data = targets[i];
+ job->user_data2 = auths[i];
+
+ job = IMB_SUBMIT_JOB(mb_mgr);
+ if (job) {
+ jobs_rx++;
+ if (num_jobs < 4) {
+ printf("%d Unexpected return from submit_job\n",
+ __LINE__);
+ goto end;
+ }
+ if (!ccm_job_ok(vec, job, job->user_data, padding,
+ job->user_data2, sizeof(padding),
+ dir, in_place))
+ goto end;
+ }
+ }
+
+ while ((job = IMB_FLUSH_JOB(mb_mgr)) != NULL) {
+ jobs_rx++;
+
+ if (!ccm_job_ok(vec, job, job->user_data, padding,
+ job->user_data2, sizeof(padding), dir,
+ in_place))
+ goto end;
+ }
+
+ if (jobs_rx != num_jobs) {
+ printf("Expected %d jobs, received %d\n", num_jobs, jobs_rx);
+ goto end;
+ }
+ ret = 0;
+
+ end:
+ while ((job = IMB_FLUSH_JOB(mb_mgr)) != NULL)
+ ;
+
+ for (i = 0; i < num_jobs; i++) {
+ if (targets[i] != NULL)
+ free(targets[i]);
+ if (auths[i] != NULL)
+ free(auths[i]);
+ }
+
+ end2:
+ if (targets != NULL)
+ free(targets);
+
+ if (auths != NULL)
+ free(auths);
+
+ return ret;
+}
+
+static int
+test_ccm_std_vectors(struct MB_MGR *mb_mgr, const int num_jobs)
+{
+ const int vectors_cnt = sizeof(ccm_vectors) / sizeof(ccm_vectors[0]);
+ int vect;
+ int errors = 0;
+
+ printf("AES-CCM standard test vectors (N jobs = %d):\n", num_jobs);
+ for (vect = 1; vect <= vectors_cnt; vect++) {
+ const int idx = vect - 1;
+#ifdef DEBUG
+ printf("Standard vector [%d/%d] NONCELen:%d PktLen:%d "
+ "AADLen:%d AUTHlen:%d\n",
+ vect, vectors_cnt,
+ (int) ccm_vectors[idx].nonce_len,
+ (int) ccm_vectors[idx].packet_len,
+ (int) ccm_vectors[idx].clear_len,
+ (int) ccm_vectors[idx].auth_len);
+#else
+ printf(".");
+#endif
+
+ if (test_ccm(mb_mgr, &ccm_vectors[idx], ENCRYPT, 1, num_jobs)) {
+ printf("error #%d encrypt in-place\n", vect);
+ errors++;
+ }
+
+ if (test_ccm(mb_mgr, &ccm_vectors[idx], DECRYPT, 1, num_jobs)) {
+ printf("error #%d decrypt in-place\n", vect);
+ errors++;
+ }
+
+ if (test_ccm(mb_mgr, &ccm_vectors[idx], ENCRYPT, 0, num_jobs)) {
+ printf("error #%d encrypt out-of-place\n", vect);
+ errors++;
+ }
+
+ if (test_ccm(mb_mgr, &ccm_vectors[idx], DECRYPT, 0, num_jobs)) {
+ printf("error #%d decrypt out-of-place\n", vect);
+ errors++;
+ }
+ }
+ printf("\n");
+ return errors;
+}
+
+int
+ccm_test(const enum arch_type arch,
+ struct MB_MGR *mb_mgr)
+{
+ int errors = 0;
+
+ (void) arch; /* unused */
+
+ errors += test_ccm_std_vectors(mb_mgr, 1);
+ errors += test_ccm_std_vectors(mb_mgr, 3);
+ errors += test_ccm_std_vectors(mb_mgr, 4);
+ errors += test_ccm_std_vectors(mb_mgr, 5);
+ errors += test_ccm_std_vectors(mb_mgr, 7);
+ errors += test_ccm_std_vectors(mb_mgr, 8);
+ errors += test_ccm_std_vectors(mb_mgr, 9);
+
+ if (0 == errors)
+ printf("...Pass\n");
+ else
+ printf("...Fail\n");
+
+ return errors;
+}
diff --git a/src/spdk/intel-ipsec-mb/LibTestApp/chained_test.c b/src/spdk/intel-ipsec-mb/LibTestApp/chained_test.c
new file mode 100644
index 000000000..eddef42e3
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/LibTestApp/chained_test.c
@@ -0,0 +1,511 @@
+/*****************************************************************************
+ Copyright (c) 2019, Intel Corporation
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*****************************************************************************/
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+
+#include <intel-ipsec-mb.h>
+
+#include "gcm_ctr_vectors_test.h"
+#include "utils.h"
+
+#define SHA1_BLOCK_SIZE 64
+#define SHA1_DIGEST_SIZE 20
+
+int chained_test(const enum arch_type arch, struct MB_MGR *mb_mgr);
+
+struct chained_vector {
+ const uint8_t *cipher_key; /* cipher key */
+ uint32_t cipher_key_len; /* cipher key length */
+ const uint8_t *IV; /* initialization vector */
+ const uint8_t *PT; /* plaintext */
+ uint64_t PTlen; /* plaintext length */
+ const uint8_t *CT; /* ciphertext - same length as PT */
+ const uint8_t *hash_key; /* hash key */
+ uint32_t hash_key_len; /* hash key length */
+ const uint8_t *Digest_PT; /* digest for plaintext */
+ const uint8_t *Digest_CT; /* digest for ciphertext */
+ uint32_t Digest_len; /* digest length */
+};
+
+const struct test_set {
+ JOB_CIPHER_DIRECTION dir;
+ JOB_CHAIN_ORDER order;
+ const char *set_name;
+} test_sets[] = {
+ {
+ .dir = ENCRYPT,
+ .order = CIPHER_HASH,
+ .set_name = "encrypt-hash"
+ },
+ {
+ .dir = DECRYPT,
+ .order = CIPHER_HASH,
+ .set_name = "decrypt-hash"
+ },
+ {
+ .dir = ENCRYPT,
+ .order = HASH_CIPHER,
+ .set_name = "hash-encrypt"
+ },
+ {
+ .dir = DECRYPT,
+ .order = HASH_CIPHER,
+ .set_name = "hash-decrypt"
+ },
+
+};
+
+const char *place_str[] = {"out-of-place", "in-place"};
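+
+/*
+ * Every vector is run for each direction/chain-order combination in
+ * test_sets[] and both out-of-place and in-place (see place_str[]),
+ * i.e. 8 cases per vector and job count.
+ */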
+
+/* AES-CBC + SHA1-HMAC test vectors */
+
+/* 128-bit */
+static const uint8_t K1[] = {
+ 0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6,
+ 0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c
+};
+static const uint8_t IV1[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f
+};
+static const uint8_t P1[] = {
+ 0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f, 0x96,
+ 0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a,
+ 0xae, 0x2d, 0x8a, 0x57, 0x1e, 0x03, 0xac, 0x9c,
+ 0x9e, 0xb7, 0x6f, 0xac, 0x45, 0xaf, 0x8e, 0x51,
+ 0x30, 0xc8, 0x1c, 0x46, 0xa3, 0x5c, 0xe4, 0x11,
+ 0xe5, 0xfb, 0xc1, 0x19, 0x1a, 0x0a, 0x52, 0xef,
+ 0xf6, 0x9f, 0x24, 0x45, 0xdf, 0x4f, 0x9b, 0x17,
+ 0xad, 0x2b, 0x41, 0x7b, 0xe6, 0x6c, 0x37, 0x10
+};
+static const uint8_t C1[] = {
+ 0x76, 0x49, 0xab, 0xac, 0x81, 0x19, 0xb2, 0x46,
+ 0xce, 0xe9, 0x8e, 0x9b, 0x12, 0xe9, 0x19, 0x7d,
+ 0x50, 0x86, 0xcb, 0x9b, 0x50, 0x72, 0x19, 0xee,
+ 0x95, 0xdb, 0x11, 0x3a, 0x91, 0x76, 0x78, 0xb2,
+ 0x73, 0xbe, 0xd6, 0xb8, 0xe3, 0xc1, 0x74, 0x3b,
+ 0x71, 0x16, 0xe6, 0x9e, 0x22, 0x22, 0x95, 0x16,
+ 0x3f, 0xf1, 0xca, 0xa1, 0x68, 0x1f, 0xac, 0x09,
+ 0x12, 0x0e, 0xca, 0x30, 0x75, 0x86, 0xe1, 0xa7
+};
+static const uint8_t DP1[] = {
+ 0x6F, 0xA4, 0x7D, 0x1B, 0x8E, 0xAB, 0x1D, 0xB9,
+ 0x8B, 0x62, 0xC9, 0xF2, 0xDF, 0xA2, 0xCC, 0x46,
+ 0x37, 0xB8, 0xD7, 0xB1
+};
+static const uint8_t DC1[] = {
+ 0xDF, 0x1E, 0x5A, 0xDB, 0xE7, 0x5A, 0xAB, 0xAE,
+ 0x0B, 0x98, 0x34, 0x30, 0xE8, 0x40, 0x8B, 0xB4,
+ 0xDB, 0x22, 0x3A, 0x89
+};
+
+/* Same key for cipher and hash */
+static const struct chained_vector chained_vectors[] = {
+ {K1, sizeof(K1), IV1, P1, sizeof(P1), C1,
+ K1, sizeof(K1), DP1, DC1, sizeof(DP1)},
+};
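+
+/*
+ * Note: K1/IV1/P1/C1 appear to be the standard AES-128-CBC example
+ * (NIST SP 800-38A, CBC-AES128), and DP1/DC1 the corresponding
+ * HMAC-SHA1 digests of the plaintext and ciphertext computed with K1
+ * as the HMAC key, as indicated by the "Same key for cipher and hash"
+ * comment above.
+ */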
+
+static int
+chained_job_ok(const JOB_AES_HMAC *job,
+ const unsigned num_vec,
+ const uint8_t *expected_text,
+ const unsigned text_len,
+ const uint8_t *received_text,
+ const uint8_t *expected_digest,
+ const unsigned digest_len,
+ const uint8_t *received_digest,
+ const uint8_t *padding,
+ const size_t sizeof_padding)
+{
+ if (job->status != STS_COMPLETED) {
+ printf("%d error status:%d, job %d\n",
+ __LINE__, job->status, num_vec);
+ return 0;
+ }
+
+ /* cipher checks */
+ if (memcmp(expected_text, received_text + sizeof_padding,
+ text_len)) {
+ printf("cipher %d mismatched\n", num_vec);
+ hexdump(stderr, "Received", received_text + sizeof_padding,
+ text_len);
+ hexdump(stderr, "Expected", expected_text,
+ text_len);
+ return 0;
+ }
+
+ if (memcmp(padding, received_text, sizeof_padding)) {
+ printf("cipher %d overwrite head\n", num_vec);
+ hexdump(stderr, "Target", received_text, sizeof_padding);
+ return 0;
+ }
+
+ if (memcmp(padding,
+ received_text + sizeof_padding + text_len,
+ sizeof_padding)) {
+ printf("cipher %d overwrite tail\n", num_vec);
+ hexdump(stderr, "Target",
+ received_text + sizeof_padding + text_len,
+ sizeof_padding);
+ return 0;
+ }
+
+ /* hash checks */
+ if (memcmp(expected_digest, received_digest + sizeof_padding,
+ digest_len)) {
+ printf("hash %d mismatched\n", num_vec);
+ hexdump(stderr, "Received", received_digest + sizeof_padding,
+ digest_len);
+ hexdump(stderr, "Expected", expected_digest,
+ digest_len);
+ return 0;
+ }
+
+ if (memcmp(padding, received_digest, sizeof_padding)) {
+ printf("hash %d overwrite head\n", num_vec);
+ hexdump(stderr, "Target", received_digest, sizeof_padding);
+ return 0;
+ }
+
+ if (memcmp(padding, received_digest + sizeof_padding + digest_len,
+ sizeof_padding)) {
+ printf("hash %d overwrite tail\n", num_vec);
+ hexdump(stderr, "Target",
+ received_digest + sizeof_padding + digest_len,
+ sizeof_padding);
+ return 0;
+ }
+
+
+ return 1;
+}
+
+static int
+test_chained_many(struct MB_MGR *mb_mgr,
+ const void *enc_keys,
+ const void *dec_keys,
+ const struct chained_vector *vec,
+ JOB_CIPHER_DIRECTION dir,
+ JOB_CHAIN_ORDER order,
+ JOB_CIPHER_MODE cipher,
+ JOB_HASH_ALG hash,
+ const void *ipad_hash,
+ const void *opad_hash,
+ const unsigned in_place,
+ const unsigned num_jobs)
+{
+ struct JOB_AES_HMAC *job;
+ uint8_t padding[16];
+ uint8_t **targets = NULL;
+ uint8_t **auths = NULL;
+ unsigned i, jobs_rx = 0;
+ int ret = -1;
+ const unsigned cipher_key_size = vec->cipher_key_len;
+ const void *iv = vec->IV;
+ const unsigned text_len = (unsigned) vec->PTlen;
+ const unsigned digest_size = vec->Digest_len;
+ const uint8_t *in_text = (dir == ENCRYPT) ? vec->PT : vec->CT;
+ const uint8_t *out_text = (dir == ENCRYPT) ? vec->CT : vec->PT;
+ const uint8_t *digest;
+
+ if (num_jobs == 0)
+ return 0;
+
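+ /*
+ * The expected digest depends on what the hash stage processes:
+ * ENCRYPT + CIPHER_HASH and DECRYPT + HASH_CIPHER both hash the
+ * ciphertext, so Digest_CT is expected; the other two combinations
+ * hash the plaintext, so Digest_PT is expected.
+ */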
+ if ((dir == ENCRYPT && order == CIPHER_HASH) ||
+ (dir == DECRYPT && order == HASH_CIPHER))
+ digest = vec->Digest_CT;
+ else
+ digest = vec->Digest_PT;
+
+ targets = malloc(num_jobs * sizeof(void *));
+ if (targets == NULL) {
+ fprintf(stderr, "Can't allocate memory for targets array\n");
+ goto end;
+ }
+ memset(targets, 0, num_jobs * sizeof(void *));
+ auths = malloc(num_jobs * sizeof(void *));
+ if (auths == NULL) {
+ fprintf(stderr, "Can't allocate memory for auths array\n");
+ goto end;
+ }
+ memset(auths, 0, num_jobs * sizeof(void *));
+
+ memset(padding, -1, sizeof(padding));
+
+ for (i = 0; i < num_jobs; i++) {
+ targets[i] = malloc(text_len + (sizeof(padding) * 2));
+ if (targets[i] == NULL) {
+ fprintf(stderr, "Can't allocate buffer memory\n");
+ goto end;
+ }
+ memset(targets[i], -1, text_len + (sizeof(padding) * 2));
+ if (in_place) {
+ /* copy input text to the allocated buffer */
+ memcpy(targets[i] + sizeof(padding), in_text, text_len);
+ }
+
+ auths[i] = malloc(digest_size + (sizeof(padding) * 2));
+ if (auths[i] == NULL) {
+ fprintf(stderr, "Can't allocate buffer memory\n");
+ goto end;
+ }
+ memset(auths[i], -1, digest_size + (sizeof(padding) * 2));
+ }
+
+ /* flush the scheduler */
+ while ((job = IMB_FLUSH_JOB(mb_mgr)) != NULL)
+ ;
+
+ for (i = 0; i < num_jobs; i++) {
+ job = IMB_GET_NEXT_JOB(mb_mgr);
+ job->cipher_direction = dir;
+ job->chain_order = order;
+ if (in_place) {
+ job->dst = targets[i] + sizeof(padding);
+ job->src = targets[i] + sizeof(padding);
+ } else {
+ job->dst = targets[i] + sizeof(padding);
+ job->src = in_text;
+ }
+ job->cipher_mode = cipher;
+ job->aes_enc_key_expanded = enc_keys;
+ job->aes_dec_key_expanded = dec_keys;
+ job->aes_key_len_in_bytes = cipher_key_size;
+
+ job->iv = iv;
+ job->iv_len_in_bytes = 16;
+ job->cipher_start_src_offset_in_bytes = 0;
+ job->msg_len_to_cipher_in_bytes = text_len;
+ job->user_data = (void *)((uint64_t)i);
+
+ job->hash_alg = hash;
+ job->auth_tag_output = auths[i] + sizeof(padding);
+ job->auth_tag_output_len_in_bytes = digest_size;
+ /*
+ * If the operation is out-of-place and the hash is performed
+ * after encryption/decryption, the hash must be computed over
+ * the destination buffer.
+ * Since hash_start_src_offset_in_bytes is an offset into the
+ * source buffer, it is set so that source + offset points at
+ * the destination buffer.
+ */
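+ /*
+ * A concrete example of the computation below: with src = 0x1000
+ * and dst = 0x3000 the offset is 0x2000, so src + offset lands on
+ * dst.  If dst < src, the subtraction is done modulo
+ * UINTPTR_MAX + 1 (2^64 on a 64-bit build) and the unsigned
+ * wrap-around of src + offset still yields dst.  The addresses
+ * here are purely illustrative.
+ */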
+ if (!in_place && (job->chain_order == CIPHER_HASH)) {
+ const uintptr_t u_src = (const uintptr_t) job->src;
+ const uintptr_t u_dst = (const uintptr_t) job->dst;
+ const uintptr_t offset = (u_dst > u_src) ?
+ (u_dst - u_src) :
+ (UINTPTR_MAX - u_src + u_dst + 1);
+
+ job->hash_start_src_offset_in_bytes = (uint64_t)offset;
+ } else {
+ job->hash_start_src_offset_in_bytes = 0;
+ }
+ job->msg_len_to_hash_in_bytes = text_len;
+ job->u.HMAC._hashed_auth_key_xor_ipad = ipad_hash;
+ job->u.HMAC._hashed_auth_key_xor_opad = opad_hash;
+
+ job = IMB_SUBMIT_JOB(mb_mgr);
+ if (job != NULL) {
+ jobs_rx++;
+ const unsigned num =
+ (const unsigned)((uint64_t)job->user_data);
+
+ if (!chained_job_ok(job, num, out_text, text_len,
+ targets[num],
+ digest, digest_size, auths[num],
+ padding, sizeof(padding)))
+ goto end;
+ }
+ }
+
+ while ((job = IMB_FLUSH_JOB(mb_mgr)) != NULL) {
+ jobs_rx++;
+ const unsigned num = (const unsigned)((uint64_t)job->user_data);
+
+ if (!chained_job_ok(job, num, out_text, text_len, targets[num],
+ digest, digest_size, auths[num],
+ padding, sizeof(padding)))
+ goto end;
+ }
+
+ if (jobs_rx != num_jobs) {
+ printf("Expected %d jobs, received %d\n", num_jobs, jobs_rx);
+ goto end;
+ }
+ ret = 0;
+
+ end:
+ while ((job = IMB_FLUSH_JOB(mb_mgr)) != NULL)
+ ;
+
+ for (i = 0; i < num_jobs; i++) {
+ if (targets != NULL)
+ free(targets[i]);
+ if (auths != NULL)
+ free(auths[i]);
+ }
+ free(targets);
+ free(auths);
+ return ret;
+}
+
+static int
+test_chained_vectors(struct MB_MGR *mb_mgr, const int vec_cnt,
+ const struct chained_vector *vec_tab, const char *banner,
+ const JOB_CIPHER_MODE cipher,
+ const JOB_HASH_ALG hash,
+ unsigned hash_block_size, int num_jobs)
+{
+ int vect, errors = 0;
+ DECLARE_ALIGNED(uint32_t enc_keys[15*4], 16);
+ DECLARE_ALIGNED(uint32_t dec_keys[15*4], 16);
+ uint8_t *buf = NULL;
+ uint8_t *hash_key = NULL;
+ DECLARE_ALIGNED(uint8_t ipad_hash[128], 16);
+ DECLARE_ALIGNED(uint8_t opad_hash[128], 16);
+ unsigned hash_key_len, i;
+
+ buf = malloc(hash_block_size);
+ if (buf == NULL) {
+ fprintf(stderr, "Can't allocate buffer memory\n");
+ goto exit;
+ }
+
+ hash_key = malloc(hash_block_size);
+ if (hash_key == NULL) {
+ fprintf(stderr, "Can't allocate key memory\n");
+ goto exit;
+ }
+
+ printf("%s (N jobs = %d):\n", banner, num_jobs);
+ for (vect = 0; vect < vec_cnt; vect++) {
+#ifdef DEBUG
+ printf("[%d/%d] Standard vector key_len:%d\n",
+ vect + 1, vec_cnt,
+ (int) vec_tab[vect].cipher_key_len);
+#else
+ printf(".");
+#endif
+ /* prepare the cipher key */
+ switch (vec_tab[vect].cipher_key_len) {
+ case 16:
+ IMB_AES_KEYEXP_128(mb_mgr, vec_tab[vect].cipher_key,
+ enc_keys, dec_keys);
+ break;
+ case 24:
+ IMB_AES_KEYEXP_192(mb_mgr, vec_tab[vect].cipher_key,
+ enc_keys, dec_keys);
+ break;
+ case 32:
+ default:
+ IMB_AES_KEYEXP_256(mb_mgr, vec_tab[vect].cipher_key,
+ enc_keys, dec_keys);
+ break;
+ }
+
+ /* prepare the hash key */
+ memset(hash_key, 0, hash_block_size);
+ if (vec_tab[vect].hash_key_len <= hash_block_size) {
+ memcpy(hash_key, vec_tab[vect].hash_key,
+ vec_tab[vect].hash_key_len);
+ hash_key_len = (unsigned) vec_tab[vect].hash_key_len;
+ } else {
+ IMB_SHA1(mb_mgr, vec_tab[vect].hash_key,
+ vec_tab[vect].hash_key_len, hash_key);
+ hash_key_len = hash_block_size;
+ }
+
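+ /*
+ * Precompute the HMAC inner and outer states: SHA1 over the key
+ * XOR-ed with the ipad/opad constants. These one-block digests
+ * are what the job's _hashed_auth_key_xor_ipad/_opad fields
+ * consume.
+ */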
+ /* compute ipad hash */
+ memset(buf, 0x36, hash_block_size);
+ for (i = 0; i < hash_key_len; i++)
+ buf[i] ^= hash_key[i];
+ IMB_SHA1_ONE_BLOCK(mb_mgr, buf, ipad_hash);
+
+ /* compute opad hash */
+ memset(buf, 0x5c, hash_block_size);
+ for (i = 0; i < hash_key_len; i++)
+ buf[i] ^= hash_key[i];
+ IMB_SHA1_ONE_BLOCK(mb_mgr, buf, opad_hash);
+
+ for (i = 0; i < DIM(test_sets); i++) {
+ unsigned in_place;
+
+ for (in_place = 0; in_place < DIM(place_str);
+ in_place++) {
+ if (test_chained_many(mb_mgr,
+ enc_keys, dec_keys,
+ &vec_tab[vect],
+ test_sets[i].dir,
+ test_sets[i].order,
+ cipher, hash,
+ ipad_hash, opad_hash,
+ in_place, num_jobs)) {
+ printf("error #%d %s %s\n", vect + 1,
+ test_sets[i].set_name,
+ place_str[in_place]);
+ errors++;
+ }
+ }
+ }
+ }
+ printf("\n");
+
+exit:
+ free(buf);
+ free(hash_key);
+ return errors;
+}
+
+int
+chained_test(const enum arch_type arch,
+ struct MB_MGR *mb_mgr)
+{
+ const int num_jobs_tab[] = {
+ 1, 3, 4, 5, 7, 8, 9, 15, 16, 17
+ };
+ unsigned i;
+ int errors = 0;
+
+ (void) arch; /* unused */
+
+ for (i = 0; i < DIM(num_jobs_tab); i++)
+ errors += test_chained_vectors(mb_mgr, DIM(chained_vectors),
+ chained_vectors,
+ "AES-CBC + SHA1-HMAC standard test vectors",
+ CBC, SHA1, SHA1_BLOCK_SIZE,
+ num_jobs_tab[i]);
+ if (0 == errors)
+ printf("...Pass\n");
+ else
+ printf("...Fail\n");
+
+ return errors;
+}
diff --git a/src/spdk/intel-ipsec-mb/LibTestApp/cmac_test.c b/src/spdk/intel-ipsec-mb/LibTestApp/cmac_test.c
new file mode 100644
index 000000000..9365af761
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/LibTestApp/cmac_test.c
@@ -0,0 +1,1354 @@
+/*****************************************************************************
+ Copyright (c) 2018, Intel Corporation
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*****************************************************************************/
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <intel-ipsec-mb.h>
+#include "gcm_ctr_vectors_test.h"
+#include "utils.h"
+
+enum cmac_type {
+ CMAC = 0,
+ CMAC_BITLEN,
+};
+
+int cmac_test(const enum arch_type arch, struct MB_MGR *mb_mgr);
+
+/*
+ * Test vectors from https://tools.ietf.org/html/rfc4493
+ */
+
+/*
+ * Subkey Generation
+ * K 2b7e1516 28aed2a6 abf71588 09cf4f3c
+ * AES-128(key,0) 7df76b0c 1ab899b3 3e42f047 b91b546f
+ * K1 fbeed618 35713366 7c85e08f 7236a8de
+ * K2 f7ddac30 6ae266cc f90bc11e e46d513b
+ */
+static const uint8_t key[16] = {
+ 0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6,
+ 0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c
+};
+static const uint8_t sub_key1[16] = {
+ 0xfb, 0xee, 0xd6, 0x18, 0x35, 0x71, 0x33, 0x66,
+ 0x7c, 0x85, 0xe0, 0x8f, 0x72, 0x36, 0xa8, 0xde
+};
+static const uint8_t sub_key2[16] = {
+ 0xf7, 0xdd, 0xac, 0x30, 0x6a, 0xe2, 0x66, 0xcc,
+ 0xf9, 0x0b, 0xc1, 0x1e, 0xe4, 0x6d, 0x51, 0x3b
+};
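+
+/*
+ * Reference sketch (hypothetical helper, not part of the library API and
+ * not used by the tests below): RFC 4493 derives the subkeys from
+ * L = AES-128(key, 0^128) by a left shift in GF(2^128) with a conditional
+ * XOR of 0x87, i.e. K1 = shift(L) and K2 = shift(K1). The tests themselves
+ * rely on IMB_AES_CMAC_SUBKEY_GEN_128() instead.
+ */
+static void
+cmac_shift_left_ref(const uint8_t in[16], uint8_t out[16])
+{
+ const uint8_t carry = in[0] >> 7; /* bit shifted out of the block */
+ unsigned i;
+
+ for (i = 0; i < 15; i++)
+ out[i] = (uint8_t) ((in[i] << 1) | (in[i + 1] >> 7));
+ out[15] = (uint8_t) (in[15] << 1);
+ if (carry)
+ out[15] ^= 0x87; /* Rb constant for 128-bit blocks */
+}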
+
+/*
+ * Example 1: len = 0
+ * M <empty string>
+ * AES-CMAC bb1d6929 e9593728 7fa37d12 9b756746
+ */
+static const uint8_t T_1[16] = {
+ 0xbb, 0x1d, 0x69, 0x29, 0xe9, 0x59, 0x37, 0x28,
+ 0x7f, 0xa3, 0x7d, 0x12, 0x9b, 0x75, 0x67, 0x46
+};
+
+/*
+ * Example 2: len = 16
+ * M 6bc1bee2 2e409f96 e93d7e11 7393172a
+ * AES-CMAC 070a16b4 6b4d4144 f79bdd9d d04a287c
+ */
+static const uint8_t T_2[16] = {
+ 0x07, 0x0a, 0x16, 0xb4, 0x6b, 0x4d, 0x41, 0x44,
+ 0xf7, 0x9b, 0xdd, 0x9d, 0xd0, 0x4a, 0x28, 0x7c
+};
+
+/*
+ * Example 3: len = 40
+ * M 6bc1bee2 2e409f96 e93d7e11 7393172a
+ * ae2d8a57 1e03ac9c 9eb76fac 45af8e51
+ * 30c81c46 a35ce411
+ * AES-CMAC dfa66747 de9ae630 30ca3261 1497c827
+ */
+static const uint8_t T_3[16] = {
+ 0xdf, 0xa6, 0x67, 0x47, 0xde, 0x9a, 0xe6, 0x30,
+ 0x30, 0xca, 0x32, 0x61, 0x14, 0x97, 0xc8, 0x27
+};
+
+/*
+ * Example 4: len = 64
+ * M 6bc1bee2 2e409f96 e93d7e11 7393172a
+ * ae2d8a57 1e03ac9c 9eb76fac 45af8e51
+ * 30c81c46 a35ce411 e5fbc119 1a0a52ef
+ * f69f2445 df4f9b17 ad2b417b e66c3710
+ * AES-CMAC 51f0bebf 7e3b9d92 fc497417 79363cfe
+ */
+static const uint8_t T_4[16] = {
+ 0x51, 0xf0, 0xbe, 0xbf, 0x7e, 0x3b, 0x9d, 0x92,
+ 0xfc, 0x49, 0x74, 0x17, 0x79, 0x36, 0x3c, 0xfe
+};
+
+/*
+ * Custom Vector
+ *
+ * Example 5: len = 8
+ * M 6bc1bee2 2e409f96
+ * AES-CMAC dc87cdcf 77a2f182 9e012c4d 31af2f8b
+ */
+static const uint8_t T_5[16] = {
+ 0xdc, 0x87, 0xcd, 0xcf, 0x77, 0xa2, 0xf1, 0x82,
+ 0x9e, 0x01, 0x2c, 0x4d, 0x31, 0xaf, 0x2f, 0x8b
+};
+
+static const uint8_t M[64] = {
+ 0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f, 0x96,
+ 0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a,
+ 0xae, 0x2d, 0x8a, 0x57, 0x1e, 0x03, 0xac, 0x9c,
+ 0x9e, 0xb7, 0x6f, 0xac, 0x45, 0xaf, 0x8e, 0x51,
+ 0x30, 0xc8, 0x1c, 0x46, 0xa3, 0x5c, 0xe4, 0x11,
+ 0xe5, 0xfb, 0xc1, 0x19, 0x1a, 0x0a, 0x52, 0xef,
+ 0xf6, 0x9f, 0x24, 0x45, 0xdf, 0x4f, 0x9b, 0x17,
+ 0xad, 0x2b, 0x41, 0x7b, 0xe6, 0x6c, 0x37, 0x10
+};
+
+/*
+ * 3GPP 33.401 C.2.1 Test Case 1
+ *
+ * CMAC(K,M):
+ * K = (hex) 2bd6459f 82c5b300 952c4910 4881ff48
+ * Mlen = 122 (bits)
+ * M = (hex) 38a6f056 c0000000 33323462 63393840
+ *
+ * Subkey generation:
+ * K1 = (hex) dc84c270 b5bf83f9 6f90be18 8d3f6418
+ * K2 = (hex) b90984e1 6b7f07f2 df217c31 1a7ec8b7
+ *
+ * MAC generation:
+ * C1 = (hex) 118c6eb8 b775144b 0b831110 54c96eb6
+ * MACT = (hex) 118c6eb8
+ */
+static const uint8_t EIA2_128_K_1[16] = {
+ 0x2b, 0xd6, 0x45, 0x9f, 0x82, 0xc5, 0xb3, 0x00,
+ 0x95, 0x2c, 0x49, 0x10, 0x48, 0x81, 0xff, 0x48
+};
+
+static const uint8_t EIA2_128_SK1_1[16] = {
+ 0xdc, 0x84, 0xc2, 0x70, 0xb5, 0xbf, 0x83, 0xf9,
+ 0x6f, 0x90, 0xbe, 0x18, 0x8d, 0x3f, 0x64, 0x18
+};
+
+static const uint8_t EIA2_128_SK2_1[16] = {
+ 0xb9, 0x09, 0x84, 0xe1, 0x6b, 0x7f, 0x07, 0xf2,
+ 0xdf, 0x21, 0x7c, 0x31, 0x1a, 0x7e, 0xc8, 0xb7
+};
+
+static const uint8_t EIA2_128_T_1[4] = {
+ 0x11, 0x8c, 0x6e, 0xb8
+};
+
+static const uint8_t EIA2_128_M_1[16] = {
+ 0x38, 0xa6, 0xf0, 0x56, 0xc0, 0x00, 0x00, 0x00,
+ 0x33, 0x32, 0x34, 0x62, 0x63, 0x39, 0x38, 0x40 /* 0x40 = 0100 0000 */
+};
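+
+/*
+ * Illustrative helper (hypothetical name, not used by the tests): the first
+ * 8 bytes of every EIA2_128_M_* buffer are the 3GPP 128-EIA2 header
+ * COUNT(32 bits) || BEARER(5 bits) || DIRECTION(1 bit) || 26 zero bits,
+ * prepended to the message before CMAC is run over the whole buffer.
+ * For Test Case 1 the bytes 38a6f056 c0000000 decode to COUNT=0x38a6f056,
+ * BEARER=0x18, DIRECTION=0.
+ */
+static void
+eia2_pack_header(uint8_t hdr[8], const uint32_t count,
+ const uint8_t bearer, const uint8_t direction)
+{
+ hdr[0] = (uint8_t) (count >> 24);
+ hdr[1] = (uint8_t) (count >> 16);
+ hdr[2] = (uint8_t) (count >> 8);
+ hdr[3] = (uint8_t) count;
+ /* BEARER in the top 5 bits, DIRECTION in the next bit, rest zero */
+ hdr[4] = (uint8_t) (((bearer & 0x1f) << 3) | ((direction & 1) << 2));
+ hdr[5] = 0;
+ hdr[6] = 0;
+ hdr[7] = 0;
+}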
+
+/*
+ * 3GPP 33.401 C.2.1 Test Case 2
+ *
+ * CMAC(K, M):
+ * K = d3c5d592 327fb11c 4035c668 0af8c6d1
+ * Mlen = 128
+ * M = 398a59b4 d4000000 484583d5 afe082ae
+ *
+ * Subkey Generation:
+ * L = 9b71f299 132915d3 605211b5 e5df8632
+ * K1 = 36e3e532 26522ba6 c0a4236b cbbf0ce3
+ * K2 = 6dc7ca64 4ca4574d 814846d7 977e19c6
+ *
+ * MAC generation:
+ * C1 = b93787e6 493ff113 ad73d3e0 1e826d73
+ * MACT = b93787e6
+ */
+static const uint8_t EIA2_128_K_2[16] = {
+ 0xd3, 0xc5, 0xd5, 0x92, 0x32, 0x7f, 0xb1, 0x1c,
+ 0x40, 0x35, 0xc6, 0x68, 0x0a, 0xf8, 0xc6, 0xd1
+};
+
+static const uint8_t EIA2_128_SK1_2[16] = {
+ 0x36, 0xe3, 0xe5, 0x32, 0x26, 0x52, 0x2b, 0xa6,
+ 0xc0, 0xa4, 0x23, 0x6b, 0xcb, 0xbf, 0x0c, 0xe3
+};
+
+static const uint8_t EIA2_128_SK2_2[16] = {
+ 0x6d, 0xc7, 0xca, 0x64, 0x4c, 0xa4, 0x57, 0x4d,
+ 0x81, 0x48, 0x46, 0xd7, 0x97, 0x7e, 0x19, 0xc6
+};
+
+static const uint8_t EIA2_128_T_2[4] = {
+ 0xb9, 0x37, 0x87, 0xe6
+};
+
+static const uint8_t EIA2_128_M_2[16] = {
+ 0x39, 0x8a, 0x59, 0xb4, 0xd4, 0x00, 0x00, 0x00,
+ 0x48, 0x45, 0x83, 0xd5, 0xaf, 0xe0, 0x82, 0xae
+};
+
+/*
+ * 3GPP 33.401 C.2.1 Test Case 3
+ *
+ * CMAC(K, M):
+ * K = 7e5e9443 1e11d738 28d739cc 6ced4573
+ * Mlen = 318
+ * M = 36af6144 c4000000 b3d3c917 0a4e1632 f60f8610 13d22d84 b726b6a2
+ * 78d802d1 eeaf1321 ba5929dc
+ *
+ * Subkey Generation:
+ * L = d78b4628 35781e79 d2255f8d 309a60ef
+ * K1 = af168c50 6af03cf3 a44abf1a 6134c159
+ * K2 = 5e2d18a0 d5e079e7 48957e34 c2698235
+ *
+ * MAC generation:
+ * C3 = 1f60b01d e05aa666 3bda32c6 1771e70b
+ * MACT = 1f60b01d
+ */
+static const uint8_t EIA2_128_K_3[16] = {
+ 0x7e, 0x5e, 0x94, 0x43, 0x1e, 0x11, 0xd7, 0x38,
+ 0x28, 0xd7, 0x39, 0xcc, 0x6c, 0xed, 0x45, 0x73
+};
+
+static const uint8_t EIA2_128_SK1_3[16] = {
+ 0xaf, 0x16, 0x8c, 0x50, 0x6a, 0xf0, 0x3c, 0xf3,
+ 0xa4, 0x4a, 0xbf, 0x1a, 0x61, 0x34, 0xc1, 0x59
+};
+
+static const uint8_t EIA2_128_SK2_3[16] = {
+ 0x5e, 0x2d, 0x18, 0xa0, 0xd5, 0xe0, 0x79, 0xe7,
+ 0x48, 0x95, 0x7e, 0x34, 0xc2, 0x69, 0x82, 0x35
+};
+
+static const uint8_t EIA2_128_T_3[4] = {
+ 0x1f, 0x60, 0xb0, 0x1d
+};
+
+static const uint8_t EIA2_128_M_3[40] = {
+ 0x36, 0xaf, 0x61, 0x44, 0xc4, 0x00, 0x00, 0x00,
+ 0xb3, 0xd3, 0xc9, 0x17, 0x0a, 0x4e, 0x16, 0x32,
+ 0xf6, 0x0f, 0x86, 0x10, 0x13, 0xd2, 0x2d, 0x84,
+ 0xb7, 0x26, 0xb6, 0xa2, 0x78, 0xd8, 0x02, 0xd1,
+ 0xee, 0xaf, 0x13, 0x21, 0xba, 0x59, 0x29, 0xdc
+};
+
+/*
+ * 3GPP 33.401 C.2.1 Test Case 4
+ *
+ * CMAC(K, M):
+ * K = d3419be8 21087acd 02123a92 48033359
+ * Mlen = 575
+ * M = c7590ea9 b8000000 bbb05703 8809496b
+ * cff86d6f bc8ce5b1 35a06b16 6054f2d5
+ * 65be8ace 75dc851e 0bcdd8f0 7141c495
+ * 872fb5d8 c0c66a8b 6da55666 3e4e4612
+ * 05d84580 bee5bc7e
+ *
+ * Subkey Generation:
+ * L = 054dd008 2d9ecd21 a3f32b0a a7369be4
+ * K1 = 0a9ba010 5b3d9a43 47e65615 4e6d37c8
+ * K2 = 15374020 b67b3486 8fccac2a 9cda6f90
+ *
+ * MAC generation:
+ * C5 = 6846a2f0 a0b6be7a 4fb26a15 7e914c53
+ * MACT = 6846a2f0
+ */
+static const uint8_t EIA2_128_K_4[16] = {
+ 0xd3, 0x41, 0x9b, 0xe8, 0x21, 0x08, 0x7a, 0xcd,
+ 0x02, 0x12, 0x3a, 0x92, 0x48, 0x03, 0x33, 0x59
+};
+
+static const uint8_t EIA2_128_SK1_4[16] = {
+ 0x0a, 0x9b, 0xa0, 0x10, 0x5b, 0x3d, 0x9a, 0x43,
+ 0x47, 0xe6, 0x56, 0x15, 0x4e, 0x6d, 0x37, 0xc8
+};
+
+static const uint8_t EIA2_128_SK2_4[16] = {
+ 0x15, 0x37, 0x40, 0x20, 0xb6, 0x7b, 0x34, 0x86,
+ 0x8f, 0xcc, 0xac, 0x2a, 0x9c, 0xda, 0x6f, 0x90
+};
+
+static const uint8_t EIA2_128_T_4[4] = {
+ 0x68, 0x46, 0xa2, 0xf0
+};
+
+static const uint8_t EIA2_128_M_4[72] = {
+ 0xc7, 0x59, 0x0e, 0xa9, 0xb8, 0x00, 0x00, 0x00,
+ 0xbb, 0xb0, 0x57, 0x03, 0x88, 0x09, 0x49, 0x6b,
+ 0xcf, 0xf8, 0x6d, 0x6f, 0xbc, 0x8c, 0xe5, 0xb1,
+ 0x35, 0xa0, 0x6b, 0x16, 0x60, 0x54, 0xf2, 0xd5,
+ 0x65, 0xbe, 0x8a, 0xce, 0x75, 0xdc, 0x85, 0x1e,
+ 0x0b, 0xcd, 0xd8, 0xf0, 0x71, 0x41, 0xc4, 0x95,
+ 0x87, 0x2f, 0xb5, 0xd8, 0xc0, 0xc6, 0x6a, 0x8b,
+ 0x6d, 0xa5, 0x56, 0x66, 0x3e, 0x4e, 0x46, 0x12,
+ 0x05, 0xd8, 0x45, 0x80, 0xbe, 0xe5, 0xbc, 0x7e
+};
+
+/*
+ * 3GPP 33.401 C.2.1 Test Case 5
+ *
+ * CMAC(K, M):
+ * K = 83fd23a2 44a74cf3 58da3019 f1722635
+ * Mlen = 832
+ * M = 36af6144 7c000000 35c68716 633c66fb
+ * 750c2668 65d53c11 ea05b1e9 fa49c839
+ * 8d48e1ef a5909d39 47902837 f5ae96d5
+ * a05bc8d6 1ca8dbef 1b13a4b4 abfe4fb1
+ * 006045b6 74bb5472 9304c382 be53a5af
+ * 05556176 f6eaa2ef 1d05e4b0 83181ee6
+ * 74cda5a4 85f74d7a
+ *
+ * Subkey Generation:
+ * L = 9df61c57 3c86acac 704db9d5 b0dea444
+ * K1 = 3bec38ae 790d5958 e09b73ab 61bd480f
+ * K2 = 77d8715c f21ab2b1 c136e756 c37a901e
+ *
+ * MAC generation:
+ * C7 = e657e182 5298f2fa ee2ca1e0 7373bc7e
+ * MACT = e657e182
+ */
+static const uint8_t EIA2_128_K_5[16] = {
+ 0x83, 0xfd, 0x23, 0xa2, 0x44, 0xa7, 0x4c, 0xf3,
+ 0x58, 0xda, 0x30, 0x19, 0xf1, 0x72, 0x26, 0x35
+};
+
+static const uint8_t EIA2_128_SK1_5[16] = {
+ 0x3b, 0xec, 0x38, 0xae, 0x79, 0x0d, 0x59, 0x58,
+ 0xe0, 0x9b, 0x73, 0xab, 0x61, 0xbd, 0x48, 0x0f
+};
+
+static const uint8_t EIA2_128_SK2_5[16] = {
+ 0x77, 0xd8, 0x71, 0x5c, 0xf2, 0x1a, 0xb2, 0xb1,
+ 0xc1, 0x36, 0xe7, 0x56, 0xc3, 0x7a, 0x90, 0x1e
+};
+
+static const uint8_t EIA2_128_T_5[4] = {
+ 0xe6, 0x57, 0xe1, 0x82
+};
+
+static const uint8_t EIA2_128_M_5[104] = {
+ 0x36, 0xaf, 0x61, 0x44, 0x7c, 0x00, 0x00, 0x00,
+ 0x35, 0xc6, 0x87, 0x16, 0x63, 0x3c, 0x66, 0xfb,
+ 0x75, 0x0c, 0x26, 0x68, 0x65, 0xd5, 0x3c, 0x11,
+ 0xea, 0x05, 0xb1, 0xe9, 0xfa, 0x49, 0xc8, 0x39,
+ 0x8d, 0x48, 0xe1, 0xef, 0xa5, 0x90, 0x9d, 0x39,
+ 0x47, 0x90, 0x28, 0x37, 0xf5, 0xae, 0x96, 0xd5,
+ 0xa0, 0x5b, 0xc8, 0xd6, 0x1c, 0xa8, 0xdb, 0xef,
+ 0x1b, 0x13, 0xa4, 0xb4, 0xab, 0xfe, 0x4f, 0xb1,
+ 0x00, 0x60, 0x45, 0xb6, 0x74, 0xbb, 0x54, 0x72,
+ 0x93, 0x04, 0xc3, 0x82, 0xbe, 0x53, 0xa5, 0xaf,
+ 0x05, 0x55, 0x61, 0x76, 0xf6, 0xea, 0xa2, 0xef,
+ 0x1d, 0x05, 0xe4, 0xb0, 0x83, 0x18, 0x1e, 0xe6,
+ 0x74, 0xcd, 0xa5, 0xa4, 0x85, 0xf7, 0x4d, 0x7a
+};
+
+/*
+ * 3GPP 33.401 C.2.1 Test Case 6
+ *
+ * CMAC(K, M):
+ * K = 6832a65c ff447362 1ebdd4ba 26a921fe
+ * Mlen = 447
+ * M = 36af6144 c0000000 d3c53839 62682071
+ * 77656676 20323837 63624098 1ba6824c
+ * 1bfb1ab4 85472029 b71d808c e33e2cc3
+ * c0b5fc1f 3de8a6dc
+ *
+ * Subkey Generation:
+ * L = e50123c3 87e13fd6 8d8bf0d0 a4581685
+ * K1 = ca024787 0fc27fad 1b17e1a1 48b02d8d
+ * K2 = 94048f0e 1f84ff5a 362fc342 91605b9d
+ *
+ * MAC generation:
+ * C4 = f0668c1e 4197300b 1243f834 25d06c25
+ * MACT = f0668c1e
+ */
+static const uint8_t EIA2_128_K_6[16] = {
+ 0x68, 0x32, 0xa6, 0x5c, 0xff, 0x44, 0x73, 0x62,
+ 0x1e, 0xbd, 0xd4, 0xba, 0x26, 0xa9, 0x21, 0xfe
+};
+
+static const uint8_t EIA2_128_SK1_6[16] = {
+ 0xca, 0x02, 0x47, 0x87, 0x0f, 0xc2, 0x7f, 0xad,
+ 0x1b, 0x17, 0xe1, 0xa1, 0x48, 0xb0, 0x2d, 0x8d
+};
+
+static const uint8_t EIA2_128_SK2_6[16] = {
+ 0x94, 0x04, 0x8f, 0x0e, 0x1f, 0x84, 0xff, 0x5a,
+ 0x36, 0x2f, 0xc3, 0x42, 0x91, 0x60, 0x5b, 0x9d
+};
+
+static const uint8_t EIA2_128_T_6[4] = {
+ 0xf0, 0x66, 0x8c, 0x1e
+};
+
+static const uint8_t EIA2_128_M_6[56] = {
+ 0x36, 0xaf, 0x61, 0x44, 0xc0, 0x00, 0x00, 0x00,
+ 0xd3, 0xc5, 0x38, 0x39, 0x62, 0x68, 0x20, 0x71,
+ 0x77, 0x65, 0x66, 0x76, 0x20, 0x32, 0x38, 0x37,
+ 0x63, 0x62, 0x40, 0x98, 0x1b, 0xa6, 0x82, 0x4c,
+ 0x1b, 0xfb, 0x1a, 0xb4, 0x85, 0x47, 0x20, 0x29,
+ 0xb7, 0x1d, 0x80, 0x8c, 0xe3, 0x3e, 0x2c, 0xc3,
+ 0xc0, 0xb5, 0xfc, 0x1f, 0x3d, 0xe8, 0xa6, 0xdc
+};
+
+/*
+ * 3GPP 33.401 C.2.1 Test Case 7
+ *
+ * CMAC(K, M):
+ * K = 5d0a80d8 134ae196 77824b67 1e838af4
+ * Mlen = 2622
+ * M = 7827fab2 2c000000 70dedf2d c42c5cbd
+ * 3a96f8a0 b11418b3 608d5733 604a2cd3
+ * 6aabc70c e3193bb5 153be2d3 c06dfdb2
+ * d16e9c35 7158be6a 41d6b861 e491db3f
+ * bfeb518e fcf048d7 d5895373 0ff30c9e
+ * c470ffcd 663dc342 01c36add c0111c35
+ * b38afee7 cfdb582e 3731f8b4 baa8d1a8
+ * 9c06e811 99a97162 27be344e fcb436dd
+ * d0f096c0 64c3b5e2 c399993f c77394f9
+ * e09720a8 11850ef2 3b2ee05d 9e617360
+ * 9d86e1c0 c18ea51a 012a00bb 413b9cb8
+ * 188a703c d6bae31c c67b34b1 b00019e6
+ * a2b2a690 f02671fe 7c9ef8de c0094e53
+ * 3763478d 58d2c5f5 b827a014 8c5948a9
+ * 6931acf8 4f465a64 e62ce740 07e991e3
+ * 7ea823fa 0fb21923 b79905b7 33b631e6
+ * c7d6860a 3831ac35 1a9c730c 52ff72d9
+ * d308eedb ab21fde1 43a0ea17 e23edc1f
+ * 74cbb363 8a2033aa a15464ea a733385d
+ * bbeb6fd7 3509b857 e6a419dc a1d8907a
+ * f977fbac 4dfa35ec
+ *
+ * Subkey Generation:
+ * L = 9832e229 fbb93970 bcf7b282 3ee4fe5d
+ * K1 = 3065c453 f77272e1 79ef6504 7dc9fc3d
+ * K2 = 60cb88a7 eee4e5c2 f3deca08 fb93f87a
+ *
+ * MAC generation:
+ * C21 = f4cc8fa3 59e6e2e7 6e09c45d 6ea5e0de
+ * MACT = f4cc8fa3
+ */
+static const uint8_t EIA2_128_K_7[16] = {
+ 0x5d, 0x0a, 0x80, 0xd8, 0x13, 0x4a, 0xe1, 0x96,
+ 0x77, 0x82, 0x4b, 0x67, 0x1e, 0x83, 0x8a, 0xf4
+};
+
+static const uint8_t EIA2_128_SK1_7[16] = {
+ 0x30, 0x65, 0xc4, 0x53, 0xf7, 0x72, 0x72, 0xe1,
+ 0x79, 0xef, 0x65, 0x04, 0x7d, 0xc9, 0xfc, 0x3d
+};
+
+static const uint8_t EIA2_128_SK2_7[16] = {
+ 0x60, 0xcb, 0x88, 0xa7, 0xee, 0xe4, 0xe5, 0xc2,
+ 0xf3, 0xde, 0xca, 0x08, 0xfb, 0x93, 0xf8, 0x7a
+};
+
+static const uint8_t EIA2_128_T_7[4] = {
+ 0xf4, 0xcc, 0x8f, 0xa3
+};
+
+static const uint8_t EIA2_128_M_7[328] = {
+ 0x78, 0x27, 0xfa, 0xb2, 0x2c, 0x00, 0x00, 0x00,
+ 0x70, 0xde, 0xdf, 0x2d, 0xc4, 0x2c, 0x5c, 0xbd,
+ 0x3a, 0x96, 0xf8, 0xa0, 0xb1, 0x14, 0x18, 0xb3,
+ 0x60, 0x8d, 0x57, 0x33, 0x60, 0x4a, 0x2c, 0xd3,
+ 0x6a, 0xab, 0xc7, 0x0c, 0xe3, 0x19, 0x3b, 0xb5,
+ 0x15, 0x3b, 0xe2, 0xd3, 0xc0, 0x6d, 0xfd, 0xb2,
+ 0xd1, 0x6e, 0x9c, 0x35, 0x71, 0x58, 0xbe, 0x6a,
+ 0x41, 0xd6, 0xb8, 0x61, 0xe4, 0x91, 0xdb, 0x3f,
+ 0xbf, 0xeb, 0x51, 0x8e, 0xfc, 0xf0, 0x48, 0xd7,
+ 0xd5, 0x89, 0x53, 0x73, 0x0f, 0xf3, 0x0c, 0x9e,
+ 0xc4, 0x70, 0xff, 0xcd, 0x66, 0x3d, 0xc3, 0x42,
+ 0x01, 0xc3, 0x6a, 0xdd, 0xc0, 0x11, 0x1c, 0x35,
+ 0xb3, 0x8a, 0xfe, 0xe7, 0xcf, 0xdb, 0x58, 0x2e,
+ 0x37, 0x31, 0xf8, 0xb4, 0xba, 0xa8, 0xd1, 0xa8,
+ 0x9c, 0x06, 0xe8, 0x11, 0x99, 0xa9, 0x71, 0x62,
+ 0x27, 0xbe, 0x34, 0x4e, 0xfc, 0xb4, 0x36, 0xdd,
+ 0xd0, 0xf0, 0x96, 0xc0, 0x64, 0xc3, 0xb5, 0xe2,
+ 0xc3, 0x99, 0x99, 0x3f, 0xc7, 0x73, 0x94, 0xf9,
+ 0xe0, 0x97, 0x20, 0xa8, 0x11, 0x85, 0x0e, 0xf2,
+ 0x3b, 0x2e, 0xe0, 0x5d, 0x9e, 0x61, 0x73, 0x60,
+ 0x9d, 0x86, 0xe1, 0xc0, 0xc1, 0x8e, 0xa5, 0x1a,
+ 0x01, 0x2a, 0x00, 0xbb, 0x41, 0x3b, 0x9c, 0xb8,
+ 0x18, 0x8a, 0x70, 0x3c, 0xd6, 0xba, 0xe3, 0x1c,
+ 0xc6, 0x7b, 0x34, 0xb1, 0xb0, 0x00, 0x19, 0xe6,
+ 0xa2, 0xb2, 0xa6, 0x90, 0xf0, 0x26, 0x71, 0xfe,
+ 0x7c, 0x9e, 0xf8, 0xde, 0xc0, 0x09, 0x4e, 0x53,
+ 0x37, 0x63, 0x47, 0x8d, 0x58, 0xd2, 0xc5, 0xf5,
+ 0xb8, 0x27, 0xa0, 0x14, 0x8c, 0x59, 0x48, 0xa9,
+ 0x69, 0x31, 0xac, 0xf8, 0x4f, 0x46, 0x5a, 0x64,
+ 0xe6, 0x2c, 0xe7, 0x40, 0x07, 0xe9, 0x91, 0xe3,
+ 0x7e, 0xa8, 0x23, 0xfa, 0x0f, 0xb2, 0x19, 0x23,
+ 0xb7, 0x99, 0x05, 0xb7, 0x33, 0xb6, 0x31, 0xe6,
+ 0xc7, 0xd6, 0x86, 0x0a, 0x38, 0x31, 0xac, 0x35,
+ 0x1a, 0x9c, 0x73, 0x0c, 0x52, 0xff, 0x72, 0xd9,
+ 0xd3, 0x08, 0xee, 0xdb, 0xab, 0x21, 0xfd, 0xe1,
+ 0x43, 0xa0, 0xea, 0x17, 0xe2, 0x3e, 0xdc, 0x1f,
+ 0x74, 0xcb, 0xb3, 0x63, 0x8a, 0x20, 0x33, 0xaa,
+ 0xa1, 0x54, 0x64, 0xea, 0xa7, 0x33, 0x38, 0x5d,
+ 0xbb, 0xeb, 0x6f, 0xd7, 0x35, 0x09, 0xb8, 0x57,
+ 0xe6, 0xa4, 0x19, 0xdc, 0xa1, 0xd8, 0x90, 0x7a,
+ 0xf9, 0x77, 0xfb, 0xac, 0x4d, 0xfa, 0x35, 0xec
+};
+
+/*
+ * 3GPP 33.401 C.2.1 Test Case 8
+ *
+ * CMAC(K, M):
+ * K = b3120ffd b2cf6af4 e73eaf2e f4ebec69
+ * Mlen = 16512
+ * M = 296f393c 5c000000 00000000 00000000
+ * 01010101 01010101 e0958045 f3a0bba4
+ * e3968346 f0a3b8a7 c02a018a e6407652
+ * 26b987c9 13e6cbf0 83570016 cf83efbc
+ * 61c08251 3e21561a 427c009d 28c298ef
+ * ace78ed6 d56c2d45 05ad032e 9c04dc60
+ * e73a8169 6da665c6 c48603a5 7b45ab33
+ * 221585e6 8ee31691 87fb0239 528632dd
+ * 656c807e a3248b7b 46d002b2 b5c7458e
+ * b85b9ce9 5879e034 0859055e 3b0abbc3
+ * eace8719 caa80265 c97205d5 dc4bcc90
+ * 2fe18396 29ed7132 8a0f0449 f588557e
+ * 6898860e 042aecd8 4b2404c2 12c9222d
+ * a5bf8a89 ef679787 0cf50771 a60f66a2
+ * ee628536 57addf04 cdde07fa 414e11f1
+ * 2b4d81b9 b4e8ac53 8ea30666 688d881f
+ * 6c348421 992f31b9 4f8806ed 8fccff4c
+ * 9123b896 42527ad6 13b109bf 75167485
+ * f1268bf8 84b4cd23 d29a0934 925703d6
+ * 34098f77 67f1be74 91e708a8 bb949a38
+ * 73708aef 4a36239e 50cc0823 5cd5ed6b
+ * be578668 a17b58c1 171d0b90 e813a9e4
+ * f58a89d7 19b11042 d6360b1b 0f52deb7
+ * 30a58d58 faf46315 954b0a87 26914759
+ * 77dc88c0 d733feff 54600a0c c1d0300a
+ * aaeb9457 2c6e95b0 1ae90de0 4f1dce47
+ * f87e8fa7 bebf77e1 dbc20d6b a85cb914
+ * 3d518b28 5dfa04b6 98bf0cf7 819f20fa
+ * 7a288eb0 703d995c 59940c7c 66de57a9
+ * b70f8237 9b70e203 1e450fcf d2181326
+ * fcd28d88 23baaa80 df6e0f44 35596475
+ * 39fd8907 c0ffd9d7 9c130ed8 1c9afd9b
+ * 7e848c9f ed38443d 5d380e53 fbdb8ac8
+ * c3d3f068 76054f12 2461107d e92fea09
+ * c6f6923a 188d53af e54a10f6 0e6e9d5a
+ * 03d996b5 fbc820f8 a637116a 27ad04b4
+ * 44a0932d d60fbd12 671c11e1 c0ec73e7
+ * 89879faa 3d42c64d 20cd1252 742a3768
+ * c25a9015 85888ece e1e612d9 936b403b
+ * 0775949a 66cdfd99 a29b1345 baa8d9d5
+ * 400c9102 4b0a6073 63b013ce 5de9ae86
+ * 9d3b8d95 b0570b3c 2d391422 d32450cb
+ * cfae9665 2286e96d ec1214a9 34652798
+ * 0a8192ea c1c39a3a af6f1535 1da6be76
+ * 4df89772 ec0407d0 6e4415be fae7c925
+ * 80df9bf5 07497c8f 2995160d 4e218daa
+ * cb02944a bf83340c e8be1686 a960faf9
+ * 0e2d90c5 5cc6475b abc3171a 80a36317
+ * 4954955d 7101dab1 6ae81791 67e21444
+ * b443a9ea aa7c91de 36d118c3 9d389f8d
+ * d4469a84 6c9a262b f7fa1848 7a79e8de
+ * 11699e0b 8fdf557c b48719d4 53ba7130
+ * 56109b93 a218c896 75ac195f b4fb0663
+ * 9b379714 4955b3c9 327d1aec 003d42ec
+ * d0ea98ab f19ffb4a f3561a67 e77c35bf
+ * 15c59c24 12da881d b02b1bfb cebfac51
+ * 52bc99bc 3f1d15f7 71001b70 29fedb02
+ * 8f8b852b c4407eb8 3f891c9c a733254f
+ * dd1e9edb 56919ce9 fea21c17 4072521c
+ * 18319a54 b5d4efbe bddf1d8b 69b1cbf2
+ * 5f489fcc 98137254 7cf41d00 8ef0bca1
+ * 926f934b 735e090b 3b251eb3 3a36f82e
+ * d9b29cf4 cb944188 fa0e1e38 dd778f7d
+ * 1c9d987b 28d132df b9731fa4 f4b41693
+ * 5be49de3 0516af35 78581f2f 13f561c0
+ * 66336194 1eab249a 4bc123f8 d15cd711
+ * a956a1bf 20fe6eb7 8aea2373 361da042
+ * 6c79a530 c3bb1de0 c99722ef 1fde39ac
+ * 2b00a0a8 ee7c800a 08bc2264 f89f4eff
+ * e627ac2f 0531fb55 4f6d21d7 4c590a70
+ * adfaa390 bdfbb3d6 8e46215c ab187d23
+ * 68d5a71f 5ebec081 cd3b20c0 82dbe4cd
+ * 2faca287 73795d6b 0c10204b 659a939e
+ * f29bbe10 88243624 429927a7 eb576dd3
+ * a00ea5e0 1af5d475 83b2272c 0c161a80
+ * 6521a16f f9b0a722 c0cf26b0 25d5836e
+ * 2258a4f7 d4773ac8 01e4263b c294f43d
+ * ef7fa870 3f3a4197 46352588 7652b0b2
+ * a4a2a7cf 87f00914 871e2503 9113c7e1
+ * 618da340 64b57a43 c463249f b8d05e0f
+ * 26f4a6d8 4972e7a9 05482414 5f91295c
+ * dbe39a6f 920facc6 59712b46 a54ba295
+ * bbe6a901 54e91b33 985a2bcd 420ad5c6
+ * 7ec9ad8e b7ac6864 db272a51 6bc94c28
+ * 39b0a816 9a6bf58e 1a0c2ada 8c883b7b
+ * f497a491 71268ed1 5ddd2969 384e7ff4
+ * bf4aab2e c9ecc652 9cf629e2 df0f08a7
+ * 7a65afa1 2aa9b505 df8b287e f6cc9149
+ * 3d1caa39 076e28ef 1ea028f5 118de61a
+ * e02bb6ae fc3343a0 50292f19 9f401857
+ * b2bead5e 6ee2a1f1 91022f92 78016f04
+ * 7791a9d1 8da7d2a6 d27f2e0e 51c2f6ea
+ * 30e8ac49 a0604f4c 13542e85 b68381b9
+ * fdcfa0ce 4b2d3413 54852d36 0245c536
+ * b612af71 f3e77c90 95ae2dbd e504b265
+ * 733dabfe 10a20fc7 d6d32c21 ccc72b8b
+ * 3444ae66 3d65922d 17f82caa 2b865cd8
+ * 8913d291 a6589902 6ea13284 39723c19
+ * 8c36b0c3 c8d085bf af8a320f de334b4a
+ * 4919b44c 2b95f6e8 ecf73393 f7f0d2a4
+ * 0e60b1d4 06526b02 2ddc3318 10b1a5f7
+ * c347bd53 ed1f105d 6a0d30ab a477e178
+ * 889ab2ec 55d558de ab263020 4336962b
+ * 4db5b663 b6902b89 e85b31bc 6af50fc5
+ * 0accb3fb 9b57b663 29703137 8db47896
+ * d7fbaf6c 600add2c 67f936db 037986db
+ * 856eb49c f2db3f7d a6d23650 e438f188
+ * 4041b013 119e4c2a e5af37cc cdfb6866
+ * 0738b58b 3c59d1c0 24843747 2aba1f35
+ * ca1fb90c d714aa9f 635534f4 9e7c5bba
+ * 81c2b6b3 6fdee21c a27e347f 793d2ce9
+ * 44edb23c 8c9b914b e10335e3 50feb507
+ * 0394b7a4 a15c0ca1 20283568 b7bfc254
+ * fe838b13 7a2147ce 7c113a3a 4d65499d
+ * 9e86b87d bcc7f03b bd3a3ab1 aa243ece
+ * 5ba9bcf2 5f82836c fe473b2d 83e7a720
+ * 1cd0b96a 72451e86 3f6c3ba6 64a6d073
+ * d1f7b5ed 990865d9 78bd3815 d06094fc
+ * 9a2aba52 21c22d5a b996389e 3721e3af
+ * 5f05bedd c2875e0d faeb3902 1ee27a41
+ * 187cbb45 ef40c3e7 3bc03989 f9a30d12
+ * c54ba7d2 141da8a8 75493e65 776ef35f
+ * 97debc22 86cc4af9 b4623eee 902f840c
+ * 52f1b8ad 658939ae f71f3f72 b9ec1de2
+ * 1588bd35 484ea444 36343ff9 5ead6ab1
+ * d8afb1b2 a303df1b 71e53c4a ea6b2e3e
+ * 9372be0d 1bc99798 b0ce3cc1 0d2a596d
+ * 565dba82 f88ce4cf f3b33d5d 24e9c083
+ * 1124bf1a d54b7925 32983dd6 c3a8b7d0
+ *
+ * Subkey Generation:
+ * L = 2c645dcd 72114961 d8b9c864 7aac2c5b
+ * K1 = 58c8bb9a e42292c3 b17390c8 f55858b6
+ * K2 = b1917735 c8452587 62e72191 eab0b16c
+ *
+ * MAC generation:
+ * C129 = ebd5ccb0 b61ca905 29138303 f3377d22
+ * MACT = ebd5ccb0
+ */
+static const uint8_t EIA2_128_K_8[16] = {
+ 0xb3, 0x12, 0x0f, 0xfd, 0xb2, 0xcf, 0x6a, 0xf4,
+ 0xe7, 0x3e, 0xaf, 0x2e, 0xf4, 0xeb, 0xec, 0x69
+};
+
+static const uint8_t EIA2_128_SK1_8[16] = {
+ 0x58, 0xc8, 0xbb, 0x9a, 0xe4, 0x22, 0x92, 0xc3,
+ 0xb1, 0x73, 0x90, 0xc8, 0xf5, 0x58, 0x58, 0xb6
+};
+
+static const uint8_t EIA2_128_SK2_8[16] = {
+ 0xb1, 0x91, 0x77, 0x35, 0xc8, 0x45, 0x25, 0x87,
+ 0x62, 0xe7, 0x21, 0x91, 0xea, 0xb0, 0xb1, 0x6c
+};
+
+static const uint8_t EIA2_128_T_8[4] = {
+ 0xeb, 0xd5, 0xcc, 0xb0
+};
+
+static const uint8_t EIA2_128_M_8[2064] = {
+ 0x29, 0x6f, 0x39, 0x3c, 0x5c, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0xe0, 0x95, 0x80, 0x45, 0xf3, 0xa0, 0xbb, 0xa4,
+ 0xe3, 0x96, 0x83, 0x46, 0xf0, 0xa3, 0xb8, 0xa7,
+ 0xc0, 0x2a, 0x01, 0x8a, 0xe6, 0x40, 0x76, 0x52,
+ 0x26, 0xb9, 0x87, 0xc9, 0x13, 0xe6, 0xcb, 0xf0,
+ 0x83, 0x57, 0x00, 0x16, 0xcf, 0x83, 0xef, 0xbc,
+ 0x61, 0xc0, 0x82, 0x51, 0x3e, 0x21, 0x56, 0x1a,
+ 0x42, 0x7c, 0x00, 0x9d, 0x28, 0xc2, 0x98, 0xef,
+ 0xac, 0xe7, 0x8e, 0xd6, 0xd5, 0x6c, 0x2d, 0x45,
+ 0x05, 0xad, 0x03, 0x2e, 0x9c, 0x04, 0xdc, 0x60,
+ 0xe7, 0x3a, 0x81, 0x69, 0x6d, 0xa6, 0x65, 0xc6,
+ 0xc4, 0x86, 0x03, 0xa5, 0x7b, 0x45, 0xab, 0x33,
+ 0x22, 0x15, 0x85, 0xe6, 0x8e, 0xe3, 0x16, 0x91,
+ 0x87, 0xfb, 0x02, 0x39, 0x52, 0x86, 0x32, 0xdd,
+ 0x65, 0x6c, 0x80, 0x7e, 0xa3, 0x24, 0x8b, 0x7b,
+ 0x46, 0xd0, 0x02, 0xb2, 0xb5, 0xc7, 0x45, 0x8e,
+ 0xb8, 0x5b, 0x9c, 0xe9, 0x58, 0x79, 0xe0, 0x34,
+ 0x08, 0x59, 0x05, 0x5e, 0x3b, 0x0a, 0xbb, 0xc3,
+ 0xea, 0xce, 0x87, 0x19, 0xca, 0xa8, 0x02, 0x65,
+ 0xc9, 0x72, 0x05, 0xd5, 0xdc, 0x4b, 0xcc, 0x90,
+ 0x2f, 0xe1, 0x83, 0x96, 0x29, 0xed, 0x71, 0x32,
+ 0x8a, 0x0f, 0x04, 0x49, 0xf5, 0x88, 0x55, 0x7e,
+ 0x68, 0x98, 0x86, 0x0e, 0x04, 0x2a, 0xec, 0xd8,
+ 0x4b, 0x24, 0x04, 0xc2, 0x12, 0xc9, 0x22, 0x2d,
+ 0xa5, 0xbf, 0x8a, 0x89, 0xef, 0x67, 0x97, 0x87,
+ 0x0c, 0xf5, 0x07, 0x71, 0xa6, 0x0f, 0x66, 0xa2,
+ 0xee, 0x62, 0x85, 0x36, 0x57, 0xad, 0xdf, 0x04,
+ 0xcd, 0xde, 0x07, 0xfa, 0x41, 0x4e, 0x11, 0xf1,
+ 0x2b, 0x4d, 0x81, 0xb9, 0xb4, 0xe8, 0xac, 0x53,
+ 0x8e, 0xa3, 0x06, 0x66, 0x68, 0x8d, 0x88, 0x1f,
+ 0x6c, 0x34, 0x84, 0x21, 0x99, 0x2f, 0x31, 0xb9,
+ 0x4f, 0x88, 0x06, 0xed, 0x8f, 0xcc, 0xff, 0x4c,
+ 0x91, 0x23, 0xb8, 0x96, 0x42, 0x52, 0x7a, 0xd6,
+ 0x13, 0xb1, 0x09, 0xbf, 0x75, 0x16, 0x74, 0x85,
+ 0xf1, 0x26, 0x8b, 0xf8, 0x84, 0xb4, 0xcd, 0x23,
+ 0xd2, 0x9a, 0x09, 0x34, 0x92, 0x57, 0x03, 0xd6,
+ 0x34, 0x09, 0x8f, 0x77, 0x67, 0xf1, 0xbe, 0x74,
+ 0x91, 0xe7, 0x08, 0xa8, 0xbb, 0x94, 0x9a, 0x38,
+ 0x73, 0x70, 0x8a, 0xef, 0x4a, 0x36, 0x23, 0x9e,
+ 0x50, 0xcc, 0x08, 0x23, 0x5c, 0xd5, 0xed, 0x6b,
+ 0xbe, 0x57, 0x86, 0x68, 0xa1, 0x7b, 0x58, 0xc1,
+ 0x17, 0x1d, 0x0b, 0x90, 0xe8, 0x13, 0xa9, 0xe4,
+ 0xf5, 0x8a, 0x89, 0xd7, 0x19, 0xb1, 0x10, 0x42,
+ 0xd6, 0x36, 0x0b, 0x1b, 0x0f, 0x52, 0xde, 0xb7,
+ 0x30, 0xa5, 0x8d, 0x58, 0xfa, 0xf4, 0x63, 0x15,
+ 0x95, 0x4b, 0x0a, 0x87, 0x26, 0x91, 0x47, 0x59,
+ 0x77, 0xdc, 0x88, 0xc0, 0xd7, 0x33, 0xfe, 0xff,
+ 0x54, 0x60, 0x0a, 0x0c, 0xc1, 0xd0, 0x30, 0x0a,
+ 0xaa, 0xeb, 0x94, 0x57, 0x2c, 0x6e, 0x95, 0xb0,
+ 0x1a, 0xe9, 0x0d, 0xe0, 0x4f, 0x1d, 0xce, 0x47,
+ 0xf8, 0x7e, 0x8f, 0xa7, 0xbe, 0xbf, 0x77, 0xe1,
+ 0xdb, 0xc2, 0x0d, 0x6b, 0xa8, 0x5c, 0xb9, 0x14,
+ 0x3d, 0x51, 0x8b, 0x28, 0x5d, 0xfa, 0x04, 0xb6,
+ 0x98, 0xbf, 0x0c, 0xf7, 0x81, 0x9f, 0x20, 0xfa,
+ 0x7a, 0x28, 0x8e, 0xb0, 0x70, 0x3d, 0x99, 0x5c,
+ 0x59, 0x94, 0x0c, 0x7c, 0x66, 0xde, 0x57, 0xa9,
+ 0xb7, 0x0f, 0x82, 0x37, 0x9b, 0x70, 0xe2, 0x03,
+ 0x1e, 0x45, 0x0f, 0xcf, 0xd2, 0x18, 0x13, 0x26,
+ 0xfc, 0xd2, 0x8d, 0x88, 0x23, 0xba, 0xaa, 0x80,
+ 0xdf, 0x6e, 0x0f, 0x44, 0x35, 0x59, 0x64, 0x75,
+ 0x39, 0xfd, 0x89, 0x07, 0xc0, 0xff, 0xd9, 0xd7,
+ 0x9c, 0x13, 0x0e, 0xd8, 0x1c, 0x9a, 0xfd, 0x9b,
+ 0x7e, 0x84, 0x8c, 0x9f, 0xed, 0x38, 0x44, 0x3d,
+ 0x5d, 0x38, 0x0e, 0x53, 0xfb, 0xdb, 0x8a, 0xc8,
+ 0xc3, 0xd3, 0xf0, 0x68, 0x76, 0x05, 0x4f, 0x12,
+ 0x24, 0x61, 0x10, 0x7d, 0xe9, 0x2f, 0xea, 0x09,
+ 0xc6, 0xf6, 0x92, 0x3a, 0x18, 0x8d, 0x53, 0xaf,
+ 0xe5, 0x4a, 0x10, 0xf6, 0x0e, 0x6e, 0x9d, 0x5a,
+ 0x03, 0xd9, 0x96, 0xb5, 0xfb, 0xc8, 0x20, 0xf8,
+ 0xa6, 0x37, 0x11, 0x6a, 0x27, 0xad, 0x04, 0xb4,
+ 0x44, 0xa0, 0x93, 0x2d, 0xd6, 0x0f, 0xbd, 0x12,
+ 0x67, 0x1c, 0x11, 0xe1, 0xc0, 0xec, 0x73, 0xe7,
+ 0x89, 0x87, 0x9f, 0xaa, 0x3d, 0x42, 0xc6, 0x4d,
+ 0x20, 0xcd, 0x12, 0x52, 0x74, 0x2a, 0x37, 0x68,
+ 0xc2, 0x5a, 0x90, 0x15, 0x85, 0x88, 0x8e, 0xce,
+ 0xe1, 0xe6, 0x12, 0xd9, 0x93, 0x6b, 0x40, 0x3b,
+ 0x07, 0x75, 0x94, 0x9a, 0x66, 0xcd, 0xfd, 0x99,
+ 0xa2, 0x9b, 0x13, 0x45, 0xba, 0xa8, 0xd9, 0xd5,
+ 0x40, 0x0c, 0x91, 0x02, 0x4b, 0x0a, 0x60, 0x73,
+ 0x63, 0xb0, 0x13, 0xce, 0x5d, 0xe9, 0xae, 0x86,
+ 0x9d, 0x3b, 0x8d, 0x95, 0xb0, 0x57, 0x0b, 0x3c,
+ 0x2d, 0x39, 0x14, 0x22, 0xd3, 0x24, 0x50, 0xcb,
+ 0xcf, 0xae, 0x96, 0x65, 0x22, 0x86, 0xe9, 0x6d,
+ 0xec, 0x12, 0x14, 0xa9, 0x34, 0x65, 0x27, 0x98,
+ 0x0a, 0x81, 0x92, 0xea, 0xc1, 0xc3, 0x9a, 0x3a,
+ 0xaf, 0x6f, 0x15, 0x35, 0x1d, 0xa6, 0xbe, 0x76,
+ 0x4d, 0xf8, 0x97, 0x72, 0xec, 0x04, 0x07, 0xd0,
+ 0x6e, 0x44, 0x15, 0xbe, 0xfa, 0xe7, 0xc9, 0x25,
+ 0x80, 0xdf, 0x9b, 0xf5, 0x07, 0x49, 0x7c, 0x8f,
+ 0x29, 0x95, 0x16, 0x0d, 0x4e, 0x21, 0x8d, 0xaa,
+ 0xcb, 0x02, 0x94, 0x4a, 0xbf, 0x83, 0x34, 0x0c,
+ 0xe8, 0xbe, 0x16, 0x86, 0xa9, 0x60, 0xfa, 0xf9,
+ 0x0e, 0x2d, 0x90, 0xc5, 0x5c, 0xc6, 0x47, 0x5b,
+ 0xab, 0xc3, 0x17, 0x1a, 0x80, 0xa3, 0x63, 0x17,
+ 0x49, 0x54, 0x95, 0x5d, 0x71, 0x01, 0xda, 0xb1,
+ 0x6a, 0xe8, 0x17, 0x91, 0x67, 0xe2, 0x14, 0x44,
+ 0xb4, 0x43, 0xa9, 0xea, 0xaa, 0x7c, 0x91, 0xde,
+ 0x36, 0xd1, 0x18, 0xc3, 0x9d, 0x38, 0x9f, 0x8d,
+ 0xd4, 0x46, 0x9a, 0x84, 0x6c, 0x9a, 0x26, 0x2b,
+ 0xf7, 0xfa, 0x18, 0x48, 0x7a, 0x79, 0xe8, 0xde,
+ 0x11, 0x69, 0x9e, 0x0b, 0x8f, 0xdf, 0x55, 0x7c,
+ 0xb4, 0x87, 0x19, 0xd4, 0x53, 0xba, 0x71, 0x30,
+ 0x56, 0x10, 0x9b, 0x93, 0xa2, 0x18, 0xc8, 0x96,
+ 0x75, 0xac, 0x19, 0x5f, 0xb4, 0xfb, 0x06, 0x63,
+ 0x9b, 0x37, 0x97, 0x14, 0x49, 0x55, 0xb3, 0xc9,
+ 0x32, 0x7d, 0x1a, 0xec, 0x00, 0x3d, 0x42, 0xec,
+ 0xd0, 0xea, 0x98, 0xab, 0xf1, 0x9f, 0xfb, 0x4a,
+ 0xf3, 0x56, 0x1a, 0x67, 0xe7, 0x7c, 0x35, 0xbf,
+ 0x15, 0xc5, 0x9c, 0x24, 0x12, 0xda, 0x88, 0x1d,
+ 0xb0, 0x2b, 0x1b, 0xfb, 0xce, 0xbf, 0xac, 0x51,
+ 0x52, 0xbc, 0x99, 0xbc, 0x3f, 0x1d, 0x15, 0xf7,
+ 0x71, 0x00, 0x1b, 0x70, 0x29, 0xfe, 0xdb, 0x02,
+ 0x8f, 0x8b, 0x85, 0x2b, 0xc4, 0x40, 0x7e, 0xb8,
+ 0x3f, 0x89, 0x1c, 0x9c, 0xa7, 0x33, 0x25, 0x4f,
+ 0xdd, 0x1e, 0x9e, 0xdb, 0x56, 0x91, 0x9c, 0xe9,
+ 0xfe, 0xa2, 0x1c, 0x17, 0x40, 0x72, 0x52, 0x1c,
+ 0x18, 0x31, 0x9a, 0x54, 0xb5, 0xd4, 0xef, 0xbe,
+ 0xbd, 0xdf, 0x1d, 0x8b, 0x69, 0xb1, 0xcb, 0xf2,
+ 0x5f, 0x48, 0x9f, 0xcc, 0x98, 0x13, 0x72, 0x54,
+ 0x7c, 0xf4, 0x1d, 0x00, 0x8e, 0xf0, 0xbc, 0xa1,
+ 0x92, 0x6f, 0x93, 0x4b, 0x73, 0x5e, 0x09, 0x0b,
+ 0x3b, 0x25, 0x1e, 0xb3, 0x3a, 0x36, 0xf8, 0x2e,
+ 0xd9, 0xb2, 0x9c, 0xf4, 0xcb, 0x94, 0x41, 0x88,
+ 0xfa, 0x0e, 0x1e, 0x38, 0xdd, 0x77, 0x8f, 0x7d,
+ 0x1c, 0x9d, 0x98, 0x7b, 0x28, 0xd1, 0x32, 0xdf,
+ 0xb9, 0x73, 0x1f, 0xa4, 0xf4, 0xb4, 0x16, 0x93,
+ 0x5b, 0xe4, 0x9d, 0xe3, 0x05, 0x16, 0xaf, 0x35,
+ 0x78, 0x58, 0x1f, 0x2f, 0x13, 0xf5, 0x61, 0xc0,
+ 0x66, 0x33, 0x61, 0x94, 0x1e, 0xab, 0x24, 0x9a,
+ 0x4b, 0xc1, 0x23, 0xf8, 0xd1, 0x5c, 0xd7, 0x11,
+ 0xa9, 0x56, 0xa1, 0xbf, 0x20, 0xfe, 0x6e, 0xb7,
+ 0x8a, 0xea, 0x23, 0x73, 0x36, 0x1d, 0xa0, 0x42,
+ 0x6c, 0x79, 0xa5, 0x30, 0xc3, 0xbb, 0x1d, 0xe0,
+ 0xc9, 0x97, 0x22, 0xef, 0x1f, 0xde, 0x39, 0xac,
+ 0x2b, 0x00, 0xa0, 0xa8, 0xee, 0x7c, 0x80, 0x0a,
+ 0x08, 0xbc, 0x22, 0x64, 0xf8, 0x9f, 0x4e, 0xff,
+ 0xe6, 0x27, 0xac, 0x2f, 0x05, 0x31, 0xfb, 0x55,
+ 0x4f, 0x6d, 0x21, 0xd7, 0x4c, 0x59, 0x0a, 0x70,
+ 0xad, 0xfa, 0xa3, 0x90, 0xbd, 0xfb, 0xb3, 0xd6,
+ 0x8e, 0x46, 0x21, 0x5c, 0xab, 0x18, 0x7d, 0x23,
+ 0x68, 0xd5, 0xa7, 0x1f, 0x5e, 0xbe, 0xc0, 0x81,
+ 0xcd, 0x3b, 0x20, 0xc0, 0x82, 0xdb, 0xe4, 0xcd,
+ 0x2f, 0xac, 0xa2, 0x87, 0x73, 0x79, 0x5d, 0x6b,
+ 0x0c, 0x10, 0x20, 0x4b, 0x65, 0x9a, 0x93, 0x9e,
+ 0xf2, 0x9b, 0xbe, 0x10, 0x88, 0x24, 0x36, 0x24,
+ 0x42, 0x99, 0x27, 0xa7, 0xeb, 0x57, 0x6d, 0xd3,
+ 0xa0, 0x0e, 0xa5, 0xe0, 0x1a, 0xf5, 0xd4, 0x75,
+ 0x83, 0xb2, 0x27, 0x2c, 0x0c, 0x16, 0x1a, 0x80,
+ 0x65, 0x21, 0xa1, 0x6f, 0xf9, 0xb0, 0xa7, 0x22,
+ 0xc0, 0xcf, 0x26, 0xb0, 0x25, 0xd5, 0x83, 0x6e,
+ 0x22, 0x58, 0xa4, 0xf7, 0xd4, 0x77, 0x3a, 0xc8,
+ 0x01, 0xe4, 0x26, 0x3b, 0xc2, 0x94, 0xf4, 0x3d,
+ 0xef, 0x7f, 0xa8, 0x70, 0x3f, 0x3a, 0x41, 0x97,
+ 0x46, 0x35, 0x25, 0x88, 0x76, 0x52, 0xb0, 0xb2,
+ 0xa4, 0xa2, 0xa7, 0xcf, 0x87, 0xf0, 0x09, 0x14,
+ 0x87, 0x1e, 0x25, 0x03, 0x91, 0x13, 0xc7, 0xe1,
+ 0x61, 0x8d, 0xa3, 0x40, 0x64, 0xb5, 0x7a, 0x43,
+ 0xc4, 0x63, 0x24, 0x9f, 0xb8, 0xd0, 0x5e, 0x0f,
+ 0x26, 0xf4, 0xa6, 0xd8, 0x49, 0x72, 0xe7, 0xa9,
+ 0x05, 0x48, 0x24, 0x14, 0x5f, 0x91, 0x29, 0x5c,
+ 0xdb, 0xe3, 0x9a, 0x6f, 0x92, 0x0f, 0xac, 0xc6,
+ 0x59, 0x71, 0x2b, 0x46, 0xa5, 0x4b, 0xa2, 0x95,
+ 0xbb, 0xe6, 0xa9, 0x01, 0x54, 0xe9, 0x1b, 0x33,
+ 0x98, 0x5a, 0x2b, 0xcd, 0x42, 0x0a, 0xd5, 0xc6,
+ 0x7e, 0xc9, 0xad, 0x8e, 0xb7, 0xac, 0x68, 0x64,
+ 0xdb, 0x27, 0x2a, 0x51, 0x6b, 0xc9, 0x4c, 0x28,
+ 0x39, 0xb0, 0xa8, 0x16, 0x9a, 0x6b, 0xf5, 0x8e,
+ 0x1a, 0x0c, 0x2a, 0xda, 0x8c, 0x88, 0x3b, 0x7b,
+ 0xf4, 0x97, 0xa4, 0x91, 0x71, 0x26, 0x8e, 0xd1,
+ 0x5d, 0xdd, 0x29, 0x69, 0x38, 0x4e, 0x7f, 0xf4,
+ 0xbf, 0x4a, 0xab, 0x2e, 0xc9, 0xec, 0xc6, 0x52,
+ 0x9c, 0xf6, 0x29, 0xe2, 0xdf, 0x0f, 0x08, 0xa7,
+ 0x7a, 0x65, 0xaf, 0xa1, 0x2a, 0xa9, 0xb5, 0x05,
+ 0xdf, 0x8b, 0x28, 0x7e, 0xf6, 0xcc, 0x91, 0x49,
+ 0x3d, 0x1c, 0xaa, 0x39, 0x07, 0x6e, 0x28, 0xef,
+ 0x1e, 0xa0, 0x28, 0xf5, 0x11, 0x8d, 0xe6, 0x1a,
+ 0xe0, 0x2b, 0xb6, 0xae, 0xfc, 0x33, 0x43, 0xa0,
+ 0x50, 0x29, 0x2f, 0x19, 0x9f, 0x40, 0x18, 0x57,
+ 0xb2, 0xbe, 0xad, 0x5e, 0x6e, 0xe2, 0xa1, 0xf1,
+ 0x91, 0x02, 0x2f, 0x92, 0x78, 0x01, 0x6f, 0x04,
+ 0x77, 0x91, 0xa9, 0xd1, 0x8d, 0xa7, 0xd2, 0xa6,
+ 0xd2, 0x7f, 0x2e, 0x0e, 0x51, 0xc2, 0xf6, 0xea,
+ 0x30, 0xe8, 0xac, 0x49, 0xa0, 0x60, 0x4f, 0x4c,
+ 0x13, 0x54, 0x2e, 0x85, 0xb6, 0x83, 0x81, 0xb9,
+ 0xfd, 0xcf, 0xa0, 0xce, 0x4b, 0x2d, 0x34, 0x13,
+ 0x54, 0x85, 0x2d, 0x36, 0x02, 0x45, 0xc5, 0x36,
+ 0xb6, 0x12, 0xaf, 0x71, 0xf3, 0xe7, 0x7c, 0x90,
+ 0x95, 0xae, 0x2d, 0xbd, 0xe5, 0x04, 0xb2, 0x65,
+ 0x73, 0x3d, 0xab, 0xfe, 0x10, 0xa2, 0x0f, 0xc7,
+ 0xd6, 0xd3, 0x2c, 0x21, 0xcc, 0xc7, 0x2b, 0x8b,
+ 0x34, 0x44, 0xae, 0x66, 0x3d, 0x65, 0x92, 0x2d,
+ 0x17, 0xf8, 0x2c, 0xaa, 0x2b, 0x86, 0x5c, 0xd8,
+ 0x89, 0x13, 0xd2, 0x91, 0xa6, 0x58, 0x99, 0x02,
+ 0x6e, 0xa1, 0x32, 0x84, 0x39, 0x72, 0x3c, 0x19,
+ 0x8c, 0x36, 0xb0, 0xc3, 0xc8, 0xd0, 0x85, 0xbf,
+ 0xaf, 0x8a, 0x32, 0x0f, 0xde, 0x33, 0x4b, 0x4a,
+ 0x49, 0x19, 0xb4, 0x4c, 0x2b, 0x95, 0xf6, 0xe8,
+ 0xec, 0xf7, 0x33, 0x93, 0xf7, 0xf0, 0xd2, 0xa4,
+ 0x0e, 0x60, 0xb1, 0xd4, 0x06, 0x52, 0x6b, 0x02,
+ 0x2d, 0xdc, 0x33, 0x18, 0x10, 0xb1, 0xa5, 0xf7,
+ 0xc3, 0x47, 0xbd, 0x53, 0xed, 0x1f, 0x10, 0x5d,
+ 0x6a, 0x0d, 0x30, 0xab, 0xa4, 0x77, 0xe1, 0x78,
+ 0x88, 0x9a, 0xb2, 0xec, 0x55, 0xd5, 0x58, 0xde,
+ 0xab, 0x26, 0x30, 0x20, 0x43, 0x36, 0x96, 0x2b,
+ 0x4d, 0xb5, 0xb6, 0x63, 0xb6, 0x90, 0x2b, 0x89,
+ 0xe8, 0x5b, 0x31, 0xbc, 0x6a, 0xf5, 0x0f, 0xc5,
+ 0x0a, 0xcc, 0xb3, 0xfb, 0x9b, 0x57, 0xb6, 0x63,
+ 0x29, 0x70, 0x31, 0x37, 0x8d, 0xb4, 0x78, 0x96,
+ 0xd7, 0xfb, 0xaf, 0x6c, 0x60, 0x0a, 0xdd, 0x2c,
+ 0x67, 0xf9, 0x36, 0xdb, 0x03, 0x79, 0x86, 0xdb,
+ 0x85, 0x6e, 0xb4, 0x9c, 0xf2, 0xdb, 0x3f, 0x7d,
+ 0xa6, 0xd2, 0x36, 0x50, 0xe4, 0x38, 0xf1, 0x88,
+ 0x40, 0x41, 0xb0, 0x13, 0x11, 0x9e, 0x4c, 0x2a,
+ 0xe5, 0xaf, 0x37, 0xcc, 0xcd, 0xfb, 0x68, 0x66,
+ 0x07, 0x38, 0xb5, 0x8b, 0x3c, 0x59, 0xd1, 0xc0,
+ 0x24, 0x84, 0x37, 0x47, 0x2a, 0xba, 0x1f, 0x35,
+ 0xca, 0x1f, 0xb9, 0x0c, 0xd7, 0x14, 0xaa, 0x9f,
+ 0x63, 0x55, 0x34, 0xf4, 0x9e, 0x7c, 0x5b, 0xba,
+ 0x81, 0xc2, 0xb6, 0xb3, 0x6f, 0xde, 0xe2, 0x1c,
+ 0xa2, 0x7e, 0x34, 0x7f, 0x79, 0x3d, 0x2c, 0xe9,
+ 0x44, 0xed, 0xb2, 0x3c, 0x8c, 0x9b, 0x91, 0x4b,
+ 0xe1, 0x03, 0x35, 0xe3, 0x50, 0xfe, 0xb5, 0x07,
+ 0x03, 0x94, 0xb7, 0xa4, 0xa1, 0x5c, 0x0c, 0xa1,
+ 0x20, 0x28, 0x35, 0x68, 0xb7, 0xbf, 0xc2, 0x54,
+ 0xfe, 0x83, 0x8b, 0x13, 0x7a, 0x21, 0x47, 0xce,
+ 0x7c, 0x11, 0x3a, 0x3a, 0x4d, 0x65, 0x49, 0x9d,
+ 0x9e, 0x86, 0xb8, 0x7d, 0xbc, 0xc7, 0xf0, 0x3b,
+ 0xbd, 0x3a, 0x3a, 0xb1, 0xaa, 0x24, 0x3e, 0xce,
+ 0x5b, 0xa9, 0xbc, 0xf2, 0x5f, 0x82, 0x83, 0x6c,
+ 0xfe, 0x47, 0x3b, 0x2d, 0x83, 0xe7, 0xa7, 0x20,
+ 0x1c, 0xd0, 0xb9, 0x6a, 0x72, 0x45, 0x1e, 0x86,
+ 0x3f, 0x6c, 0x3b, 0xa6, 0x64, 0xa6, 0xd0, 0x73,
+ 0xd1, 0xf7, 0xb5, 0xed, 0x99, 0x08, 0x65, 0xd9,
+ 0x78, 0xbd, 0x38, 0x15, 0xd0, 0x60, 0x94, 0xfc,
+ 0x9a, 0x2a, 0xba, 0x52, 0x21, 0xc2, 0x2d, 0x5a,
+ 0xb9, 0x96, 0x38, 0x9e, 0x37, 0x21, 0xe3, 0xaf,
+ 0x5f, 0x05, 0xbe, 0xdd, 0xc2, 0x87, 0x5e, 0x0d,
+ 0xfa, 0xeb, 0x39, 0x02, 0x1e, 0xe2, 0x7a, 0x41,
+ 0x18, 0x7c, 0xbb, 0x45, 0xef, 0x40, 0xc3, 0xe7,
+ 0x3b, 0xc0, 0x39, 0x89, 0xf9, 0xa3, 0x0d, 0x12,
+ 0xc5, 0x4b, 0xa7, 0xd2, 0x14, 0x1d, 0xa8, 0xa8,
+ 0x75, 0x49, 0x3e, 0x65, 0x77, 0x6e, 0xf3, 0x5f,
+ 0x97, 0xde, 0xbc, 0x22, 0x86, 0xcc, 0x4a, 0xf9,
+ 0xb4, 0x62, 0x3e, 0xee, 0x90, 0x2f, 0x84, 0x0c,
+ 0x52, 0xf1, 0xb8, 0xad, 0x65, 0x89, 0x39, 0xae,
+ 0xf7, 0x1f, 0x3f, 0x72, 0xb9, 0xec, 0x1d, 0xe2,
+ 0x15, 0x88, 0xbd, 0x35, 0x48, 0x4e, 0xa4, 0x44,
+ 0x36, 0x34, 0x3f, 0xf9, 0x5e, 0xad, 0x6a, 0xb1,
+ 0xd8, 0xaf, 0xb1, 0xb2, 0xa3, 0x03, 0xdf, 0x1b,
+ 0x71, 0xe5, 0x3c, 0x4a, 0xea, 0x6b, 0x2e, 0x3e,
+ 0x93, 0x72, 0xbe, 0x0d, 0x1b, 0xc9, 0x97, 0x98,
+ 0xb0, 0xce, 0x3c, 0xc1, 0x0d, 0x2a, 0x59, 0x6d,
+ 0x56, 0x5d, 0xba, 0x82, 0xf8, 0x8c, 0xe4, 0xcf,
+ 0xf3, 0xb3, 0x3d, 0x5d, 0x24, 0xe9, 0xc0, 0x83,
+ 0x11, 0x24, 0xbf, 0x1a, 0xd5, 0x4b, 0x79, 0x25,
+ 0x32, 0x98, 0x3d, 0xd6, 0xc3, 0xa8, 0xb7, 0xd0
+};
+
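+/*
+ * Note: the RFC 4493 tags above are reused below with truncated lengths
+ * (15, 12 and 4 bytes); in those cases only the first T_len bytes of the
+ * full 16-byte tag are produced and compared.
+ */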
+static const struct cmac_rfc4493_vector {
+ const uint8_t *key;
+ const uint8_t *sub_key1;
+ const uint8_t *sub_key2;
+ const uint8_t *M;
+ size_t len;
+ const uint8_t *T;
+ size_t T_len;
+ enum cmac_type type; /* vector type - std or 3gpp */
+} cmac_vectors[] = {
+ { key, sub_key1, sub_key2, M, 0, T_1, 16, CMAC },
+ { key, sub_key1, sub_key2, M, 16, T_2, 16, CMAC },
+ { key, sub_key1, sub_key2, M, 40, T_3, 16, CMAC },
+ { key, sub_key1, sub_key2, M, 64, T_4, 16, CMAC },
+ { key, sub_key1, sub_key2, M, 0, T_1, 15, CMAC },
+ { key, sub_key1, sub_key2, M, 16, T_2, 15, CMAC },
+ { key, sub_key1, sub_key2, M, 40, T_3, 15, CMAC },
+ { key, sub_key1, sub_key2, M, 64, T_4, 15, CMAC },
+ { key, sub_key1, sub_key2, M, 0, T_1, 12, CMAC },
+ { key, sub_key1, sub_key2, M, 16, T_2, 12, CMAC },
+ { key, sub_key1, sub_key2, M, 40, T_3, 12, CMAC },
+ { key, sub_key1, sub_key2, M, 64, T_4, 12, CMAC },
+ { key, sub_key1, sub_key2, M, 0, T_1, 4, CMAC },
+ { key, sub_key1, sub_key2, M, 16, T_2, 4, CMAC },
+ { key, sub_key1, sub_key2, M, 40, T_3, 4, CMAC },
+ { key, sub_key1, sub_key2, M, 64, T_4, 4, CMAC },
+ { key, sub_key1, sub_key2, M, 8, T_5, 16, CMAC },
+};
+
+static const struct cmac_rfc4493_vector cmac_3gpp_vectors[] = {
+ { EIA2_128_K_1, EIA2_128_SK1_1, EIA2_128_SK2_1,
+ EIA2_128_M_1, 122, EIA2_128_T_1, 4, CMAC_BITLEN },
+ { EIA2_128_K_2, EIA2_128_SK1_2, EIA2_128_SK2_2,
+ EIA2_128_M_2, 128, EIA2_128_T_2, 4, CMAC_BITLEN },
+ { EIA2_128_K_3, EIA2_128_SK1_3, EIA2_128_SK2_3,
+ EIA2_128_M_3, 318, EIA2_128_T_3, 4, CMAC_BITLEN },
+ { EIA2_128_K_4, EIA2_128_SK1_4, EIA2_128_SK2_4,
+ EIA2_128_M_4, 575, EIA2_128_T_4, 4, CMAC_BITLEN },
+ { EIA2_128_K_5, EIA2_128_SK1_5, EIA2_128_SK2_5,
+ EIA2_128_M_5, 832, EIA2_128_T_5, 4, CMAC_BITLEN },
+ { EIA2_128_K_6, EIA2_128_SK1_6, EIA2_128_SK2_6,
+ EIA2_128_M_6, 447, EIA2_128_T_6, 4, CMAC_BITLEN },
+ { EIA2_128_K_7, EIA2_128_SK1_7, EIA2_128_SK2_7,
+ EIA2_128_M_7, 2622, EIA2_128_T_7, 4, CMAC_BITLEN },
+ { EIA2_128_K_8, EIA2_128_SK1_8, EIA2_128_SK2_8,
+ EIA2_128_M_8, 16512, EIA2_128_T_8, 4, CMAC_BITLEN },
+};
+
+static int
+cmac_job_ok(const struct cmac_rfc4493_vector *vec,
+ const struct JOB_AES_HMAC *job,
+ const uint8_t *auth,
+ const uint8_t *padding,
+ const size_t sizeof_padding)
+{
+ const size_t auth_len = job->auth_tag_output_len_in_bytes;
+
+ if (job->status != STS_COMPLETED) {
+ printf("%d Error status:%d", __LINE__, job->status);
+ return 0;
+ }
+
+ /* hash checks */
+ if (memcmp(padding, &auth[sizeof_padding + auth_len],
+ sizeof_padding)) {
+ printf("hash overwrite tail\n");
+ hexdump(stderr, "Target",
+ &auth[sizeof_padding + auth_len], sizeof_padding);
+ return 0;
+ }
+
+ if (memcmp(padding, &auth[0], sizeof_padding)) {
+ printf("hash overwrite head\n");
+ hexdump(stderr, "Target", &auth[0], sizeof_padding);
+ return 0;
+ }
+
+ if (memcmp(vec->T, &auth[sizeof_padding], auth_len)) {
+ printf("hash mismatched\n");
+ hexdump(stderr, "Received", &auth[sizeof_padding],
+ auth_len);
+ hexdump(stderr, "Expected", vec->T,
+ auth_len);
+ return 0;
+ }
+ return 1;
+}
+
+static int
+test_cmac(struct MB_MGR *mb_mgr,
+ const struct cmac_rfc4493_vector *vec,
+ const int dir,
+ const int num_jobs,
+ const enum cmac_type type)
+{
+ DECLARE_ALIGNED(uint32_t expkey[4*15], 16);
+ DECLARE_ALIGNED(uint32_t dust[4*15], 16);
+ uint32_t skey1[4], skey2[4];
+ struct JOB_AES_HMAC *job;
+ uint8_t padding[16];
+ uint8_t **auths = malloc(num_jobs * sizeof(void *));
+ int i = 0, jobs_rx = 0, ret = -1;
+
+ if (auths == NULL) {
+ fprintf(stderr, "Can't allocate buffer memory\n");
+ goto end2;
+ }
+
+ memset(padding, -1, sizeof(padding));
+ memset(auths, 0, num_jobs * sizeof(void *));
+
+ for (i = 0; i < num_jobs; i++) {
+ auths[i] = malloc(16 + (sizeof(padding) * 2));
+ if (auths[i] == NULL) {
+ fprintf(stderr, "Can't allocate buffer memory\n");
+ goto end;
+ }
+
+ memset(auths[i], -1, 16 + (sizeof(padding) * 2));
+ }
+
+ IMB_AES_KEYEXP_128(mb_mgr, vec->key, expkey, dust);
+ IMB_AES_CMAC_SUBKEY_GEN_128(mb_mgr, expkey, skey1, skey2);
+
+ if (memcmp(vec->sub_key1, skey1, sizeof(skey1))) {
+ printf("sub-key1 mismatched\n");
+ hexdump(stderr, "Received", &skey1[0], sizeof(skey1));
+ hexdump(stderr, "Expected", vec->sub_key1, sizeof(skey1));
+ goto end;
+ }
+
+ if (memcmp(vec->sub_key2, skey2, sizeof(skey2))) {
+ printf("sub-key2 mismatched\n");
+ hexdump(stderr, "Received", &skey2[0], sizeof(skey2));
+ hexdump(stderr, "Expected", vec->sub_key2, sizeof(skey2));
+ goto end;
+ }
+
+ while ((job = IMB_FLUSH_JOB(mb_mgr)) != NULL)
+ ;
+
+ /**
+ * Submit all jobs then flush any outstanding jobs
+ */
+ for (i = 0; i < num_jobs; i++) {
+ job = IMB_GET_NEXT_JOB(mb_mgr);
+ job->cipher_direction = dir;
+ job->chain_order = HASH_CIPHER;
+ job->cipher_mode = NULL_CIPHER;
+
+ if (type == CMAC) {
+ job->hash_alg = AES_CMAC;
+ job->msg_len_to_hash_in_bytes = vec->len;
+ } else {
+ job->hash_alg = AES_CMAC_BITLEN;
+ /* check for std or 3gpp vectors;
+ scale len to bits if necessary */
+ if (vec->type == CMAC)
+ job->msg_len_to_hash_in_bits =
+ vec->len * 8;
+ else
+ job->msg_len_to_hash_in_bits =
+ vec->len;
+ }
+ job->u.CMAC._key_expanded = expkey;
+ job->u.CMAC._skey1 = skey1;
+ job->u.CMAC._skey2 = skey2;
+ job->src = vec->M;
+ job->hash_start_src_offset_in_bytes = 0;
+ job->auth_tag_output = auths[i] + sizeof(padding);
+ job->auth_tag_output_len_in_bytes = vec->T_len;
+
+ job->user_data = auths[i];
+
+ job = IMB_SUBMIT_JOB(mb_mgr);
+ if (job) {
+ jobs_rx++;
+ if (num_jobs < 4) {
+ printf("%d Unexpected return from submit_job\n",
+ __LINE__);
+ goto end;
+ }
+ if (!cmac_job_ok(vec, job, job->user_data, padding,
+ sizeof(padding)))
+ goto end;
+ }
+ }
+
+ while ((job = IMB_FLUSH_JOB(mb_mgr)) != NULL) {
+ jobs_rx++;
+
+ if (!cmac_job_ok(vec, job, job->user_data, padding,
+ sizeof(padding)))
+ goto end;
+ }
+
+ if (jobs_rx != num_jobs) {
+ printf("Expected %d jobs, received %d\n", num_jobs, jobs_rx);
+ goto end;
+ }
+
+ /**
+ * Submit each job and flush immediately
+ */
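+ /*
+ * With a single outstanding job the multi-buffer scheduler is not
+ * expected to return it from submit_job(); the job should only
+ * come back through flush_job(), which is what this loop verifies.
+ */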
+ for (i = 0; i < num_jobs; i++) {
+ struct JOB_AES_HMAC *first_job = NULL;
+
+ job = IMB_GET_NEXT_JOB(mb_mgr);
+ first_job = job;
+
+ job->cipher_direction = dir;
+ job->chain_order = HASH_CIPHER;
+ job->cipher_mode = NULL_CIPHER;
+
+ if (type == CMAC) {
+ job->hash_alg = AES_CMAC;
+ job->msg_len_to_hash_in_bytes = vec->len;
+ } else {
+ job->hash_alg = AES_CMAC_BITLEN;
+ if (vec->type == CMAC)
+ job->msg_len_to_hash_in_bits = vec->len * 8;
+ else
+ job->msg_len_to_hash_in_bits = vec->len;
+ }
+ job->u.CMAC._key_expanded = expkey;
+ job->u.CMAC._skey1 = skey1;
+ job->u.CMAC._skey2 = skey2;
+ job->src = vec->M;
+ job->hash_start_src_offset_in_bytes = 0;
+ job->auth_tag_output = auths[i] + sizeof(padding);
+ job->auth_tag_output_len_in_bytes = vec->T_len;
+
+ job->user_data = auths[i];
+
+ job = IMB_SUBMIT_JOB(mb_mgr);
+ if (job != NULL) {
+ printf("Received job, expected NULL\n");
+ goto end;
+ }
+
+ while ((job = IMB_FLUSH_JOB(mb_mgr)) != NULL) {
+ if (job != first_job) {
+ printf("Invalid return job received\n");
+ goto end;
+ }
+ if (!cmac_job_ok(vec, job, job->user_data, padding,
+ sizeof(padding)))
+ goto end;
+ }
+ }
+
+ ret = 0;
+
+ end:
+ while ((job = IMB_FLUSH_JOB(mb_mgr)) != NULL)
+ ;
+
+ for (i = 0; i < num_jobs; i++) {
+ if (auths[i] != NULL)
+ free(auths[i]);
+ }
+
+ end2:
+ if (auths != NULL)
+ free(auths);
+
+ return ret;
+}
+
+static int
+test_cmac_std_vectors(struct MB_MGR *mb_mgr, const int num_jobs)
+{
+ const int vectors_cnt = sizeof(cmac_vectors) / sizeof(cmac_vectors[0]);
+ int vect;
+ int errors = 0;
+
+ printf("AES-CMAC standard test vectors (N jobs = %d):\n", num_jobs);
+ for (vect = 1; vect <= vectors_cnt; vect++) {
+ const int idx = vect - 1;
+#ifdef DEBUG
+ printf("Standard vector [%d/%d] M len: %d, T len:%d\n",
+ vect, vectors_cnt,
+ (int) cmac_vectors[idx].len,
+ (int) cmac_vectors[idx].T_len);
+#else
+ printf(".");
+#endif
+
+ if (test_cmac(mb_mgr, &cmac_vectors[idx],
+ ENCRYPT, num_jobs, CMAC)) {
+ printf("error #%d encrypt\n", vect);
+ errors++;
+ }
+
+ if (test_cmac(mb_mgr, &cmac_vectors[idx],
+ DECRYPT, num_jobs, CMAC)) {
+ printf("error #%d decrypt\n", vect);
+ errors++;
+ }
+
+ }
+ printf("\n");
+ return errors;
+}
+
+static int
+test_cmac_bitlen_std_vectors(struct MB_MGR *mb_mgr, const int num_jobs)
+{
+ const int vectors_cnt = sizeof(cmac_vectors) / sizeof(cmac_vectors[0]);
+ int vect;
+ int errors = 0;
+
+
+ printf("AES-CMAC BITLEN standard test vectors "
+ "(N jobs = %d):\n", num_jobs);
+ for (vect = 1; vect <= vectors_cnt; vect++) {
+ const int idx = vect - 1;
+#ifdef DEBUG
+ printf("Standard vector [%d/%d] M len: %d (bits), "
+ "T len:%d\n",
+ vect, vectors_cnt,
+ (int) cmac_vectors[idx].len * 8,
+ (int) cmac_vectors[idx].T_len);
+#else
+ printf(".");
+#endif
+
+ if (test_cmac(mb_mgr, &cmac_vectors[idx],
+ ENCRYPT, num_jobs, CMAC_BITLEN)) {
+ printf("error #%d encrypt\n", vect);
+ errors++;
+ }
+
+ if (test_cmac(mb_mgr, &cmac_vectors[idx],
+ DECRYPT, num_jobs, CMAC_BITLEN)) {
+ printf("error #%d decrypt\n", vect);
+ errors++;
+ }
+
+ }
+ printf("\n");
+ return errors;
+}
+
+static int
+test_cmac_bitlen_3gpp_vectors(struct MB_MGR *mb_mgr, const int num_jobs)
+{
+ const int vectors_cnt =
+ sizeof(cmac_3gpp_vectors) / sizeof(cmac_3gpp_vectors[0]);
+ int vect;
+ int errors = 0;
+
+ printf("AES-CMAC BITLEN 3GPP test vectors (N jobs = %d):\n", num_jobs);
+ for (vect = 1; vect <= vectors_cnt; vect++) {
+ const int idx = vect - 1;
+#ifdef DEBUG
+ printf("3GPP vector [%d/%d] M len: %d (bits), "
+ "T len:%d (bytes)\n",
+ vect, vectors_cnt,
+ (int) cmac_3gpp_vectors[idx].len,
+ (int) cmac_3gpp_vectors[idx].T_len);
+#else
+ printf(".");
+#endif
+
+ if (test_cmac(mb_mgr, &cmac_3gpp_vectors[idx],
+ ENCRYPT, num_jobs, CMAC_BITLEN)) {
+ printf("error #%d encrypt\n", vect);
+ errors++;
+ }
+
+ if (test_cmac(mb_mgr, &cmac_3gpp_vectors[idx],
+ DECRYPT, num_jobs, CMAC_BITLEN)) {
+ printf("error #%d decrypt\n", vect);
+ errors++;
+ }
+
+ }
+ printf("\n");
+ return errors;
+}
+
+int
+cmac_test(const enum arch_type arch,
+ struct MB_MGR *mb_mgr)
+{
+ int i, errors = 0;
+
+ (void) arch; /* unused */
+
+ /* CMAC with standard vectors */
+ for (i = 1; i < 10; i++)
+ errors += test_cmac_std_vectors(mb_mgr, i);
+
+ /* CMAC BITLEN with standard vectors */
+ for (i = 1; i < 10; i++)
+ errors += test_cmac_bitlen_std_vectors(mb_mgr, i);
+
+ /* CMAC BITLEN with 3GPP vectors */
+ for (i = 1; i < 10; i++)
+ errors += test_cmac_bitlen_3gpp_vectors(mb_mgr, i);
+
+ if (0 == errors)
+ printf("...Pass\n");
+ else
+ printf("...Fail\n");
+
+ return errors;
+}
diff --git a/src/spdk/intel-ipsec-mb/LibTestApp/ctr_test.c b/src/spdk/intel-ipsec-mb/LibTestApp/ctr_test.c
new file mode 100644
index 000000000..ad4e9655b
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/LibTestApp/ctr_test.c
@@ -0,0 +1,1497 @@
+/*****************************************************************************
+ Copyright (c) 2017-2018, Intel Corporation
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*****************************************************************************/
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <intel-ipsec-mb.h>
+#include "gcm_ctr_vectors_test.h"
+#include "utils.h"
+
+#define BYTE_ROUND_UP(x) (((x) + 7) / 8)
+/*
+ * Test Vector from
+ * https://tools.ietf.org/html/rfc3686
+ *
+ */
+/*
+ Test Vector #1: Encrypting 16 octets using AES-CTR with 128-bit key
+ AES Key : AE 68 52 F8 12 10 67 CC 4B F7 A5 76 55 77 F3 9E
+ AES-CTR IV : 00 00 00 00 00 00 00 00
+ Nonce : 00 00 00 30
+ Plaintext String : 'Single block msg'
+ Plaintext : 53 69 6E 67 6C 65 20 62 6C 6F 63 6B 20 6D 73 67
+ Counter Block (1): 00 00 00 30 00 00 00 00 00 00 00 00 00 00 00 01
+ Key Stream (1): B7 60 33 28 DB C2 93 1B 41 0E 16 C8 06 7E 62 DF
+ Ciphertext : E4 09 5D 4F B7 A7 B3 79 2D 61 75 A3 26 13 11 B8
+*/
+static const uint8_t K1_CTR[] = {
+ 0xAE, 0x68, 0x52, 0xF8, 0x12, 0x10, 0x67, 0xCC,
+ 0x4B, 0xF7, 0xA5, 0x76, 0x55, 0x77, 0xF3, 0x9E,
+};
+static const uint8_t IV1_CTR[] = {
+ 0x00, 0x00, 0x00, 0x30, /* nonce */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+static const uint8_t P1_CTR[] = {
+ 0x53, 0x69, 0x6E, 0x67, 0x6C, 0x65, 0x20, 0x62,
+ 0x6C, 0x6F, 0x63, 0x6B, 0x20, 0x6D, 0x73, 0x67,
+};
+static const uint8_t C1_CTR[] = {
+ 0xE4, 0x09, 0x5D, 0x4F, 0xB7, 0xA7, 0xB3, 0x79,
+ 0x2D, 0x61, 0x75, 0xA3, 0x26, 0x13, 0x11, 0xB8,
+};
+static const uint8_t T1_CTR[] = { 0 };
+static const uint8_t A1_CTR[] = { 0 };
+#define A1_CTR_len 0
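+
+/*
+ * Illustrative helper (hypothetical name, not used by the tests): per
+ * RFC 3686 the 16-byte counter block fed to AES is
+ * Nonce(4) || IV(8) || big-endian block counter starting at 1, which is
+ * why IV1_CTR above carries the 4-byte nonce in front of the 8-byte IV.
+ */
+static void
+rfc3686_counter_block(uint8_t cb[16], const uint8_t nonce_and_iv[12],
+ const uint32_t block_number)
+{
+ memcpy(cb, nonce_and_iv, 12); /* nonce || IV */
+ cb[12] = (uint8_t) (block_number >> 24);
+ cb[13] = (uint8_t) (block_number >> 16);
+ cb[14] = (uint8_t) (block_number >> 8);
+ cb[15] = (uint8_t) block_number; /* counter, starts at 1 */
+}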
+
+/*
+ Test Vector #2: Encrypting 32 octets using AES-CTR with 128-bit key
+ AES Key : 7E 24 06 78 17 FA E0 D7 43 D6 CE 1F 32 53 91 63
+ AES-CTR IV : C0 54 3B 59 DA 48 D9 0B
+ Nonce : 00 6C B6 DB
+ Plaintext : 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F
+ : 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F
+ Counter Block (1): 00 6C B6 DB C0 54 3B 59 DA 48 D9 0B 00 00 00 01
+ Key Stream (1): 51 05 A3 05 12 8F 74 DE 71 04 4B E5 82 D7 DD 87
+ Counter Block (2): 00 6C B6 DB C0 54 3B 59 DA 48 D9 0B 00 00 00 02
+ Key Stream (2): FB 3F 0C EF 52 CF 41 DF E4 FF 2A C4 8D 5C A0 37
+ Ciphertext : 51 04 A1 06 16 8A 72 D9 79 0D 41 EE 8E DA D3 88
+ : EB 2E 1E FC 46 DA 57 C8 FC E6 30 DF 91 41 BE 28
+*/
+static const uint8_t K2_CTR[] = {
+ 0x7E, 0x24, 0x06, 0x78, 0x17, 0xFA, 0xE0, 0xD7,
+ 0x43, 0xD6, 0xCE, 0x1F, 0x32, 0x53, 0x91, 0x63,
+};
+static const uint8_t IV2_CTR[] = {
+ 0x00, 0x6C, 0xB6, 0xDB, /* nonce */
+ 0xC0, 0x54, 0x3B, 0x59, 0xDA, 0x48, 0xD9, 0x0B,
+};
+static const uint8_t P2_CTR[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F,
+};
+static const uint8_t C2_CTR[] = {
+ 0x51, 0x04, 0xA1, 0x06, 0x16, 0x8A, 0x72, 0xD9,
+ 0x79, 0x0D, 0x41, 0xEE, 0x8E, 0xDA, 0xD3, 0x88,
+ 0xEB, 0x2E, 0x1E, 0xFC, 0x46, 0xDA, 0x57, 0xC8,
+ 0xFC, 0xE6, 0x30, 0xDF, 0x91, 0x41, 0xBE, 0x28,
+};
+static const uint8_t T2_CTR[] = { 0 };
+static const uint8_t A2_CTR[] = { 0 };
+#define A2_CTR_len 0
+
+/*
+ Test Vector #3: Encrypting 36 octets using AES-CTR with 128-bit key
+ AES Key : 76 91 BE 03 5E 50 20 A8 AC 6E 61 85 29 F9 A0 DC
+ AES-CTR IV : 27 77 7F 3F 4A 17 86 F0
+ Nonce : 00 E0 01 7B
+ Plaintext : 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F
+ : 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F
+ : 20 21 22 23
+ Counter Block (1): 00 E0 01 7B 27 77 7F 3F 4A 17 86 F0 00 00 00 01
+ Key Stream (1): C1 CE 4A AB 9B 2A FB DE C7 4F 58 E2 E3 D6 7C D8
+ Counter Block (2): 00 E0 01 7B 27 77 7F 3F 4A 17 86 F0 00 00 00 02
+ Key Stream (2): 55 51 B6 38 CA 78 6E 21 CD 83 46 F1 B2 EE 0E 4C
+ Counter Block (3): 00 E0 01 7B 27 77 7F 3F 4A 17 86 F0 00 00 00 03
+ Key Stream (3): 05 93 25 0C 17 55 36 00 A6 3D FE CF 56 23 87 E9
+ Ciphertext : C1 CF 48 A8 9F 2F FD D9 CF 46 52 E9 EF DB 72 D7
+ : 45 40 A4 2B DE 6D 78 36 D5 9A 5C EA AE F3 10 53
+ : 25 B2 07 2F
+*/
+static const uint8_t K3_CTR[] = {
+ 0x76, 0x91, 0xBE, 0x03, 0x5E, 0x50, 0x20, 0xA8,
+ 0xAC, 0x6E, 0x61, 0x85, 0x29, 0xF9, 0xA0, 0xDC,
+};
+static const uint8_t IV3_CTR[] = {
+ 0x00, 0xE0, 0x01, 0x7B, /* nonce */
+ 0x27, 0x77, 0x7F, 0x3F, 0x4A, 0x17, 0x86, 0xF0,
+};
+static const uint8_t P3_CTR[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F,
+ 0x20, 0x21, 0x22, 0x23,
+};
+static const uint8_t C3_CTR[] = {
+ 0xC1, 0xCF, 0x48, 0xA8, 0x9F, 0x2F, 0xFD, 0xD9,
+ 0xCF, 0x46, 0x52, 0xE9, 0xEF, 0xDB, 0x72, 0xD7,
+ 0x45, 0x40, 0xA4, 0x2B, 0xDE, 0x6D, 0x78, 0x36,
+ 0xD5, 0x9A, 0x5C, 0xEA, 0xAE, 0xF3, 0x10, 0x53,
+ 0x25, 0xB2, 0x07, 0x2F,
+};
+static const uint8_t T3_CTR[] = { 0 };
+static const uint8_t A3_CTR[] = { 0 };
+#define A3_CTR_len 0
+
+/*
+ Test Vector #4: Encrypting 16 octets using AES-CTR with 192-bit key
+ AES Key : 16 AF 5B 14 5F C9 F5 79 C1 75 F9 3E 3B FB 0E ED
+ : 86 3D 06 CC FD B7 85 15
+ AES-CTR IV : 36 73 3C 14 7D 6D 93 CB
+ Nonce : 00 00 00 48
+ Plaintext String : 'Single block msg'
+ Plaintext : 53 69 6E 67 6C 65 20 62 6C 6F 63 6B 20 6D 73 67
+ Counter Block (1): 00 00 00 48 36 73 3C 14 7D 6D 93 CB 00 00 00 01
+ Key Stream (1): 18 3C 56 28 8E 3C E9 AA 22 16 56 CB 23 A6 9A 4F
+ Ciphertext : 4B 55 38 4F E2 59 C9 C8 4E 79 35 A0 03 CB E9 28
+*/
+static const uint8_t K4_CTR[] = {
+ 0x16, 0xAF, 0x5B, 0x14, 0x5F, 0xC9, 0xF5, 0x79,
+ 0xC1, 0x75, 0xF9, 0x3E, 0x3B, 0xFB, 0x0E, 0xED,
+ 0x86, 0x3D, 0x06, 0xCC, 0xFD, 0xB7, 0x85, 0x15,
+};
+static const uint8_t IV4_CTR[] = {
+ 0x00, 0x00, 0x00, 0x48, /* nonce */
+ 0x36, 0x73, 0x3C, 0x14, 0x7D, 0x6D, 0x93, 0xCB,
+};
+static const uint8_t P4_CTR[] = {
+ 0x53, 0x69, 0x6E, 0x67, 0x6C, 0x65, 0x20, 0x62,
+ 0x6C, 0x6F, 0x63, 0x6B, 0x20, 0x6D, 0x73, 0x67,
+};
+static const uint8_t C4_CTR[] = {
+ 0x4B, 0x55, 0x38, 0x4F, 0xE2, 0x59, 0xC9, 0xC8,
+ 0x4E, 0x79, 0x35, 0xA0, 0x03, 0xCB, 0xE9, 0x28,
+};
+static const uint8_t T4_CTR[] = { 0 };
+static const uint8_t A4_CTR[] = { 0 };
+#define A4_CTR_len 0
+
+/*
+ Test Vector #5: Encrypting 32 octets using AES-CTR with 192-bit key
+ AES Key : 7C 5C B2 40 1B 3D C3 3C 19 E7 34 08 19 E0 F6 9C
+ : 67 8C 3D B8 E6 F6 A9 1A
+ AES-CTR IV : 02 0C 6E AD C2 CB 50 0D
+ Nonce : 00 96 B0 3B
+ Plaintext : 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F
+ : 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F
+ Counter Block (1): 00 96 B0 3B 02 0C 6E AD C2 CB 50 0D 00 00 00 01
+ Key Stream (1): 45 33 41 FF 64 9E 25 35 76 D6 A0 F1 7D 3C C3 90
+ Counter Block (2): 00 96 B0 3B 02 0C 6E AD C2 CB 50 0D 00 00 00 02
+ Key Stream (2): 94 81 62 0F 4E C1 B1 8B E4 06 FA E4 5E E9 E5 1F
+ Ciphertext : 45 32 43 FC 60 9B 23 32 7E DF AA FA 71 31 CD 9F
+ : 84 90 70 1C 5A D4 A7 9C FC 1F E0 FF 42 F4 FB 00
+*/
+static const uint8_t K5_CTR[] = {
+ 0x7C, 0x5C, 0xB2, 0x40, 0x1B, 0x3D, 0xC3, 0x3C,
+ 0x19, 0xE7, 0x34, 0x08, 0x19, 0xE0, 0xF6, 0x9C,
+ 0x67, 0x8C, 0x3D, 0xB8, 0xE6, 0xF6, 0xA9, 0x1A,
+};
+static const uint8_t IV5_CTR[] = {
+ 0x00, 0x96, 0xB0, 0x3B, /* nonce */
+ 0x02, 0x0C, 0x6E, 0xAD, 0xC2, 0xCB, 0x50, 0x0D,
+};
+static const uint8_t P5_CTR[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F,
+};
+static const uint8_t C5_CTR[] = {
+ 0x45, 0x32, 0x43, 0xFC, 0x60, 0x9B, 0x23, 0x32,
+ 0x7E, 0xDF, 0xAA, 0xFA, 0x71, 0x31, 0xCD, 0x9F,
+ 0x84, 0x90, 0x70, 0x1C, 0x5A, 0xD4, 0xA7, 0x9C,
+ 0xFC, 0x1F, 0xE0, 0xFF, 0x42, 0xF4, 0xFB, 0x00,
+};
+static const uint8_t T5_CTR[] = { 0 };
+static const uint8_t A5_CTR[] = { 0 };
+#define A5_CTR_len 0
+
+/*
+ Test Vector #6: Encrypting 36 octets using AES-CTR with 192-bit key
+ AES Key : 02 BF 39 1E E8 EC B1 59 B9 59 61 7B 09 65 27 9B
+ : F5 9B 60 A7 86 D3 E0 FE
+ AES-CTR IV : 5C BD 60 27 8D CC 09 12
+ Nonce : 00 07 BD FD
+ Plaintext : 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F
+ : 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F
+ : 20 21 22 23
+ Counter Block (1): 00 07 BD FD 5C BD 60 27 8D CC 09 12 00 00 00 01
+ Key Stream (1): 96 88 3D C6 5A 59 74 28 5C 02 77 DA D1 FA E9 57
+ Counter Block (2): 00 07 BD FD 5C BD 60 27 8D CC 09 12 00 00 00 02
+ Key Stream (2): C2 99 AE 86 D2 84 73 9F 5D 2F D2 0A 7A 32 3F 97
+ Counter Block (3): 00 07 BD FD 5C BD 60 27 8D CC 09 12 00 00 00 03
+ Key Stream (3): 8B CF 2B 16 39 99 B2 26 15 B4 9C D4 FE 57 39 98
+ Ciphertext : 96 89 3F C5 5E 5C 72 2F 54 0B 7D D1 DD F7 E7 58
+ : D2 88 BC 95 C6 91 65 88 45 36 C8 11 66 2F 21 88
+ : AB EE 09 35
+*/
+static const uint8_t K6_CTR[] = {
+ 0x02, 0xBF, 0x39, 0x1E, 0xE8, 0xEC, 0xB1, 0x59,
+ 0xB9, 0x59, 0x61, 0x7B, 0x09, 0x65, 0x27, 0x9B,
+ 0xF5, 0x9B, 0x60, 0xA7, 0x86, 0xD3, 0xE0, 0xFE,
+};
+static const uint8_t IV6_CTR[] = {
+ 0x00, 0x07, 0xBD, 0xFD, /* nonce */
+ 0x5C, 0xBD, 0x60, 0x27, 0x8D, 0xCC, 0x09, 0x12,
+};
+static const uint8_t P6_CTR[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F,
+ 0x20, 0x21, 0x22, 0x23,
+};
+static const uint8_t C6_CTR[] = {
+ 0x96, 0x89, 0x3F, 0xC5, 0x5E, 0x5C, 0x72, 0x2F,
+ 0x54, 0x0B, 0x7D, 0xD1, 0xDD, 0xF7, 0xE7, 0x58,
+ 0xD2, 0x88, 0xBC, 0x95, 0xC6, 0x91, 0x65, 0x88,
+ 0x45, 0x36, 0xC8, 0x11, 0x66, 0x2F, 0x21, 0x88,
+ 0xAB, 0xEE, 0x09, 0x35,
+};
+static const uint8_t T6_CTR[] = { 0 };
+static const uint8_t A6_CTR[] = { 0 };
+#define A6_CTR_len 0
+
+/*
+ Test Vector #7: Encrypting 16 octets using AES-CTR with 256-bit key
+ AES Key : 77 6B EF F2 85 1D B0 6F 4C 8A 05 42 C8 69 6F 6C
+ : 6A 81 AF 1E EC 96 B4 D3 7F C1 D6 89 E6 C1 C1 04
+ AES-CTR IV : DB 56 72 C9 7A A8 F0 B2
+ Nonce : 00 00 00 60
+ Plaintext String : 'Single block msg'
+ Plaintext : 53 69 6E 67 6C 65 20 62 6C 6F 63 6B 20 6D 73 67
+ Counter Block (1): 00 00 00 60 DB 56 72 C9 7A A8 F0 B2 00 00 00 01
+ Key Stream (1): 47 33 BE 7A D3 E7 6E A5 3A 67 00 B7 51 8E 93 A7
+ Ciphertext : 14 5A D0 1D BF 82 4E C7 56 08 63 DC 71 E3 E0 C0
+*/
+static const uint8_t K7_CTR[] = {
+ 0x77, 0x6B, 0xEF, 0xF2, 0x85, 0x1D, 0xB0, 0x6F,
+ 0x4C, 0x8A, 0x05, 0x42, 0xC8, 0x69, 0x6F, 0x6C,
+ 0x6A, 0x81, 0xAF, 0x1E, 0xEC, 0x96, 0xB4, 0xD3,
+ 0x7F, 0xC1, 0xD6, 0x89, 0xE6, 0xC1, 0xC1, 0x04,
+};
+static const uint8_t IV7_CTR[] = {
+ 0x00, 0x00, 0x00, 0x60, /* nonce */
+ 0xDB, 0x56, 0x72, 0xC9, 0x7A, 0xA8, 0xF0, 0xB2,
+};
+static const uint8_t P7_CTR[] = {
+ 0x53, 0x69, 0x6E, 0x67, 0x6C, 0x65, 0x20, 0x62,
+ 0x6C, 0x6F, 0x63, 0x6B, 0x20, 0x6D, 0x73, 0x67,
+};
+static const uint8_t C7_CTR[] = {
+ 0x14, 0x5A, 0xD0, 0x1D, 0xBF, 0x82, 0x4E, 0xC7,
+ 0x56, 0x08, 0x63, 0xDC, 0x71, 0xE3, 0xE0, 0xC0,
+};
+static const uint8_t T7_CTR[] = { 0 };
+static const uint8_t A7_CTR[] = { 0 };
+#define A7_CTR_len 0
+
+/*
+ Test Vector #8: Encrypting 32 octets using AES-CTR with 256-bit key
+ AES Key : F6 D6 6D 6B D5 2D 59 BB 07 96 36 58 79 EF F8 86
+ : C6 6D D5 1A 5B 6A 99 74 4B 50 59 0C 87 A2 38 84
+ AES-CTR IV : C1 58 5E F1 5A 43 D8 75
+ Nonce : 00 FA AC 24
+ Plaintext : 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F
+ : 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F
+ Counter block (1): 00 FA AC 24 C1 58 5E F1 5A 43 D8 75 00 00 00 01
+ Key stream (1): F0 5F 21 18 3C 91 67 2B 41 E7 0A 00 8C 43 BC A6
+ Counter block (2): 00 FA AC 24 C1 58 5E F1 5A 43 D8 75 00 00 00 02
+ Key stream (2): A8 21 79 43 9B 96 8B 7D 4D 29 99 06 8F 59 B1 03
+ Ciphertext : F0 5E 23 1B 38 94 61 2C 49 EE 00 0B 80 4E B2 A9
+ : B8 30 6B 50 8F 83 9D 6A 55 30 83 1D 93 44 AF 1C
+*/
+static const uint8_t K8_CTR[] = {
+ 0xF6, 0xD6, 0x6D, 0x6B, 0xD5, 0x2D, 0x59, 0xBB,
+ 0x07, 0x96, 0x36, 0x58, 0x79, 0xEF, 0xF8, 0x86,
+ 0xC6, 0x6D, 0xD5, 0x1A, 0x5B, 0x6A, 0x99, 0x74,
+ 0x4B, 0x50, 0x59, 0x0C, 0x87, 0xA2, 0x38, 0x84,
+};
+static const uint8_t IV8_CTR[] = {
+ 0x00, 0xFA, 0xAC, 0x24, /* nonce */
+ 0xC1, 0x58, 0x5E, 0xF1, 0x5A, 0x43, 0xD8, 0x75,
+};
+static const uint8_t P8_CTR[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F,
+};
+static const uint8_t C8_CTR[] = {
+ 0xF0, 0x5E, 0x23, 0x1B, 0x38, 0x94, 0x61, 0x2C,
+ 0x49, 0xEE, 0x00, 0x0B, 0x80, 0x4E, 0xB2, 0xA9,
+ 0xB8, 0x30, 0x6B, 0x50, 0x8F, 0x83, 0x9D, 0x6A,
+ 0x55, 0x30, 0x83, 0x1D, 0x93, 0x44, 0xAF, 0x1C,
+};
+static const uint8_t T8_CTR[] = { 0 };
+static const uint8_t A8_CTR[] = { 0 };
+#define A8_CTR_len 0
+
+/*
+ Test Vector #9: Encrypting 36 octets using AES-CTR with 256-bit key
+ AES Key : FF 7A 61 7C E6 91 48 E4 F1 72 6E 2F 43 58 1D E2
+ : AA 62 D9 F8 05 53 2E DF F1 EE D6 87 FB 54 15 3D
+ AES-CTR IV : 51 A5 1D 70 A1 C1 11 48
+ Nonce : 00 1C C5 B7
+ Plaintext : 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F
+ : 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F
+ : 20 21 22 23
+ Counter block (1): 00 1C C5 B7 51 A5 1D 70 A1 C1 11 48 00 00 00 01
+ Key stream (1): EB 6D 50 81 19 0E BD F0 C6 7C 9E 4D 26 C7 41 A5
+ Counter block (2): 00 1C C5 B7 51 A5 1D 70 A1 C1 11 48 00 00 00 02
+ Key stream (2): A4 16 CD 95 71 7C EB 10 EC 95 DA AE 9F CB 19 00
+ Counter block (3): 00 1C C5 B7 51 A5 1D 70 A1 C1 11 48 00 00 00 03
+ Key stream (3): 3E E1 C4 9B C6 B9 CA 21 3F 6E E2 71 D0 A9 33 39
+ Ciphertext : EB 6C 52 82 1D 0B BB F7 CE 75 94 46 2A CA 4F AA
+ : B4 07 DF 86 65 69 FD 07 F4 8C C0 B5 83 D6 07 1F
+ : 1E C0 E6 B8
+*/
+static const uint8_t K9_CTR[] = {
+ 0xFF, 0x7A, 0x61, 0x7C, 0xE6, 0x91, 0x48, 0xE4,
+ 0xF1, 0x72, 0x6E, 0x2F, 0x43, 0x58, 0x1D, 0xE2,
+ 0xAA, 0x62, 0xD9, 0xF8, 0x05, 0x53, 0x2E, 0xDF,
+ 0xF1, 0xEE, 0xD6, 0x87, 0xFB, 0x54, 0x15, 0x3D,
+};
+static const uint8_t IV9_CTR[] = {
+ 0x00, 0x1C, 0xC5, 0xB7, /* nonce */
+ 0x51, 0xA5, 0x1D, 0x70, 0xA1, 0xC1, 0x11, 0x48,
+};
+static const uint8_t P9_CTR[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F,
+ 0x20, 0x21, 0x22, 0x23,
+};
+static const uint8_t C9_CTR[] = {
+ 0xEB, 0x6C, 0x52, 0x82, 0x1D, 0x0B, 0xBB, 0xF7,
+ 0xCE, 0x75, 0x94, 0x46, 0x2A, 0xCA, 0x4F, 0xAA,
+ 0xB4, 0x07, 0xDF, 0x86, 0x65, 0x69, 0xFD, 0x07,
+ 0xF4, 0x8C, 0xC0, 0xB5, 0x83, 0xD6, 0x07, 0x1F,
+ 0x1E, 0xC0, 0xE6, 0xB8,
+};
+static const uint8_t T9_CTR[] = { 0 };
+static const uint8_t A9_CTR[] = { 0 };
+#define A9_CTR_len 0
+
+/*
+ Test Vector #10: Encrypting 128 octets using AES-CTR with 128-bit key
+ AES Key : AE 68 52 F8 12 10 67 CC 4B F7 A5 76 55 77 F3 9E
+ AES-CTR IV : 00 00 00 00 00 00 00 00
+ Nonce : 00 00 00 30
+ Plaintext String : 'Full 8 blocks'
+*/
+static const uint8_t K10_CTR[] = {
+ 0xAE, 0x68, 0x52, 0xF8, 0x12, 0x10, 0x67, 0xCC,
+ 0x4B, 0xF7, 0xA5, 0x76, 0x55, 0x77, 0xF3, 0x9E,
+};
+static const uint8_t IV10_CTR[] = {
+ 0x00, 0x00, 0x00, 0x30, /* nonce */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+static const uint8_t P10_CTR[] = {
+ 0xA8, 0x63, 0x44, 0xF8, 0x36, 0x59, 0x2F, 0xF2,
+ 0xDA, 0xDD, 0x17, 0xCE, 0xFE, 0x2A, 0xF2, 0xA2,
+ 0x35, 0x87, 0x34, 0x0F, 0x35, 0xFC, 0xD8, 0xF2,
+ 0x57, 0xA1, 0xCB, 0x19, 0x0C, 0x33, 0x14, 0xE1,
+ 0x23, 0xEB, 0xC0, 0x88, 0x82, 0x05, 0x5F, 0x01,
+ 0x5D, 0xFC, 0x53, 0x08, 0xDB, 0x34, 0x8E, 0x94,
+ 0xE4, 0xA8, 0x26, 0x7F, 0xBC, 0xB7, 0x8B, 0xE1,
+ 0x58, 0x2F, 0x2C, 0x91, 0xCD, 0x5B, 0x4A, 0xAA,
+ 0x7A, 0xBA, 0x5F, 0xD2, 0x9B, 0xF8, 0x7D, 0xEA,
+ 0x76, 0xB6, 0x64, 0xB3, 0x29, 0xD3, 0x02, 0x19,
+ 0xA0, 0xDC, 0xE9, 0xB8, 0x90, 0x51, 0xA8, 0xDE,
+ 0x2E, 0xA1, 0xB7, 0x7E, 0x51, 0x0D, 0x34, 0xB3,
+ 0xED, 0xE7, 0x5E, 0xB8, 0x8A, 0xE9, 0xFE, 0x89,
+ 0xF8, 0x0B, 0x85, 0x09, 0x76, 0x08, 0x78, 0x0D,
+ 0x27, 0x59, 0x8E, 0x14, 0x43, 0x46, 0xA0, 0x91,
+ 0xEE, 0xAA, 0xFF, 0x74, 0x8D, 0xBC, 0x98, 0xB9
+};
+
+static const uint8_t C10_CTR[] = {
+ 0x1F, 0x03, 0x77, 0xD0, 0xED, 0x9B, 0xBC, 0xE9,
+ 0x9B, 0xD3, 0x01, 0x06, 0xF8, 0x54, 0x90, 0x7D,
+ 0x67, 0x6F, 0x19, 0xD7, 0x0B, 0xF3, 0x92, 0x8D,
+ 0x60, 0xC0, 0x18, 0x5A, 0x24, 0xC1, 0xD7, 0x60,
+ 0x82, 0x9C, 0x22, 0x37, 0x45, 0xE3, 0x9D, 0xA6,
+ 0x76, 0x37, 0xE1, 0x7A, 0x13, 0xB4, 0x40, 0x63,
+ 0xF4, 0xD8, 0xDE, 0x41, 0x64, 0xFC, 0xE2, 0x42,
+ 0x2E, 0x3F, 0xEA, 0xE1, 0x28, 0x06, 0xA5, 0xAC,
+ 0x6A, 0xC1, 0x58, 0x0C, 0x84, 0xFF, 0x9B, 0x6A,
+ 0xE5, 0xBE, 0x4E, 0x8C, 0x4C, 0xE9, 0x97, 0xD5,
+ 0x24, 0x30, 0x1B, 0x19, 0xDF, 0x87, 0x56, 0x85,
+ 0x31, 0x56, 0x5A, 0xDE, 0xE0, 0x6E, 0xC0, 0x1C,
+ 0xCB, 0x51, 0x5B, 0x6E, 0xAC, 0xF5, 0xB0, 0x60,
+ 0x60, 0x2F, 0x62, 0x0A, 0xEA, 0x62, 0x51, 0x2E,
+ 0x5B, 0x1B, 0x99, 0x51, 0x3B, 0xAC, 0xE9, 0xC5,
+ 0x59, 0x7D, 0x0E, 0xB6, 0x51, 0x6C, 0x16, 0x7A
+};
+
+static const uint8_t T10_CTR[] = { 0 };
+static const uint8_t A10_CTR[] = { 0 };
+#define A10_CTR_len 0
+
+/*
+ Test Vector #11: Encrypting 136 octets using AES-CTR with 128-bit key
+ AES Key : AE 68 52 F8 12 10 67 CC 4B F7 A5 76 55 77 F3 9E
+ AES-CTR IV : 00 00 00 00 00 00 00 00
+ Nonce : 00 00 00 30
+ Plaintext String : 'Full 8 blocks + extra partial block'
+*/
+static const uint8_t K11_CTR[] = {
+ 0xAE, 0x68, 0x52, 0xF8, 0x12, 0x10, 0x67, 0xCC,
+ 0x4B, 0xF7, 0xA5, 0x76, 0x55, 0x77, 0xF3, 0x9E,
+};
+static const uint8_t IV11_CTR[] = {
+ 0x00, 0x00, 0x00, 0x30, /* nonce */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+static const uint8_t P11_CTR[] = {
+ 0xA8, 0x63, 0x44, 0xF8, 0x36, 0x59, 0x2F, 0xF2,
+ 0xDA, 0xDD, 0x17, 0xCE, 0xFE, 0x2A, 0xF2, 0xA2,
+ 0x35, 0x87, 0x34, 0x0F, 0x35, 0xFC, 0xD8, 0xF2,
+ 0x57, 0xA1, 0xCB, 0x19, 0x0C, 0x33, 0x14, 0xE1,
+ 0x23, 0xEB, 0xC0, 0x88, 0x82, 0x05, 0x5F, 0x01,
+ 0x5D, 0xFC, 0x53, 0x08, 0xDB, 0x34, 0x8E, 0x94,
+ 0xE4, 0xA8, 0x26, 0x7F, 0xBC, 0xB7, 0x8B, 0xE1,
+ 0x58, 0x2F, 0x2C, 0x91, 0xCD, 0x5B, 0x4A, 0xAA,
+ 0x7A, 0xBA, 0x5F, 0xD2, 0x9B, 0xF8, 0x7D, 0xEA,
+ 0x76, 0xB6, 0x64, 0xB3, 0x29, 0xD3, 0x02, 0x19,
+ 0xA0, 0xDC, 0xE9, 0xB8, 0x90, 0x51, 0xA8, 0xDE,
+ 0x2E, 0xA1, 0xB7, 0x7E, 0x51, 0x0D, 0x34, 0xB3,
+ 0xED, 0xE7, 0x5E, 0xB8, 0x8A, 0xE9, 0xFE, 0x89,
+ 0xF8, 0x0B, 0x85, 0x09, 0x76, 0x08, 0x78, 0x0D,
+ 0x27, 0x59, 0x8E, 0x14, 0x43, 0x46, 0xA0, 0x91,
+ 0xEE, 0xAA, 0xFF, 0x74, 0x8D, 0xBC, 0x98, 0xB9,
+ 0x12, 0xAD, 0x82, 0xDF, 0x2F, 0xF8, 0x9C, 0xE0
+};
+
+static const uint8_t C11_CTR[] = {
+ 0x1F, 0x03, 0x77, 0xD0, 0xED, 0x9B, 0xBC, 0xE9,
+ 0x9B, 0xD3, 0x01, 0x06, 0xF8, 0x54, 0x90, 0x7D,
+ 0x67, 0x6F, 0x19, 0xD7, 0x0B, 0xF3, 0x92, 0x8D,
+ 0x60, 0xC0, 0x18, 0x5A, 0x24, 0xC1, 0xD7, 0x60,
+ 0x82, 0x9C, 0x22, 0x37, 0x45, 0xE3, 0x9D, 0xA6,
+ 0x76, 0x37, 0xE1, 0x7A, 0x13, 0xB4, 0x40, 0x63,
+ 0xF4, 0xD8, 0xDE, 0x41, 0x64, 0xFC, 0xE2, 0x42,
+ 0x2E, 0x3F, 0xEA, 0xE1, 0x28, 0x06, 0xA5, 0xAC,
+ 0x6A, 0xC1, 0x58, 0x0C, 0x84, 0xFF, 0x9B, 0x6A,
+ 0xE5, 0xBE, 0x4E, 0x8C, 0x4C, 0xE9, 0x97, 0xD5,
+ 0x24, 0x30, 0x1B, 0x19, 0xDF, 0x87, 0x56, 0x85,
+ 0x31, 0x56, 0x5A, 0xDE, 0xE0, 0x6E, 0xC0, 0x1C,
+ 0xCB, 0x51, 0x5B, 0x6E, 0xAC, 0xF5, 0xB0, 0x60,
+ 0x60, 0x2F, 0x62, 0x0A, 0xEA, 0x62, 0x51, 0x2E,
+ 0x5B, 0x1B, 0x99, 0x51, 0x3B, 0xAC, 0xE9, 0xC5,
+ 0x59, 0x7D, 0x0E, 0xB6, 0x51, 0x6C, 0x16, 0x7A,
+ 0x7A, 0x13, 0xB4, 0x40, 0x69, 0x9B, 0x58, 0x16
+};
+static const uint8_t T11_CTR[] = { 0 };
+static const uint8_t A11_CTR[] = { 0 };
+#define A11_CTR_len 0
+
+/*
+ Test Vector #12: Encrypting 256 octets using AES-CTR with 128-bit key
+ AES Key : AE 68 52 F8 12 10 67 CC 4B F7 A5 76 55 77 F3 9E
+ AES-CTR IV : 00 00 00 00 00 00 00 00
+ Nonce : 00 00 00 30
+ Plaintext String : 'Full 2x8 blocks'
+*/
+static const uint8_t K12_CTR[] = {
+ 0xAE, 0x68, 0x52, 0xF8, 0x12, 0x10, 0x67, 0xCC,
+ 0x4B, 0xF7, 0xA5, 0x76, 0x55, 0x77, 0xF3, 0x9E,
+};
+static const uint8_t IV12_CTR[] = {
+ 0x00, 0x00, 0x00, 0x30, /* nonce */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+static const uint8_t P12_CTR[] = {
+ 0xA8, 0x63, 0x44, 0xF8, 0x36, 0x59, 0x2F, 0xF2,
+ 0xDA, 0xDD, 0x17, 0xCE, 0xFE, 0x2A, 0xF2, 0xA2,
+ 0x35, 0x87, 0x34, 0x0F, 0x35, 0xFC, 0xD8, 0xF2,
+ 0x57, 0xA1, 0xCB, 0x19, 0x0C, 0x33, 0x14, 0xE1,
+ 0x23, 0xEB, 0xC0, 0x88, 0x82, 0x05, 0x5F, 0x01,
+ 0x5D, 0xFC, 0x53, 0x08, 0xDB, 0x34, 0x8E, 0x94,
+ 0xE4, 0xA8, 0x26, 0x7F, 0xBC, 0xB7, 0x8B, 0xE1,
+ 0x58, 0x2F, 0x2C, 0x91, 0xCD, 0x5B, 0x4A, 0xAA,
+ 0x7A, 0xBA, 0x5F, 0xD2, 0x9B, 0xF8, 0x7D, 0xEA,
+ 0x76, 0xB6, 0x64, 0xB3, 0x29, 0xD3, 0x02, 0x19,
+ 0xA0, 0xDC, 0xE9, 0xB8, 0x90, 0x51, 0xA8, 0xDE,
+ 0x2E, 0xA1, 0xB7, 0x7E, 0x51, 0x0D, 0x34, 0xB3,
+ 0xED, 0xE7, 0x5E, 0xB8, 0x8A, 0xE9, 0xFE, 0x89,
+ 0xF8, 0x0B, 0x85, 0x09, 0x76, 0x08, 0x78, 0x0D,
+ 0x27, 0x59, 0x8E, 0x14, 0x43, 0x46, 0xA0, 0x91,
+ 0xEE, 0xAA, 0xFF, 0x74, 0x8D, 0xBC, 0x98, 0xB9,
+ 0x77, 0xBD, 0x41, 0x4F, 0xAB, 0xF8, 0x78, 0x1F,
+ 0xED, 0x2B, 0x14, 0x89, 0xB5, 0x7B, 0x61, 0x5E,
+ 0x88, 0x35, 0x46, 0x0F, 0x83, 0x5B, 0xC6, 0xE6,
+ 0x61, 0x1D, 0xD8, 0x5E, 0xD3, 0xC3, 0xC6, 0xE8,
+ 0xFB, 0x8E, 0x59, 0xDB, 0x31, 0x17, 0xF8, 0xCD,
+ 0xC1, 0xD4, 0x2D, 0xEF, 0xD8, 0x25, 0x9E, 0x88,
+ 0x10, 0x58, 0xF2, 0xA6, 0x84, 0x4F, 0xA1, 0x32,
+ 0x5F, 0x0E, 0xA2, 0x14, 0xF7, 0x03, 0x85, 0x06,
+ 0x94, 0x4F, 0x83, 0x87, 0x04, 0x97, 0x5A, 0x8D,
+ 0x9A, 0x73, 0x36, 0x2A, 0xE0, 0x6D, 0xA9, 0x1F,
+ 0xBC, 0x2F, 0xD2, 0x9E, 0xD1, 0x7D, 0x2C, 0x89,
+ 0x1F, 0xE1, 0xA0, 0x8F, 0x5D, 0x3E, 0xAB, 0x9E,
+ 0x79, 0x1A, 0x76, 0xC3, 0x0A, 0xC8, 0xCF, 0xCB,
+ 0x35, 0x63, 0xD9, 0x46, 0x87, 0xAF, 0x74, 0x24,
+ 0x47, 0xBA, 0x60, 0xAB, 0x33, 0x5D, 0xA8, 0xDE,
+ 0xFE, 0x1B, 0xC5, 0x3F, 0xAC, 0xD9, 0xAD, 0x94
+};
+
+static const uint8_t C12_CTR[] = {
+ 0x1F, 0x03, 0x77, 0xD0, 0xED, 0x9B, 0xBC, 0xE9,
+ 0x9B, 0xD3, 0x01, 0x06, 0xF8, 0x54, 0x90, 0x7D,
+ 0x67, 0x6F, 0x19, 0xD7, 0x0B, 0xF3, 0x92, 0x8D,
+ 0x60, 0xC0, 0x18, 0x5A, 0x24, 0xC1, 0xD7, 0x60,
+ 0x82, 0x9C, 0x22, 0x37, 0x45, 0xE3, 0x9D, 0xA6,
+ 0x76, 0x37, 0xE1, 0x7A, 0x13, 0xB4, 0x40, 0x63,
+ 0xF4, 0xD8, 0xDE, 0x41, 0x64, 0xFC, 0xE2, 0x42,
+ 0x2E, 0x3F, 0xEA, 0xE1, 0x28, 0x06, 0xA5, 0xAC,
+ 0x6A, 0xC1, 0x58, 0x0C, 0x84, 0xFF, 0x9B, 0x6A,
+ 0xE5, 0xBE, 0x4E, 0x8C, 0x4C, 0xE9, 0x97, 0xD5,
+ 0x24, 0x30, 0x1B, 0x19, 0xDF, 0x87, 0x56, 0x85,
+ 0x31, 0x56, 0x5A, 0xDE, 0xE0, 0x6E, 0xC0, 0x1C,
+ 0xCB, 0x51, 0x5B, 0x6E, 0xAC, 0xF5, 0xB0, 0x60,
+ 0x60, 0x2F, 0x62, 0x0A, 0xEA, 0x62, 0x51, 0x2E,
+ 0x5B, 0x1B, 0x99, 0x51, 0x3B, 0xAC, 0xE9, 0xC5,
+ 0x59, 0x7D, 0x0E, 0xB6, 0x51, 0x6C, 0x16, 0x7A,
+ 0x1F, 0x03, 0x77, 0xD0, 0xED, 0x9B, 0xBC, 0xE9,
+ 0x9B, 0xD3, 0x01, 0x06, 0xF8, 0x54, 0x90, 0x7D,
+ 0x67, 0x6F, 0x19, 0xD7, 0x0B, 0xF3, 0x92, 0x8D,
+ 0x60, 0xC0, 0x18, 0x5A, 0x24, 0xC1, 0xD7, 0x60,
+ 0x82, 0x9C, 0x22, 0x37, 0x45, 0xE3, 0x9D, 0xA6,
+ 0x76, 0x37, 0xE1, 0x7A, 0x13, 0xB4, 0x40, 0x63,
+ 0xF4, 0xD8, 0xDE, 0x41, 0x64, 0xFC, 0xE2, 0x42,
+ 0x2E, 0x3F, 0xEA, 0xE1, 0x28, 0x06, 0xA5, 0xAC,
+ 0x6A, 0xC1, 0x58, 0x0C, 0x84, 0xFF, 0x9B, 0x6A,
+ 0xE5, 0xBE, 0x4E, 0x8C, 0x4C, 0xE9, 0x97, 0xD5,
+ 0x24, 0x30, 0x1B, 0x19, 0xDF, 0x87, 0x56, 0x85,
+ 0x31, 0x56, 0x5A, 0xDE, 0xE0, 0x6E, 0xC0, 0x1C,
+ 0xCB, 0x51, 0x5B, 0x6E, 0xAC, 0xF5, 0xB0, 0x60,
+ 0x60, 0x2F, 0x62, 0x0A, 0xEA, 0x62, 0x51, 0x2E,
+ 0x5B, 0x1B, 0x99, 0x51, 0x3B, 0xAC, 0xE9, 0xC5,
+ 0x59, 0x7D, 0x0E, 0xB6, 0x51, 0x6C, 0x16, 0x7A,
+};
+static const uint8_t T12_CTR[] = { 0 };
+static const uint8_t A12_CTR[] = { 0 };
+#define A12_CTR_len 0
+
+/*
+ Test Vector #13: Encrypting 300 octets using AES-CTR with 128-bit key
+ AES Key : AE 68 52 F8 12 10 67 CC 4B F7 A5 76 55 77 F3 9E
+ AES-CTR IV : 00 00 00 00 00 00 00 00
+ Nonce : 00 00 00 30
+ Plaintext String : 'Full 2x8 blocks + 44B (2 full blocks and partial block)'
+*/
+static const uint8_t K13_CTR[] = {
+ 0xAE, 0x68, 0x52, 0xF8, 0x12, 0x10, 0x67, 0xCC,
+ 0x4B, 0xF7, 0xA5, 0x76, 0x55, 0x77, 0xF3, 0x9E,
+};
+static const uint8_t IV13_CTR[] = {
+ 0x00, 0x00, 0x00, 0x30, /* nonce */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+static const uint8_t P13_CTR[] = {
+ 0xA8, 0x63, 0x44, 0xF8, 0x36, 0x59, 0x2F, 0xF2,
+ 0xDA, 0xDD, 0x17, 0xCE, 0xFE, 0x2A, 0xF2, 0xA2,
+ 0x35, 0x87, 0x34, 0x0F, 0x35, 0xFC, 0xD8, 0xF2,
+ 0x57, 0xA1, 0xCB, 0x19, 0x0C, 0x33, 0x14, 0xE1,
+ 0x23, 0xEB, 0xC0, 0x88, 0x82, 0x05, 0x5F, 0x01,
+ 0x5D, 0xFC, 0x53, 0x08, 0xDB, 0x34, 0x8E, 0x94,
+ 0xE4, 0xA8, 0x26, 0x7F, 0xBC, 0xB7, 0x8B, 0xE1,
+ 0x58, 0x2F, 0x2C, 0x91, 0xCD, 0x5B, 0x4A, 0xAA,
+ 0x7A, 0xBA, 0x5F, 0xD2, 0x9B, 0xF8, 0x7D, 0xEA,
+ 0x76, 0xB6, 0x64, 0xB3, 0x29, 0xD3, 0x02, 0x19,
+ 0xA0, 0xDC, 0xE9, 0xB8, 0x90, 0x51, 0xA8, 0xDE,
+ 0x2E, 0xA1, 0xB7, 0x7E, 0x51, 0x0D, 0x34, 0xB3,
+ 0xED, 0xE7, 0x5E, 0xB8, 0x8A, 0xE9, 0xFE, 0x89,
+ 0xF8, 0x0B, 0x85, 0x09, 0x76, 0x08, 0x78, 0x0D,
+ 0x27, 0x59, 0x8E, 0x14, 0x43, 0x46, 0xA0, 0x91,
+ 0xEE, 0xAA, 0xFF, 0x74, 0x8D, 0xBC, 0x98, 0xB9,
+ 0x77, 0xBD, 0x41, 0x4F, 0xAB, 0xF8, 0x78, 0x1F,
+ 0xED, 0x2B, 0x14, 0x89, 0xB5, 0x7B, 0x61, 0x5E,
+ 0x88, 0x35, 0x46, 0x0F, 0x83, 0x5B, 0xC6, 0xE6,
+ 0x61, 0x1D, 0xD8, 0x5E, 0xD3, 0xC3, 0xC6, 0xE8,
+ 0xFB, 0x8E, 0x59, 0xDB, 0x31, 0x17, 0xF8, 0xCD,
+ 0xC1, 0xD4, 0x2D, 0xEF, 0xD8, 0x25, 0x9E, 0x88,
+ 0x10, 0x58, 0xF2, 0xA6, 0x84, 0x4F, 0xA1, 0x32,
+ 0x5F, 0x0E, 0xA2, 0x14, 0xF7, 0x03, 0x85, 0x06,
+ 0x94, 0x4F, 0x83, 0x87, 0x04, 0x97, 0x5A, 0x8D,
+ 0x9A, 0x73, 0x36, 0x2A, 0xE0, 0x6D, 0xA9, 0x1F,
+ 0xBC, 0x2F, 0xD2, 0x9E, 0xD1, 0x7D, 0x2C, 0x89,
+ 0x1F, 0xE1, 0xA0, 0x8F, 0x5D, 0x3E, 0xAB, 0x9E,
+ 0x79, 0x1A, 0x76, 0xC3, 0x0A, 0xC8, 0xCF, 0xCB,
+ 0x35, 0x63, 0xD9, 0x46, 0x87, 0xAF, 0x74, 0x24,
+ 0x47, 0xBA, 0x60, 0xAB, 0x33, 0x5D, 0xA8, 0xDE,
+ 0xFE, 0x1B, 0xC5, 0x3F, 0xAC, 0xD9, 0xAD, 0x94,
+ 0x66, 0xb8, 0x3f, 0x3a, 0x21, 0x9f, 0xd0, 0x43,
+ 0x46, 0xdd, 0x65, 0x8b, 0x44, 0x99, 0x66, 0x91,
+ 0x64, 0xe2, 0x69, 0x6f, 0xbb, 0x85, 0x8c, 0xcc,
+ 0x7f, 0xea, 0x96, 0xd1, 0x5e, 0xb4, 0x7c, 0xd0,
+ 0xab, 0x02, 0x8d, 0xa3, 0x59, 0x3b, 0x8c, 0xd5,
+ 0xd0, 0xe7, 0xb4, 0xc4
+};
+
+static const uint8_t C13_CTR[] = {
+ 0x1F, 0x03, 0x77, 0xD0, 0xED, 0x9B, 0xBC, 0xE9,
+ 0x9B, 0xD3, 0x01, 0x06, 0xF8, 0x54, 0x90, 0x7D,
+ 0x67, 0x6F, 0x19, 0xD7, 0x0B, 0xF3, 0x92, 0x8D,
+ 0x60, 0xC0, 0x18, 0x5A, 0x24, 0xC1, 0xD7, 0x60,
+ 0x82, 0x9C, 0x22, 0x37, 0x45, 0xE3, 0x9D, 0xA6,
+ 0x76, 0x37, 0xE1, 0x7A, 0x13, 0xB4, 0x40, 0x63,
+ 0xF4, 0xD8, 0xDE, 0x41, 0x64, 0xFC, 0xE2, 0x42,
+ 0x2E, 0x3F, 0xEA, 0xE1, 0x28, 0x06, 0xA5, 0xAC,
+ 0x6A, 0xC1, 0x58, 0x0C, 0x84, 0xFF, 0x9B, 0x6A,
+ 0xE5, 0xBE, 0x4E, 0x8C, 0x4C, 0xE9, 0x97, 0xD5,
+ 0x24, 0x30, 0x1B, 0x19, 0xDF, 0x87, 0x56, 0x85,
+ 0x31, 0x56, 0x5A, 0xDE, 0xE0, 0x6E, 0xC0, 0x1C,
+ 0xCB, 0x51, 0x5B, 0x6E, 0xAC, 0xF5, 0xB0, 0x60,
+ 0x60, 0x2F, 0x62, 0x0A, 0xEA, 0x62, 0x51, 0x2E,
+ 0x5B, 0x1B, 0x99, 0x51, 0x3B, 0xAC, 0xE9, 0xC5,
+ 0x59, 0x7D, 0x0E, 0xB6, 0x51, 0x6C, 0x16, 0x7A,
+ 0x1F, 0x03, 0x77, 0xD0, 0xED, 0x9B, 0xBC, 0xE9,
+ 0x9B, 0xD3, 0x01, 0x06, 0xF8, 0x54, 0x90, 0x7D,
+ 0x67, 0x6F, 0x19, 0xD7, 0x0B, 0xF3, 0x92, 0x8D,
+ 0x60, 0xC0, 0x18, 0x5A, 0x24, 0xC1, 0xD7, 0x60,
+ 0x82, 0x9C, 0x22, 0x37, 0x45, 0xE3, 0x9D, 0xA6,
+ 0x76, 0x37, 0xE1, 0x7A, 0x13, 0xB4, 0x40, 0x63,
+ 0xF4, 0xD8, 0xDE, 0x41, 0x64, 0xFC, 0xE2, 0x42,
+ 0x2E, 0x3F, 0xEA, 0xE1, 0x28, 0x06, 0xA5, 0xAC,
+ 0x6A, 0xC1, 0x58, 0x0C, 0x84, 0xFF, 0x9B, 0x6A,
+ 0xE5, 0xBE, 0x4E, 0x8C, 0x4C, 0xE9, 0x97, 0xD5,
+ 0x24, 0x30, 0x1B, 0x19, 0xDF, 0x87, 0x56, 0x85,
+ 0x31, 0x56, 0x5A, 0xDE, 0xE0, 0x6E, 0xC0, 0x1C,
+ 0xCB, 0x51, 0x5B, 0x6E, 0xAC, 0xF5, 0xB0, 0x60,
+ 0x60, 0x2F, 0x62, 0x0A, 0xEA, 0x62, 0x51, 0x2E,
+ 0x5B, 0x1B, 0x99, 0x51, 0x3B, 0xAC, 0xE9, 0xC5,
+ 0x59, 0x7D, 0x0E, 0xB6, 0x51, 0x6C, 0x16, 0x7A,
+ 0xED, 0x52, 0x55, 0xB9, 0x76, 0x6C, 0x5E, 0x6E,
+ 0x76, 0x97, 0x00, 0xC7, 0xEB, 0xFE, 0xEC, 0x10,
+ 0x94, 0x2C, 0xA9, 0xAF, 0x9B, 0x09, 0x19, 0xB3,
+ 0x17, 0x29, 0x96, 0xBA, 0x8E, 0xAC, 0x3D, 0x0A,
+ 0x9B, 0x70, 0x54, 0x0F, 0x1E, 0xD4, 0xE8, 0x13,
+ 0xE6, 0x8F, 0xAD, 0xFD
+};
+static const uint8_t T13_CTR[] = { 0 };
+static const uint8_t A13_CTR[] = { 0 };
+#define A13_CTR_len 0
+
+/*
+ Test Vector #14: Encrypting 384 octets using AES-CTR with 128-bit key
+ AES Key : AE 68 52 F8 12 10 67 CC 4B F7 A5 76 55 77 F3 9E
+ AES-CTR IV : 00 00 00 00 00 00 00 00
+ Nonce : 00 00 00 30
+ Plaintext String : 'Full 3x8 blocks'
+*/
+static const uint8_t K14_CTR[] = {
+ 0xAE, 0x68, 0x52, 0xF8, 0x12, 0x10, 0x67, 0xCC,
+ 0x4B, 0xF7, 0xA5, 0x76, 0x55, 0x77, 0xF3, 0x9E,
+};
+static const uint8_t IV14_CTR[] = {
+ 0x00, 0x00, 0x00, 0x30, /* nonce */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+static const uint8_t P14_CTR[] = {
+ 0xA8, 0x63, 0x44, 0xF8, 0x36, 0x59, 0x2F, 0xF2,
+ 0xDA, 0xDD, 0x17, 0xCE, 0xFE, 0x2A, 0xF2, 0xA2,
+ 0x35, 0x87, 0x34, 0x0F, 0x35, 0xFC, 0xD8, 0xF2,
+ 0x57, 0xA1, 0xCB, 0x19, 0x0C, 0x33, 0x14, 0xE1,
+ 0x23, 0xEB, 0xC0, 0x88, 0x82, 0x05, 0x5F, 0x01,
+ 0x5D, 0xFC, 0x53, 0x08, 0xDB, 0x34, 0x8E, 0x94,
+ 0xE4, 0xA8, 0x26, 0x7F, 0xBC, 0xB7, 0x8B, 0xE1,
+ 0x58, 0x2F, 0x2C, 0x91, 0xCD, 0x5B, 0x4A, 0xAA,
+ 0x7A, 0xBA, 0x5F, 0xD2, 0x9B, 0xF8, 0x7D, 0xEA,
+ 0x76, 0xB6, 0x64, 0xB3, 0x29, 0xD3, 0x02, 0x19,
+ 0xA0, 0xDC, 0xE9, 0xB8, 0x90, 0x51, 0xA8, 0xDE,
+ 0x2E, 0xA1, 0xB7, 0x7E, 0x51, 0x0D, 0x34, 0xB3,
+ 0xED, 0xE7, 0x5E, 0xB8, 0x8A, 0xE9, 0xFE, 0x89,
+ 0xF8, 0x0B, 0x85, 0x09, 0x76, 0x08, 0x78, 0x0D,
+ 0x27, 0x59, 0x8E, 0x14, 0x43, 0x46, 0xA0, 0x91,
+ 0xEE, 0xAA, 0xFF, 0x74, 0x8D, 0xBC, 0x98, 0xB9,
+ 0x77, 0xBD, 0x41, 0x4F, 0xAB, 0xF8, 0x78, 0x1F,
+ 0xED, 0x2B, 0x14, 0x89, 0xB5, 0x7B, 0x61, 0x5E,
+ 0x88, 0x35, 0x46, 0x0F, 0x83, 0x5B, 0xC6, 0xE6,
+ 0x61, 0x1D, 0xD8, 0x5E, 0xD3, 0xC3, 0xC6, 0xE8,
+ 0xFB, 0x8E, 0x59, 0xDB, 0x31, 0x17, 0xF8, 0xCD,
+ 0xC1, 0xD4, 0x2D, 0xEF, 0xD8, 0x25, 0x9E, 0x88,
+ 0x10, 0x58, 0xF2, 0xA6, 0x84, 0x4F, 0xA1, 0x32,
+ 0x5F, 0x0E, 0xA2, 0x14, 0xF7, 0x03, 0x85, 0x06,
+ 0x94, 0x4F, 0x83, 0x87, 0x04, 0x97, 0x5A, 0x8D,
+ 0x9A, 0x73, 0x36, 0x2A, 0xE0, 0x6D, 0xA9, 0x1F,
+ 0xBC, 0x2F, 0xD2, 0x9E, 0xD1, 0x7D, 0x2C, 0x89,
+ 0x1F, 0xE1, 0xA0, 0x8F, 0x5D, 0x3E, 0xAB, 0x9E,
+ 0x79, 0x1A, 0x76, 0xC3, 0x0A, 0xC8, 0xCF, 0xCB,
+ 0x35, 0x63, 0xD9, 0x46, 0x87, 0xAF, 0x74, 0x24,
+ 0x47, 0xBA, 0x60, 0xAB, 0x33, 0x5D, 0xA8, 0xDE,
+ 0xFE, 0x1B, 0xC5, 0x3F, 0xAC, 0xD9, 0xAD, 0x94,
+ 0x66, 0xb8, 0x3f, 0x3a, 0x21, 0x9f, 0xd0, 0x43,
+ 0x46, 0xdd, 0x65, 0x8b, 0x44, 0x99, 0x66, 0x91,
+ 0x64, 0xe2, 0x69, 0x6f, 0xbb, 0x85, 0x8c, 0xcc,
+ 0x7f, 0xea, 0x96, 0xd1, 0x5e, 0xb4, 0x7c, 0xd0,
+ 0xab, 0x02, 0x8d, 0xa3, 0x59, 0x3b, 0x8c, 0xd5,
+ 0xd0, 0xe7, 0xb4, 0xc4, 0x90, 0x41, 0x9f, 0x78,
+ 0x4e, 0x82, 0x9e, 0xe4, 0x1b, 0x97, 0xa9, 0xa4,
+ 0x7b, 0x48, 0xad, 0x56, 0xc0, 0xe4, 0x86, 0x52,
+ 0xfc, 0xad, 0x93, 0x0b, 0x7d, 0x38, 0xce, 0x73,
+ 0x64, 0xbd, 0xf7, 0x00, 0x7b, 0xe6, 0x46, 0x03,
+ 0x2f, 0x4b, 0x75, 0x9f, 0x3a, 0x2d, 0x32, 0x42,
+ 0xfe, 0x80, 0x74, 0x89, 0x27, 0x34, 0xce, 0x5e,
+ 0xbf, 0xbe, 0x07, 0x50, 0x91, 0x08, 0x27, 0x2b,
+ 0x32, 0x77, 0xa7, 0xff, 0x83, 0xb1, 0xab, 0xc8,
+ 0x98, 0xbe, 0xac, 0x33, 0x7c, 0x47, 0x19, 0x33,
+ 0x6f, 0x4d, 0xbe, 0x3e, 0xdc, 0xe0, 0x87, 0xfb
+};
+
+static const uint8_t C14_CTR[] = {
+ 0x1F, 0x03, 0x77, 0xD0, 0xED, 0x9B, 0xBC, 0xE9,
+ 0x9B, 0xD3, 0x01, 0x06, 0xF8, 0x54, 0x90, 0x7D,
+ 0x67, 0x6F, 0x19, 0xD7, 0x0B, 0xF3, 0x92, 0x8D,
+ 0x60, 0xC0, 0x18, 0x5A, 0x24, 0xC1, 0xD7, 0x60,
+ 0x82, 0x9C, 0x22, 0x37, 0x45, 0xE3, 0x9D, 0xA6,
+ 0x76, 0x37, 0xE1, 0x7A, 0x13, 0xB4, 0x40, 0x63,
+ 0xF4, 0xD8, 0xDE, 0x41, 0x64, 0xFC, 0xE2, 0x42,
+ 0x2E, 0x3F, 0xEA, 0xE1, 0x28, 0x06, 0xA5, 0xAC,
+ 0x6A, 0xC1, 0x58, 0x0C, 0x84, 0xFF, 0x9B, 0x6A,
+ 0xE5, 0xBE, 0x4E, 0x8C, 0x4C, 0xE9, 0x97, 0xD5,
+ 0x24, 0x30, 0x1B, 0x19, 0xDF, 0x87, 0x56, 0x85,
+ 0x31, 0x56, 0x5A, 0xDE, 0xE0, 0x6E, 0xC0, 0x1C,
+ 0xCB, 0x51, 0x5B, 0x6E, 0xAC, 0xF5, 0xB0, 0x60,
+ 0x60, 0x2F, 0x62, 0x0A, 0xEA, 0x62, 0x51, 0x2E,
+ 0x5B, 0x1B, 0x99, 0x51, 0x3B, 0xAC, 0xE9, 0xC5,
+ 0x59, 0x7D, 0x0E, 0xB6, 0x51, 0x6C, 0x16, 0x7A,
+ 0x1F, 0x03, 0x77, 0xD0, 0xED, 0x9B, 0xBC, 0xE9,
+ 0x9B, 0xD3, 0x01, 0x06, 0xF8, 0x54, 0x90, 0x7D,
+ 0x67, 0x6F, 0x19, 0xD7, 0x0B, 0xF3, 0x92, 0x8D,
+ 0x60, 0xC0, 0x18, 0x5A, 0x24, 0xC1, 0xD7, 0x60,
+ 0x82, 0x9C, 0x22, 0x37, 0x45, 0xE3, 0x9D, 0xA6,
+ 0x76, 0x37, 0xE1, 0x7A, 0x13, 0xB4, 0x40, 0x63,
+ 0xF4, 0xD8, 0xDE, 0x41, 0x64, 0xFC, 0xE2, 0x42,
+ 0x2E, 0x3F, 0xEA, 0xE1, 0x28, 0x06, 0xA5, 0xAC,
+ 0x6A, 0xC1, 0x58, 0x0C, 0x84, 0xFF, 0x9B, 0x6A,
+ 0xE5, 0xBE, 0x4E, 0x8C, 0x4C, 0xE9, 0x97, 0xD5,
+ 0x24, 0x30, 0x1B, 0x19, 0xDF, 0x87, 0x56, 0x85,
+ 0x31, 0x56, 0x5A, 0xDE, 0xE0, 0x6E, 0xC0, 0x1C,
+ 0xCB, 0x51, 0x5B, 0x6E, 0xAC, 0xF5, 0xB0, 0x60,
+ 0x60, 0x2F, 0x62, 0x0A, 0xEA, 0x62, 0x51, 0x2E,
+ 0x5B, 0x1B, 0x99, 0x51, 0x3B, 0xAC, 0xE9, 0xC5,
+ 0x59, 0x7D, 0x0E, 0xB6, 0x51, 0x6C, 0x16, 0x7A,
+ 0xED, 0x52, 0x55, 0xB9, 0x76, 0x6C, 0x5E, 0x6E,
+ 0x76, 0x97, 0x00, 0xC7, 0xEB, 0xFE, 0xEC, 0x10,
+ 0x94, 0x2C, 0xA9, 0xAF, 0x9B, 0x09, 0x19, 0xB3,
+ 0x17, 0x29, 0x96, 0xBA, 0x8E, 0xAC, 0x3D, 0x0A,
+ 0x9B, 0x70, 0x54, 0x0F, 0x1E, 0xD4, 0xE8, 0x13,
+ 0xE6, 0x8F, 0xAD, 0xFD, 0xFD, 0x13, 0xCF, 0xD5,
+ 0x94, 0x06, 0xA0, 0x24, 0x79, 0xC0, 0xF8, 0x05,
+ 0x3D, 0x19, 0xEB, 0x96, 0xDA, 0x31, 0xAE, 0xF5,
+ 0x4D, 0x82, 0x2C, 0x23, 0x03, 0x9A, 0x43, 0x85,
+ 0x94, 0x36, 0x30, 0xE8, 0x0A, 0x9B, 0x1F, 0x05,
+ 0x6E, 0x4B, 0xA5, 0x98, 0x78, 0xBE, 0x73, 0x0D,
+ 0x8C, 0x60, 0x55, 0x88, 0xD6, 0xA3, 0x80, 0x13,
+ 0x19, 0xDB, 0xF8, 0xCD, 0xA7, 0xDC, 0x28, 0x4C,
+ 0x09, 0xAF, 0xFE, 0x88, 0x77, 0xE1, 0x6E, 0x12,
+ 0x57, 0x5A, 0xA8, 0xC6, 0x38, 0xCF, 0xF5, 0x0D,
+ 0x42, 0x2C, 0x67, 0xB3, 0x22, 0x6F, 0x3D, 0x7D
+};
+static const uint8_t T14_CTR[] = { 0 };
+static const uint8_t A14_CTR[] = { 0 };
+#define A14_CTR_len 0
+
+static const uint8_t K15_CTR[] = {
+ 0x2B, 0x7E, 0x15, 0x16, 0x28, 0xAE, 0xD2, 0xA6,
+ 0xAB, 0xF7, 0x15, 0x88, 0x09, 0xCF, 0x4F, 0x3C
+};
+static const uint8_t IV15_CTR[] = {
+ 0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7,
+ 0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xFF
+};
+static const uint8_t P15_CTR[] = {
+ 0x6B, 0xC1, 0xBE, 0xE2, 0x2E, 0x40, 0x9F, 0x96,
+ 0xE9, 0x3D, 0x7E, 0x11, 0x73, 0x93, 0x17, 0x2A,
+ 0xAE, 0x2D, 0x8A, 0x57, 0x1E, 0x03, 0xAC, 0x9C,
+ 0x9E, 0xB7, 0x6F, 0xAC, 0x45, 0xAF, 0x8E, 0x51,
+ 0x30, 0xC8, 0x1C, 0x46, 0xA3, 0x5C, 0xE4, 0x11,
+ 0xE5, 0xFB, 0xC1, 0x19, 0x1A, 0x0A, 0x52, 0xEF,
+ 0xF6, 0x9F, 0x24, 0x45, 0xDF, 0x4F, 0x9B, 0x17,
+ 0xAD, 0x2B, 0x41, 0x7B, 0xE6, 0x6C, 0x37, 0x10
+};
+
+static const uint8_t C15_CTR[] = {
+ 0x87, 0x4D, 0x61, 0x91, 0xB6, 0x20, 0xE3, 0x26,
+ 0x1B, 0xEF, 0x68, 0x64, 0x99, 0x0D, 0xB6, 0xCE,
+ 0x98, 0x06, 0xF6, 0x6B, 0x79, 0x70, 0xFD, 0xFF,
+ 0x86, 0x17, 0x18, 0x7B, 0xB9, 0xFF, 0xFD, 0xFF,
+ 0x5A, 0xE4, 0xDF, 0x3E, 0xDB, 0xD5, 0xD3, 0x5E,
+ 0x5B, 0x4F, 0x09, 0x02, 0x0D, 0xB0, 0x3E, 0xAB,
+ 0x1E, 0x03, 0x1D, 0xDA, 0x2F, 0xBE, 0x03, 0xD1,
+ 0x79, 0x21, 0x70, 0xA0, 0xF3, 0x00, 0x9C, 0xEE
+};
+
+static const uint8_t T15_CTR[] = { 0 };
+static const uint8_t A15_CTR[] = { 0 };
+#define A15_CTR_len 0
+
+#define bit_vector(N) \
+ {K##N, (KBITS(K##N)), IV##N, sizeof(IV##N), A##N, A##N##_len, \
+ P##N, P##N##_len, C##N, T##N, sizeof(T##N)}
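+/*
+ * Illustrative expansion (added for clarity): bit_vector(1_CTR_BIT) becomes
+ *     {K1_CTR_BIT, (KBITS(K1_CTR_BIT)), IV1_CTR_BIT, sizeof(IV1_CTR_BIT),
+ *      A1_CTR_BIT, A1_CTR_BIT_len, P1_CTR_BIT, P1_CTR_BIT_len,
+ *      C1_CTR_BIT, T1_CTR_BIT, sizeof(T1_CTR_BIT)}
+ * i.e. a struct gcm_ctr_vector initializer in the field order noted further
+ * below; KBITS() is defined elsewhere in the test sources.
+ */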
+
+static const uint8_t K1_CTR_BIT[] = {
+ 0xd3, 0xc5, 0xd5, 0x92, 0x32, 0x7f, 0xb1, 0x1c,
+ 0x40, 0x35, 0xc6, 0x68, 0x0a, 0xf8, 0xc6, 0xd1
+};
+static const uint8_t IV1_CTR_BIT[] = {
+ 0x39, 0x8a, 0x59, 0xb4, 0xac, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+/* Last 3 bits (not to be ciphered) set to 1, since
+ * the output buffer is set to all 0xff */
+static const uint8_t P1_CTR_BIT[] = {
+ 0x98, 0x1b, 0xa6, 0x82, 0x4c, 0x1b, 0xfb, 0x1a,
+ 0xb4, 0x85, 0x47, 0x20, 0x29, 0xb7, 0x1d, 0x80,
+ 0x8c, 0xe3, 0x3e, 0x2c, 0xc3, 0xc0, 0xb5, 0xfc,
+ 0x1f, 0x3d, 0xe8, 0xa6, 0xdc, 0x66, 0xb1, 0xf7
+};
+
+static const uint8_t C1_CTR_BIT[] = {
+ 0xe9, 0xfe, 0xd8, 0xa6, 0x3d, 0x15, 0x53, 0x04,
+ 0xd7, 0x1d, 0xf2, 0x0b, 0xf3, 0xe8, 0x22, 0x14,
+ 0xb2, 0x0e, 0xd7, 0xda, 0xd2, 0xf2, 0x33, 0xdc,
+ 0x3c, 0x22, 0xd7, 0xbd, 0xee, 0xed, 0x8e, 0x7f
+};
+
+static const uint8_t T1_CTR_BIT[] = { 0 };
+static const uint8_t A1_CTR_BIT[] = { 0 };
+#define A1_CTR_BIT_len 0
+#define P1_CTR_BIT_len 253
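+/*
+ * Note (added for clarity): Plen for the *_CTR_BIT vectors is expressed in
+ * bits. 253 bits covers the 32-byte P1_CTR_BIT buffer except its last
+ * 3 bits, which is why those bits are pre-set to 1 above to match the
+ * 0xff-filled output buffer used by the test.
+ */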
+
+static const uint8_t K2_CTR_BIT[] = {
+ 0x2b, 0xd6, 0x45, 0x9f, 0x82, 0xc4, 0x40, 0xe0,
+ 0x95, 0x2c, 0x49, 0x10, 0x48, 0x05, 0xff, 0x48
+};
+
+static const uint8_t IV2_CTR_BIT[] = {
+ 0xc6, 0x75, 0xa6, 0x4b, 0x64, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+/* Last 2 bits (not to be ciphered) set to 1, since
+ * the output buffer is set to all 0xff */
+static const uint8_t P2_CTR_BIT[] = {
+ 0x7e, 0xc6, 0x12, 0x72, 0x74, 0x3b, 0xf1, 0x61,
+ 0x47, 0x26, 0x44, 0x6a, 0x6c, 0x38, 0xce, 0xd1,
+ 0x66, 0xf6, 0xca, 0x76, 0xeb, 0x54, 0x30, 0x04,
+ 0x42, 0x86, 0x34, 0x6c, 0xef, 0x13, 0x0f, 0x92,
+ 0x92, 0x2b, 0x03, 0x45, 0x0d, 0x3a, 0x99, 0x75,
+ 0xe5, 0xbd, 0x2e, 0xa0, 0xeb, 0x55, 0xad, 0x8e,
+ 0x1b, 0x19, 0x9e, 0x3e, 0xc4, 0x31, 0x60, 0x20,
+ 0xe9, 0xa1, 0xb2, 0x85, 0xe7, 0x62, 0x79, 0x53,
+ 0x59, 0xb7, 0xbd, 0xfd, 0x39, 0xbe, 0xf4, 0xb2,
+ 0x48, 0x45, 0x83, 0xd5, 0xaf, 0xe0, 0x82, 0xae,
+ 0xe6, 0x38, 0xbf, 0x5f, 0xd5, 0xa6, 0x06, 0x19,
+ 0x39, 0x01, 0xa0, 0x8f, 0x4a, 0xb4, 0x1a, 0xab,
+ 0x9b, 0x13, 0x48, 0x83
+};
+
+static const uint8_t C2_CTR_BIT[] = {
+ 0x59, 0x61, 0x60, 0x53, 0x53, 0xc6, 0x4b, 0xdc,
+ 0xa1, 0x5b, 0x19, 0x5e, 0x28, 0x85, 0x53, 0xa9,
+ 0x10, 0x63, 0x25, 0x06, 0xd6, 0x20, 0x0a, 0xa7,
+ 0x90, 0xc4, 0xc8, 0x06, 0xc9, 0x99, 0x04, 0xcf,
+ 0x24, 0x45, 0xcc, 0x50, 0xbb, 0x1c, 0xf1, 0x68,
+ 0xa4, 0x96, 0x73, 0x73, 0x4e, 0x08, 0x1b, 0x57,
+ 0xe3, 0x24, 0xce, 0x52, 0x59, 0xc0, 0xe7, 0x8d,
+ 0x4c, 0xd9, 0x7b, 0x87, 0x09, 0x76, 0x50, 0x3c,
+ 0x09, 0x43, 0xf2, 0xcb, 0x5a, 0xe8, 0xf0, 0x52,
+ 0xc7, 0xb7, 0xd3, 0x92, 0x23, 0x95, 0x87, 0xb8,
+ 0x95, 0x60, 0x86, 0xbc, 0xab, 0x18, 0x83, 0x60,
+ 0x42, 0xe2, 0xe6, 0xce, 0x42, 0x43, 0x2a, 0x17,
+ 0x10, 0x5c, 0x53, 0xd3
+};
+
+static const uint8_t T2_CTR_BIT[] = { 0 };
+static const uint8_t A2_CTR_BIT[] = { 0 };
+#define A2_CTR_BIT_len 0
+#define P2_CTR_BIT_len 798
+
+static const uint8_t K3_CTR_BIT[] = {
+ 0x0a, 0x8b, 0x6b, 0xd8, 0xd9, 0xb0, 0x8b, 0x08,
+ 0xd6, 0x4e, 0x32, 0xd1, 0x81, 0x77, 0x77, 0xfb
+};
+
+static const uint8_t IV3_CTR_BIT[] = {
+ 0x54, 0x4d, 0x49, 0xcd, 0x20, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+/* Last 2 bits (not to be ciphered) set to 1, since
+ * the output buffer is set to all 0xff */
+static const uint8_t P3_CTR_BIT[] = {
+ 0xfd, 0x40, 0xa4, 0x1d, 0x37, 0x0a, 0x1f, 0x65,
+ 0x74, 0x50, 0x95, 0x68, 0x7d, 0x47, 0xba, 0x1d,
+ 0x36, 0xd2, 0x34, 0x9e, 0x23, 0xf6, 0x44, 0x39,
+ 0x2c, 0x8e, 0xa9, 0xc4, 0x9d, 0x40, 0xc1, 0x32,
+ 0x71, 0xaf, 0xf2, 0x64, 0xd0, 0xf2, 0x4b
+};
+
+static const uint8_t C3_CTR_BIT[] = {
+ 0x75, 0x75, 0x0d, 0x37, 0xb4, 0xbb, 0xa2, 0xa4,
+ 0xde, 0xdb, 0x34, 0x23, 0x5b, 0xd6, 0x8c, 0x66,
+ 0x45, 0xac, 0xda, 0xac, 0xa4, 0x81, 0x38, 0xa3,
+ 0xb0, 0xc4, 0x71, 0xe2, 0xa7, 0x04, 0x1a, 0x57,
+ 0x64, 0x23, 0xd2, 0x92, 0x72, 0x87, 0xf3
+};
+
+static const uint8_t T3_CTR_BIT[] = { 0 };
+static const uint8_t A3_CTR_BIT[] = { 0 };
+#define A3_CTR_BIT_len 0
+#define P3_CTR_BIT_len 310
+
+static const uint8_t K4_CTR_BIT[] = {
+ 0xaa, 0x1f, 0x95, 0xae, 0xa5, 0x33, 0xbc, 0xb3,
+ 0x2e, 0xb6, 0x3b, 0xf5, 0x2d, 0x8f, 0x83, 0x1a
+};
+
+static const uint8_t IV4_CTR_BIT[] = {
+ 0x72, 0xd8, 0xc6, 0x71, 0x84, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+/* Last 2 bits (not to be ciphered) set to 1, since
+ * the output buffer is set to all 0xff */
+static const uint8_t P4_CTR_BIT[] = {
+ 0xfb, 0x1b, 0x96, 0xc5, 0xc8, 0xba, 0xdf, 0xb2,
+ 0xe8, 0xe8, 0xed, 0xfd, 0xe7, 0x8e, 0x57, 0xf2,
+ 0xad, 0x81, 0xe7, 0x41, 0x03, 0xfc, 0x43, 0x0a,
+ 0x53, 0x4d, 0xcc, 0x37, 0xaf, 0xce, 0xc7, 0x0e,
+ 0x15, 0x17, 0xbb, 0x06, 0xf2, 0x72, 0x19, 0xda,
+ 0xe4, 0x90, 0x22, 0xdd, 0xc4, 0x7a, 0x06, 0x8d,
+ 0xe4, 0xc9, 0x49, 0x6a, 0x95, 0x1a, 0x6b, 0x09,
+ 0xed, 0xbd, 0xc8, 0x64, 0xc7, 0xad, 0xbd, 0x74,
+ 0x0a, 0xc5, 0x0c, 0x02, 0x2f, 0x30, 0x82, 0xba,
+ 0xfd, 0x22, 0xd7, 0x81, 0x97, 0xc5, 0xd5, 0x08,
+ 0xb9, 0x77, 0xbc, 0xa1, 0x3f, 0x32, 0xe6, 0x52,
+ 0xe7, 0x4b, 0xa7, 0x28, 0x57, 0x60, 0x77, 0xce,
+ 0x62, 0x8c, 0x53, 0x5e, 0x87, 0xdc, 0x60, 0x77,
+ 0xba, 0x07, 0xd2, 0x90, 0x68, 0x59, 0x0c, 0x8c,
+ 0xb5, 0xf1, 0x08, 0x8e, 0x08, 0x2c, 0xfa, 0x0e,
+ 0xc9, 0x61, 0x30, 0x2d, 0x69, 0xcf, 0x3d, 0x47
+};
+
+static const uint8_t C4_CTR_BIT[] = {
+ 0xdf, 0xb4, 0x40, 0xac, 0xb3, 0x77, 0x35, 0x49,
+ 0xef, 0xc0, 0x46, 0x28, 0xae, 0xb8, 0xd8, 0x15,
+ 0x62, 0x75, 0x23, 0x0b, 0xdc, 0x69, 0x0d, 0x94,
+ 0xb0, 0x0d, 0x8d, 0x95, 0xf2, 0x8c, 0x4b, 0x56,
+ 0x30, 0x7f, 0x60, 0xf4, 0xca, 0x55, 0xeb, 0xa6,
+ 0x61, 0xeb, 0xba, 0x72, 0xac, 0x80, 0x8f, 0xa8,
+ 0xc4, 0x9e, 0x26, 0x78, 0x8e, 0xd0, 0x4a, 0x5d,
+ 0x60, 0x6c, 0xb4, 0x18, 0xde, 0x74, 0x87, 0x8b,
+ 0x9a, 0x22, 0xf8, 0xef, 0x29, 0x59, 0x0b, 0xc4,
+ 0xeb, 0x57, 0xc9, 0xfa, 0xf7, 0xc4, 0x15, 0x24,
+ 0xa8, 0x85, 0xb8, 0x97, 0x9c, 0x42, 0x3f, 0x2f,
+ 0x8f, 0x8e, 0x05, 0x92, 0xa9, 0x87, 0x92, 0x01,
+ 0xbe, 0x7f, 0xf9, 0x77, 0x7a, 0x16, 0x2a, 0xb8,
+ 0x10, 0xfe, 0xb3, 0x24, 0xba, 0x74, 0xc4, 0xc1,
+ 0x56, 0xe0, 0x4d, 0x39, 0x09, 0x72, 0x09, 0x65,
+ 0x3a, 0xc3, 0x3e, 0x5a, 0x5f, 0x2d, 0x88, 0x67
+};
+
+static const uint8_t T4_CTR_BIT[] = { 0 };
+static const uint8_t A4_CTR_BIT[] = { 0 };
+#define A4_CTR_BIT_len 0
+#define P4_CTR_BIT_len 1022
+
+static const uint8_t K5_CTR_BIT[] = {
+ 0x96, 0x18, 0xae, 0x46, 0x89, 0x1f, 0x86, 0x57,
+ 0x8e, 0xeb, 0xe9, 0x0e, 0xf7, 0xa1, 0x20, 0x2e
+};
+
+static const uint8_t IV5_CTR_BIT[] = {
+ 0xc6, 0x75, 0xa6, 0x4b, 0x64, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+/* Last 3 bits (not to be ciphered) set to 1, since
+ * the output buffer is set to all 0xff */
+static const uint8_t P5_CTR_BIT[] = {
+ 0x8d, 0xaa, 0x17, 0xb1, 0xae, 0x05, 0x05, 0x29,
+ 0xc6, 0x82, 0x7f, 0x28, 0xc0, 0xef, 0x6a, 0x12,
+ 0x42, 0xe9, 0x3f, 0x8b, 0x31, 0x4f, 0xb1, 0x8a,
+ 0x77, 0xf7, 0x90, 0xae, 0x04, 0x9f, 0xed, 0xd6,
+ 0x12, 0x26, 0x7f, 0xec, 0xae, 0xfc, 0x45, 0x01,
+ 0x74, 0xd7, 0x6d, 0x9f, 0x9a, 0xa7, 0x75, 0x5a,
+ 0x30, 0xcd, 0x90, 0xa9, 0xa5, 0x87, 0x4b, 0xf4,
+ 0x8e, 0xaf, 0x70, 0xee, 0xa3, 0xa6, 0x2a, 0x25,
+ 0x0a, 0x8b, 0x6b, 0xd8, 0xd9, 0xb0, 0x8b, 0x08,
+ 0xd6, 0x4e, 0x32, 0xd1, 0x81, 0x77, 0x77, 0xfb,
+ 0x54, 0x4d, 0x49, 0xcd, 0x49, 0x72, 0x0e, 0x21,
+ 0x9d, 0xbf, 0x8b, 0xbe, 0xd3, 0x39, 0x04, 0xe1,
+ 0xfd, 0x40, 0xa4, 0x1d, 0x37, 0x0a, 0x1f, 0x65,
+ 0x74, 0x50, 0x95, 0x68, 0x7d, 0x47, 0xba, 0x1d,
+ 0x36, 0xd2, 0x34, 0x9e, 0x23, 0xf6, 0x44, 0x39,
+ 0x2c, 0x8e, 0xa9, 0xc4, 0x9d, 0x40, 0xc1, 0x32,
+ 0x71, 0xaf, 0xf2, 0x64, 0xd0, 0xf2, 0x48, 0x41,
+ 0xd6, 0x46, 0x5f, 0x09, 0x96, 0xff, 0x84, 0xe6,
+ 0x5f, 0xc5, 0x17, 0xc5, 0x3e, 0xfc, 0x33, 0x63,
+ 0xc3, 0x84, 0x92, 0xaf
+};
+
+static const uint8_t C5_CTR_BIT[] = {
+ 0x91, 0x9c, 0x8c, 0x33, 0xd6, 0x67, 0x89, 0x70,
+ 0x3d, 0x05, 0xa0, 0xd7, 0xce, 0x82, 0xa2, 0xae,
+ 0xac, 0x4e, 0xe7, 0x6c, 0x0f, 0x4d, 0xa0, 0x50,
+ 0x33, 0x5e, 0x8a, 0x84, 0xe7, 0x89, 0x7b, 0xa5,
+ 0xdf, 0x2f, 0x36, 0xbd, 0x51, 0x3e, 0x3d, 0x0c,
+ 0x85, 0x78, 0xc7, 0xa0, 0xfc, 0xf0, 0x43, 0xe0,
+ 0x3a, 0xa3, 0xa3, 0x9f, 0xba, 0xad, 0x7d, 0x15,
+ 0xbe, 0x07, 0x4f, 0xaa, 0x5d, 0x90, 0x29, 0xf7,
+ 0x1f, 0xb4, 0x57, 0xb6, 0x47, 0x83, 0x47, 0x14,
+ 0xb0, 0xe1, 0x8f, 0x11, 0x7f, 0xca, 0x10, 0x67,
+ 0x79, 0x45, 0x09, 0x6c, 0x8c, 0x5f, 0x32, 0x6b,
+ 0xa8, 0xd6, 0x09, 0x5e, 0xb2, 0x9c, 0x3e, 0x36,
+ 0xcf, 0x24, 0x5d, 0x16, 0x22, 0xaa, 0xfe, 0x92,
+ 0x1f, 0x75, 0x66, 0xc4, 0xf5, 0xd6, 0x44, 0xf2,
+ 0xf1, 0xfc, 0x0e, 0xc6, 0x84, 0xdd, 0xb2, 0x13,
+ 0x49, 0x74, 0x76, 0x22, 0xe2, 0x09, 0x29, 0x5d,
+ 0x27, 0xff, 0x3f, 0x95, 0x62, 0x33, 0x71, 0xd4,
+ 0x9b, 0x14, 0x7c, 0x0a, 0xf4, 0x86, 0x17, 0x1f,
+ 0x22, 0xcd, 0x04, 0xb1, 0xcb, 0xeb, 0x26, 0x58,
+ 0x22, 0x3e, 0x69, 0x3f
+};
+
+static const uint8_t T5_CTR_BIT[] = { 0 };
+static const uint8_t A5_CTR_BIT[] = { 0 };
+#define A5_CTR_BIT_len 0
+#define P5_CTR_BIT_len 1245
+
+static const uint8_t K6_CTR_BIT[] = {
+ 0x54, 0xf4, 0xe2, 0xe0, 0x4c, 0x83, 0x78, 0x6e,
+ 0xec, 0x8f, 0xb5, 0xab, 0xe8, 0xe3, 0x65, 0x66
+};
+static const uint8_t IV6_CTR_BIT[] = {
+ 0xac, 0xa4, 0xf5, 0x0f, 0x58, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+/* Last 3 bits (not to be ciphered) set to 1, since
+ * the output buffer is set to all 0xff */
+static const uint8_t P6_CTR_BIT[] = {
+ 0x40, 0x98, 0x1b, 0xa6, 0x82, 0x4c, 0x1b, 0xfb,
+ 0x42, 0x86, 0xb2, 0x99, 0x78, 0x3d, 0xaf, 0x44,
+ 0x2c, 0x09, 0x9f, 0x7a, 0xb0, 0xf5, 0x8d, 0x5c,
+ 0x8e, 0x46, 0xb1, 0x04, 0xf0, 0x8f, 0x01, 0xb4,
+ 0x1a, 0xb4, 0x85, 0x47, 0x20, 0x29, 0xb7, 0x1d,
+ 0x36, 0xbd, 0x1a, 0x3d, 0x90, 0xdc, 0x3a, 0x41,
+ 0xb4, 0x6d, 0x51, 0x67, 0x2a, 0xc4, 0xc9, 0x66,
+ 0x3a, 0x2b, 0xe0, 0x63, 0xda, 0x4b, 0xc8, 0xd2,
+ 0x80, 0x8c, 0xe3, 0x3e, 0x2c, 0xcc, 0xbf, 0xc6,
+ 0x34, 0xe1, 0xb2, 0x59, 0x06, 0x08, 0x76, 0xa0,
+ 0xfb, 0xb5, 0xa4, 0x37, 0xeb, 0xcc, 0x8d, 0x31,
+ 0xc1, 0x9e, 0x44, 0x54, 0x31, 0x87, 0x45, 0xe3,
+ 0xfa, 0x16, 0xbb, 0x11, 0xad, 0xae, 0x24, 0x88,
+ 0x79, 0xfe, 0x52, 0xdb, 0x25, 0x43, 0xe5, 0x3c,
+ 0xf4, 0x45, 0xd3, 0xd8, 0x28, 0xce, 0x0b, 0xf5,
+ 0xc5, 0x60, 0x59, 0x3d, 0x97, 0x27, 0x8a, 0x59,
+ 0x76, 0x2d, 0xd0, 0xc2, 0xc9, 0xcd, 0x68, 0xd4,
+ 0x49, 0x6a, 0x79, 0x25, 0x08, 0x61, 0x40, 0x14,
+ 0xb1, 0x3b, 0x6a, 0xa5, 0x11, 0x28, 0xc1, 0x8c,
+ 0xd6, 0xa9, 0x0b, 0x87, 0x97, 0x8c, 0x2f, 0xf1,
+ 0xca, 0xbe, 0x7d, 0x9f, 0x89, 0x8a, 0x41, 0x1b,
+ 0xfd, 0xb8, 0x4f, 0x68, 0xf6, 0x72, 0x7b, 0x14,
+ 0x99, 0xcd, 0xd3, 0x0d, 0xf0, 0x44, 0x3a, 0xb4,
+ 0xa6, 0x66, 0x53, 0x33, 0x0b, 0xcb, 0xa1, 0x10,
+ 0x5e, 0x4c, 0xec, 0x03, 0x4c, 0x73, 0xe6, 0x05,
+ 0xb4, 0x31, 0x0e, 0xaa, 0xad, 0xcf, 0xd5, 0xb0,
+ 0xca, 0x27, 0xff, 0xd8, 0x9d, 0x14, 0x4d, 0xf4,
+ 0x79, 0x27, 0x59, 0x42, 0x7c, 0x9c, 0xc1, 0xf8,
+ 0xcd, 0x8c, 0x87, 0x20, 0x23, 0x64, 0xb8, 0xa6,
+ 0x87, 0x95, 0x4c, 0xb0, 0x5a, 0x8d, 0x4e, 0x2d,
+ 0x99, 0xe7, 0x3d, 0xb1, 0x60, 0xde, 0xb1, 0x80,
+ 0xad, 0x08, 0x41, 0xe9, 0x67, 0x41, 0xa5, 0xd5,
+ 0x9f, 0xe4, 0x18, 0x9f, 0x15, 0x42, 0x00, 0x26,
+ 0xfe, 0x4c, 0xd1, 0x21, 0x04, 0x93, 0x2f, 0xb3,
+ 0x8f, 0x73, 0x53, 0x40, 0x43, 0x8a, 0xaf, 0x7e,
+ 0xca, 0x6f, 0xd5, 0xcf, 0xd3, 0xa1, 0x95, 0xce,
+ 0x5a, 0xbe, 0x65, 0x27, 0x2a, 0xf6, 0x07, 0xad,
+ 0xa1, 0xbe, 0x65, 0xa6, 0xb4, 0xc9, 0xc0, 0x69,
+ 0x32, 0x34, 0x09, 0x2c, 0x4d, 0x01, 0x8f, 0x17,
+ 0x56, 0xc6, 0xdb, 0x9d, 0xc8, 0xa6, 0xd8, 0x0b,
+ 0x88, 0x81, 0x38, 0x61, 0x6b, 0x68, 0x12, 0x62,
+ 0xf9, 0x54, 0xd0, 0xe7, 0x71, 0x17, 0x48, 0x78,
+ 0x0d, 0x92, 0x29, 0x1d, 0x86, 0x29, 0x99, 0x72,
+ 0xdb, 0x74, 0x1c, 0xfa, 0x4f, 0x37, 0xb8, 0xb5,
+ 0x6c, 0xdb, 0x18, 0xa7, 0xca, 0x82, 0x18, 0xe8,
+ 0x6e, 0x4b, 0x4b, 0x71, 0x6a, 0x4d, 0x04, 0x37,
+ 0x1f, 0xbe, 0xc2, 0x62, 0xfc, 0x5a, 0xd0, 0xb3,
+ 0x81, 0x9b, 0x18, 0x7b, 0x97, 0xe5, 0x5b, 0x1a,
+ 0x4d, 0x7c, 0x19, 0xee, 0x24, 0xc8, 0xb4, 0xd7,
+ 0x72, 0x3c, 0xfe, 0xdf, 0x04, 0x5b, 0x8a, 0xca,
+ 0xe4, 0x86, 0x95, 0x17, 0xd8, 0x0e, 0x50, 0x61,
+ 0x5d, 0x90, 0x35, 0xd5, 0xd9, 0xc5, 0xa4, 0x0a,
+ 0xf6, 0x02, 0x28, 0x0b, 0x54, 0x25, 0x97, 0xb0,
+ 0xcb, 0x18, 0x61, 0x9e, 0xeb, 0x35, 0x92, 0x57,
+ 0x59, 0xd1, 0x95, 0xe1, 0x00, 0xe8, 0xe4, 0xaa,
+ 0x0c, 0x38, 0xa3, 0xc2, 0xab, 0xe0, 0xf3, 0xd8,
+ 0xff, 0x04, 0xf3, 0xc3, 0x3c, 0x29, 0x50, 0x69,
+ 0xc2, 0x36, 0x94, 0xb5, 0xbb, 0xea, 0xcd, 0xd5,
+ 0x42, 0xe2, 0x8e, 0x8a, 0x94, 0xed, 0xb9, 0x11,
+ 0x9f, 0x41, 0x2d, 0x05, 0x4b, 0xe1, 0xfa, 0x72,
+ 0x00, 0xb0, 0x97
+};
+
+static const uint8_t C6_CTR_BIT[] = {
+ 0x5c, 0xb7, 0x2c, 0x6e, 0xdc, 0x87, 0x8f, 0x15,
+ 0x66, 0xe1, 0x02, 0x53, 0xaf, 0xc3, 0x64, 0xc9,
+ 0xfa, 0x54, 0x0d, 0x91, 0x4d, 0xb9, 0x4c, 0xbe,
+ 0xe2, 0x75, 0xd0, 0x91, 0x7c, 0xa6, 0xaf, 0x0d,
+ 0x77, 0xac, 0xb4, 0xef, 0x3b, 0xbe, 0x1a, 0x72,
+ 0x2b, 0x2e, 0xf5, 0xbd, 0x1d, 0x4b, 0x8e, 0x2a,
+ 0xa5, 0x02, 0x4e, 0xc1, 0x38, 0x8a, 0x20, 0x1e,
+ 0x7b, 0xce, 0x79, 0x20, 0xae, 0xc6, 0x15, 0x89,
+ 0x5f, 0x76, 0x3a, 0x55, 0x64, 0xdc, 0xc4, 0xc4,
+ 0x82, 0xa2, 0xee, 0x1d, 0x8b, 0xfe, 0xcc, 0x44,
+ 0x98, 0xec, 0xa8, 0x3f, 0xbb, 0x75, 0xf9, 0xab,
+ 0x53, 0x0e, 0x0d, 0xaf, 0xbe, 0xde, 0x2f, 0xa5,
+ 0x89, 0x5b, 0x82, 0x99, 0x1b, 0x62, 0x77, 0xc5,
+ 0x29, 0xe0, 0xf2, 0x52, 0x9d, 0x7f, 0x79, 0x60,
+ 0x6b, 0xe9, 0x67, 0x06, 0x29, 0x6d, 0xed, 0xfa,
+ 0x9d, 0x74, 0x12, 0xb6, 0x16, 0x95, 0x8c, 0xb5,
+ 0x63, 0xc6, 0x78, 0xc0, 0x28, 0x25, 0xc3, 0x0d,
+ 0x0a, 0xee, 0x77, 0xc4, 0xc1, 0x46, 0xd2, 0x76,
+ 0x54, 0x12, 0x42, 0x1a, 0x80, 0x8d, 0x13, 0xce,
+ 0xc8, 0x19, 0x69, 0x4c, 0x75, 0xad, 0x57, 0x2e,
+ 0x9b, 0x97, 0x3d, 0x94, 0x8b, 0x81, 0xa9, 0x33,
+ 0x7c, 0x3b, 0x2a, 0x17, 0x19, 0x2e, 0x22, 0xc2,
+ 0x06, 0x9f, 0x7e, 0xd1, 0x16, 0x2a, 0xf4, 0x4c,
+ 0xde, 0xa8, 0x17, 0x60, 0x36, 0x65, 0xe8, 0x07,
+ 0xce, 0x40, 0xc8, 0xe0, 0xdd, 0x9d, 0x63, 0x94,
+ 0xdc, 0x6e, 0x31, 0x15, 0x3f, 0xe1, 0x95, 0x5c,
+ 0x47, 0xaf, 0xb5, 0x1f, 0x26, 0x17, 0xee, 0x0c,
+ 0x5e, 0x3b, 0x8e, 0xf1, 0xad, 0x75, 0x74, 0xed,
+ 0x34, 0x3e, 0xdc, 0x27, 0x43, 0xcc, 0x94, 0xc9,
+ 0x90, 0xe1, 0xf1, 0xfd, 0x26, 0x42, 0x53, 0xc1,
+ 0x78, 0xde, 0xa7, 0x39, 0xc0, 0xbe, 0xfe, 0xeb,
+ 0xcd, 0x9f, 0x9b, 0x76, 0xd4, 0x9c, 0x10, 0x15,
+ 0xc9, 0xfe, 0xcf, 0x50, 0xe5, 0x3b, 0x8b, 0x52,
+ 0x04, 0xdb, 0xcd, 0x3e, 0xed, 0x86, 0x38, 0x55,
+ 0xda, 0xbc, 0xdc, 0xc9, 0x4b, 0x31, 0xe3, 0x18,
+ 0x02, 0x15, 0x68, 0x85, 0x5c, 0x8b, 0x9e, 0x52,
+ 0xa9, 0x81, 0x95, 0x7a, 0x11, 0x28, 0x27, 0xf9,
+ 0x78, 0xba, 0x96, 0x0f, 0x14, 0x47, 0x91, 0x1b,
+ 0x31, 0x7b, 0x55, 0x11, 0xfb, 0xcc, 0x7f, 0xb1,
+ 0x3a, 0xc1, 0x53, 0xdb, 0x74, 0x25, 0x11, 0x17,
+ 0xe4, 0x86, 0x1e, 0xb9, 0xe8, 0x3b, 0xff, 0xff,
+ 0xc4, 0xeb, 0x77, 0x55, 0x57, 0x90, 0x38, 0xe5,
+ 0x79, 0x24, 0xb1, 0xf7, 0x8b, 0x3e, 0x1a, 0xd9,
+ 0x0b, 0xab, 0x2a, 0x07, 0x87, 0x1b, 0x72, 0xdb,
+ 0x5e, 0xef, 0x96, 0xc3, 0x34, 0x04, 0x49, 0x66,
+ 0xdb, 0x0c, 0x37, 0xca, 0xfd, 0x1a, 0x89, 0xe5,
+ 0x64, 0x6a, 0x35, 0x80, 0xeb, 0x64, 0x65, 0xf1,
+ 0x21, 0xdc, 0xe9, 0xcb, 0x88, 0xd8, 0x5b, 0x96,
+ 0xcf, 0x23, 0xcc, 0xcc, 0xd4, 0x28, 0x07, 0x67,
+ 0xbe, 0xe8, 0xee, 0xb2, 0x3d, 0x86, 0x52, 0x46,
+ 0x1d, 0xb6, 0x49, 0x31, 0x03, 0x00, 0x3b, 0xaf,
+ 0x89, 0xf5, 0xe1, 0x82, 0x61, 0xea, 0x43, 0xc8,
+ 0x4a, 0x92, 0xeb, 0xff, 0xff, 0xe4, 0x90, 0x9d,
+ 0xc4, 0x6c, 0x51, 0x92, 0xf8, 0x25, 0xf7, 0x70,
+ 0x60, 0x0b, 0x96, 0x02, 0xc5, 0x57, 0xb5, 0xf8,
+ 0xb4, 0x31, 0xa7, 0x9d, 0x45, 0x97, 0x7d, 0xd9,
+ 0xc4, 0x1b, 0x86, 0x3d, 0xa9, 0xe1, 0x42, 0xe9,
+ 0x00, 0x20, 0xcf, 0xd0, 0x74, 0xd6, 0x92, 0x7b,
+ 0x7a, 0xb3, 0xb6, 0x72, 0x5d, 0x1a, 0x6f, 0x3f,
+ 0x98, 0xb9, 0xc9, 0xda, 0xa8, 0x98, 0x2a, 0xff,
+ 0x06, 0x78, 0x2f
+};
+
+static const uint8_t T6_CTR_BIT[] = { 0 };
+static const uint8_t A6_CTR_BIT[] = { 0 };
+#define A6_CTR_BIT_len 0
+#define P6_CTR_BIT_len 3861
+
+static const struct gcm_ctr_vector ctr_vectors[] = {
+ /*
+ * field order {K, Klen, IV, IVlen, A, Alen, P, Plen, C, T, Tlen};
+ * original vector does not have a valid sub hash key
+ */
+ vector(1_CTR),
+ vector(2_CTR),
+ vector(3_CTR),
+ vector(4_CTR),
+ vector(5_CTR),
+ vector(6_CTR),
+ vector(7_CTR),
+ vector(8_CTR),
+ vector(9_CTR),
+ vector(10_CTR),
+ vector(11_CTR),
+ vector(12_CTR),
+ vector(13_CTR),
+ vector(14_CTR),
+ vector(15_CTR)
+};
+
+static const struct gcm_ctr_vector ctr_bit_vectors[] = {
+ bit_vector(1_CTR_BIT),
+ bit_vector(2_CTR_BIT),
+ bit_vector(3_CTR_BIT),
+ bit_vector(4_CTR_BIT),
+ bit_vector(5_CTR_BIT),
+ bit_vector(6_CTR_BIT)
+};
+
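+/*
+ * Descriptive note (added): test_ctr() submits a single CTR / CTR-BITLEN job
+ * into a destination buffer surrounded by 16-byte 0xff guard areas, then
+ * compares the ciphered output against the expected text and checks that
+ * neither guard area was overwritten.
+ */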
+static int
+test_ctr(struct MB_MGR *mb_mgr,
+ const void *expkey,
+ unsigned key_len,
+ const void *iv,
+ unsigned iv_len,
+ const uint8_t *in_text,
+ const uint8_t *out_text,
+ unsigned text_len,
+ int dir,
+ int order,
+ const JOB_CIPHER_MODE alg)
+{
+ uint32_t text_byte_len;
+ struct JOB_AES_HMAC *job;
+ uint8_t padding[16];
+ uint8_t *target;
+ int ret = -1;
+
+ /* Get number of bytes (in case algo is CNTR_BITLEN) */
+ if (alg == CNTR)
+ text_byte_len = text_len;
+ else
+ text_byte_len = BYTE_ROUND_UP(text_len);
+
+ target = malloc(text_byte_len + (sizeof(padding) * 2));
+ if (target == NULL) {
+ fprintf(stderr, "Can't allocate buffer memory\n");
+ goto end;
+ }
+
+ memset(target, -1, text_byte_len + (sizeof(padding) * 2));
+ memset(padding, -1, sizeof(padding));
+
+ while ((job = IMB_FLUSH_JOB(mb_mgr)) != NULL)
+ ;
+
+ job = IMB_GET_NEXT_JOB(mb_mgr);
+ job->cipher_direction = dir;
+ job->chain_order = order;
+ job->dst = target + 16;
+ job->src = in_text;
+ job->cipher_mode = alg;
+ job->aes_enc_key_expanded = expkey;
+ job->aes_dec_key_expanded = expkey;
+ job->aes_key_len_in_bytes = key_len;
+ job->iv = iv;
+ job->iv_len_in_bytes = iv_len;
+ job->cipher_start_src_offset_in_bytes = 0;
+ if (alg == CNTR)
+ job->msg_len_to_cipher_in_bytes = text_byte_len;
+ else
+ job->msg_len_to_cipher_in_bits = text_len;
+
+ job->hash_alg = NULL_HASH;
+
+ job = IMB_SUBMIT_JOB(mb_mgr);
+ if (!job) {
+ printf("%d Unexpected null return from submit_job\n", __LINE__);
+ goto end;
+ }
+ if (job->status != STS_COMPLETED) {
+ printf("%d Error status:%d", __LINE__, job->status);
+ goto end;
+ }
+ job = IMB_FLUSH_JOB(mb_mgr);
+ if (job) {
+ printf("%u Unexpected return from flush_job\n", __LINE__);
+ goto end;
+ }
+
+ if (memcmp(out_text, target + 16, text_byte_len)) {
+ printf("mismatched\n");
+ hexdump(stderr, "Target", target, text_byte_len + 32);
+ hexdump(stderr, "Expected", out_text, text_byte_len);
+ goto end;
+ }
+ if (memcmp(padding, target, sizeof(padding))) {
+ printf("overwrite head\n");
+ hexdump(stderr, "Target", target, text_byte_len + 32);
+ goto end;
+ }
+ if (memcmp(padding, target + sizeof(padding) + text_byte_len,
+ sizeof(padding))) {
+ printf("overwrite tail\n");
+ hexdump(stderr, "Target", target, text_byte_len + 32);
+ goto end;
+ }
+ ret = 0;
+ while ((job = IMB_FLUSH_JOB(mb_mgr)) != NULL)
+ ;
+ end:
+ if (target != NULL)
+ free(target);
+ return ret;
+}
+
+static int
+test_ctr_vectors(struct MB_MGR *mb_mgr, const struct gcm_ctr_vector *vectors,
+ const uint32_t vectors_cnt, const JOB_CIPHER_MODE alg)
+{
+ uint32_t vect;
+ int errors = 0;
+ DECLARE_ALIGNED(uint32_t expkey[4*15], 16);
+ DECLARE_ALIGNED(uint32_t dust[4*15], 16);
+
+ printf("AES-CTR standard test vectors:\n");
+ for (vect = 0; vect < vectors_cnt; vect++) {
+#ifdef DEBUG
+ if (alg == CNTR)
+ printf("Standard vector %d/%d Keylen:%d IVlen:%d PTLen:%d ",
+ vect, vectors_cnt - 1,
+ (int) vectors[vect].Klen,
+ (int) vectors[vect].IVlen,
+ (int) vectors[vect].Plen);
+ else
+ printf("Bit vector %d/%d Keylen:%d IVlen:%d PTLen:%d ",
+ vect, vectors_cnt - 1,
+ (int) vectors[vect].Klen,
+ (int) vectors[vect].IVlen,
+ (int) vectors[vect].Plen);
+#else
+ printf(".");
+#endif
+
+ switch (vectors[vect].Klen) {
+ case BITS_128:
+ IMB_AES_KEYEXP_128(mb_mgr, vectors[vect].K,
+ expkey, dust);
+ break;
+ case BITS_192:
+ IMB_AES_KEYEXP_192(mb_mgr, vectors[vect].K,
+ expkey, dust);
+ break;
+ case BITS_256:
+ IMB_AES_KEYEXP_256(mb_mgr, vectors[vect].K,
+ expkey, dust);
+ break;
+ default:
+ return -1;
+ }
+
+ if (test_ctr(mb_mgr,
+ expkey, vectors[vect].Klen,
+ vectors[vect].IV,
+ (unsigned) vectors[vect].IVlen,
+ vectors[vect].P, vectors[vect].C,
+ (unsigned) vectors[vect].Plen,
+ ENCRYPT, CIPHER_HASH, alg)) {
+ printf("error #%d encrypt\n", vect + 1);
+ errors++;
+ }
+
+ if (test_ctr(mb_mgr,
+ expkey, vectors[vect].Klen,
+ vectors[vect].IV,
+ (unsigned) vectors[vect].IVlen,
+ vectors[vect].C, vectors[vect].P,
+ (unsigned) vectors[vect].Plen,
+ DECRYPT, HASH_CIPHER, alg)) {
+ printf("error #%d decrypt\n", vect + 1);
+ errors++;
+ }
+
+ if (vectors[vect].IVlen == 12) {
+ /* The 12-byte IV in the table does not include
+ * the block counter. Encrypt & decrypt the same
+ * data again with a 16-byte IV that carries the
+ * initial block counter as well.
+ */
+ const unsigned new_iv_len = 16;
+ const unsigned orig_iv_len = 12;
+ uint8_t local_iv[16];
+
+ memcpy(local_iv, vectors[vect].IV, orig_iv_len);
+ /* initial 32-bit block counter of 1, stored big-endian
+ * (reads as 0x01000000 if interpreted as little-endian) */
+ local_iv[12] = 0x00;
+ local_iv[13] = 0x00;
+ local_iv[14] = 0x00;
+ local_iv[15] = 0x01;
+
+ if (test_ctr(mb_mgr,
+ expkey, vectors[vect].Klen,
+ local_iv, new_iv_len,
+ vectors[vect].P, vectors[vect].C,
+ (unsigned) vectors[vect].Plen,
+ ENCRYPT, CIPHER_HASH, alg)) {
+ printf("error #%d encrypt\n", vect + 1);
+ errors++;
+ }
+
+ if (test_ctr(mb_mgr,
+ expkey, vectors[vect].Klen,
+ local_iv, new_iv_len,
+ vectors[vect].C, vectors[vect].P,
+ (unsigned) vectors[vect].Plen,
+ DECRYPT, HASH_CIPHER, alg)) {
+ printf("error #%d decrypt\n", vect + 1);
+ errors++;
+ }
+ }
+ }
+ printf("\n");
+ return errors;
+}
+
+int
+ctr_test(const enum arch_type arch,
+ struct MB_MGR *mb_mgr)
+{
+ int errors;
+
+ (void) arch; /* unused */
+
+ const uint32_t ctr_vec_cnt = DIM(ctr_vectors);
+ const uint32_t ctr_bit_vec_cnt = DIM(ctr_bit_vectors);
+
+ /* Standard CTR vectors */
+ errors = test_ctr_vectors(mb_mgr, ctr_vectors, ctr_vec_cnt, CNTR);
+
+ /* CTR_BITLEN vectors */
+ errors += test_ctr_vectors(mb_mgr, ctr_bit_vectors, ctr_bit_vec_cnt,
+ CNTR_BITLEN);
+
+ if (0 == errors)
+ printf("...Pass\n");
+ else
+ printf("...Fail\n");
+
+ return errors;
+}
diff --git a/src/spdk/intel-ipsec-mb/LibTestApp/customop_test.c b/src/spdk/intel-ipsec-mb/LibTestApp/customop_test.c
new file mode 100644
index 000000000..711d9bcf5
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/LibTestApp/customop_test.c
@@ -0,0 +1,311 @@
+/*****************************************************************************
+ Copyright (c) 2017-2018, Intel Corporation
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*****************************************************************************/
+
+#include <assert.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <intel-ipsec-mb.h>
+
+#include "customop_test.h"
+
+#define DIM(_a) (sizeof(_a) / sizeof(_a[0]))
+
+#ifdef DEBUG
+#ifdef _WIN32
+#define TRACE(fmt, ...) fprintf(stderr, "%s:%d "fmt, \
+ __FUNCTION__, __LINE__, __VA_ARGS__)
+#else
+#define TRACE(fmt, ...) fprintf(stderr, "%s:%d "fmt, \
+ __func__, __LINE__, __VA_ARGS__)
+#endif
+#else
+# define TRACE(fmt, ...)
+#endif
+
+struct cipher_attr_s {
+ const char *name;
+ JOB_CIPHER_MODE mode;
+ unsigned key_len;
+ unsigned iv_len;
+};
+
+struct auth_attr_s {
+ const char *name;
+ JOB_HASH_ALG hash;
+ unsigned tag_len;
+};
+
+struct test_vec_s {
+ uint8_t iv[16];
+ uint8_t txt[64];
+ uint8_t tag[32];
+ uint8_t verify[32];
+
+ DECLARE_ALIGNED(uint8_t enc_key[16*16], 64);
+ DECLARE_ALIGNED(uint8_t dec_key[16*16], 64);
+ uint8_t ipad[256];
+ uint8_t opad[256];
+ const struct cipher_attr_s *cipher;
+ const struct auth_attr_s *auth;
+
+ unsigned seq;
+};
+
+/*
+ * addon cipher function
+ */
+static int
+cipher_addon(struct JOB_AES_HMAC *job)
+{
+#ifdef DEBUG
+ struct test_vec_s *node = job->user_data;
+#endif
+
+ TRACE("Seq:%u Cipher Addon cipher:%s auth:%s\n",
+ node->seq, node->cipher->name, node->auth->name);
+
+ if (job->cipher_direction == ENCRYPT)
+ memset(job->dst, 1, job->msg_len_to_cipher_in_bytes);
+ else
+ memset(job->dst, 2, job->msg_len_to_cipher_in_bytes);
+
+ return 0; /* success */
+}
+
+/*
+ * addon hash function
+ */
+static int
+hash_addon(struct JOB_AES_HMAC *job)
+{
+#ifdef DEBUG
+ struct test_vec_s *node = job->user_data;
+#endif
+
+ TRACE("Seq:%u Auth Addon cipher:%s auth:%s\n",
+ node->seq, node->cipher->name, node->auth->name);
+
+ memset(job->auth_tag_output, 3, job->auth_tag_output_len_in_bytes);
+ return 0; /* success */
+}
+
+/*
+ * test hash functions
+ */
+static const struct auth_attr_s auth_attr_tab[] = {
+ { "SHA1", SHA1, 12 },
+ { "SHA224", SHA_224, 14 },
+ { "SHA256", SHA_256, 16 },
+ { "SHA384", SHA_384, 24 },
+ { "SHA512", SHA_512, 32 },
+ { "MD5", MD5, 12 },
+ { "CUSTOM_HASH", CUSTOM_HASH, 16 }
+};
+
+/*
+ * test cipher functions
+ */
+static const struct cipher_attr_s cipher_attr_tab[] = {
+ { "CBC128", CBC, 16, 16 },
+ { "CBC192", CBC, 24, 16 },
+ { "CBC256", CBC, 32, 16 },
+ { "CUSTOM_CIPHER", CUSTOM_CIPHER, 32, 12 },
+ { "CTR128", CNTR, 16, 12 },
+ { "CTR192", CNTR, 24, 12 },
+ { "CTR256", CNTR, 32, 12 }
+};
+
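+/*
+ * Note (added for clarity): every job submitted by customop_test() installs
+ * the addon callbacks above, but job_check() only verifies their output
+ * patterns (1 for encrypted data, 2 for decrypted data, 3 for the auth tag)
+ * when the job uses CUSTOM_CIPHER or CUSTOM_HASH.
+ */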
+static int
+job_check(const struct JOB_AES_HMAC *job)
+{
+#ifdef DEBUG
+ struct test_vec_s *done = job->user_data;
+#endif
+
+ TRACE("done Seq:%u Cipher:%s Auth:%s\n",
+ done->seq, done->cipher->name, done->auth->name);
+
+ if (job->status != STS_COMPLETED) {
+ TRACE("failed job status:%d\n", job->status);
+ return -1;
+ }
+ if (job->cipher_mode == CUSTOM_CIPHER) {
+ if (job->cipher_direction == ENCRYPT) {
+ unsigned i;
+
+ for (i = 0; i < job->msg_len_to_cipher_in_bytes; i++) {
+ if (job->dst[i] != 1) {
+ TRACE("NG add-on encryption %u\n", i);
+ return -1;
+ }
+ }
+ TRACE("Addon encryption passes Seq:%u\n", done->seq);
+ } else {
+ unsigned i;
+
+ for (i = 0; i < job->msg_len_to_cipher_in_bytes; i++) {
+ if (job->dst[i] != 2) {
+ TRACE("NG add-on decryption %u\n", i);
+ return -1;
+ }
+ }
+ TRACE("Addon decryption passes Seq:%u\n", done->seq);
+ }
+ }
+
+ if (job->hash_alg == CUSTOM_HASH) {
+ unsigned i;
+
+ for (i = 0; i < job->auth_tag_output_len_in_bytes; i++) {
+ if (job->auth_tag_output[i] != 3) {
+ TRACE("NG add-on hashing %u\n", i);
+ return -1;
+ }
+ }
+ TRACE("Addon hashing passes Seq:%u\n", done->seq);
+ }
+ return 0;
+}
+
+
+int
+customop_test(struct MB_MGR *mgr)
+{
+ struct test_vec_s test_tab[DIM(cipher_attr_tab) * DIM(auth_attr_tab)];
+ struct JOB_AES_HMAC *job;
+ unsigned i, j, seq;
+ int result = 0;
+
+ memset(test_tab, 0, sizeof(test_tab));
+ for (i = 0, seq = 0; i < DIM(cipher_attr_tab); i++) {
+ for (j = 0; j < DIM(auth_attr_tab); j++) {
+ assert(seq < DIM(test_tab));
+ test_tab[seq].seq = seq;
+ test_tab[seq].cipher = &cipher_attr_tab[i];
+ test_tab[seq].auth = &auth_attr_tab[j];
+ seq++;
+ }
+ }
+
+ /* encryption */
+ for (i = 0; i < seq; i++) {
+ struct test_vec_s *node = &test_tab[i];
+
+ while ((job = IMB_GET_NEXT_JOB(mgr)) == NULL) {
+ job = IMB_FLUSH_JOB(mgr);
+ result |= job_check(job);
+ }
+
+ job->cipher_func = cipher_addon;
+ job->hash_func = hash_addon;
+
+ job->aes_enc_key_expanded = node->enc_key;
+ job->aes_dec_key_expanded = node->dec_key;
+ job->aes_key_len_in_bytes = node->cipher->key_len;
+ job->src = node->txt;
+ job->dst = node->txt;
+ job->cipher_start_src_offset_in_bytes = 16;
+ job->msg_len_to_cipher_in_bytes = sizeof(node->txt);
+ job->hash_start_src_offset_in_bytes = 0;
+ job->msg_len_to_hash_in_bytes =
+ sizeof(node->txt) + sizeof(node->iv);
+ job->iv = node->iv;
+ job->iv_len_in_bytes = node->cipher->iv_len;
+ job->auth_tag_output = node->tag;
+ job->auth_tag_output_len_in_bytes = node->auth->tag_len;
+
+ job->u.HMAC._hashed_auth_key_xor_ipad = node->ipad;
+ job->u.HMAC._hashed_auth_key_xor_opad = node->opad;
+ job->cipher_mode = node->cipher->mode;
+ job->cipher_direction = ENCRYPT;
+ job->chain_order = CIPHER_HASH;
+ job->hash_alg = node->auth->hash;
+ job->user_data = node;
+
+ job = IMB_SUBMIT_JOB(mgr);
+ while (job) {
+ result |= job_check(job);
+ job = IMB_GET_COMPLETED_JOB(mgr);
+ }
+ }
+
+ while ((job = IMB_FLUSH_JOB(mgr)) != NULL)
+ result |= job_check(job);
+
+ /* decryption */
+ for (i = 0; i < seq; i++) {
+ struct test_vec_s *node = &test_tab[i];
+
+ while ((job = IMB_GET_NEXT_JOB(mgr)) == NULL) {
+ job = IMB_FLUSH_JOB(mgr);
+ result |= job_check(job);
+ }
+
+ job->cipher_func = cipher_addon;
+ job->hash_func = hash_addon;
+
+ job->aes_enc_key_expanded = node->enc_key;
+ job->aes_dec_key_expanded = node->dec_key;
+ job->aes_key_len_in_bytes = node->cipher->key_len;
+ job->src = node->txt;
+ job->dst = node->txt;
+ job->cipher_start_src_offset_in_bytes = 16;
+ job->msg_len_to_cipher_in_bytes = sizeof(node->txt);
+ job->hash_start_src_offset_in_bytes = 0;
+ job->msg_len_to_hash_in_bytes =
+ sizeof(node->txt) + sizeof(node->iv);
+ job->iv = node->iv;
+ job->iv_len_in_bytes = node->cipher->iv_len;
+ job->auth_tag_output = node->tag;
+ job->auth_tag_output_len_in_bytes = node->auth->tag_len;
+
+ job->u.HMAC._hashed_auth_key_xor_ipad = node->ipad;
+ job->u.HMAC._hashed_auth_key_xor_opad = node->opad;
+ job->cipher_mode = node->cipher->mode;
+ job->cipher_direction = DECRYPT;
+ job->chain_order = HASH_CIPHER;
+ job->hash_alg = node->auth->hash;
+ job->user_data = node;
+
+ job = IMB_SUBMIT_JOB(mgr);
+ while (job) {
+ result |= job_check(job);
+ job = IMB_GET_COMPLETED_JOB(mgr);
+ }
+ }
+
+ while ((job = IMB_FLUSH_JOB(mgr)) != NULL)
+ result |= job_check(job);
+
+ if (result)
+ fprintf(stdout, "Custom cipher/auth test failed!\n");
+ else
+ fprintf(stdout, "Custom cipher/auth test passed\n");
+
+ return result;
+}
diff --git a/src/spdk/intel-ipsec-mb/LibTestApp/customop_test.h b/src/spdk/intel-ipsec-mb/LibTestApp/customop_test.h
new file mode 100644
index 000000000..2af275aea
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/LibTestApp/customop_test.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2017-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _CUSTOMOP_TEST_H_
+#define _CUSTOMOP_TEST_H_
+
+struct MB_MGR;
+extern int customop_test(struct MB_MGR *state);
+
+#endif /* !_CUSTOMOP_TEST_H_ */
diff --git a/src/spdk/intel-ipsec-mb/LibTestApp/des_test.c b/src/spdk/intel-ipsec-mb/LibTestApp/des_test.c
new file mode 100644
index 000000000..989a3a6fd
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/LibTestApp/des_test.c
@@ -0,0 +1,731 @@
+/*****************************************************************************
+ Copyright (c) 2017-2018, Intel Corporation
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*****************************************************************************/
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+
+#include <intel-ipsec-mb.h>
+
+#include "gcm_ctr_vectors_test.h"
+
+#ifndef DIM
+#define DIM(x) (sizeof(x) / sizeof(x[0]))
+#endif
+
+int des_test(const enum arch_type arch, struct MB_MGR *mb_mgr);
+
+struct des_vector {
+ const uint8_t *K; /* key */
+ const uint8_t *IV; /* initialization vector */
+ const uint8_t *P; /* plain text */
+ uint64_t Plen; /* plain text length */
+ const uint8_t *C; /* cipher text - same length as plain text */
+};
+
+struct des3_vector {
+ const uint8_t *K1; /* key */
+ const uint8_t *K2; /* key */
+ const uint8_t *K3; /* key */
+ const uint8_t *IV; /* initialization vector */
+ const uint8_t *P; /* plain text */
+ uint64_t Plen; /* plain text length */
+ const uint8_t *C; /* cipher text - same length as plain text */
+};
+
+/* CM-SP-SECv3.1-I07-170111 I.7 */
+static const uint8_t K1[] = {
+ 0xe6, 0x60, 0x0f, 0xd8, 0x85, 0x2e, 0xf5, 0xab
+};
+static const uint8_t IV1[] = {
+ 0x81, 0x0e, 0x52, 0x8e, 0x1c, 0x5f, 0xda, 0x1a
+};
+static const uint8_t P1[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x88, 0x41, 0x65, 0x06
+};
+static const uint8_t C1[] = {
+ 0x0d, 0xda, 0x5a, 0xcb, 0xd0, 0x5e, 0x55, 0x67,
+ 0x9f, 0x04, 0xd1, 0xb6, 0x41, 0x3d, 0x4e, 0xed
+};
+
+static const uint8_t K2[] = {
+ 0x3b, 0x38, 0x98, 0x37, 0x15, 0x20, 0xf7, 0x5e
+};
+static const uint8_t IV2[] = {
+ 0x02, 0xa8, 0x11, 0x77, 0x4d, 0xcd, 0xe1, 0x3b
+};
+static const uint8_t P2[] = {
+ 0x05, 0xef, 0xf7, 0x00, 0xe9, 0xa1, 0x3a, 0xe5,
+ 0xca, 0x0b, 0xcb, 0xd0, 0x48, 0x47, 0x64, 0xbd,
+ 0x1f, 0x23, 0x1e, 0xa8, 0x1c, 0x7b, 0x64, 0xc5,
+ 0x14, 0x73, 0x5a, 0xc5, 0x5e, 0x4b, 0x79, 0x63,
+ 0x3b, 0x70, 0x64, 0x24, 0x11, 0x9e, 0x09, 0xdc,
+ 0xaa, 0xd4, 0xac, 0xf2, 0x1b, 0x10, 0xaf, 0x3b,
+ 0x33, 0xcd, 0xe3, 0x50, 0x48, 0x47, 0x15, 0x5c,
+ 0xbb, 0x6f, 0x22, 0x19, 0xba, 0x9b, 0x7d, 0xf5
+
+};
+static const uint8_t C2[] = {
+ 0xf3, 0x31, 0x8d, 0x01, 0x19, 0x4d, 0xa8, 0x00,
+ 0xa4, 0x2c, 0x10, 0xb5, 0x33, 0xd6, 0xbc, 0x11,
+ 0x97, 0x59, 0x2d, 0xcc, 0x9b, 0x5d, 0x35, 0x9a,
+ 0xc3, 0x04, 0x5d, 0x07, 0x4c, 0x86, 0xbf, 0x72,
+ 0xe5, 0x1a, 0x72, 0x25, 0x82, 0x22, 0x54, 0x03,
+ 0xde, 0x8b, 0x7a, 0x58, 0x5c, 0x6c, 0x28, 0xdf,
+ 0x41, 0x0e, 0x38, 0xd6, 0x2a, 0x86, 0xe3, 0x4f,
+ 0xa2, 0x7c, 0x22, 0x39, 0x60, 0x06, 0x03, 0x6f
+};
+
+static struct des_vector vectors[] = {
+ {K1, IV1, P1, sizeof(P1), C1},
+ {K2, IV2, P2, sizeof(P2), C2},
+};
+
+/* CM-SP-SECv3.1-I07-170111 I.7 */
+static const uint8_t DK1[] = {
+ 0xe6, 0x60, 0x0f, 0xd8, 0x85, 0x2e, 0xf5, 0xab
+};
+static const uint8_t DIV1[] = {
+ 0x81, 0x0e, 0x52, 0x8e, 0x1c, 0x5f, 0xda, 0x1a
+};
+static const uint8_t DP1[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x88, 0x41, 0x65, 0x06
+};
+static const uint8_t DC1[] = {
+ 0x0d, 0xda, 0x5a, 0xcb, 0xd0, 0x5e, 0x55, 0x67,
+ 0x9f, 0x04, 0xd1, 0xb6, 0x41, 0x3d, 0x4e, 0xed
+};
+
+static const uint8_t DK2[] = {
+ 0xe6, 0x60, 0x0f, 0xd8, 0x85, 0x2e, 0xf5, 0xab
+};
+static const uint8_t DIV2[] = {
+ 0x81, 0x0e, 0x52, 0x8e, 0x1c, 0x5f, 0xda, 0x1a
+};
+static const uint8_t DP2[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x91,
+ 0xd2, 0xd1, 0x9f
+};
+static const uint8_t DC2[] = {
+ 0x0d, 0xda, 0x5a, 0xcb, 0xd0, 0x5e, 0x55, 0x67,
+ 0x51, 0x47, 0x46, 0x86, 0x8a, 0x71, 0xe5, 0x77,
+ 0xef, 0xac, 0x88
+};
+
+static const uint8_t DK3[] = {
+ 0xe6, 0x60, 0x0f, 0xd8, 0x85, 0x2e, 0xf5, 0xab
+};
+static const uint8_t DIV3[] = {
+ 0x51, 0x47, 0x46, 0x86, 0x8a, 0x71, 0xe5, 0x77
+};
+static const uint8_t DP3[] = {
+ 0xd2, 0xd1, 0x9f
+};
+static const uint8_t DC3[] = {
+ 0xef, 0xac, 0x88
+};
+
+
+static struct des_vector docsis_vectors[] = {
+ {DK1, DIV1, DP1, sizeof(DP1), DC1},
+ {DK2, DIV2, DP2, sizeof(DP2), DC2},
+ {DK3, DIV3, DP3, sizeof(DP3), DC3},
+};
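+
+/*
+ * Note: the 19-byte and 3-byte DOCSIS vectors above exercise handling of a
+ * trailing partial block (lengths that are not a multiple of 8 bytes).
+ */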
+
+/* 3DES vectors - 2x and 3x keys */
+
+static const uint8_t D3K1_1[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07
+};
+
+static const uint8_t D3K2_1[] = {
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f
+};
+
+static const uint8_t D3K3_1[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07
+};
+
+static const uint8_t D3IV_1[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07
+};
+
+
+static const uint8_t D3PT_1[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+static const uint8_t D3CT_1[] = {
+ 0xdf, 0x0b, 0x6c, 0x9c, 0x31, 0xcd, 0x0c, 0xe4
+};
+
+#define D3PT_LEN_1 8
+
+static const uint8_t D3K1_2[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07
+};
+
+static const uint8_t D3K2_2[] = {
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f
+};
+
+static const uint8_t D3K3_2[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07
+};
+
+static const uint8_t D3IV_2[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07
+};
+
+static const uint8_t D3PT_2[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f
+};
+
+static const uint8_t D3CT_2[] = {
+ 0xdd, 0xad, 0xa1, 0x61, 0xe8, 0xd7, 0x96, 0x73,
+ 0xed, 0x75, 0x32, 0xe5, 0x92, 0x23, 0xcd, 0x0d
+};
+
+#define D3PT_LEN_2 16
+
+static const uint8_t D3K1_3[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07
+};
+
+static const uint8_t D3K2_3[] = {
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f
+};
+
+static const uint8_t D3K3_3[] = {
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17
+};
+
+static const uint8_t D3IV_3[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07
+};
+
+static const uint8_t D3PT_3[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+static const uint8_t D3CT_3[] = {
+ 0x58, 0xed, 0x24, 0x8f, 0x77, 0xf6, 0xb1, 0x9e
+};
+
+#define D3PT_LEN_3 8
+
+static const uint8_t D3K1_4[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07
+};
+
+static const uint8_t D3K2_4[] = {
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f
+};
+
+static const uint8_t D3K3_4[] = {
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17
+};
+
+static const uint8_t D3IV_4[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07
+};
+
+static const uint8_t D3PT_4[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f
+};
+
+static const uint8_t D3CT_4[] = {
+ 0x89, 0x4b, 0xc3, 0x08, 0x54, 0x26, 0xa4, 0x41,
+ 0xf2, 0x7f, 0x73, 0xae, 0x26, 0xab, 0xbf, 0x74
+};
+
+#define D3PT_LEN_4 16
+
+static struct des3_vector des3_vectors[] = {
+ { D3K1_1, D3K2_1, D3K3_1, D3IV_1, D3PT_1, D3PT_LEN_1, D3CT_1 },
+ { D3K1_2, D3K2_2, D3K3_2, D3IV_2, D3PT_2, D3PT_LEN_2, D3CT_2 },
+ { D3K1_3, D3K2_3, D3K3_3, D3IV_3, D3PT_3, D3PT_LEN_3, D3CT_3 },
+ { D3K1_4, D3K2_4, D3K3_4, D3IV_4, D3PT_4, D3PT_LEN_4, D3CT_4 },
+};
+
+static int
+test_des_many(struct MB_MGR *mb_mgr,
+ const uint64_t *ks,
+ const uint64_t *ks2,
+ const uint64_t *ks3,
+ const void *iv,
+ const uint8_t *in_text,
+ const uint8_t *out_text,
+ unsigned text_len,
+ int dir,
+ int order,
+ JOB_CIPHER_MODE cipher,
+ const int in_place,
+ const int num_jobs)
+{
+ const void *ks_ptr[3]; /* 3DES */
+ struct JOB_AES_HMAC *job;
+ uint8_t padding[16];
+ uint8_t **targets = malloc(num_jobs * sizeof(void *));
+ int i, jobs_rx = 0, ret = -1;
+
+ assert(targets != NULL);
+
+ memset(padding, -1, sizeof(padding));
+
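+ /*
+ * Surround each target buffer with 0xFF padding so that any head or tail
+ * overwrite by the cipher job can be detected after completion.
+ */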
+ for (i = 0; i < num_jobs; i++) {
+ targets[i] = malloc(text_len + (sizeof(padding) * 2));
+ memset(targets[i], -1, text_len + (sizeof(padding) * 2));
+ if (in_place) {
+ /* copy input text to the allocated buffer */
+ memcpy(targets[i] + sizeof(padding), in_text, text_len);
+ }
+ }
+
+ /* Used in 3DES only */
+ ks_ptr[0] = ks;
+ ks_ptr[1] = ks2;
+ ks_ptr[2] = ks3;
+
+ /* flush the scheduler */
+ while ((job = IMB_FLUSH_JOB(mb_mgr)) != NULL)
+ ;
+
+ for (i = 0; i < num_jobs; i++) {
+ job = IMB_GET_NEXT_JOB(mb_mgr);
+ job->cipher_direction = dir;
+ job->chain_order = order;
+ if (!in_place) {
+ job->dst = targets[i] + sizeof(padding);
+ job->src = in_text;
+ } else {
+ job->dst = targets[i] + sizeof(padding);
+ job->src = targets[i] + sizeof(padding);
+ }
+ job->cipher_mode = cipher;
+ if (cipher == DES3) {
+ job->aes_enc_key_expanded = (const void *) ks_ptr;
+ job->aes_dec_key_expanded = (const void *) ks_ptr;
+ job->aes_key_len_in_bytes = 24; /* 3x keys only */
+ } else {
+ job->aes_enc_key_expanded = ks;
+ job->aes_dec_key_expanded = ks;
+ job->aes_key_len_in_bytes = 8;
+ }
+ job->iv = iv;
+ job->iv_len_in_bytes = 8;
+ job->cipher_start_src_offset_in_bytes = 0;
+ job->msg_len_to_cipher_in_bytes = text_len;
+ job->user_data = (void *)((uint64_t)i);
+
+ job->hash_alg = NULL_HASH;
+
+ job = IMB_SUBMIT_JOB(mb_mgr);
+ if (job != NULL) {
+ const int num = (const int)((uint64_t)job->user_data);
+
+ jobs_rx++;
+ if (job->status != STS_COMPLETED) {
+ printf("%d error status:%d, job %d",
+ __LINE__, job->status, num);
+ goto end;
+ }
+ if (memcmp(out_text, targets[num] + sizeof(padding),
+ text_len)) {
+ printf("%d mismatched\n", num);
+ goto end;
+ }
+ if (memcmp(padding, targets[num], sizeof(padding))) {
+ printf("%d overwrite head\n", num);
+ goto end;
+ }
+ if (memcmp(padding,
+ targets[num] + sizeof(padding) + text_len,
+ sizeof(padding))) {
+ printf("%d overwrite tail\n", num);
+ goto end;
+ }
+ }
+ }
+
+ while ((job = IMB_FLUSH_JOB(mb_mgr)) != NULL) {
+ const int num = (const int)((uint64_t)job->user_data);
+
+ jobs_rx++;
+ if (job->status != STS_COMPLETED) {
+ printf("%d Error status:%d, job %d",
+ __LINE__, job->status, num);
+ goto end;
+ }
+ if (memcmp(out_text, targets[num] + sizeof(padding),
+ text_len)) {
+ printf("%d mismatched\n", num);
+ goto end;
+ }
+ if (memcmp(padding, targets[num], sizeof(padding))) {
+ printf("%d overwrite head\n", num);
+ goto end;
+ }
+ if (memcmp(padding, targets[num] + sizeof(padding) + text_len,
+ sizeof(padding))) {
+ printf("%d overwrite tail\n", num);
+ goto end;
+ }
+ }
+
+ if (jobs_rx != num_jobs) {
+ printf("Expected %d jobs, received %d\n", num_jobs, jobs_rx);
+ goto end;
+ }
+ ret = 0;
+
+ end:
+ while ((job = IMB_FLUSH_JOB(mb_mgr)) != NULL)
+ ;
+
+ for (i = 0; i < num_jobs; i++)
+ free(targets[i]);
+ free(targets);
+ return ret;
+}
+
+static int
+test_des_one(struct MB_MGR *mb_mgr,
+ const enum arch_type arch,
+ const uint64_t *ks,
+ const uint64_t *ks2,
+ const uint64_t *ks3,
+ const void *iv,
+ const uint8_t *in_text,
+ const uint8_t *out_text,
+ unsigned text_len,
+ int dir,
+ int order,
+ JOB_CIPHER_MODE cipher,
+ const int in_place)
+{
+ const void *ks_ptr[3]; /* 3DES */
+ struct JOB_AES_HMAC *job;
+ uint8_t padding[16];
+ uint8_t *target = malloc(text_len + (sizeof(padding) * 2));
+ int ret = -1;
+
+ assert(target != NULL);
+
+ memset(target, -1, text_len + (sizeof(padding) * 2));
+ memset(padding, -1, sizeof(padding));
+
+ if (in_place) {
+ /* copy input text to the allocated buffer */
+ memcpy(target + sizeof(padding), in_text, text_len);
+ }
+
+ /* Used in 3DES only */
+ ks_ptr[0] = ks;
+ ks_ptr[1] = ks2;
+ ks_ptr[2] = ks3;
+
+ while ((job = IMB_FLUSH_JOB(mb_mgr)) != NULL)
+ ;
+
+ job = IMB_GET_NEXT_JOB(mb_mgr);
+ job->cipher_direction = dir;
+ job->chain_order = order;
+ if (!in_place) {
+ job->dst = target + sizeof(padding);
+ job->src = in_text;
+ } else {
+ job->dst = target + sizeof(padding);
+ job->src = target + sizeof(padding);
+ }
+ job->cipher_mode = cipher;
+ if (cipher == DES3) {
+ job->aes_enc_key_expanded = (const void *) ks_ptr;
+ job->aes_dec_key_expanded = (const void *) ks_ptr;
+ job->aes_key_len_in_bytes = 24;
+ } else {
+ job->aes_enc_key_expanded = ks;
+ job->aes_dec_key_expanded = ks;
+ job->aes_key_len_in_bytes = 8;
+ }
+ job->iv = iv;
+ job->iv_len_in_bytes = 8;
+ job->cipher_start_src_offset_in_bytes = 0;
+ job->msg_len_to_cipher_in_bytes = text_len;
+
+ job->hash_alg = NULL_HASH;
+
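+ /*
+ * On AVX512 a single submitted DES job is not returned immediately and has
+ * to be flushed (the implementation batches jobs); other architectures are
+ * expected to return the completed job straight from submit.
+ */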
+ if (arch == ARCH_AVX512) {
+ job = IMB_SUBMIT_JOB(mb_mgr);
+ if (job) {
+ printf("%d Unexpected return from submit_job\n",
+ __LINE__);
+ goto end;
+ }
+ job = IMB_FLUSH_JOB(mb_mgr);
+ if (!job) {
+ printf("%d Unexpected null return from flush_job\n",
+ __LINE__);
+ goto end;
+ }
+ } else {
+ job = IMB_SUBMIT_JOB(mb_mgr);
+ if (!job) {
+ printf("%d Unexpected null return from submit_job\n",
+ __LINE__);
+ goto end;
+ }
+ }
+ if (job->status != STS_COMPLETED) {
+ printf("%d Error status:%d", __LINE__, job->status);
+ goto end;
+ }
+ if (memcmp(out_text, target + sizeof(padding), text_len)) {
+ printf("mismatched\n");
+ goto end;
+ }
+ if (memcmp(padding, target, sizeof(padding))) {
+ printf("overwrite head\n");
+ goto end;
+ }
+ if (memcmp(padding, target + sizeof(padding) + text_len,
+ sizeof(padding))) {
+ printf("overwrite tail\n");
+ goto end;
+ }
+ ret = 0;
+ end:
+ while ((job = IMB_FLUSH_JOB(mb_mgr)) != NULL)
+ ;
+
+ free(target);
+ return ret;
+}
+
+static int
+test_des(struct MB_MGR *mb_mgr,
+ const enum arch_type arch,
+ const uint64_t *ks,
+ const uint64_t *ks2,
+ const uint64_t *ks3,
+ const void *iv,
+ const uint8_t *in_text,
+ const uint8_t *out_text,
+ unsigned text_len,
+ int dir,
+ int order,
+ JOB_CIPHER_MODE cipher,
+ const int in_place)
+{
+ int ret = 0;
+
+ if (cipher == DES3) {
+ if (ks2 == NULL && ks3 == NULL) {
+ ret |= test_des_one(mb_mgr, arch, ks, ks, ks, iv,
+ in_text, out_text, text_len, dir,
+ order, cipher, in_place);
+ ret |= test_des_many(mb_mgr, ks, ks, ks, iv, in_text,
+ out_text, text_len, dir, order,
+ cipher, in_place, 32);
+ } else {
+ ret |= test_des_one(mb_mgr, arch, ks, ks2, ks3, iv,
+ in_text, out_text, text_len, dir,
+ order, cipher, in_place);
+ ret |= test_des_many(mb_mgr, ks, ks2, ks3, iv, in_text,
+ out_text, text_len, dir, order,
+ cipher, in_place, 32);
+ }
+ } else {
+ ret |= test_des_one(mb_mgr, arch, ks, NULL, NULL, iv, in_text,
+ out_text, text_len, dir, order, cipher,
+ in_place);
+ ret |= test_des_many(mb_mgr, ks, NULL, NULL, iv, in_text,
+ out_text, text_len, dir, order, cipher,
+ in_place, 32);
+ }
+ return ret;
+}
+
+static int
+test_des_vectors(struct MB_MGR *mb_mgr, const enum arch_type arch,
+ const int vec_cnt,
+ const struct des_vector *vec_tab, const char *banner,
+ const JOB_CIPHER_MODE cipher)
+{
+ int vect, errors = 0;
+ uint64_t ks[16];
+
+ printf("%s:\n", banner);
+ for (vect = 0; vect < vec_cnt; vect++) {
+#ifdef DEBUG
+ printf("Standard vector %d/%d PTLen:%d\n",
+ vect + 1, vec_cnt,
+ (int) vec_tab[vect].Plen);
+#else
+ printf(".");
+#endif
+ des_key_schedule(ks, vec_tab[vect].K);
+
+ if (test_des(mb_mgr, arch, ks, NULL, NULL,
+ vec_tab[vect].IV,
+ vec_tab[vect].P, vec_tab[vect].C,
+ (unsigned) vec_tab[vect].Plen,
+ ENCRYPT, CIPHER_HASH, cipher, 0)) {
+ printf("error #%d encrypt\n", vect + 1);
+ errors++;
+ }
+
+ if (test_des(mb_mgr, arch, ks, NULL, NULL,
+ vec_tab[vect].IV,
+ vec_tab[vect].C, vec_tab[vect].P,
+ (unsigned) vec_tab[vect].Plen,
+ DECRYPT, HASH_CIPHER, cipher, 0)) {
+ printf("error #%d decrypt\n", vect + 1);
+ errors++;
+ }
+
+ if (test_des(mb_mgr, arch, ks, NULL, NULL,
+ vec_tab[vect].IV,
+ vec_tab[vect].P, vec_tab[vect].C,
+ (unsigned) vec_tab[vect].Plen,
+ ENCRYPT, CIPHER_HASH, cipher, 1)) {
+ printf("error #%d encrypt in-place\n", vect + 1);
+ errors++;
+ }
+
+ if (test_des(mb_mgr, arch, ks, NULL, NULL,
+ vec_tab[vect].IV,
+ vec_tab[vect].C, vec_tab[vect].P,
+ (unsigned) vec_tab[vect].Plen,
+ DECRYPT, HASH_CIPHER, cipher, 1)) {
+ printf("error #%d decrypt in-place\n", vect + 1);
+ errors++;
+ }
+ }
+ printf("\n");
+ return errors;
+}
+
+static int
+test_des3_vectors(struct MB_MGR *mb_mgr, const enum arch_type arch,
+ const int vec_cnt,
+ const struct des3_vector *vec_tab, const char *banner)
+{
+ int vect, errors = 0;
+ uint64_t ks1[16];
+ uint64_t ks2[16];
+ uint64_t ks3[16];
+
+ printf("%s:\n", banner);
+ for (vect = 0; vect < vec_cnt; vect++) {
+#ifdef DEBUG
+ printf("Standard vector %d/%d PTLen:%d\n",
+ vect + 1, vec_cnt,
+ (int) vec_tab[vect].Plen);
+#else
+ printf(".");
+#endif
+ des_key_schedule(ks1, vec_tab[vect].K1);
+ des_key_schedule(ks2, vec_tab[vect].K2);
+ des_key_schedule(ks3, vec_tab[vect].K3);
+
+ if (test_des(mb_mgr, arch, ks1, ks2, ks3,
+ vec_tab[vect].IV,
+ vec_tab[vect].P, vec_tab[vect].C,
+ (unsigned) vec_tab[vect].Plen,
+ ENCRYPT, CIPHER_HASH, DES3, 0)) {
+ printf("error #%d encrypt\n", vect + 1);
+ errors++;
+ }
+
+ if (test_des(mb_mgr, arch, ks1, ks2, ks3,
+ vec_tab[vect].IV,
+ vec_tab[vect].C, vec_tab[vect].P,
+ (unsigned) vec_tab[vect].Plen,
+ DECRYPT, HASH_CIPHER, DES3, 0)) {
+ printf("error #%d decrypt\n", vect + 1);
+ errors++;
+ }
+
+ if (test_des(mb_mgr, arch, ks1, ks2, ks3,
+ vec_tab[vect].IV,
+ vec_tab[vect].P, vec_tab[vect].C,
+ (unsigned) vec_tab[vect].Plen,
+ ENCRYPT, CIPHER_HASH, DES3, 1)) {
+ printf("error #%d encrypt in-place\n", vect + 1);
+ errors++;
+ }
+
+ if (test_des(mb_mgr, arch, ks1, ks2, ks3,
+ vec_tab[vect].IV,
+ vec_tab[vect].C, vec_tab[vect].P,
+ (unsigned) vec_tab[vect].Plen,
+ DECRYPT, HASH_CIPHER, DES3, 1)) {
+ printf("error #%d decrypt in-place\n", vect + 1);
+ errors++;
+ }
+ }
+ printf("\n");
+ return errors;
+}
+
+int
+des_test(const enum arch_type arch,
+ struct MB_MGR *mb_mgr)
+{
+ int errors;
+
+ errors = test_des_vectors(mb_mgr, arch, DIM(vectors), vectors,
+ "DES standard test vectors", DES);
+
+ errors += test_des_vectors(mb_mgr, arch, DIM(docsis_vectors),
+ docsis_vectors,
+ "DOCSIS DES standard test vectors",
+ DOCSIS_DES);
+
+ errors += test_des_vectors(mb_mgr, arch, DIM(vectors), vectors,
+ "3DES (single key) standard test vectors",
+ DES3);
+
+ errors += test_des3_vectors(mb_mgr, arch, DIM(des3_vectors),
+ des3_vectors,
+ "3DES (multiple keys) test vectors");
+
+ if (0 == errors)
+ printf("...Pass\n");
+ else
+ printf("...Fail\n");
+
+ return errors;
+}
diff --git a/src/spdk/intel-ipsec-mb/LibTestApp/direct_api_test.c b/src/spdk/intel-ipsec-mb/LibTestApp/direct_api_test.c
new file mode 100644
index 000000000..04f6d4c3a
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/LibTestApp/direct_api_test.c
@@ -0,0 +1,1093 @@
+/*****************************************************************************
+ Copyright (c) 2019, Intel Corporation
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*****************************************************************************/
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <setjmp.h>
+#include <signal.h>
+
+#include <intel-ipsec-mb.h>
+#include "gcm_ctr_vectors_test.h"
+
+#define BUF_SIZE ((uint32_t)sizeof(struct gcm_key_data))
+#define NUM_BUFS 8
+
+int
+direct_api_test(const enum arch_type arch, struct MB_MGR *mb_mgr);
+
+/* Used to restore environment after potential segfaults */
+jmp_buf env;
+
+#ifndef DEBUG
+/* Signal handler to handle segfaults */
+static void
+seg_handler(int signum)
+{
+ (void) signum; /* unused */
+
+ signal(SIGSEGV, seg_handler); /* reset handler */
+ longjmp(env, 1); /* reset env */
+}
+#endif /* DEBUG */
+
+/*
+ * @brief Performs direct GCM API invalid param tests
+ */
+static int
+test_gcm_api(struct MB_MGR *mgr)
+{
+ const uint32_t text_len = BUF_SIZE;
+ uint8_t out_buf[BUF_SIZE];
+ uint8_t zero_buf[BUF_SIZE];
+ struct gcm_key_data *key_data = (struct gcm_key_data *)out_buf;
+ int seg_err; /* segfault flag */
+
+ seg_err = setjmp(env);
+ if (seg_err) {
+ printf("%s: segfault occured!\n", __func__);
+ return 1;
+ }
+
+ memset(out_buf, 0, text_len);
+ memset(zero_buf, 0, text_len);
+
+ /**
+ * APIs are generally tested twice:
+ * 1. test with all invalid params
+ * 2. test with some valid params (in, out, len)
+ * and verify output buffer is not modified
+ */
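+
+ /*
+ * These checks assume the library's SAFE_PARAM feature (verified by the
+ * caller, direct_api_test()) causes invalid arguments to be rejected
+ * without the output buffer being written.
+ */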
+
+ /* GCM Encrypt API tests */
+ IMB_AES128_GCM_ENC(mgr, NULL, NULL, NULL, NULL, -1,
+ NULL, NULL, -1, NULL, -1);
+ IMB_AES128_GCM_ENC(mgr, NULL, NULL, out_buf, zero_buf,
+ text_len, NULL, NULL, -1, NULL, -1);
+ if (memcmp(out_buf, zero_buf, text_len) != 0) {
+ printf("%s: IMB_AES128_GCM_ENC, invalid "
+ "param test failed!\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ IMB_AES192_GCM_ENC(mgr, NULL, NULL, NULL, NULL, -1,
+ NULL, NULL, -1, NULL, -1);
+ IMB_AES192_GCM_ENC(mgr, NULL, NULL, out_buf, zero_buf,
+ text_len, NULL, NULL, -1, NULL, -1);
+ if (memcmp(out_buf, zero_buf, text_len) != 0) {
+ printf("%s: IMB_AES192_GCM_ENC, invalid "
+ "param test failed!\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ IMB_AES256_GCM_ENC(mgr, NULL, NULL, NULL, NULL, -1,
+ NULL, NULL, -1, NULL, -1);
+ IMB_AES256_GCM_ENC(mgr, NULL, NULL, out_buf, zero_buf,
+ text_len, NULL, NULL, -1, NULL, -1);
+ if (memcmp(out_buf, zero_buf, text_len) != 0) {
+ printf("%s: IMB_AES256_GCM_ENC, invalid "
+ "param test failed!\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ /* GCM Decrypt API tests */
+ IMB_AES128_GCM_DEC(mgr, NULL, NULL, NULL, NULL, -1,
+ NULL, NULL, -1, NULL, -1);
+ IMB_AES128_GCM_DEC(mgr, NULL, NULL, out_buf, zero_buf,
+ text_len, NULL, NULL, -1, NULL, -1);
+ if (memcmp(out_buf, zero_buf, text_len) != 0) {
+ printf("%s: IMB_AES128_GCM_DEC, invalid "
+ "param test failed!\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ IMB_AES192_GCM_DEC(mgr, NULL, NULL, NULL, NULL, -1,
+ NULL, NULL, -1, NULL, -1);
+ IMB_AES192_GCM_DEC(mgr, NULL, NULL, out_buf, zero_buf,
+ text_len, NULL, NULL, -1, NULL, -1);
+ if (memcmp(out_buf, zero_buf, text_len) != 0) {
+ printf("%s: IMB_AES192_GCM_DEC, invalid "
+ "param test failed!\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ IMB_AES256_GCM_DEC(mgr, NULL, NULL, NULL, NULL, -1,
+ NULL, NULL, -1, NULL, -1);
+ IMB_AES256_GCM_DEC(mgr, NULL, NULL, out_buf, zero_buf,
+ text_len, NULL, NULL, -1, NULL, -1);
+ if (memcmp(out_buf, zero_buf, text_len) != 0) {
+ printf("%s: IMB_AES256_GCM_DEC, invalid "
+ "param test failed!\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ /* GCM Init tests */
+ IMB_AES128_GCM_INIT(mgr, NULL, NULL, NULL, NULL, -1);
+ IMB_AES128_GCM_INIT(mgr, NULL, (struct gcm_context_data *)out_buf,
+ NULL, NULL, text_len);
+ if (memcmp(out_buf, zero_buf, text_len) != 0) {
+ printf("%s: IMB_AES128_GCM_INIT, invalid "
+ "param test failed!\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ IMB_AES192_GCM_INIT(mgr, NULL, NULL, NULL, NULL, -1);
+ IMB_AES192_GCM_INIT(mgr, NULL, (struct gcm_context_data *)out_buf,
+ NULL, NULL, text_len);
+ if (memcmp(out_buf, zero_buf, text_len) != 0) {
+ printf("%s: IMB_AES192_GCM_INIT, invalid "
+ "param test failed!\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ IMB_AES256_GCM_INIT(mgr, NULL, NULL, NULL, NULL, -1);
+ IMB_AES256_GCM_INIT(mgr, NULL, (struct gcm_context_data *)out_buf,
+ NULL, NULL, text_len);
+ if (memcmp(out_buf, zero_buf, text_len) != 0) {
+ printf("%s: IMB_AES256_GCM_INIT, invalid "
+ "param test failed!\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ /* GCM Encrypt update tests */
+ IMB_AES128_GCM_ENC_UPDATE(mgr, NULL, NULL, NULL, NULL, -1);
+ IMB_AES128_GCM_ENC_UPDATE(mgr, NULL, NULL, out_buf, zero_buf, -1);
+ if (memcmp(out_buf, zero_buf, text_len) != 0) {
+ printf("%s: IMB_AES128_GCM_ENC_UPDATE, invalid "
+ "param test failed!\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ IMB_AES192_GCM_ENC_UPDATE(mgr, NULL, NULL, NULL, NULL, -1);
+ IMB_AES192_GCM_ENC_UPDATE(mgr, NULL, NULL, out_buf, zero_buf, -1);
+ if (memcmp(out_buf, zero_buf, text_len) != 0) {
+ printf("%s: IMB_AES192_GCM_ENC_UPDATE, invalid "
+ "param test failed!\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ IMB_AES256_GCM_ENC_UPDATE(mgr, NULL, NULL, NULL, NULL, -1);
+ IMB_AES256_GCM_ENC_UPDATE(mgr, NULL, NULL, out_buf, zero_buf, -1);
+ if (memcmp(out_buf, zero_buf, text_len) != 0) {
+ printf("%s: IMB_AES256_GCM_ENC_UPDATE, invalid "
+ "param test failed!\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ /* GCM Decrypt update tests */
+ IMB_AES128_GCM_DEC_UPDATE(mgr, NULL, NULL, NULL, NULL, -1);
+ IMB_AES128_GCM_DEC_UPDATE(mgr, NULL, NULL, out_buf, zero_buf, -1);
+ if (memcmp(out_buf, zero_buf, text_len) != 0) {
+ printf("%s: IMB_AES128_GCM_DEC_UPDATE, invalid "
+ "param test failed!\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ IMB_AES192_GCM_DEC_UPDATE(mgr, NULL, NULL, NULL, NULL, -1);
+ IMB_AES192_GCM_DEC_UPDATE(mgr, NULL, NULL, out_buf, zero_buf, -1);
+ if (memcmp(out_buf, zero_buf, text_len) != 0) {
+ printf("%s: IMB_AES192_GCM_DEC_UPDATE, invalid "
+ "param test failed!\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ IMB_AES256_GCM_DEC_UPDATE(mgr, NULL, NULL, NULL, NULL, -1);
+ IMB_AES256_GCM_DEC_UPDATE(mgr, NULL, NULL, out_buf, zero_buf, -1);
+ if (memcmp(out_buf, zero_buf, text_len) != 0) {
+ printf("%s: IMB_AES256_GCM_DEC_UPDATE, invalid "
+ "param test failed!\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ /* GCM Encrypt complete tests */
+ IMB_AES128_GCM_ENC_FINALIZE(mgr, NULL, NULL, NULL, -1);
+ IMB_AES128_GCM_ENC_FINALIZE(mgr, NULL, NULL, out_buf, -1);
+ if (memcmp(out_buf, zero_buf, text_len) != 0) {
+ printf("%s: IMB_AES128_GCM_ENC_FINALIZE, invalid "
+ "param test failed!\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ IMB_AES192_GCM_ENC_FINALIZE(mgr, NULL, NULL, NULL, -1);
+ IMB_AES192_GCM_ENC_FINALIZE(mgr, NULL, NULL, out_buf, -1);
+ if (memcmp(out_buf, zero_buf, text_len) != 0) {
+ printf("%s: IMB_AES192_GCM_ENC_FINALIZE, invalid "
+ "param test failed!\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ IMB_AES256_GCM_ENC_FINALIZE(mgr, NULL, NULL, NULL, -1);
+ IMB_AES256_GCM_ENC_FINALIZE(mgr, NULL, NULL, out_buf, -1);
+ if (memcmp(out_buf, zero_buf, text_len) != 0) {
+ printf("%s: IMB_AES256_GCM_ENC_FINALIZE, invalid "
+ "param test failed!\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ /* GCM Decrypt complete tests */
+ IMB_AES128_GCM_DEC_FINALIZE(mgr, NULL, NULL, NULL, -1);
+ IMB_AES128_GCM_DEC_FINALIZE(mgr, NULL, NULL, out_buf, -1);
+ if (memcmp(out_buf, zero_buf, text_len) != 0) {
+ printf("%s: IMB_AES128_GCM_DEC_FINALIZE, invalid "
+ "param test failed!\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ IMB_AES192_GCM_DEC_FINALIZE(mgr, NULL, NULL, NULL, -1);
+ IMB_AES192_GCM_DEC_FINALIZE(mgr, NULL, NULL, out_buf, -1);
+ if (memcmp(out_buf, zero_buf, text_len) != 0) {
+ printf("%s: IMB_AES192_GCM_DEC_FINALIZE, invalid "
+ "param test failed!\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ IMB_AES256_GCM_DEC_FINALIZE(mgr, NULL, NULL, NULL, -1);
+ IMB_AES256_GCM_DEC_FINALIZE(mgr, NULL, NULL, out_buf, -1);
+ if (memcmp(out_buf, zero_buf, text_len) != 0) {
+ printf("%s: IMB_AES256_GCM_DEC_FINALIZE, invalid "
+ "param test failed!\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ /* GCM key data pre-processing tests */
+ IMB_AES128_GCM_PRECOMP(mgr, NULL);
+ printf(".");
+
+ IMB_AES192_GCM_PRECOMP(mgr, NULL);
+ printf(".");
+
+ IMB_AES256_GCM_PRECOMP(mgr, NULL);
+ printf(".");
+
+ IMB_AES128_GCM_PRE(mgr, NULL, NULL);
+ IMB_AES128_GCM_PRE(mgr, NULL, key_data);
+ if (memcmp(out_buf, zero_buf, text_len) != 0) {
+ printf("%s: IMB_AES128_GCM_PRE, invalid "
+ "param test failed!\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ IMB_AES192_GCM_PRE(mgr, NULL, NULL);
+ IMB_AES192_GCM_PRE(mgr, NULL, key_data);
+ if (memcmp(out_buf, zero_buf, text_len) != 0) {
+ printf("%s: IMB_AES192_GCM_PRE, invalid "
+ "param test failed!\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ IMB_AES256_GCM_PRE(mgr, NULL, NULL);
+ IMB_AES256_GCM_PRE(mgr, NULL, key_data);
+ if (memcmp(out_buf, zero_buf, text_len) != 0) {
+ printf("%s: IMB_AES256_GCM_PRE, invalid "
+ "param test failed!\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ printf("\n");
+ return 0;
+}
+
+/*
+ * @brief Performs direct Key expansion and
+ * generation API invalid param tests
+ */
+static int
+test_key_exp_gen_api(struct MB_MGR *mgr)
+{
+ const uint32_t text_len = BUF_SIZE;
+ uint8_t out_buf[BUF_SIZE];
+ uint8_t zero_buf[BUF_SIZE];
+ int seg_err; /* segfault flag */
+
+ seg_err = setjmp(env);
+ if (seg_err) {
+ printf("%s: segfault occured!\n", __func__);
+ return 1;
+ }
+
+ memset(out_buf, 0, text_len);
+ memset(zero_buf, 0, text_len);
+
+ /**
+ * APIs are generally tested twice:
+ * 1. test with all invalid params
+ * 2. test with some valid params (in, out, len)
+ * and verify output buffer is not modified
+ */
+
+ IMB_AES_KEYEXP_128(mgr, NULL, NULL, NULL);
+ IMB_AES_KEYEXP_128(mgr, NULL, out_buf, zero_buf);
+ if (memcmp(out_buf, zero_buf, text_len) != 0) {
+ printf("%s: IMB_AES_KEYEXP_128, invalid "
+ "param test failed!\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ IMB_AES_KEYEXP_192(mgr, NULL, NULL, NULL);
+ IMB_AES_KEYEXP_192(mgr, NULL, out_buf, zero_buf);
+ if (memcmp(out_buf, zero_buf, text_len) != 0) {
+ printf("%s: IMB_AES_KEYEXP_192, invalid "
+ "param test failed!\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ IMB_AES_KEYEXP_256(mgr, NULL, NULL, NULL);
+ IMB_AES_KEYEXP_256(mgr, NULL, out_buf, zero_buf);
+ if (memcmp(out_buf, zero_buf, text_len) != 0) {
+ printf("%s: IMB_AES_KEYEXP_256, invalid "
+ "param test failed!\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ IMB_AES_CMAC_SUBKEY_GEN_128(mgr, NULL, NULL, NULL);
+ IMB_AES_CMAC_SUBKEY_GEN_128(mgr, NULL, out_buf, zero_buf);
+ if (memcmp(out_buf, zero_buf, text_len) != 0) {
+ printf("%s: IMB_AES_CMAC_SUBKEY_GEN_128, invalid "
+ "param test failed!\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ IMB_AES_XCBC_KEYEXP(mgr, NULL, NULL, NULL, NULL);
+ IMB_AES_XCBC_KEYEXP(mgr, NULL, out_buf, out_buf, out_buf);
+ if (memcmp(out_buf, zero_buf, text_len) != 0) {
+ printf("%s: IMB_AES_XCBC_KEYEXP, invalid "
+ "param test failed!\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ IMB_DES_KEYSCHED(mgr, NULL, NULL);
+ IMB_DES_KEYSCHED(mgr, (uint64_t *)out_buf, NULL);
+ if (memcmp(out_buf, zero_buf, text_len) != 0) {
+ printf("%s: IMB_DES_KEYSCHED, invalid "
+ "param test failed!\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ printf("\n");
+ return 0;
+}
+
+/*
+ * @brief Performs direct hash API invalid param tests
+ */
+static int
+test_hash_api(struct MB_MGR *mgr)
+{
+ const uint32_t text_len = BUF_SIZE;
+ uint8_t out_buf[BUF_SIZE];
+ uint8_t zero_buf[BUF_SIZE];
+ int seg_err; /* segfault flag */
+
+ seg_err = setjmp(env);
+ if (seg_err) {
+ printf("%s: segfault occured!\n", __func__);
+ return 1;
+ }
+
+ memset(out_buf, 0, text_len);
+ memset(zero_buf, 0, text_len);
+
+ /**
+ * APIs are generally tested twice:
+ * 1. test with all invalid params
+ * 2. test with some valid params (in, out, len)
+ * and verify output buffer is not modified
+ */
+
+ IMB_SHA1_ONE_BLOCK(mgr, NULL, NULL);
+ IMB_SHA1_ONE_BLOCK(mgr, NULL, out_buf);
+ if (memcmp(out_buf, zero_buf, text_len) != 0) {
+ printf("%s: IMB_SHA1_ONE_BLOCK, invalid "
+ "param test failed!\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ IMB_SHA1(mgr, NULL, -1, NULL);
+ IMB_SHA1(mgr, NULL, BUF_SIZE, out_buf);
+ if (memcmp(out_buf, zero_buf, text_len) != 0) {
+ printf("%s: IMB_SHA1, invalid "
+ "param test failed!\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ IMB_SHA224_ONE_BLOCK(mgr, NULL, NULL);
+ IMB_SHA224_ONE_BLOCK(mgr, NULL, out_buf);
+ if (memcmp(out_buf, zero_buf, text_len) != 0) {
+ printf("%s: IMB_SHA224_ONE_BLOCK, invalid "
+ "param test failed!\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ IMB_SHA224(mgr, NULL, -1, NULL);
+ IMB_SHA224(mgr, NULL, BUF_SIZE, out_buf);
+ if (memcmp(out_buf, zero_buf, text_len) != 0) {
+ printf("%s: IMB_SHA224, invalid "
+ "param test failed!\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ IMB_SHA256_ONE_BLOCK(mgr, NULL, NULL);
+ IMB_SHA256_ONE_BLOCK(mgr, NULL, out_buf);
+ if (memcmp(out_buf, zero_buf, text_len) != 0) {
+ printf("%s: IMB_SHA256_ONE_BLOCK, invalid "
+ "param test failed!\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ IMB_SHA256(mgr, NULL, -1, NULL);
+ IMB_SHA256(mgr, NULL, BUF_SIZE, out_buf);
+ if (memcmp(out_buf, zero_buf, text_len) != 0) {
+ printf("%s: IMB_SHA256, invalid "
+ "param test failed!\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ IMB_SHA384_ONE_BLOCK(mgr, NULL, NULL);
+ IMB_SHA384_ONE_BLOCK(mgr, NULL, out_buf);
+ if (memcmp(out_buf, zero_buf, text_len) != 0) {
+ printf("%s: IMB_SHA384_ONE_BLOCK, invalid "
+ "param test failed!\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ IMB_SHA384(mgr, NULL, -1, NULL);
+ IMB_SHA384(mgr, NULL, BUF_SIZE, out_buf);
+ if (memcmp(out_buf, zero_buf, text_len) != 0) {
+ printf("%s: IMB_SHA384, invalid "
+ "param test failed!\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ IMB_SHA512_ONE_BLOCK(mgr, NULL, NULL);
+ IMB_SHA512_ONE_BLOCK(mgr, NULL, out_buf);
+ if (memcmp(out_buf, zero_buf, text_len) != 0) {
+ printf("%s: IMB_SHA512_ONE_BLOCK, invalid "
+ "param test failed!\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ IMB_SHA512(mgr, NULL, -1, NULL);
+ IMB_SHA512(mgr, NULL, BUF_SIZE, out_buf);
+ if (memcmp(out_buf, zero_buf, text_len) != 0) {
+ printf("%s: IMB_SHA512, invalid "
+ "param test failed!\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ IMB_MD5_ONE_BLOCK(mgr, NULL, NULL);
+ IMB_MD5_ONE_BLOCK(mgr, NULL, out_buf);
+ if (memcmp(out_buf, zero_buf, text_len) != 0) {
+ printf("%s: IMB_MD5_ONE_BLOCK, invalid "
+ "param test failed!\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ printf("\n");
+ return 0;
+}
+
+/*
+ * @brief Performs direct AES API invalid param tests
+ */
+static int
+test_aes_api(struct MB_MGR *mgr)
+{
+ const uint32_t text_len = BUF_SIZE;
+ uint8_t out_buf[BUF_SIZE];
+ uint8_t zero_buf[BUF_SIZE];
+ int seg_err; /* segfault flag */
+
+ seg_err = setjmp(env);
+ if (seg_err) {
+ printf("%s: segfault occured!\n", __func__);
+ return 1;
+ }
+
+ memset(out_buf, 0, text_len);
+ memset(zero_buf, 0, text_len);
+
+ /**
+ * APIs are generally tested twice:
+ * 1. test with all invalid params
+ * 2. test with some valid params (in, out, len)
+ * and verify output buffer is not modified
+ */
+
+ IMB_AES128_CFB_ONE(mgr, NULL, NULL, NULL, NULL, -1);
+ IMB_AES128_CFB_ONE(mgr, out_buf, NULL, NULL, NULL, -1);
+ if (memcmp(out_buf, zero_buf, text_len) != 0) {
+ printf("%s: IMB_AES128_CFB_ONE, invalid "
+ "param test failed!\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ printf("\n");
+ return 0;
+}
+
+/*
+ * @brief Performs direct ZUC API invalid param tests
+ */
+static int
+test_zuc_api(struct MB_MGR *mgr)
+{
+ const uint32_t text_len = BUF_SIZE;
+ const uint32_t inv_len = -1;
+ uint8_t out_buf[BUF_SIZE];
+ uint8_t zero_buf[BUF_SIZE];
+ int i, ret1, ret2, seg_err; /* segfault flag */
+ void *out_bufs[NUM_BUFS];
+ uint32_t lens[NUM_BUFS];
+
+ seg_err = setjmp(env);
+ if (seg_err) {
+ printf("%s: segfault occured!\n", __func__);
+ return 1;
+ }
+
+ for (i = 0; i < NUM_BUFS; i++) {
+ out_bufs[i] = (void *)&out_buf;
+ lens[i] = text_len;
+ }
+
+ memset(out_buf, 0, text_len);
+ memset(zero_buf, 0, text_len);
+
+ /**
+ * APIs are generally tested twice:
+ * 1. test with all invalid params
+ * 2. test with some valid params (in, out, len)
+ * and verify output buffer is not modified
+ */
+
+ ret1 = zuc_eea3_iv_gen(inv_len, (const uint8_t)inv_len,
+ (const uint8_t)inv_len, NULL);
+ ret2 = zuc_eea3_iv_gen(inv_len, (const uint8_t)inv_len,
+ (const uint8_t)inv_len, out_buf);
+ if ((memcmp(out_buf, zero_buf, text_len) != 0) ||
+ ret1 == 0 || ret2 == 0) {
+ printf("%s: zuc_eea3_iv_gen, invalid "
+ "param test failed!\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ ret1 = zuc_eia3_iv_gen(inv_len, (const uint8_t)inv_len,
+ (const uint8_t)inv_len, NULL);
+ ret2 = zuc_eia3_iv_gen(inv_len, (const uint8_t)inv_len,
+ (const uint8_t)inv_len, out_buf);
+ if ((memcmp(out_buf, zero_buf, text_len) != 0) ||
+ ret1 == 0 || ret2 == 0) {
+ printf("%s: zuc_eia3_iv_gen, invalid "
+ "param test failed!\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ IMB_ZUC_EEA3_1_BUFFER(mgr, NULL, NULL, NULL, NULL, inv_len);
+ IMB_ZUC_EEA3_1_BUFFER(mgr, NULL, NULL, NULL, out_buf, text_len);
+ if (memcmp(out_buf, zero_buf, text_len) != 0) {
+ printf("%s: IMB_ZUC_EEA3_1_BUFFER, invalid "
+ "param test failed!\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ IMB_ZUC_EEA3_4_BUFFER(mgr, NULL, NULL, NULL, NULL, NULL);
+ IMB_ZUC_EEA3_4_BUFFER(mgr, NULL, NULL, NULL, out_bufs, lens);
+ if (memcmp(out_buf, zero_buf, text_len) != 0) {
+ printf("%s: IMB_ZUC_EEA3_4_BUFFER, invalid "
+ "param test failed!\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ IMB_ZUC_EEA3_N_BUFFER(mgr, NULL, NULL, NULL,
+ NULL, NULL, inv_len);
+ IMB_ZUC_EEA3_N_BUFFER(mgr, NULL, NULL, NULL,
+ out_bufs, lens, NUM_BUFS);
+ if (memcmp(out_buf, zero_buf, text_len) != 0) {
+ printf("%s: IMB_ZUC_EEA3_N_BUFFER, invalid "
+ "param test failed!\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ IMB_ZUC_EIA3_1_BUFFER(mgr, NULL, NULL, NULL, inv_len, NULL);
+ IMB_ZUC_EIA3_1_BUFFER(mgr, NULL, NULL, NULL, text_len, out_bufs[0]);
+ if (memcmp(out_buf, zero_buf, text_len) != 0) {
+ printf("%s: IMB_ZUC_EIA3_1_BUFFER, invalid "
+ "param test failed!\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ printf("\n");
+ return 0;
+}
+
+/*
+ * @brief Performs direct KASUMI API invalid param tests
+ */
+static int
+test_kasumi_api(struct MB_MGR *mgr)
+{
+ const uint32_t text_len = BUF_SIZE;
+ const uint32_t inv_len = -1;
+ const uint64_t inv_iv = -1;
+ uint8_t out_buf[BUF_SIZE];
+ uint8_t zero_buf[BUF_SIZE];
+ int i, ret1, ret2, seg_err; /* segfault flag */
+ void *out_bufs[NUM_BUFS];
+ uint32_t lens[NUM_BUFS];
+
+ seg_err = setjmp(env);
+ if (seg_err) {
+ printf("%s: segfault occured!\n", __func__);
+ return 1;
+ }
+
+ for (i = 0; i < NUM_BUFS; i++) {
+ out_bufs[i] = (void *)&out_buf;
+ lens[i] = text_len;
+ }
+
+ memset(out_buf, 0, text_len);
+ memset(zero_buf, 0, text_len);
+
+ /**
+ * APIs are generally tested twice:
+ * 1. test with all invalid params
+ * 2. test with some valid params (in, out, len)
+ * and verify output buffer is not modified
+ */
+
+ ret1 = kasumi_f8_iv_gen(inv_len, (const uint8_t)inv_len,
+ (const uint8_t)inv_len, NULL);
+ ret2 = kasumi_f8_iv_gen(inv_len, (const uint8_t)inv_len,
+ (const uint8_t)inv_len, out_buf);
+ if ((memcmp(out_buf, zero_buf, text_len) != 0) ||
+ ret1 == 0 || ret2 == 0) {
+ printf("%s: kasumi_f8_iv_gen, invalid "
+ "param test failed!\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ ret1 = kasumi_f9_iv_gen(inv_len, inv_len, NULL);
+ if ((memcmp(out_buf, zero_buf, text_len) != 0) || ret1 == 0) {
+ printf("%s: kasumi_f9_iv_gen, invalid "
+ "param test failed!\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ IMB_KASUMI_F8_1_BUFFER(mgr, NULL, inv_iv, NULL, NULL, inv_len);
+ IMB_KASUMI_F8_1_BUFFER(mgr, NULL, inv_iv, NULL, out_buf, text_len);
+ if (memcmp(out_buf, zero_buf, text_len) != 0) {
+ printf("%s: IMB_KASUMI_F8_1_BUFFER, invalid "
+ "param test failed!\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ IMB_KASUMI_F8_1_BUFFER_BIT(mgr, NULL, inv_iv, NULL,
+ NULL, inv_len, inv_len);
+ IMB_KASUMI_F8_1_BUFFER_BIT(mgr, NULL, inv_iv, NULL,
+ out_buf, text_len, 0);
+ if (memcmp(out_buf, zero_buf, text_len) != 0) {
+ printf("%s: IMB_KASUMI_F8_1_BUFFER_BIT, invalid "
+ "param test failed!\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ IMB_KASUMI_F8_2_BUFFER(mgr, NULL, inv_iv, inv_iv, NULL,
+ NULL, inv_len, NULL, NULL, inv_len);
+ IMB_KASUMI_F8_2_BUFFER(mgr, NULL, inv_iv, inv_iv, NULL,
+ out_buf, text_len, NULL, out_buf, text_len);
+ if (memcmp(out_buf, zero_buf, text_len) != 0) {
+ printf("%s: IMB_KASUMI_F8_2_BUFFER, invalid "
+ "param test failed!\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ IMB_KASUMI_F8_3_BUFFER(mgr, NULL, inv_iv, inv_iv, inv_iv, NULL,
+ NULL, NULL, NULL, NULL, NULL, inv_len);
+ IMB_KASUMI_F8_3_BUFFER(mgr, NULL, inv_iv, inv_iv, inv_iv, NULL,
+ out_buf, NULL, out_buf, NULL, out_buf, text_len);
+ if (memcmp(out_buf, zero_buf, text_len) != 0) {
+ printf("%s: IMB_KASUMI_F8_3_BUFFER, invalid "
+ "param test failed!\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ IMB_KASUMI_F8_4_BUFFER(mgr, NULL, inv_iv, inv_iv, inv_iv, inv_iv,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ inv_len);
+ IMB_KASUMI_F8_4_BUFFER(mgr, NULL, inv_iv, inv_iv, inv_iv, inv_iv,
+ NULL, out_buf, NULL, out_buf, NULL, out_buf,
+ NULL, out_buf, inv_len);
+ if (memcmp(out_buf, zero_buf, text_len) != 0) {
+ printf("%s: IMB_KASUMI_F8_4_BUFFER, invalid "
+ "param test failed!\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ IMB_KASUMI_F8_N_BUFFER(mgr, NULL, NULL, NULL,
+ NULL, NULL, inv_len);
+ IMB_KASUMI_F8_N_BUFFER(mgr, NULL, NULL, NULL,
+ out_bufs, lens, NUM_BUFS);
+ if (memcmp(out_buf, zero_buf, text_len) != 0) {
+ printf("%s: IMB_KASUMI_F8_N_BUFFER, invalid "
+ "param test failed!\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ IMB_KASUMI_F9_1_BUFFER(mgr, NULL, NULL, inv_len, NULL);
+ IMB_KASUMI_F9_1_BUFFER(mgr, NULL, NULL, text_len, out_buf);
+ if (memcmp(out_buf, zero_buf, text_len) != 0) {
+ printf("%s: IMB_KASUMI_F9_1_BUFFER, invalid "
+ "param test failed!\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ IMB_KASUMI_F9_1_BUFFER_USER(mgr, NULL, inv_iv, NULL,
+ inv_len, NULL, inv_len);
+ IMB_KASUMI_F9_1_BUFFER_USER(mgr, NULL, inv_iv, NULL,
+ text_len, out_buf, 0);
+ if (memcmp(out_buf, zero_buf, text_len) != 0) {
+ printf("%s: IMB_KASUMI_F9_1_BUFFER_USER, invalid "
+ "param test failed!\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ ret1 = IMB_KASUMI_INIT_F8_KEY_SCHED(mgr, NULL, NULL);
+ ret2 = IMB_KASUMI_INIT_F8_KEY_SCHED(mgr, NULL,
+ (kasumi_key_sched_t *)out_buf);
+ if ((memcmp(out_buf, zero_buf, text_len) != 0) ||
+ ret1 == 0 || ret2 == 0) {
+ printf("%s: IMB_KASUMI_INIT_F8_KEY_SCHED, invalid "
+ "param test failed!\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ ret1 = IMB_KASUMI_INIT_F9_KEY_SCHED(mgr, NULL, NULL);
+ ret2 = IMB_KASUMI_INIT_F9_KEY_SCHED(mgr, NULL,
+ (kasumi_key_sched_t *)out_buf);
+ if ((memcmp(out_buf, zero_buf, text_len) != 0) ||
+ ret1 == 0 || ret2 == 0) {
+ printf("%s: IMB_KASUMI_INIT_F9_KEY_SCHED, invalid "
+ "param test failed!\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ if (IMB_KASUMI_KEY_SCHED_SIZE(mgr) <= 0) {
+ printf("%s: IMB_KASUMI_KEY_SCHED_SIZE, invalid "
+ "param test failed!\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ printf("\n");
+ return 0;
+}
+
+/*
+ * @brief Performs direct SNOW3G API invalid param tests
+ */
+static int
+test_snow3g_api(struct MB_MGR *mgr)
+{
+ const uint32_t text_len = BUF_SIZE;
+ const uint32_t inv_len = -1;
+ uint8_t out_buf[BUF_SIZE];
+ uint8_t zero_buf[BUF_SIZE];
+ int i, ret1, ret2, seg_err; /* segfault flag */
+ void *out_bufs[NUM_BUFS];
+ uint32_t lens[NUM_BUFS];
+
+ seg_err = setjmp(env);
+ if (seg_err) {
+ printf("%s: segfault occured!\n", __func__);
+ return 1;
+ }
+
+ for (i = 0; i < NUM_BUFS; i++) {
+ out_bufs[i] = (void *)&out_buf;
+ lens[i] = text_len;
+ }
+
+ memset(out_buf, 0, text_len);
+ memset(zero_buf, 0, text_len);
+
+ /**
+ * APIs are generally tested twice:
+ * 1. test with all invalid params
+ * 2. test with some valid params (in, out, len)
+ * and verify output buffer is not modified
+ */
+
+ ret1 = snow3g_f8_iv_gen(inv_len, (const uint8_t)inv_len,
+ (const uint8_t)inv_len, NULL);
+ ret2 = snow3g_f8_iv_gen(inv_len, (const uint8_t)inv_len,
+ (const uint8_t)inv_len, out_buf);
+ if ((memcmp(out_buf, zero_buf, text_len) != 0) ||
+ ret1 == 0 || ret2 == 0) {
+ printf("%s: snow3g_f8_iv_gen, invalid "
+ "param test failed!\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ ret1 = snow3g_f9_iv_gen(inv_len, (const uint8_t)inv_len,
+ (const uint8_t)inv_len, NULL);
+ ret2 = snow3g_f9_iv_gen(inv_len, (const uint8_t)inv_len,
+ (const uint8_t)inv_len, out_buf);
+ if ((memcmp(out_buf, zero_buf, text_len) != 0) ||
+ ret1 == 0 || ret2 == 0) {
+ printf("%s: snow3g_f9_iv_gen, invalid "
+ "param test failed!\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ IMB_SNOW3G_F8_1_BUFFER(mgr, NULL, NULL, NULL, NULL, inv_len);
+ IMB_SNOW3G_F8_1_BUFFER(mgr, NULL, NULL, NULL, out_buf, text_len);
+ if (memcmp(out_buf, zero_buf, text_len) != 0) {
+ printf("%s: IMB_SNOW3G_F8_1_BUFFER, invalid "
+ "param test failed!\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ IMB_SNOW3G_F8_1_BUFFER_BIT(mgr, NULL, NULL, NULL, NULL,
+ inv_len, inv_len);
+ IMB_SNOW3G_F8_1_BUFFER_BIT(mgr, NULL, NULL, NULL, out_buf,
+ text_len, 0);
+ if (memcmp(out_buf, zero_buf, text_len) != 0) {
+ printf("%s: IMB_SNOW3G_F8_1_BUFFER_BIT, invalid "
+ "param test failed!\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ IMB_SNOW3G_F8_2_BUFFER(mgr, NULL, NULL, NULL, NULL,
+ NULL, inv_len, NULL, NULL, inv_len);
+ IMB_SNOW3G_F8_2_BUFFER(mgr, NULL, NULL, NULL, NULL,
+ out_buf, text_len, NULL, out_buf, text_len);
+ if (memcmp(out_buf, zero_buf, text_len) != 0) {
+ printf("%s: IMB_SNOW3G_F8_2_BUFFER, invalid "
+ "param test failed!\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ IMB_SNOW3G_F8_4_BUFFER(mgr, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, inv_len, NULL, NULL, inv_len,
+ NULL, NULL, inv_len, NULL, NULL, inv_len);
+ IMB_SNOW3G_F8_4_BUFFER(mgr, NULL, NULL, NULL, NULL, NULL,
+ NULL, out_buf, inv_len, NULL, out_buf, inv_len,
+ NULL, out_buf, inv_len, NULL, out_buf, inv_len);
+ if (memcmp(out_buf, zero_buf, text_len) != 0) {
+ printf("%s: IMB_SNOW3G_F8_4_BUFFER, invalid "
+ "param test failed!\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ IMB_SNOW3G_F8_8_BUFFER(mgr, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, inv_len, NULL, NULL, inv_len,
+ NULL, NULL, inv_len, NULL, NULL, inv_len,
+ NULL, NULL, inv_len, NULL, NULL, inv_len,
+ NULL, NULL, inv_len, NULL, NULL, inv_len);
+ IMB_SNOW3G_F8_8_BUFFER(mgr, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL,
+ NULL, out_buf, inv_len, NULL, out_buf, inv_len,
+ NULL, out_buf, inv_len, NULL, out_buf, inv_len,
+ NULL, out_buf, inv_len, NULL, out_buf, inv_len,
+ NULL, out_buf, inv_len, NULL, out_buf, inv_len);
+ if (memcmp(out_buf, zero_buf, text_len) != 0) {
+ printf("%s: IMB_SNOW3G_F8_8_BUFFER, invalid "
+ "param test failed!\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ IMB_SNOW3G_F8_8_BUFFER_MULTIKEY(mgr, NULL, NULL, NULL, NULL, &inv_len);
+ IMB_SNOW3G_F8_8_BUFFER_MULTIKEY(mgr, NULL, NULL, NULL, out_bufs, lens);
+ if (memcmp(out_buf, zero_buf, text_len) != 0) {
+ printf("%s: IMB_SNOW3G_F8_8_BUFFER_MULTIKEY, invalid "
+ "param test failed!\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ IMB_SNOW3G_F8_N_BUFFER(mgr, NULL, NULL, NULL, NULL, NULL, inv_len);
+ IMB_SNOW3G_F8_N_BUFFER(mgr, NULL, NULL, NULL, out_bufs, lens, NUM_BUFS);
+ if (memcmp(out_buf, zero_buf, text_len) != 0) {
+ printf("%s: IMB_SNOW3G_F8_N_BUFFER, invalid "
+ "param test failed!\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ IMB_SNOW3G_F8_N_BUFFER_MULTIKEY(mgr, NULL, NULL, NULL, NULL,
+ NULL, inv_len);
+ IMB_SNOW3G_F8_N_BUFFER_MULTIKEY(mgr, NULL, NULL, NULL, out_bufs,
+ lens, NUM_BUFS);
+ if (memcmp(out_buf, zero_buf, text_len) != 0) {
+ printf("%s: IMB_SNOW3G_F8_N_BUFFER_MULTIKEY, invalid "
+ "param test failed!\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ IMB_SNOW3G_F9_1_BUFFER(mgr, NULL, NULL, NULL, inv_len, NULL);
+ IMB_SNOW3G_F9_1_BUFFER(mgr, NULL, NULL, NULL, text_len, out_buf);
+ if (memcmp(out_buf, zero_buf, text_len) != 0) {
+ printf("%s: IMB_SNOW3G_F9_1_BUFFER, invalid "
+ "param test failed!\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ ret1 = IMB_SNOW3G_INIT_KEY_SCHED(mgr, NULL, NULL);
+ ret2 = IMB_SNOW3G_INIT_KEY_SCHED(mgr, NULL,
+ (snow3g_key_schedule_t *)out_buf);
+ if ((memcmp(out_buf, zero_buf, text_len) != 0) ||
+ ret1 == 0 || ret2 == 0) {
+ printf("%s: IMB_SNOW3G_INIT_KEY_SCHED, invalid "
+ "param test failed!\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ if (IMB_SNOW3G_KEY_SCHED_SIZE(mgr) <= 0) {
+ printf("%s: IMB_SNOW3G_KEY_SCHED_SIZE, invalid "
+ "param test failed!\n", __func__);
+ return 1;
+ }
+ printf(".");
+
+ printf("\n");
+ return 0;
+}
+
+int
+direct_api_test(const enum arch_type arch, struct MB_MGR *mb_mgr)
+{
+ int errors = 0;
+ (void) arch; /* unused */
+#ifndef DEBUG
+#ifdef _WIN32
+ void *handler;
+#else
+ sighandler_t handler;
+#endif
+#endif
+ printf("Invalid Direct API arguments test:\n");
+
+ if ((mb_mgr->features & IMB_FEATURE_SAFE_PARAM) == 0) {
+ printf("SAFE_PARAM feature disabled, skipping tests\n");
+ return 0;
+ }
+#ifndef DEBUG
+ handler = signal(SIGSEGV, seg_handler);
+#endif
+
+ errors += test_gcm_api(mb_mgr);
+ errors += test_key_exp_gen_api(mb_mgr);
+ errors += test_hash_api(mb_mgr);
+ errors += test_aes_api(mb_mgr);
+ errors += test_zuc_api(mb_mgr);
+ errors += test_kasumi_api(mb_mgr);
+ errors += test_snow3g_api(mb_mgr);
+
+ if (0 == errors)
+ printf("...Pass\n");
+ else
+ printf("...Fail\n");
+
+#ifndef DEBUG
+ signal(SIGSEGV, handler);
+#endif
+
+ return errors;
+}
diff --git a/src/spdk/intel-ipsec-mb/LibTestApp/do_test.h b/src/spdk/intel-ipsec-mb/LibTestApp/do_test.h
new file mode 100644
index 000000000..def7d3dd7
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/LibTestApp/do_test.h
@@ -0,0 +1,302 @@
+/*
+ * Copyright (c) 2012-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdint.h>
+
+#ifndef DO_TEST_H
+#define DO_TEST_H
+
+static unsigned char key[] = {
+ 0x70,0x71,0x72,0x73,0x74,0x75,0x76,0x77,
+ 0x78,0x79,0x7a,0x7b,0x7c,0x7d,0x7e,0x7f,
+ 0x80,0x81,0x82,0x83,0x84,0x85,0x86,0x87,
+ 0x88,0x89,0x8a,0x8b,0x8c,0x8d,0x8e,0x8f,
+ 0x90,0x91,0x92,0x93,0x94,0x95,0x96,0x97,
+ 0x98,0x99,0x9a,0x9b,0x9c,0x9d,0x9e,0x9f,
+ 0xa0
+};
+
+static unsigned char text[] = {
+ 0x53,0x61,0x6d,0x70,0x6c,0x65,0x20,0x23,0x34
+};
+
+static unsigned char hmac12[] = {
+ 0x9e,0xa8,0x86,0xef,0xe2,0x68,0xdb,0xec,0xce,0x42,0x0c,0x75
+};
+
+#define KEYSIZE sizeof(key)
+#define TEXTSIZE sizeof(text)
+
+static unsigned char plain[] = {
+ 0x6b,0xc1,0xbe,0xe2,0x2e,0x40,0x9f,0x96,
+ 0xe9,0x3d,0x7e,0x11,0x73,0x93,0x17,0x2a,
+ 0xae,0x2d,0x8a,0x57,0x1e,0x03,0xac,0x9c,
+ 0x9e,0xb7,0x6f,0xac,0x45,0xaf,0x8e,0x51,
+ 0x30,0xc8,0x1c,0x46,0xa3,0x5c,0xe4,0x11,
+ 0xe5,0xfb,0xc1,0x19,0x1a,0x0a,0x52,0xef,
+ 0xf6,0x9f,0x24,0x45,0xdf,0x4f,0x9b,0x17,
+ 0xad,0x2b,0x41,0x7b,0xe6,0x6c,0x37,0x10
+};
+
+static unsigned char key128[] = {
+ 0x2b,0x7e,0x15,0x16,0x28,0xae,0xd2,0xa6,
+ 0xab,0xf7,0x15,0x88,0x09,0xcf,0x4f,0x3c
+};
+
+/* static unsigned char key256[] = { */
+/* 0x60,0x3d,0xeb,0x10,0x15,0xca,0x71,0xbe, */
+/* 0x2b,0x73,0xae,0xf0,0x85,0x7d,0x77,0x81, */
+/* 0x1f,0x35,0x2c,0x07,0x3b,0x61,0x08,0xd7, */
+/* 0x2d,0x98,0x10,0xa3,0x09,0x14,0xdf,0xf4 */
+/* }; */
+
+static unsigned char ic[] = {
+ 0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,
+ 0x08,0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f
+};
+
+static unsigned char cipherCBC128[] = {
+ 0x76,0x49,0xab,0xac,0x81,0x19,0xb2,0x46,
+ 0xce,0xe9,0x8e,0x9b,0x12,0xe9,0x19,0x7d,
+ 0x50,0x86,0xcb,0x9b,0x50,0x72,0x19,0xee,
+ 0x95,0xdb,0x11,0x3a,0x91,0x76,0x78,0xb2,
+ 0x73,0xbe,0xd6,0xb8,0xe3,0xc1,0x74,0x3b,
+ 0x71,0x16,0xe6,0x9e,0x22,0x22,0x95,0x16,
+ 0x3f,0xf1,0xca,0xa1,0x68,0x1f,0xac,0x09,
+ 0x12,0x0e,0xca,0x30,0x75,0x86,0xe1,0xa7
+};
+
+/* static unsigned char cipherCBC256[] = { */
+/* 0xf5,0x8c,0x4c,0x04,0xd6,0xe5,0xf1,0xba, */
+/* 0x77,0x9e,0xab,0xfb,0x5f,0x7b,0xfb,0xd6, */
+/* 0x9c,0xfc,0x4e,0x96,0x7e,0xdb,0x80,0x8d, */
+/* 0x67,0x9f,0x77,0x7b,0xc6,0x70,0x2c,0x7d, */
+/* 0x39,0xf2,0x33,0x69,0xa9,0xd9,0xba,0xcf, */
+/* 0xa5,0x30,0xe2,0x63,0x04,0x23,0x14,0x61, */
+/* 0xb2,0xeb,0x05,0xe2,0xc3,0x9b,0xe9,0xfc, */
+/* 0xda,0x6c,0x19,0x07,0x8c,0x6a,0x9d,0x1b */
+/* }; */
+
+#define NUMBLOCKS 4
+#define NUMBYTES (NUMBLOCKS * 16)
+
+
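+/*
+ * Runs a single AES-128-CBC decrypt + HMAC-SHA1 job through the multi-buffer
+ * manager and checks the recovered plaintext against plain[] and the 12-byte
+ * truncated digest against hmac12[].
+ */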
+static int
+known_answer_test(MB_MGR *mb_mgr)
+{
+ uint8_t test_buf[NUMBYTES];
+ uint8_t buf[64];
+ DECLARE_ALIGNED(uint32_t enc_keys[15*4], 16);
+ DECLARE_ALIGNED(uint32_t dec_keys[15*4], 16);
+ DECLARE_ALIGNED(uint8_t ipad_hash[5*4], 16);
+ DECLARE_ALIGNED(uint8_t opad_hash[5*4], 16);
+ JOB_AES_HMAC *job;
+ uint8_t iv[16];
+ uint8_t digest[12];
+ uint32_t i;
+
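+	/*
+	 * The library takes HMAC keys as pre-hashed pads: one SHA-1 block
+	 * over (key XOR 0x36..36) for the inner pad and one over
+	 * (key XOR 0x5c..5c) for the outer pad, rather than the raw key.
+	 */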
+ /* compute ipad hash */
+ for (i=0; i<64; i++)
+ buf[i] = 0x36;
+ for (i=0; i<KEYSIZE; i++)
+ buf[i] ^= key[i];
+ IMB_SHA1_ONE_BLOCK(mb_mgr, buf, ipad_hash);
+
+ /* compute opad hash */
+ for (i=0; i<64; i++)
+ buf[i] = 0x5c;
+ for (i=0; i<KEYSIZE; i++)
+ buf[i] ^= key[i];
+ IMB_SHA1_ONE_BLOCK(mb_mgr, buf, opad_hash);
+
+
+ /* Expand key */
+ IMB_AES_KEYEXP_128(mb_mgr, key128, enc_keys, dec_keys);
+
+
+ /* test AES128 Dec */
+ job = IMB_GET_NEXT_JOB(mb_mgr);
+
+ job->aes_enc_key_expanded = enc_keys;
+ job->aes_dec_key_expanded = dec_keys;
+ job->cipher_direction = DECRYPT;
+ job->chain_order = HASH_CIPHER;
+ job->dst = test_buf;
+ job->aes_key_len_in_bytes = 16;
+ job->auth_tag_output = digest;
+ job->auth_tag_output_len_in_bytes = 12;
+ memcpy(iv, ic, sizeof(iv));
+ job->iv = iv;
+ job->iv_len_in_bytes = 16;
+ job->src = cipherCBC128;
+ job->cipher_start_src_offset_in_bytes = 0;
+ job->msg_len_to_cipher_in_bytes = NUMBYTES;
+ job->hash_start_src_offset_in_bytes = text - job->src;
+ job->msg_len_to_hash_in_bytes = TEXTSIZE;
+ job->u.HMAC._hashed_auth_key_xor_ipad = ipad_hash;
+ job->u.HMAC._hashed_auth_key_xor_opad = opad_hash;
+ job->cipher_mode = CBC;
+ job->hash_alg = SHA1;
+
+ job = IMB_SUBMIT_JOB(mb_mgr);
+ if (job) {
+ printf("Unexpected return from submit_job\n");
+ return 1;
+ }
+ job = IMB_FLUSH_JOB(mb_mgr);
+ if (!job) {
+ printf("Unexpected null return from flush_job\n");
+ return 1;
+ }
+ for (i=0; i<NUMBYTES; i++) {
+ if (test_buf[i] != plain[i]) {
+ printf("AES128 Dec mismatch on byte %d\n", i);
+ return 1;
+ }
+ }
+
+ for (i=0; i<12; i++) {
+ if (digest[i] != hmac12[i]) {
+ printf("HMAC/SHA1 mismatch on byte %d\n", i);
+ return 1;
+ }
+ }
+ printf("Known answer passes\n");
+ return 0;
+}
+
+static void
+test_aux_func(MB_MGR *mgr)
+{
+ /* test aux functions */
+ uint128_t keys[15] = {{0, 0}};
+ static uint8_t buf[4096+20];
+
+ uint32_t digest1[8];
+ uint64_t digest3[8];
+ DECLARE_ALIGNED(uint32_t k1_exp[15*4], 16);
+ DECLARE_ALIGNED(uint32_t k2[4], 16);
+ DECLARE_ALIGNED(uint32_t k3[4], 16);
+
+ printf("Testing aux funcs\n");
+
+ IMB_SHA1_ONE_BLOCK(mgr, buf, digest1);
+ IMB_SHA224_ONE_BLOCK(mgr, buf, digest1);
+ IMB_SHA256_ONE_BLOCK(mgr, buf, digest1);
+ IMB_SHA384_ONE_BLOCK(mgr, buf, digest3);
+ IMB_SHA512_ONE_BLOCK(mgr, buf, digest3);
+ IMB_MD5_ONE_BLOCK(mgr, buf, digest1);
+ IMB_AES_XCBC_KEYEXP(mgr, buf + 1, k1_exp, k2, k3);
+ IMB_AES_KEYEXP_128(mgr, keys, k1_exp, k1_exp);
+ IMB_AES_KEYEXP_192(mgr, keys, k1_exp, k1_exp);
+ IMB_AES_KEYEXP_256(mgr, keys, k1_exp, k1_exp);
+}
+
+static int
+do_test(MB_MGR *mb_mgr)
+{
+ uint32_t size;
+ JOB_AES_HMAC *job;
+ static uint128_t IV = {0,0};
+ static uint32_t ipad[5], opad[5], digest[3];
+ uint128_t keys[15] = {{0, 0}};
+ static uint8_t buf[4096+20];
+
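+
+	/*
+	 * Stress loop: submit jobs of increasing size with randomly picked
+	 * key lengths, cipher directions and chain orders. Results are not
+	 * verified here; the point is to exercise the scheduler paths.
+	 */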
+ for (size = 32; size < 4096; size += 16) {
+ job = IMB_GET_NEXT_JOB(mb_mgr);
+
+ job->msg_len_to_cipher_in_bytes = size;
+ job->msg_len_to_hash_in_bytes = size + 20;
+ job->hash_start_src_offset_in_bytes = 0;
+ job->cipher_start_src_offset_in_bytes = 20;
+
+ job->auth_tag_output = (uint8_t*) digest;
+ job->auth_tag_output_len_in_bytes = 12;
+ job->u.HMAC._hashed_auth_key_xor_ipad = (uint8_t*)ipad;
+ job->u.HMAC._hashed_auth_key_xor_opad = (uint8_t*)opad;
+
+ job->aes_enc_key_expanded =
+ job->aes_dec_key_expanded = (uint32_t*) keys;
+ job->src = buf;
+ job->dst = buf + 20;
+ job->iv = (uint8_t *) &IV;
+ job->iv_len_in_bytes = 16;
+
+ job->cipher_mode = CBC;
+ job->hash_alg = SHA1;
+
+ switch (rand() % 3) {
+ case 0:
+ job->aes_key_len_in_bytes = 16;
+ break;
+ case 1:
+ job->aes_key_len_in_bytes = 24;
+ break;
+ default:
+ job->aes_key_len_in_bytes = 32;
+ break;
+ }
+
+ switch (rand() % 4) {
+ case 0:
+ job->cipher_direction = ENCRYPT;
+ job->chain_order = HASH_CIPHER;
+ break;
+ case 1:
+ job->cipher_direction = ENCRYPT;
+ job->chain_order = CIPHER_HASH;
+ break;
+ case 2:
+ job->cipher_direction = DECRYPT;
+ job->chain_order = CIPHER_HASH;
+ break;
+ case 3:
+ job->cipher_direction = DECRYPT;
+ job->chain_order = HASH_CIPHER;
+ break;
+ }
+ job = IMB_SUBMIT_JOB(mb_mgr);
+ while (job) {
+ job = IMB_GET_COMPLETED_JOB(mb_mgr);
+ } /* end while (job) */
+ } /* end for size */
+
+ while ((job = IMB_FLUSH_JOB(mb_mgr)) != NULL) {
+ do {
+ job = IMB_GET_COMPLETED_JOB(mb_mgr);
+ } while (job);
+ }
+
+ test_aux_func(mb_mgr);
+
+ return 0;
+}
+
+#endif /* DO_TEST_H */
diff --git a/src/spdk/intel-ipsec-mb/LibTestApp/ecb_test.c b/src/spdk/intel-ipsec-mb/LibTestApp/ecb_test.c
new file mode 100644
index 000000000..b02d84e03
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/LibTestApp/ecb_test.c
@@ -0,0 +1,804 @@
+/*****************************************************************************
+ Copyright (c) 2019, Intel Corporation
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*****************************************************************************/
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+
+#include <intel-ipsec-mb.h>
+
+#include "gcm_ctr_vectors_test.h"
+#include "utils.h"
+
+int ecb_test(const enum arch_type arch, struct MB_MGR *mb_mgr);
+
+struct ecb_vector {
+ const uint8_t *K; /* key */
+ const uint8_t *P; /* plain text */
+ uint64_t Plen; /* plain text length */
+ const uint8_t *C; /* cipher text - same length as plain text */
+ uint32_t Klen; /* key length */
+};
+
+/* 128-bit */
+static const uint8_t K1[] = {
+ 0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6,
+ 0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c
+};
+static const uint8_t P1[] = {
+ 0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f, 0x96,
+ 0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a,
+ 0xae, 0x2d, 0x8a, 0x57, 0x1e, 0x03, 0xac, 0x9c,
+ 0x9e, 0xb7, 0x6f, 0xac, 0x45, 0xaf, 0x8e, 0x51,
+ 0x30, 0xc8, 0x1c, 0x46, 0xa3, 0x5c, 0xe4, 0x11,
+ 0xe5, 0xfb, 0xc1, 0x19, 0x1a, 0x0a, 0x52, 0xef,
+ 0xf6, 0x9f, 0x24, 0x45, 0xdf, 0x4f, 0x9b, 0x17,
+ 0xad, 0x2b, 0x41, 0x7b, 0xe6, 0x6c, 0x37, 0x10
+};
+static const uint8_t C1[] = {
+ 0x3a, 0xd7, 0x7b, 0xb4, 0x0d, 0x7a, 0x36, 0x60,
+ 0xa8, 0x9e, 0xca, 0xf3, 0x24, 0x66, 0xef, 0x97,
+ 0xf5, 0xd3, 0xd5, 0x85, 0x03, 0xb9, 0x69, 0x9d,
+ 0xe7, 0x85, 0x89, 0x5a, 0x96, 0xfd, 0xba, 0xaf,
+ 0x43, 0xb1, 0xcd, 0x7f, 0x59, 0x8e, 0xce, 0x23,
+ 0x88, 0x1b, 0x00, 0xe3, 0xed, 0x03, 0x06, 0x88,
+ 0x7b, 0x0c, 0x78, 0x5e, 0x27, 0xe8, 0xad, 0x3f,
+ 0x82, 0x23, 0x20, 0x71, 0x04, 0x72, 0x5d, 0xd4
+};
+
+/* 192-bit */
+static const uint8_t K2[] = {
+ 0x8e, 0x73, 0xb0, 0xf7, 0xda, 0x0e, 0x64, 0x52,
+ 0xc8, 0x10, 0xf3, 0x2b, 0x80, 0x90, 0x79, 0xe5,
+ 0x62, 0xf8, 0xea, 0xd2, 0x52, 0x2c, 0x6b, 0x7b
+};
+static const uint8_t P2[] = {
+ 0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f, 0x96,
+ 0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a,
+ 0xae, 0x2d, 0x8a, 0x57, 0x1e, 0x03, 0xac, 0x9c,
+ 0x9e, 0xb7, 0x6f, 0xac, 0x45, 0xaf, 0x8e, 0x51,
+ 0x30, 0xc8, 0x1c, 0x46, 0xa3, 0x5c, 0xe4, 0x11,
+ 0xe5, 0xfb, 0xc1, 0x19, 0x1a, 0x0a, 0x52, 0xef,
+ 0xf6, 0x9f, 0x24, 0x45, 0xdf, 0x4f, 0x9b, 0x17,
+ 0xad, 0x2b, 0x41, 0x7b, 0xe6, 0x6c, 0x37, 0x10
+};
+static const uint8_t C2[] = {
+ 0xbd, 0x33, 0x4f, 0x1d, 0x6e, 0x45, 0xf2, 0x5f,
+ 0xf7, 0x12, 0xa2, 0x14, 0x57, 0x1f, 0xa5, 0xcc,
+ 0x97, 0x41, 0x04, 0x84, 0x6d, 0x0a, 0xd3, 0xad,
+ 0x77, 0x34, 0xec, 0xb3, 0xec, 0xee, 0x4e, 0xef,
+ 0xef, 0x7a, 0xfd, 0x22, 0x70, 0xe2, 0xe6, 0x0a,
+ 0xdc, 0xe0, 0xba, 0x2f, 0xac, 0xe6, 0x44, 0x4e,
+ 0x9a, 0x4b, 0x41, 0xba, 0x73, 0x8d, 0x6c, 0x72,
+ 0xfb, 0x16, 0x69, 0x16, 0x03, 0xc1, 0x8e, 0x0e
+};
+
+/* 256-bit */
+static const uint8_t K3[] = {
+ 0x60, 0x3d, 0xeb, 0x10, 0x15, 0xca, 0x71, 0xbe,
+ 0x2b, 0x73, 0xae, 0xf0, 0x85, 0x7d, 0x77, 0x81,
+ 0x1f, 0x35, 0x2c, 0x07, 0x3b, 0x61, 0x08, 0xd7,
+ 0x2d, 0x98, 0x10, 0xa3, 0x09, 0x14, 0xdf, 0xf4
+};
+static const uint8_t P3[] = {
+ 0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f, 0x96,
+ 0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a,
+ 0xae, 0x2d, 0x8a, 0x57, 0x1e, 0x03, 0xac, 0x9c,
+ 0x9e, 0xb7, 0x6f, 0xac, 0x45, 0xaf, 0x8e, 0x51,
+ 0x30, 0xc8, 0x1c, 0x46, 0xa3, 0x5c, 0xe4, 0x11,
+ 0xe5, 0xfb, 0xc1, 0x19, 0x1a, 0x0a, 0x52, 0xef,
+ 0xf6, 0x9f, 0x24, 0x45, 0xdf, 0x4f, 0x9b, 0x17,
+ 0xad, 0x2b, 0x41, 0x7b, 0xe6, 0x6c, 0x37, 0x10
+};
+static const uint8_t C3[] = {
+ 0xf3, 0xee, 0xd1, 0xbd, 0xb5, 0xd2, 0xa0, 0x3c,
+ 0x06, 0x4b, 0x5a, 0x7e, 0x3d, 0xb1, 0x81, 0xf8,
+ 0x59, 0x1c, 0xcb, 0x10, 0xd4, 0x10, 0xed, 0x26,
+ 0xdc, 0x5b, 0xa7, 0x4a, 0x31, 0x36, 0x28, 0x70,
+ 0xb6, 0xed, 0x21, 0xb9, 0x9c, 0xa6, 0xf4, 0xf9,
+ 0xf1, 0x53, 0xe7, 0xb1, 0xbe, 0xaf, 0xed, 0x1d,
+ 0x23, 0x30, 0x4b, 0x7a, 0x39, 0xf9, 0xf3, 0xff,
+ 0x06, 0x7d, 0x8d, 0x8f, 0x9e, 0x24, 0xec, 0xc7
+};
+
+
+/* Extra AES test vectors */
+
+/* 128-bit */
+static const uint8_t K4[] = {
+ 0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6,
+ 0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c
+};
+static const uint8_t P4[] = {
+ 0xf7, 0xcd, 0x12, 0xfb, 0x4f, 0x8e, 0x50, 0xab,
+ 0x35, 0x8e, 0x56, 0xf9, 0x83, 0x53, 0x9a, 0x1a,
+ 0xfc, 0x47, 0x3c, 0x96, 0x01, 0xfe, 0x01, 0x87,
+ 0xd5, 0xde, 0x46, 0x24, 0x5c, 0x62, 0x8f, 0xba,
+ 0xba, 0x91, 0x17, 0x8d, 0xba, 0x5a, 0x79, 0xb1,
+ 0x57, 0x05, 0x4d, 0x08, 0xba, 0x1f, 0x30, 0xd3,
+ 0x80, 0x40, 0xe9, 0x37, 0xb0, 0xd6, 0x34, 0x87,
+ 0x33, 0xdd, 0xc0, 0x5b, 0x2d, 0x58, 0x1d, 0x2a,
+ 0x7b, 0xb6, 0xe3, 0xd0, 0xc8, 0xa0, 0x7a, 0x69,
+ 0xc8, 0x5d, 0x10, 0xa2, 0xc3, 0x39, 0xca, 0xaf,
+ 0x40, 0xdc, 0xc7, 0xcb, 0xff, 0x18, 0x7d, 0x51,
+ 0x06, 0x28, 0x28, 0x1f, 0x3a, 0x9c, 0x18, 0x7d,
+ 0x5b, 0xb5, 0xe9, 0x20, 0xc2, 0xae, 0x17, 0x7f,
+ 0xd1, 0x65, 0x7a, 0x75, 0xcf, 0x21, 0xa0, 0x1e,
+ 0x17, 0x1b, 0xf7, 0xe8, 0x62, 0x5f, 0xaf, 0x34,
+ 0x7f, 0xd8, 0x18, 0x4a, 0x94, 0xf2, 0x33, 0x90
+};
+static const uint8_t C4[] = {
+ 0x48, 0xa0, 0xe8, 0x0a, 0x89, 0x99, 0xab, 0xb5,
+ 0x66, 0x6d, 0x68, 0x23, 0x43, 0x40, 0x1f, 0x26,
+ 0xac, 0x52, 0xc4, 0x7b, 0x09, 0x0a, 0x8f, 0xc0,
+ 0x38, 0x00, 0xf5, 0x48, 0x3a, 0xfd, 0xcd, 0x7e,
+ 0x21, 0xe7, 0xf8, 0xf6, 0xc2, 0xa7, 0x4c, 0x1c,
+ 0x6e, 0x83, 0x57, 0xf4, 0xa4, 0xb0, 0xc0, 0x5f,
+ 0x36, 0x73, 0x22, 0xff, 0x33, 0x44, 0xab, 0xeb,
+ 0x96, 0xa8, 0xe0, 0x37, 0x65, 0x81, 0x6b, 0x82,
+ 0x89, 0xcd, 0xcc, 0xac, 0x33, 0x18, 0x7d, 0x43,
+ 0x0e, 0x79, 0x53, 0x30, 0x21, 0x4c, 0x95, 0x18,
+ 0xb6, 0xc9, 0xea, 0x5c, 0x6f, 0xa1, 0x10, 0xa3,
+ 0x51, 0x0e, 0x67, 0x8c, 0x1c, 0x9d, 0xf1, 0x57,
+ 0xeb, 0xf6, 0xad, 0x4f, 0xf2, 0x55, 0xe8, 0x11,
+ 0x6f, 0xaa, 0x4d, 0xe5, 0x18, 0x3d, 0xc3, 0x14,
+ 0xf9, 0x40, 0xfa, 0x86, 0x9d, 0xaf, 0xff, 0xfc,
+ 0x78, 0xba, 0xbe, 0x61, 0xf8, 0xd1, 0x00, 0x8d
+};
+
+/* 192-bit */
+static const uint8_t K5[] = {
+ 0x8e, 0x73, 0xb0, 0xf7, 0xda, 0x0e, 0x64, 0x52,
+ 0xc8, 0x10, 0xf3, 0x2b, 0x80, 0x90, 0x79, 0xe5,
+ 0x62, 0xf8, 0xea, 0xd2, 0x52, 0x2c, 0x6b, 0x7b
+};
+static const uint8_t P5[] = {
+ 0x19, 0x08, 0xa3, 0x58, 0x17, 0x14, 0x70, 0x5a,
+ 0xb8, 0xab, 0x4f, 0x5f, 0xa4, 0x25, 0x2b, 0xec,
+ 0xb6, 0x74, 0x0b, 0x9d, 0x56, 0x3b, 0xaf, 0xa3,
+ 0xa4, 0x2d, 0x3e, 0x1f, 0x18, 0x84, 0x3b, 0x4f,
+ 0x48, 0xd9, 0xa3, 0xfe, 0x59, 0x1e, 0x80, 0x67,
+ 0x44, 0x35, 0x26, 0x00, 0x78, 0xda, 0x68, 0xfa,
+ 0x61, 0x9c, 0xd8, 0x8e, 0x5c, 0xc1, 0xff, 0xeb,
+ 0x9c, 0x7d, 0xe7, 0xa9, 0x38, 0xeb, 0x66, 0xf8,
+ 0x6a, 0x46, 0x71, 0x51, 0x02, 0xba, 0x8d, 0x70,
+ 0x55, 0x5b, 0x60, 0xc6, 0x4c, 0xae, 0xda, 0x2e,
+ 0x17, 0xbb, 0x65, 0xef, 0x60, 0x85, 0x9e, 0x77,
+ 0xe5, 0x83, 0xef, 0x30, 0x08, 0x3a, 0xba, 0x80,
+ 0x28, 0xc0, 0xa1, 0x93, 0x4c, 0x2a, 0x0b, 0xe1,
+ 0xcb, 0xd0, 0xac, 0x72, 0x72, 0x1d, 0x96, 0x76,
+ 0x0e, 0xc0, 0xec, 0x7d, 0x84, 0xfd, 0xee, 0x08,
+ 0xa1, 0x11, 0x20, 0x0d, 0x59, 0x5c, 0x06, 0x3f,
+ 0xa3, 0xf1, 0xd7, 0xa3, 0x1d, 0x29, 0xc3, 0xaa,
+ 0x05, 0x2b, 0x74, 0x8c, 0x73, 0x60, 0x65, 0x43,
+ 0x76, 0xd4, 0xd7, 0x7b, 0x5f, 0x40, 0xf4, 0x77,
+ 0xe1, 0xcc, 0x85, 0x37, 0x1c, 0xd8, 0xda, 0x91,
+ 0xf0, 0x40, 0xb2, 0x43, 0x2d, 0x87, 0x51, 0xd0,
+ 0xce, 0x27, 0xa6, 0x60, 0xac, 0x67, 0xea, 0x8b,
+ 0xae, 0x46, 0x2e, 0x78, 0x06, 0x09, 0x8a, 0x82,
+ 0xb0, 0x0d, 0x57, 0x56, 0x82, 0xfe, 0x89, 0xd2
+};
+static const uint8_t C5[] = {
+ 0xcc, 0xe2, 0x3f, 0xc3, 0x12, 0x41, 0x31, 0x63,
+ 0x03, 0x3a, 0x3c, 0xfe, 0x76, 0x55, 0xd2, 0x26,
+ 0xf0, 0xc9, 0xb5, 0xc6, 0xf0, 0x1e, 0xc3, 0x72,
+ 0xfb, 0x64, 0x94, 0x7d, 0xf1, 0x5e, 0x2a, 0x9e,
+ 0x0d, 0x9a, 0x7a, 0xe0, 0xbc, 0x7b, 0xa6, 0x65,
+ 0x41, 0xc0, 0xa0, 0x9d, 0xb1, 0xb1, 0x09, 0x99,
+ 0x6e, 0xe7, 0x25, 0x5e, 0x64, 0x2b, 0x74, 0xfa,
+ 0xa1, 0x9a, 0x03, 0x33, 0x88, 0x81, 0x27, 0x48,
+ 0xdd, 0x53, 0x77, 0x0b, 0xef, 0xd9, 0x2f, 0xfa,
+ 0xc8, 0x50, 0x0e, 0x08, 0xa1, 0x45, 0x12, 0x82,
+ 0x2b, 0xfb, 0x85, 0x5a, 0x39, 0x8c, 0x71, 0x32,
+ 0x59, 0x27, 0x37, 0x53, 0xce, 0x3e, 0xae, 0x00,
+ 0x45, 0x53, 0xfd, 0xaf, 0xa5, 0xd1, 0x1a, 0xe9,
+ 0xa4, 0x1b, 0xe3, 0x99, 0xde, 0xcd, 0x03, 0x36,
+ 0x6b, 0x72, 0x43, 0x76, 0x04, 0xa8, 0xf9, 0x83,
+ 0xef, 0x9e, 0x57, 0x75, 0x36, 0x0e, 0x99, 0xe1,
+ 0x79, 0x2b, 0x2b, 0x96, 0x01, 0x10, 0xb8, 0xf6,
+ 0x4a, 0xa6, 0x13, 0xab, 0x7f, 0x55, 0x60, 0xf0,
+ 0xc9, 0x5c, 0x81, 0xa7, 0x96, 0x99, 0xb4, 0x55,
+ 0x41, 0x48, 0xf1, 0xd4, 0xa1, 0xb4, 0x76, 0xb5,
+ 0x35, 0xe1, 0x02, 0x8e, 0x09, 0xb2, 0x6c, 0x11,
+ 0x3f, 0xfb, 0x04, 0x47, 0x98, 0xab, 0x9b, 0x55,
+ 0xc3, 0xa9, 0x2a, 0x64, 0x32, 0x5a, 0x69, 0x96,
+ 0x28, 0x8c, 0x5b, 0xe3, 0xb2, 0x60, 0x82, 0xec
+};
+
+/* 256-bit */
+static const uint8_t K6[] = {
+ 0x60, 0x3d, 0xeb, 0x10, 0x15, 0xca, 0x71, 0xbe,
+ 0x2b, 0x73, 0xae, 0xf0, 0x85, 0x7d, 0x77, 0x81,
+ 0x1f, 0x35, 0x2c, 0x07, 0x3b, 0x61, 0x08, 0xd7,
+ 0x2d, 0x98, 0x10, 0xa3, 0x09, 0x14, 0xdf, 0xf4
+};
+static const uint8_t P6[] = {
+ 0x0b, 0xe5, 0x48, 0xa6, 0xa1, 0xbc, 0xac, 0x81,
+ 0x80, 0x06, 0x5f, 0xae, 0x1e, 0x3f, 0x55, 0x73,
+ 0x6d, 0x36, 0x7f, 0x57, 0x3d, 0xa4, 0x4a, 0x6b,
+ 0xb6, 0x65, 0x2f, 0xb7, 0xe8, 0x85, 0x47, 0xe2,
+ 0x41, 0x42, 0xc2, 0x4e, 0x58, 0xf1, 0xde, 0x42,
+ 0x9f, 0x15, 0x4c, 0xaf, 0xea, 0x04, 0x20, 0xd0,
+ 0x1a, 0x19, 0x36, 0x74, 0x71, 0x12, 0x72, 0x1b,
+ 0xdb, 0x18, 0xf9, 0x0b, 0xb3, 0xf3, 0x63, 0xd4,
+ 0x62, 0x52, 0x8b, 0x63, 0x0f, 0x6b, 0x4d, 0xb9,
+ 0x70, 0xd6, 0x91, 0xa0, 0x43, 0x3f, 0x46, 0xfe,
+ 0x43, 0xbb, 0xb8, 0xdc, 0x5e, 0xdb, 0xd4, 0x1f,
+ 0xf0, 0x17, 0x94, 0x25, 0xee, 0x55, 0x67, 0xbf,
+ 0x4d, 0xda, 0x9d, 0xe7, 0x4b, 0xc6, 0x7a, 0xcf,
+ 0x8f, 0xd7, 0xbb, 0x29, 0x6e, 0x26, 0xd4, 0xc3,
+ 0x08, 0x9b, 0x67, 0x15, 0xe9, 0x2d, 0x9f, 0x2d,
+ 0x3c, 0x76, 0x26, 0xd3, 0xda, 0xfe, 0x6e, 0x73,
+ 0x9d, 0x09, 0x60, 0x4b, 0x35, 0x60, 0xdb, 0x77,
+ 0xb6, 0xc0, 0x45, 0x91, 0xf9, 0x14, 0x8a, 0x7a,
+ 0xdd, 0xe2, 0xf1, 0xdf, 0x8f, 0x12, 0x4f, 0xd7,
+ 0x75, 0xd6, 0x9a, 0x17, 0xda, 0x76, 0x88, 0xf0,
+ 0xfa, 0x44, 0x27, 0xbe, 0x61, 0xaf, 0x55, 0x9f,
+ 0xc7, 0xf0, 0x76, 0x77, 0xde, 0xca, 0xd1, 0x47,
+ 0x51, 0x55, 0xb1, 0xbf, 0xfa, 0x1e, 0xca, 0x28,
+ 0x17, 0x70, 0xf3, 0xb5, 0xd4, 0x32, 0x47, 0x04,
+ 0xe0, 0x92, 0xd8, 0xa5, 0x03, 0x69, 0x46, 0x99,
+ 0x7f, 0x1e, 0x3f, 0xb2, 0x93, 0x36, 0xa3, 0x88,
+ 0x75, 0x07, 0x68, 0xb8, 0x33, 0xce, 0x17, 0x3f,
+ 0x5c, 0xb7, 0x1e, 0x93, 0x38, 0xc5, 0x1d, 0x79,
+ 0x86, 0x7c, 0x9d, 0x9e, 0x2f, 0x69, 0x38, 0x0f,
+ 0x97, 0x5c, 0x67, 0xbf, 0xa0, 0x8d, 0x37, 0x0b,
+ 0xd3, 0xb1, 0x04, 0x87, 0x1d, 0x74, 0xfe, 0x30,
+ 0xfb, 0xd0, 0x22, 0x92, 0xf9, 0xf3, 0x23, 0xc9
+};
+static const uint8_t C6[] = {
+ 0x4b, 0xc0, 0x1f, 0x80, 0xf5, 0xc7, 0xe8, 0xf5,
+ 0xc9, 0xd0, 0x3c, 0x86, 0x50, 0x78, 0x21, 0xce,
+ 0x01, 0xec, 0x91, 0x00, 0xc9, 0xf8, 0x73, 0x43,
+ 0x2f, 0x73, 0x8a, 0x6d, 0xee, 0xed, 0x2d, 0x40,
+ 0x17, 0x16, 0x93, 0x15, 0xac, 0xed, 0x28, 0x61,
+ 0xb0, 0x0f, 0xa2, 0xe1, 0xd3, 0x80, 0x51, 0xdf,
+ 0x73, 0xce, 0x48, 0x4c, 0x1c, 0xc1, 0x8b, 0xc9,
+ 0x9e, 0x5c, 0x48, 0x07, 0xa0, 0xf6, 0x29, 0xf8,
+ 0x63, 0x87, 0xe4, 0xe7, 0x8b, 0xf8, 0xcf, 0x58,
+ 0xda, 0x57, 0x62, 0x11, 0x2e, 0x6e, 0x91, 0x7e,
+ 0xc7, 0x73, 0xdb, 0x27, 0x3c, 0x64, 0x72, 0x52,
+ 0xe3, 0x27, 0x84, 0x1f, 0x73, 0x3f, 0xf4, 0x94,
+ 0xd2, 0xdd, 0x93, 0x33, 0x65, 0x91, 0x98, 0x89,
+ 0x13, 0xa9, 0x2b, 0x0d, 0x6f, 0x56, 0x51, 0x15,
+ 0x07, 0xc6, 0xa7, 0x36, 0x8f, 0x0c, 0xd6, 0xc2,
+ 0x07, 0x06, 0x65, 0x7a, 0xf8, 0x94, 0xa6, 0x75,
+ 0x48, 0x4c, 0xcc, 0xa5, 0xa9, 0x91, 0x04, 0x2f,
+ 0x7b, 0x89, 0x46, 0xd2, 0x87, 0xcb, 0xd6, 0x1b,
+ 0xf3, 0x1e, 0xa7, 0xe5, 0x09, 0xcf, 0x75, 0x05,
+ 0x9f, 0xc9, 0xac, 0xcc, 0x61, 0x15, 0x2d, 0x2e,
+ 0x2c, 0x0a, 0x57, 0x4d, 0x33, 0x17, 0x6b, 0x22,
+ 0x9e, 0x92, 0xc5, 0x81, 0xce, 0x9d, 0x52, 0x68,
+ 0x7d, 0x98, 0xe1, 0x23, 0x70, 0xc5, 0x19, 0x3e,
+ 0x91, 0xfc, 0xc6, 0xd7, 0x67, 0x5f, 0xbb, 0x57,
+ 0x20, 0x96, 0x3f, 0x1f, 0x9f, 0x64, 0xe9, 0xb1,
+ 0x51, 0xfd, 0x8c, 0xc1, 0x0f, 0x50, 0xbe, 0x43,
+ 0x5f, 0x90, 0xb4, 0xd1, 0xb6, 0x41, 0x7c, 0x37,
+ 0x92, 0x71, 0xda, 0x9d, 0xfd, 0xee, 0x69, 0x8c,
+ 0x24, 0x18, 0xe8, 0x81, 0x60, 0xe2, 0x89, 0x33,
+ 0x42, 0xd4, 0x1b, 0x6a, 0xcb, 0x4a, 0x5b, 0x00,
+ 0x01, 0x4f, 0x11, 0x47, 0x0f, 0x57, 0xb0, 0x90,
+ 0xf0, 0xed, 0xb0, 0x34, 0x2e, 0x9f, 0x81, 0x6c
+};
+
+/* 128-bit */
+static const uint8_t K7[] = {
+ 0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6,
+ 0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c
+};
+static const uint8_t P7[] = {
+ 0xdd, 0x14, 0xde, 0x30, 0xe0, 0xfd, 0x7b, 0x2a,
+ 0x94, 0x8e, 0x28, 0xa0, 0xf6, 0x93, 0x6e, 0xf5,
+ 0x92, 0x65, 0x1d, 0x5e, 0x78, 0x2a, 0x9d, 0x39,
+ 0xfc, 0xb8, 0x6d, 0x8b, 0xa5, 0xf4, 0x4b, 0x21,
+ 0xdd, 0x4e, 0xe9, 0xeb, 0xd7, 0xa7, 0xa1, 0x59,
+ 0xdc, 0x4c, 0x5e, 0xcc, 0x83, 0xab, 0xd3, 0x45,
+ 0xfe, 0x2c, 0x73, 0x23, 0xea, 0x45, 0xcb, 0x0c,
+ 0x12, 0x67, 0x28, 0xcd, 0xef, 0x4e, 0xca, 0xe2,
+ 0x1d, 0x92, 0x82, 0xd8, 0x0f, 0xa9, 0x36, 0x23,
+ 0x6d, 0x38, 0x68, 0xac, 0xa0, 0xeb, 0xdc, 0xcc,
+ 0xdf, 0xb8, 0x3a, 0x53, 0x04, 0x1a, 0x55, 0x27,
+ 0x8e, 0x22, 0x86, 0x8c, 0xbd, 0xdc, 0x6b, 0x12,
+ 0x9c, 0x69, 0xd2, 0x7a, 0x4b, 0x52, 0x5d, 0x76,
+ 0x34, 0xb9, 0x5e, 0x30, 0x0a, 0x8d, 0x1e, 0xf1,
+ 0x27, 0xda, 0x5b, 0xb9, 0x5e, 0xbf, 0x65, 0x34,
+ 0x00, 0xb6, 0xd2, 0xb0, 0x89, 0x12, 0xb6, 0x35,
+ 0xae, 0x27, 0x7f, 0x11, 0xe9, 0xf9, 0x1c, 0x71,
+ 0xc9, 0x50, 0xfe, 0xd4, 0x76, 0x50, 0x95, 0xf7,
+ 0xe1, 0x1c, 0x14, 0xcd, 0x67, 0x0f, 0xf0, 0x6d,
+ 0xa2, 0x93, 0x7b, 0x2c, 0x8d, 0x83, 0x5c, 0xff,
+ 0xe4, 0x95, 0xf3, 0xa1, 0xfd, 0x00, 0x77, 0x68,
+ 0x41, 0xb4, 0xfb, 0x81, 0xf4, 0x61, 0x1a, 0x84,
+ 0x5a, 0x53, 0xc3, 0xdc, 0xba, 0x0d, 0x67, 0x2e,
+ 0xcf, 0xf2, 0x30, 0xf5, 0x1d, 0xe9, 0xc4, 0x2c,
+ 0xac, 0x1f, 0xa7, 0x9c, 0x64, 0xfd, 0x45, 0x30,
+ 0x1b, 0xa1, 0x3b, 0x3d, 0xc7, 0xf5, 0xf9, 0xbb,
+ 0xba, 0x99, 0xa4, 0x12, 0x6e, 0x4e, 0xea, 0x0b,
+ 0x29, 0x7f, 0xcd, 0x84, 0x64, 0x50, 0x40, 0xb7,
+ 0x6a, 0x24, 0x29, 0xa4, 0xa7, 0xa1, 0xef, 0xa9,
+ 0xcf, 0xdf, 0x09, 0xff, 0xaa, 0x17, 0x5d, 0x82,
+ 0x74, 0xf5, 0xae, 0xd0, 0xe9, 0xec, 0xad, 0x5e,
+ 0xa7, 0x84, 0xda, 0xe7, 0x33, 0x58, 0x7e, 0x00,
+ 0x45, 0x5f, 0xbb, 0x15, 0xa3, 0x65, 0x0e, 0xf5,
+ 0x7e, 0x27, 0xe7, 0x04, 0x52, 0x58, 0x81, 0xd0,
+ 0xee, 0x8f, 0xaf, 0xe2, 0x3c, 0xbe, 0x08, 0x97,
+ 0x8a, 0x97, 0x12, 0xb0, 0x09, 0xfe, 0xa5, 0xeb,
+ 0xd1, 0x9c, 0x30, 0xe8, 0x9a, 0x3f, 0xe0, 0x38,
+ 0x34, 0x2b, 0xad, 0xb7, 0xc4, 0xda, 0x54, 0xab,
+ 0x97, 0x9c, 0x46, 0x2b, 0x2c, 0x0b, 0xb3, 0x49,
+ 0xcd, 0x9d, 0x32, 0x38, 0x3c, 0x1a, 0x49, 0xdc,
+ 0x2f, 0xe7, 0xcd, 0x8a, 0xb0, 0x76, 0xcf, 0x30,
+ 0xea, 0x0b, 0xb0, 0xb7, 0x63, 0xed, 0xb2, 0x8c,
+ 0xc9, 0x2c, 0xb7, 0x75, 0xa8, 0xf6, 0x63, 0xb6,
+ 0xcd, 0xb5, 0x63, 0xfb, 0x5f, 0x89, 0xae, 0x3d,
+ 0x33, 0x73, 0xaf, 0xde, 0xcb, 0x37, 0x0a, 0x50,
+ 0x6f, 0xae, 0xf3, 0xa6, 0x79, 0x85, 0xdd, 0xc5,
+ 0x24, 0xc5, 0x29, 0x23, 0x64, 0xef, 0x43, 0xd7,
+ 0xc4, 0xab, 0xd8, 0xb0, 0x84, 0x26, 0x6b, 0xe8,
+ 0xb1, 0x5d, 0xb5, 0x69, 0xfb, 0x97, 0x0e, 0x20,
+ 0xb3, 0xc1, 0x60, 0xad, 0x1a, 0xd2, 0xd6, 0x3a,
+ 0x73, 0x08, 0xf0, 0x47, 0x5f, 0xcf, 0x15, 0xf7,
+ 0x7b, 0xf3, 0x69, 0x08, 0x5a, 0x6b, 0x9f, 0xc7,
+ 0x12, 0xa1, 0xf0, 0xfb, 0x91, 0xc9, 0x07, 0x61,
+ 0x21, 0xa0, 0x30, 0x4c, 0x16, 0x81, 0xcd, 0x3c,
+ 0x61, 0xe8, 0x96, 0x91, 0x30, 0xdd, 0x0c, 0x0e,
+ 0x0b, 0xa1, 0x33, 0x95, 0x67, 0x99, 0xd6, 0x1e,
+ 0x1a, 0xb3, 0x12, 0xfd, 0xad, 0x46, 0x48, 0x87,
+ 0x5e, 0xe8, 0xd4, 0xf5, 0xac, 0xdf, 0xa7, 0x37,
+ 0xb8, 0xa1, 0x62, 0x8c, 0xb8, 0xb6, 0xb0, 0x69,
+ 0x63, 0x29, 0x60, 0x64, 0x26, 0xc3, 0xf8, 0x18,
+ 0x8e, 0x46, 0xa0, 0xc5, 0x45, 0x5c, 0x08, 0x2a,
+ 0xed, 0x29, 0x84, 0x11, 0xea, 0x59, 0xc0, 0x16,
+ 0xe2, 0x04, 0x30, 0x63, 0x22, 0x87, 0xb6, 0xc7,
+ 0x81, 0xa6, 0x58, 0xc0, 0xb2, 0xb0, 0x7d, 0xbc,
+ 0x16, 0x44, 0x6e, 0x5d, 0x6d, 0xce, 0x2a, 0xe0,
+ 0x20, 0x69, 0x35, 0xa1, 0x5d, 0x17, 0x48, 0x55,
+ 0x88, 0xfe, 0xde, 0x34, 0xe7, 0x18, 0xbf, 0x7e,
+ 0x0a, 0x1c, 0x32, 0x88, 0xab, 0xde, 0xe1, 0x02,
+ 0x61, 0x09, 0x58, 0x96, 0xef, 0x16, 0x73, 0xac,
+ 0xc0, 0x5c, 0x15, 0xca, 0x9b, 0xea, 0x0e, 0x05,
+ 0x97, 0x88, 0x09, 0xc5, 0xd0, 0x95, 0x90, 0xae,
+ 0xa5, 0xb5, 0x28, 0xc6, 0x5a, 0x7b, 0xb3, 0xcc,
+ 0xae, 0x57, 0x71, 0x83, 0x56, 0x57, 0xca, 0xe8,
+ 0x8b, 0x21, 0x0c, 0x37, 0x1d, 0xde, 0x85, 0xe2,
+ 0x1b, 0xa2, 0x38, 0xa0, 0xc5, 0xc7, 0x98, 0x7b,
+ 0xf9, 0x5e, 0x6a, 0x68, 0xb3, 0xed, 0x49, 0x5e,
+ 0x46, 0xb9, 0xc9, 0xf6, 0x34, 0xa6, 0x0e, 0xac,
+ 0x90, 0x72, 0xcf, 0xf8, 0x5b, 0x48, 0x13, 0x40,
+ 0x7a, 0xce, 0xfd, 0x3c, 0x16, 0xff, 0xb5, 0xea,
+ 0xb2, 0x56, 0x47, 0xcc, 0x9f, 0xbc, 0xae, 0x4a,
+ 0xc8, 0xa5, 0x59, 0x57, 0x01, 0xd7, 0x9f, 0xd7,
+ 0xbf, 0x13, 0xb1, 0xbf, 0xb7, 0x9a, 0xa0, 0xa1,
+ 0xc6, 0x66, 0x61, 0x96, 0xf2, 0xcd, 0x8c, 0xcb,
+ 0x3c, 0x67, 0xb5, 0xed, 0xb7, 0xa2, 0x54, 0x84,
+ 0x3c, 0xcb, 0x7e, 0xb3, 0x97, 0x05, 0xcb, 0x8f,
+ 0xa9, 0xc6, 0x3c, 0xa2, 0xbd, 0xbf, 0x3a, 0xb8,
+ 0x92, 0x08, 0x01, 0xea, 0xfd, 0x55, 0x2f, 0x27,
+ 0x2a, 0x82, 0x38, 0x26, 0x1d, 0x81, 0x19, 0x33,
+ 0x75, 0x3c, 0xa2, 0x13, 0x1e, 0x58, 0x9f, 0x0b,
+ 0x08, 0x5d, 0x7a, 0x2c, 0x9a, 0xd1, 0xa5, 0x4c,
+ 0x41, 0xb4, 0x1d, 0xf8, 0x42, 0x08, 0x87, 0xdd,
+ 0x8e, 0xc9, 0x05, 0xd2, 0x8c, 0xba, 0x93, 0x28,
+ 0xbe, 0x4a, 0x14, 0x13, 0x2a, 0x58, 0xf0, 0x1c,
+ 0xac, 0xc1, 0xc4, 0x49, 0xbc, 0xe1, 0xda, 0xb6,
+ 0x2d, 0x06, 0x98, 0x32, 0xea, 0xa3, 0x89, 0x11,
+ 0xca, 0x5f, 0x3e, 0xda, 0x24, 0xe2, 0xdb, 0x1e,
+ 0xca, 0xf3, 0xc0, 0xc7, 0x64, 0xee, 0x4b, 0x3d,
+ 0xa2, 0xee, 0x69, 0xb0, 0x3f, 0x2c, 0xd5, 0x49,
+ 0xba, 0x2d, 0x45, 0x7d, 0xdd, 0xb0, 0x0d, 0xc5,
+ 0xe0, 0x57, 0x95, 0xbe, 0xf8, 0x4a, 0x11, 0x46,
+ 0x4c, 0xbb, 0xdf, 0xa8, 0x5a, 0xf9, 0xff, 0x0e,
+ 0x31, 0xa9, 0x50, 0x5d, 0xc4, 0xb3, 0x3d, 0x09,
+ 0x46, 0x33, 0x39, 0x31, 0xd5, 0xb3, 0xe5, 0x91,
+ 0xcf, 0xca, 0x8a, 0xe0, 0xc2, 0x8e, 0xea, 0xbe,
+ 0x54, 0x64, 0x78, 0x0c, 0x25, 0x1c, 0x17, 0xbc,
+ 0x49, 0xf9, 0xc0, 0x30, 0x5f, 0x08, 0x04, 0x9d,
+ 0xb5, 0xe4, 0xeb, 0x9e, 0xe5, 0x1e, 0x6d, 0xbc,
+ 0x7b, 0xe7, 0xf0, 0xd1, 0xa0, 0x01, 0x18, 0x51,
+ 0x4f, 0x64, 0xc3, 0x9c, 0x70, 0x25, 0x4f, 0xed,
+ 0xc7, 0xbc, 0x19, 0x00, 0x09, 0x22, 0x97, 0x5d,
+ 0x6f, 0xe4, 0x47, 0x98, 0x05, 0xcd, 0xcc, 0xde,
+ 0xd5, 0xe3, 0xaf, 0xa3, 0xde, 0x69, 0x99, 0x2a,
+ 0xd1, 0x28, 0x4d, 0x7c, 0x89, 0xa0, 0xdb, 0xae,
+ 0xf9, 0xf1, 0x4a, 0x46, 0xdf, 0xbe, 0x1d, 0x37,
+ 0xf2, 0xd5, 0x36, 0x4a, 0x54, 0xe8, 0xc4, 0xfb,
+ 0x57, 0x77, 0x09, 0x05, 0x31, 0x99, 0xaf, 0x9a,
+ 0x17, 0xd1, 0x20, 0x93, 0x31, 0x89, 0xff, 0xed,
+ 0x0f, 0xf8, 0xed, 0xb3, 0xcf, 0x4c, 0x9a, 0x74,
+ 0xbb, 0x00, 0x36, 0x41, 0xd1, 0x13, 0x68, 0x73,
+ 0x78, 0x63, 0x42, 0xdd, 0x99, 0x15, 0x9a, 0xf4,
+ 0xe1, 0xad, 0x6d, 0xf6, 0x5e, 0xca, 0x20, 0x24,
+ 0xd7, 0x9d, 0x2f, 0x58, 0x97, 0xf7, 0xde, 0x31,
+ 0x51, 0xa3, 0x1c, 0xe2, 0x66, 0x24, 0x4b, 0xa1,
+ 0x56, 0x02, 0x32, 0xf4, 0x89, 0xf3, 0x86, 0x9a,
+ 0x85, 0xda, 0x95, 0xa8, 0x7f, 0x6a, 0x77, 0x02,
+ 0x3a, 0xba, 0xe0, 0xbe, 0x34, 0x5c, 0x9a, 0x1a
+};
+static const uint8_t C7[] = {
+ 0x62, 0xa1, 0xcc, 0x1e, 0x1b, 0xc3, 0xb1, 0x11,
+ 0xb5, 0x11, 0x4c, 0x37, 0xbf, 0xd0, 0x0c, 0xef,
+ 0x36, 0x9f, 0x99, 0x49, 0x38, 0xc2, 0x62, 0xbd,
+ 0x3e, 0x03, 0xd1, 0x02, 0xa2, 0x18, 0xdc, 0x58,
+ 0x9c, 0x01, 0x99, 0xd8, 0x47, 0xeb, 0x27, 0xce,
+ 0x76, 0x84, 0xa5, 0xab, 0xb7, 0x9b, 0xbb, 0x98,
+ 0xc9, 0x84, 0x02, 0x6e, 0x32, 0x65, 0xc9, 0xcb,
+ 0xca, 0xc7, 0xa5, 0x95, 0x11, 0xcc, 0x0a, 0x9d,
+ 0x5e, 0xea, 0xba, 0x59, 0xef, 0x25, 0xc0, 0x2d,
+ 0x8b, 0xa2, 0xec, 0x2f, 0x34, 0xea, 0x7c, 0xef,
+ 0xee, 0x2a, 0x57, 0x80, 0xc4, 0xca, 0x5e, 0x08,
+ 0x8c, 0x12, 0x13, 0x39, 0xd1, 0xc7, 0x96, 0x93,
+ 0x41, 0x22, 0x97, 0x1c, 0x7d, 0xe0, 0x47, 0xab,
+ 0xfa, 0xd7, 0xc6, 0x38, 0x5a, 0x39, 0xdb, 0x4c,
+ 0xd4, 0x6d, 0x50, 0x2b, 0x8f, 0xb1, 0x92, 0x06,
+ 0x01, 0xbf, 0xdc, 0x14, 0x5c, 0x32, 0xee, 0xb0,
+ 0x6a, 0x36, 0xe8, 0xe9, 0xf3, 0x12, 0x9f, 0x1f,
+ 0x00, 0xe5, 0x25, 0x3b, 0x52, 0x74, 0xba, 0x50,
+ 0x17, 0x81, 0x60, 0x5c, 0x15, 0xec, 0x4d, 0xb0,
+ 0x6a, 0xa1, 0xdd, 0xb4, 0xa2, 0x71, 0x01, 0xb8,
+ 0x8b, 0x59, 0x93, 0x58, 0x23, 0xd6, 0x38, 0xbf,
+ 0x49, 0x94, 0xb7, 0x6e, 0x22, 0x75, 0x68, 0x1f,
+ 0x15, 0x2c, 0xc4, 0x46, 0x44, 0x35, 0xc8, 0x7a,
+ 0x40, 0x2e, 0x55, 0x3f, 0x67, 0x4d, 0x12, 0x21,
+ 0xf6, 0xb1, 0x20, 0x47, 0x4f, 0x35, 0xe4, 0x96,
+ 0xf9, 0xa2, 0xdc, 0x4c, 0xe3, 0xa2, 0x13, 0x41,
+ 0xed, 0x6d, 0x86, 0x80, 0x23, 0xe5, 0x2a, 0xd1,
+ 0xa0, 0x69, 0x8f, 0x7e, 0x22, 0x3f, 0xf1, 0x65,
+ 0x9f, 0xd7, 0x86, 0xa8, 0x78, 0x57, 0x49, 0x74,
+ 0x91, 0x52, 0x91, 0xe7, 0x1e, 0xe2, 0x14, 0xe9,
+ 0x88, 0xe1, 0x67, 0x12, 0x3d, 0x0a, 0x22, 0x31,
+ 0x56, 0x2e, 0x36, 0xd4, 0x45, 0xc9, 0x9b, 0x7b,
+ 0x09, 0x53, 0x55, 0x36, 0xed, 0xa3, 0xc2, 0x22,
+ 0xac, 0x00, 0x5e, 0x57, 0xc8, 0x40, 0x65, 0xd2,
+ 0x62, 0x61, 0x35, 0xf2, 0xe8, 0x4f, 0xb3, 0x9d,
+ 0x2c, 0xb2, 0x12, 0x5e, 0x15, 0x47, 0xd6, 0x1c,
+ 0x99, 0x80, 0xe0, 0x1c, 0x09, 0x28, 0xa0, 0x7e,
+ 0x6c, 0x96, 0xc9, 0x62, 0x33, 0xd3, 0xbe, 0x53,
+ 0x16, 0xa0, 0xf2, 0xa9, 0x42, 0x1c, 0x81, 0xa3,
+ 0x35, 0x9b, 0x93, 0x9e, 0xc6, 0xc0, 0x83, 0x03,
+ 0xb7, 0x39, 0x66, 0xc9, 0x86, 0xf8, 0x8d, 0xc0,
+ 0xe2, 0x88, 0xb4, 0x1f, 0x5d, 0x15, 0x80, 0x60,
+ 0x2d, 0x53, 0x1d, 0x60, 0x07, 0xbc, 0x72, 0x11,
+ 0xd0, 0x0e, 0xcb, 0x70, 0x9c, 0xa0, 0x48, 0x56,
+ 0x21, 0x5f, 0x18, 0xdd, 0xa3, 0x1d, 0xdb, 0xe0,
+ 0x41, 0x0c, 0x9e, 0xb9, 0xa2, 0x7e, 0x32, 0xb3,
+ 0x3e, 0x91, 0x9d, 0xf2, 0xa6, 0x0d, 0x8c, 0xea,
+ 0xae, 0x44, 0xb2, 0x0f, 0x11, 0x35, 0x27, 0x2e,
+ 0xb6, 0x3d, 0xe9, 0x63, 0x86, 0x2e, 0x81, 0xdc,
+ 0xfa, 0xb4, 0x52, 0x1d, 0x9c, 0xd5, 0x44, 0x95,
+ 0xc8, 0xd0, 0x66, 0x8a, 0xbd, 0xf6, 0xd1, 0xff,
+ 0xeb, 0x82, 0x68, 0x58, 0x7b, 0xec, 0x0e, 0x92,
+ 0x0e, 0x48, 0xd6, 0xff, 0x8d, 0xac, 0xc1, 0x41,
+ 0x84, 0x9e, 0x56, 0x54, 0xf9, 0xb5, 0x1c, 0xb0,
+ 0x9f, 0xde, 0xfe, 0x14, 0x42, 0x0d, 0x22, 0x12,
+ 0xf2, 0x7d, 0x7b, 0xc3, 0x2e, 0x72, 0x27, 0x76,
+ 0x12, 0xdf, 0x57, 0x2f, 0x97, 0x82, 0x9b, 0xcf,
+ 0x75, 0x1a, 0x4a, 0x0c, 0xad, 0x29, 0x56, 0x4c,
+ 0x74, 0xaf, 0x95, 0x03, 0xff, 0x9f, 0x9d, 0xc3,
+ 0x2e, 0x9c, 0x1a, 0x42, 0x75, 0xe1, 0x59, 0xc9,
+ 0x05, 0x12, 0x6c, 0xea, 0x2b, 0x2f, 0x89, 0xfc,
+ 0xa4, 0x73, 0xc8, 0xdc, 0xf6, 0xd5, 0x50, 0x19,
+ 0x22, 0x80, 0xbc, 0x08, 0x48, 0xb4, 0x45, 0x47,
+ 0x25, 0x01, 0xa9, 0x55, 0x7b, 0x66, 0xbd, 0x84,
+ 0x0f, 0x16, 0xfa, 0x44, 0x23, 0x51, 0x6f, 0xed,
+ 0x35, 0x0e, 0x88, 0x4d, 0xda, 0xe8, 0x27, 0x94,
+ 0xbd, 0x68, 0x46, 0x28, 0x79, 0x8c, 0x03, 0x03,
+ 0xf0, 0x81, 0xac, 0xbc, 0xc2, 0xdd, 0xa8, 0x98,
+ 0xdf, 0xe3, 0x1c, 0x1c, 0x4b, 0x43, 0x9e, 0x7b,
+ 0x26, 0x3c, 0xe9, 0xff, 0x3b, 0xee, 0x35, 0xe6,
+ 0x2a, 0xcf, 0xdc, 0x17, 0x85, 0x99, 0x9e, 0x88,
+ 0x5c, 0x38, 0x4c, 0x56, 0x4a, 0x06, 0xeb, 0x28,
+ 0xf7, 0xb5, 0x97, 0x04, 0xd4, 0x05, 0x85, 0xee,
+ 0x90, 0xd7, 0xe2, 0x10, 0x8a, 0x86, 0xb2, 0x3f,
+ 0xbf, 0x3f, 0x6a, 0xe6, 0xeb, 0xc1, 0x42, 0x97,
+ 0xcb, 0x30, 0x41, 0x44, 0x79, 0x44, 0x7e, 0x1e,
+ 0x3e, 0x55, 0xe5, 0xc8, 0xd5, 0xec, 0x64, 0x3d,
+ 0x09, 0x69, 0xea, 0xdb, 0xe5, 0x08, 0x33, 0x00,
+ 0x79, 0x1b, 0x31, 0xf2, 0x3d, 0xbd, 0x73, 0xe6,
+ 0x0e, 0xc1, 0xb9, 0x45, 0xbf, 0xa5, 0x52, 0x5a,
+ 0xcd, 0x71, 0x7a, 0x2e, 0x20, 0x1e, 0xbf, 0xff,
+ 0x42, 0x0a, 0x6a, 0x1b, 0xa4, 0xad, 0x79, 0x3d,
+ 0x34, 0x54, 0x73, 0xe2, 0xd6, 0x6f, 0xb0, 0xcc,
+ 0xc0, 0x8a, 0x56, 0x3d, 0x4d, 0x90, 0x35, 0xe3,
+ 0x4b, 0xcc, 0x40, 0x40, 0xbc, 0xcf, 0x93, 0xa0,
+ 0xbd, 0x5c, 0xed, 0x22, 0x57, 0x92, 0x5c, 0x8d,
+ 0xfb, 0x67, 0x9e, 0xab, 0x40, 0xc9, 0xed, 0x7c,
+ 0xa1, 0xb6, 0x36, 0xb2, 0xcb, 0xbc, 0xf2, 0x1a,
+ 0x46, 0x6c, 0x1f, 0xb3, 0xe4, 0xf6, 0x4c, 0x7a,
+ 0x10, 0x81, 0x16, 0x93, 0x77, 0xa3, 0xa1, 0x07,
+ 0xec, 0xc8, 0x01, 0x76, 0xf8, 0xe3, 0xe6, 0xae,
+ 0xaf, 0x90, 0x98, 0x3a, 0xbd, 0x7d, 0x28, 0x57,
+ 0xb4, 0xc5, 0xfe, 0x13, 0xab, 0x6c, 0x77, 0xc1,
+ 0xc3, 0x47, 0x1d, 0x34, 0x2f, 0xdd, 0xe1, 0x7b,
+ 0x8b, 0x65, 0xc4, 0xe3, 0x45, 0xda, 0x6e, 0xba,
+ 0x37, 0xb1, 0x37, 0xbf, 0x63, 0x1d, 0x39, 0x77,
+ 0xf0, 0xa8, 0xf8, 0xda, 0x91, 0xd3, 0x27, 0xb9,
+ 0x29, 0x70, 0xf7, 0xae, 0x11, 0x6d, 0x8a, 0x8f,
+ 0x2f, 0x3a, 0xe1, 0xb8, 0x9b, 0xb5, 0x2a, 0xa8,
+ 0x7b, 0x86, 0x49, 0xca, 0x0c, 0x95, 0x17, 0x1e,
+ 0xaf, 0x9c, 0x52, 0x6b, 0x68, 0xae, 0xe3, 0xc3,
+ 0xc9, 0x8c, 0x89, 0x4b, 0xf2, 0xfb, 0xb1, 0xae,
+ 0x2f, 0x80, 0xf9, 0xa3, 0xf4, 0x10, 0x09, 0x36,
+ 0x81, 0x27, 0x06, 0x6d, 0xe9, 0x79, 0x8e, 0xa4,
+ 0x8e, 0x12, 0xfa, 0x03, 0x8e, 0x69, 0x4c, 0x7e,
+ 0xc5, 0x10, 0xd5, 0x00, 0x64, 0x87, 0xf8, 0x10,
+ 0x8a, 0x8e, 0x96, 0x9e, 0xc8, 0xac, 0x42, 0x75,
+ 0x97, 0x6d, 0x62, 0x3f, 0xa3, 0x29, 0x11, 0xd2,
+ 0x73, 0xd3, 0x95, 0xef, 0xb4, 0x64, 0xa4, 0x37,
+ 0x09, 0x15, 0x42, 0x7f, 0xc4, 0x46, 0x8b, 0x80,
+ 0xa8, 0xd9, 0x2a, 0xfc, 0x38, 0x8f, 0xf9, 0xc1,
+ 0xc5, 0x95, 0xad, 0x62, 0xc9, 0x6c, 0x60, 0x0b,
+ 0x30, 0x04, 0x8c, 0x88, 0xb5, 0x0b, 0x73, 0x23,
+ 0xa4, 0xe0, 0xb7, 0x6e, 0x4c, 0x78, 0xe5, 0x0a,
+ 0xfb, 0xe1, 0xc4, 0xeb, 0x1a, 0xb4, 0xd8, 0x3c,
+ 0x06, 0xb0, 0x00, 0x23, 0x86, 0xb0, 0xb4, 0x9d,
+ 0x33, 0xe4, 0x21, 0xca, 0xf2, 0xad, 0x14, 0x07,
+ 0x82, 0x25, 0xde, 0x85, 0xe4, 0x58, 0x56, 0x93,
+ 0x09, 0x3a, 0xeb, 0xde, 0x46, 0x77, 0x76, 0xa2,
+ 0x35, 0x39, 0xd0, 0xf6, 0x10, 0x81, 0x73, 0x3f,
+ 0x22, 0x3b, 0xeb, 0xca, 0x00, 0x19, 0x38, 0x89,
+ 0x26, 0x29, 0x7d, 0x6f, 0x70, 0xa6, 0xbb, 0x52,
+ 0x58, 0xb1, 0x0a, 0x85, 0xe9, 0x0b, 0x74, 0x2f,
+ 0x08, 0xe8, 0xa4, 0x4d, 0xa1, 0xcf, 0xf2, 0x75,
+ 0xed, 0x05, 0xae, 0x7f, 0x10, 0xb1, 0x71, 0x26,
+ 0xc5, 0xc7, 0xdc, 0xb0, 0x2d, 0x26, 0xf1, 0xb4
+};
+
+static const struct ecb_vector ecb_vectors[] = {
+ {K1, P1, sizeof(P1), C1, sizeof(K1)},
+ {K2, P2, sizeof(P2), C2, sizeof(K2)},
+ {K3, P3, sizeof(P3), C3, sizeof(K3)},
+ {K4, P4, sizeof(P4), C4, sizeof(K4)},
+ {K5, P5, sizeof(P5), C5, sizeof(K5)},
+ {K6, P6, sizeof(P6), C6, sizeof(K6)},
+ {K7, P7, sizeof(P7), C7, sizeof(K7)},
+};
+
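+/*
+ * Checks a completed ECB job: status, output text match against the expected
+ * text, and that the guard padding placed before and after the target buffer
+ * has not been overwritten.
+ */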
+static int
+ecb_job_ok(const struct JOB_AES_HMAC *job,
+ const uint8_t *out_text,
+ const uint8_t *target,
+ const uint8_t *padding,
+ const size_t sizeof_padding,
+ const unsigned text_len)
+{
+ const int num = (const int)((uint64_t)job->user_data2);
+
+ if (job->status != STS_COMPLETED) {
+		printf("%d error status:%d, job %d\n",
+		       __LINE__, job->status, num);
+ return 0;
+ }
+ if (memcmp(out_text, target + sizeof_padding,
+ text_len)) {
+ printf("%d mismatched\n", num);
+ return 0;
+ }
+ if (memcmp(padding, target, sizeof_padding)) {
+ printf("%d overwrite head\n", num);
+ return 0;
+ }
+ if (memcmp(padding,
+ target + sizeof_padding + text_len,
+ sizeof_padding)) {
+ printf("%d overwrite tail\n", num);
+ return 0;
+ }
+ return 1;
+}
+
+static int
+test_ecb_many(struct MB_MGR *mb_mgr,
+ void *enc_keys,
+ void *dec_keys,
+ const uint8_t *in_text,
+ const uint8_t *out_text,
+ unsigned text_len,
+ int dir,
+ int order,
+ JOB_CIPHER_MODE cipher,
+ const int in_place,
+ const int key_len,
+ const int num_jobs)
+{
+ struct JOB_AES_HMAC *job;
+ uint8_t padding[16];
+ uint8_t **targets = malloc(num_jobs * sizeof(void *));
+ int i, jobs_rx = 0, ret = -1;
+
+ assert(targets != NULL);
+
+ memset(padding, -1, sizeof(padding));
+
+ for (i = 0; i < num_jobs; i++) {
+ targets[i] = malloc(text_len + (sizeof(padding) * 2));
+ memset(targets[i], -1, text_len + (sizeof(padding) * 2));
+ if (in_place) {
+ /* copy input text to the allocated buffer */
+ memcpy(targets[i] + sizeof(padding), in_text, text_len);
+ }
+ }
+
+ /* flush the scheduler */
+ while ((job = IMB_FLUSH_JOB(mb_mgr)) != NULL)
+ ;
+
+ for (i = 0; i < num_jobs; i++) {
+ job = IMB_GET_NEXT_JOB(mb_mgr);
+ job->cipher_direction = dir;
+ job->chain_order = order;
+ if (!in_place) {
+ job->dst = targets[i] + sizeof(padding);
+ job->src = in_text;
+ } else {
+ job->dst = targets[i] + sizeof(padding);
+ job->src = targets[i] + sizeof(padding);
+ }
+ job->cipher_mode = cipher;
+ job->aes_enc_key_expanded = enc_keys;
+ job->aes_dec_key_expanded = dec_keys;
+ job->aes_key_len_in_bytes = key_len;
+
+ job->iv_len_in_bytes = 0;
+ job->cipher_start_src_offset_in_bytes = 0;
+ job->msg_len_to_cipher_in_bytes = text_len;
+ job->user_data = targets[i];
+ job->user_data2 = (void *)((uint64_t)i);
+
+ job->hash_alg = NULL_HASH;
+
+ job = IMB_SUBMIT_JOB(mb_mgr);
+ if (job != NULL) {
+ jobs_rx++;
+ if (!ecb_job_ok(job, out_text, job->user_data, padding,
+ sizeof(padding), text_len))
+ goto end;
+ }
+ }
+
+ while ((job = IMB_FLUSH_JOB(mb_mgr)) != NULL) {
+ jobs_rx++;
+ if (!ecb_job_ok(job, out_text, job->user_data, padding,
+ sizeof(padding), text_len))
+ goto end;
+ }
+
+ if (jobs_rx != num_jobs) {
+ printf("Expected %d jobs, received %d\n", num_jobs, jobs_rx);
+ goto end;
+ }
+ ret = 0;
+
+ end:
+ while ((job = IMB_FLUSH_JOB(mb_mgr)) != NULL)
+ ;
+
+ for (i = 0; i < num_jobs; i++)
+ free(targets[i]);
+ free(targets);
+ return ret;
+}
+
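+/*
+ * For every vector in the table, expand the key and run test_ecb_many()
+ * four times: encrypt and decrypt, each out-of-place and in-place.
+ */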
+static int
+test_ecb_vectors(struct MB_MGR *mb_mgr, const int vec_cnt,
+ const struct ecb_vector *vec_tab, const char *banner,
+ const JOB_CIPHER_MODE cipher, const int num_jobs)
+{
+ int vect, errors = 0;
+ DECLARE_ALIGNED(uint32_t enc_keys[15*4], 16);
+ DECLARE_ALIGNED(uint32_t dec_keys[15*4], 16);
+
+ printf("%s (N jobs = %d):\n", banner, num_jobs);
+ for (vect = 0; vect < vec_cnt; vect++) {
+#ifdef DEBUG
+ printf("[%d/%d] Standard vector key_len:%d\n",
+ vect + 1, vec_cnt,
+ (int) vec_tab[vect].Klen);
+#else
+ printf(".");
+#endif
+ switch (vec_tab[vect].Klen) {
+ case 16:
+ IMB_AES_KEYEXP_128(mb_mgr, vec_tab[vect].K, enc_keys,
+ dec_keys);
+ break;
+ case 24:
+ IMB_AES_KEYEXP_192(mb_mgr, vec_tab[vect].K, enc_keys,
+ dec_keys);
+ break;
+ case 32:
+ default:
+ IMB_AES_KEYEXP_256(mb_mgr, vec_tab[vect].K, enc_keys,
+ dec_keys);
+ break;
+ }
+
+ if (test_ecb_many(mb_mgr, enc_keys, dec_keys,
+ vec_tab[vect].P, vec_tab[vect].C,
+ (unsigned) vec_tab[vect].Plen,
+ ENCRYPT, CIPHER_HASH, cipher, 0,
+ vec_tab[vect].Klen, num_jobs)) {
+ printf("error #%d encrypt\n", vect + 1);
+ errors++;
+ }
+
+ if (test_ecb_many(mb_mgr, enc_keys, dec_keys,
+ vec_tab[vect].C, vec_tab[vect].P,
+ (unsigned) vec_tab[vect].Plen,
+ DECRYPT, HASH_CIPHER, cipher, 0,
+ vec_tab[vect].Klen, num_jobs)) {
+ printf("error #%d decrypt\n", vect + 1);
+ errors++;
+ }
+
+ if (test_ecb_many(mb_mgr, enc_keys, dec_keys,
+ vec_tab[vect].P, vec_tab[vect].C,
+ (unsigned) vec_tab[vect].Plen,
+ ENCRYPT, CIPHER_HASH, cipher, 1,
+ vec_tab[vect].Klen, num_jobs)) {
+ printf("error #%d encrypt in-place\n", vect + 1);
+ errors++;
+ }
+
+ if (test_ecb_many(mb_mgr, enc_keys, dec_keys,
+ vec_tab[vect].C, vec_tab[vect].P,
+ (unsigned) vec_tab[vect].Plen,
+ DECRYPT, HASH_CIPHER, cipher, 1,
+ vec_tab[vect].Klen, num_jobs)) {
+ printf("error #%d decrypt in-place\n", vect + 1);
+ errors++;
+ }
+ }
+ printf("\n");
+ return errors;
+}
+
+int
+ecb_test(const enum arch_type arch,
+ struct MB_MGR *mb_mgr)
+{
+ const int num_jobs_tab[] = {
+ 1, 3, 4, 5, 7, 8, 9, 15, 16, 17
+ };
+ unsigned i;
+ int errors = 0;
+
+ (void) arch; /* unused */
+
+ for (i = 0; i < DIM(num_jobs_tab); i++)
+ errors += test_ecb_vectors(mb_mgr, DIM(ecb_vectors),
+ ecb_vectors,
+ "AES-ECB standard test vectors", ECB,
+ num_jobs_tab[i]);
+ if (0 == errors)
+ printf("...Pass\n");
+ else
+ printf("...Fail\n");
+
+ return errors;
+}
diff --git a/src/spdk/intel-ipsec-mb/LibTestApp/gcm_ctr_vectors_test.h b/src/spdk/intel-ipsec-mb/LibTestApp/gcm_ctr_vectors_test.h
new file mode 100644
index 000000000..b2685ff16
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/LibTestApp/gcm_ctr_vectors_test.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2017-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef GCM_CTR_VECTORS_TEST_H_
+#define GCM_CTR_VECTORS_TEST_H_
+
+#include <stdint.h>
+
+enum arch_type {
+ ARCH_SSE = 0,
+ ARCH_AVX,
+ ARCH_AVX2,
+ ARCH_AVX512,
+ ARCH_NO_AESNI,
+ ARCH_NUMOF
+};
+
+enum key_size {
+ BITS_128 = 16,
+ BITS_192 = 24,
+ BITS_256 = 32,
+};
+
+#define KBITS(K) (sizeof(K)) /* note: yields the key size in bytes, not bits */
+
+// struct to hold pointers to the key, plaintext and ciphertext vectors
+struct gcm_ctr_vector {
+	const uint8_t* K;   // AES key
+	enum key_size Klen; // key length in bytes (see enum key_size values)
+	const uint8_t* IV;  // initial value used by GCM
+	uint64_t IVlen;     // length of IV in bytes
+	const uint8_t* A;   // additional authenticated data
+	uint64_t Alen;      // length of AAD in bytes
+	const uint8_t* P;   // plain text
+	uint64_t Plen;      // length of plain text in bytes
+	// outputs of encryption
+	const uint8_t* C;   // cipher text - same length as plain text
+	const uint8_t* T;   // authentication tag
+	uint8_t Tlen;       // tag length in bytes (spec allows 0 to 128 bits)
+};
+
+#define vector(N) \
+ {K##N, (KBITS(K##N)), IV##N, sizeof(IV##N), A##N, A##N##_len, \
+ P##N, sizeof(P##N), C##N, T##N, sizeof(T##N)}
+
+#define extra_vector(N) \
+ {K##N, (KBITS(K##N)), IV##N, sizeof(IV##N), A##N, A##N##_len, \
+ P##N, P##N##_len, C##N, T##N, sizeof(T##N)}
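+
+/*
+ * vector(N) builds a gcm_ctr_vector entry from the KN/IVN/AN/PN/CN/TN arrays
+ * and the AN_len define in the including test source, taking Plen from
+ * sizeof(PN); extra_vector(N) takes Plen from an explicit PN_len instead.
+ * A hypothetical table built this way might look like:
+ *   static const struct gcm_ctr_vector gcm_vectors[] = { vector(1), vector(2) };
+ */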
+struct MB_MGR;
+
+extern int gcm_test(MB_MGR *p_mgr);
+int ctr_test(const enum arch_type arch, struct MB_MGR *);
+
+#endif /* GCM_CTR_VECTORS_TEST_H_ */
diff --git a/src/spdk/intel-ipsec-mb/LibTestApp/gcm_test.c b/src/spdk/intel-ipsec-mb/LibTestApp/gcm_test.c
new file mode 100644
index 000000000..8deaa6f30
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/LibTestApp/gcm_test.c
@@ -0,0 +1,1423 @@
+/**********************************************************************
+ Copyright(c) 2011-2018 Intel Corporation All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+**********************************************************************/
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <string.h> /* for memcmp() */
+
+#include <intel-ipsec-mb.h>
+#include "gcm_ctr_vectors_test.h"
+
+/*
+ * 60-Byte Packet Encryption Using GCM-AES-128
+ * http://www.ieee802.org/1/files/public/
+ * docs2011/bn-randall-test-vectors-0511-v1.pdf
+ *
+ * K: AD7A2BD03EAC835A6F620FDCB506B345
+ * IV: 12153524C0895E81B2C28465
+ * AAD: D609B1F056637A0D46DF998D88E52E00
+ * B2C2846512153524C0895E81
+ * P: 08000F101112131415161718191A1B1C
+ * 1D1E1F202122232425262728292A2B2C
+ * 2D2E2F303132333435363738393A0002
+ * C: 701AFA1CC039C0D765128A665DAB6924
+ * 3899BF7318CCDC81C9931DA17FBE8EDD
+ * 7D17CB8B4C26FC81E3284F2B7FBA713D
+ * AT: 4F8D55E7D3F06FD5A13C0C29B9D5B880
+ * H: 73A23D80121DE2D5A850253FCF43120E
+ */
+static uint8_t K1[] = {
+ 0xAD, 0x7A, 0x2B, 0xD0, 0x3E, 0xAC, 0x83, 0x5A,
+ 0x6F, 0x62, 0x0F, 0xDC, 0xB5, 0x06, 0xB3, 0x45
+};
+static uint8_t P1[] = {
+ 0x08, 0x00, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14,
+ 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C,
+ 0x1D, 0x1E, 0x1F, 0x20, 0x21, 0x22, 0x23, 0x24,
+ 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C,
+ 0x2D, 0x2E, 0x2F, 0x30, 0x31, 0x32, 0x33, 0x34,
+ 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x00, 0x02
+};
+static uint8_t IV1[] = {
+ 0x12, 0x15, 0x35, 0x24, 0xC0, 0x89, 0x5E, 0x81,
+ 0xB2, 0xC2, 0x84, 0x65
+};
+static uint8_t A1[] = {
+ 0xD6, 0x09, 0xB1, 0xF0, 0x56, 0x63, 0x7A, 0x0D,
+ 0x46, 0xDF, 0x99, 0x8D, 0x88, 0xE5, 0x2E, 0x00,
+ 0xB2, 0xC2, 0x84, 0x65, 0x12, 0x15, 0x35, 0x24,
+ 0xC0, 0x89, 0x5E, 0x81
+};
+
+#define A1_len sizeof(A1)
+
+static uint8_t C1[] = {
+ 0x70, 0x1A, 0xFA, 0x1C, 0xC0, 0x39, 0xC0, 0xD7,
+ 0x65, 0x12, 0x8A, 0x66, 0x5D, 0xAB, 0x69, 0x24,
+ 0x38, 0x99, 0xBF, 0x73, 0x18, 0xCC, 0xDC, 0x81,
+ 0xC9, 0x93, 0x1D, 0xA1, 0x7F, 0xBE, 0x8E, 0xDD,
+ 0x7D, 0x17, 0xCB, 0x8B, 0x4C, 0x26, 0xFC, 0x81,
+ 0xE3, 0x28, 0x4F, 0x2B, 0x7F, 0xBA, 0x71, 0x3D
+};
+static uint8_t T1[] = {
+ 0x4F, 0x8D, 0x55, 0xE7, 0xD3, 0xF0, 0x6F, 0xD5,
+ 0xA1, 0x3C, 0x0C, 0x29, 0xB9, 0xD5, 0xB8, 0x80
+};
+
+/*
+ * 54-Byte Packet Encryption Using GCM-AES-128
+ * http://www.ieee802.org/1/files/public/
+ * docs2011/bn-randall-test-vectors-0511-v1.pdf
+ *
+ * K: 071B113B0CA743FECCCF3D051F737382
+ * IV: F0761E8DCD3D000176D457ED
+ * AAD: E20106D7CD0DF0761E8DCD3D88E54C2A
+ * 76D457ED
+ * P: 08000F101112131415161718191A1B1C
+ * 1D1E1F202122232425262728292A2B2C
+ * 2D2E2F30313233340004
+ * C: 13B4C72B389DC5018E72A171DD85A5D3
+ * 752274D3A019FBCAED09A425CD9B2E1C
+ * 9B72EEE7C9DE7D52B3F3
+ * AT: D6A5284F4A6D3FE22A5D6C2B960494C3
+ * H: E4E01725D724C1215C7309AD34539257
+ */
+static uint8_t K2[] = {
+ 0x07, 0x1B, 0x11, 0x3B, 0x0C, 0xA7, 0x43, 0xFE,
+ 0xCC, 0xCF, 0x3D, 0x05, 0x1F, 0x73, 0x73, 0x82
+};
+static uint8_t P2[] = {
+ 0x08, 0x00, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14,
+ 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C,
+ 0x1D, 0x1E, 0x1F, 0x20, 0x21, 0x22, 0x23, 0x24,
+ 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C,
+ 0x2D, 0x2E, 0x2F, 0x30, 0x31, 0x32, 0x33, 0x34,
+ 0x00, 0x04
+};
+static uint8_t IV2[] = {
+ 0xF0, 0x76, 0x1E, 0x8D, 0xCD, 0x3D, 0x00, 0x01,
+ 0x76, 0xD4, 0x57, 0xED
+};
+/* static uint8_t IV1p[] = {0, 0, 0, 1}; */
+static uint8_t A2[] = {
+ 0xE2, 0x01, 0x06, 0xD7, 0xCD, 0x0D, 0xF0, 0x76,
+ 0x1E, 0x8D, 0xCD, 0x3D, 0x88, 0xE5, 0x4C, 0x2A,
+ 0x76, 0xD4, 0x57, 0xED
+};
+#define A2_len sizeof(A2)
+static uint8_t C2[] = {
+ 0x13, 0xB4, 0xC7, 0x2B, 0x38, 0x9D, 0xC5, 0x01,
+ 0x8E, 0x72, 0xA1, 0x71, 0xDD, 0x85, 0xA5, 0xD3,
+ 0x75, 0x22, 0x74, 0xD3, 0xA0, 0x19, 0xFB, 0xCA,
+ 0xED, 0x09, 0xA4, 0x25, 0xCD, 0x9B, 0x2E, 0x1C,
+ 0x9B, 0x72, 0xEE, 0xE7, 0xC9, 0xDE, 0x7D, 0x52,
+ 0xB3, 0xF3
+};
+static uint8_t T2[] = {
+ 0xD6, 0xA5, 0x28, 0x4F, 0x4A, 0x6D, 0x3F, 0xE2,
+ 0x2A, 0x5D, 0x6C, 0x2B, 0x96, 0x04, 0x94, 0xC3
+};
+
+/*
+ * http://csrc.nist.gov/groups/STM/cavp/gcmtestvectors.zip
+ * gcmEncryptExtIV128.rsp
+ *
+ * [Keylen = 128]
+ * [IVlen = 96]
+ * [PTlen = 128]
+ * [AADlen = 128]
+ * [Taglen = 128]
+ * Count = 0
+ * K: c939cc13397c1d37de6ae0e1cb7c423c
+ * IV: b3d8cc017cbb89b39e0f67e2
+ * P: c3b3c41f113a31b73d9a5cd432103069
+ * AAD: 24825602bd12a984e0092d3e448eda5f
+ * C: 93fe7d9e9bfd10348a5606e5cafa7354
+ * AT: 0032a1dc85f1c9786925a2e71d8272dd
+ */
+static uint8_t K3[] = {
+ 0xc9, 0x39, 0xcc, 0x13, 0x39, 0x7c, 0x1d, 0x37,
+ 0xde, 0x6a, 0xe0, 0xe1, 0xcb, 0x7c, 0x42, 0x3c
+};
+static uint8_t IV3[] = {
+ 0xb3, 0xd8, 0xcc, 0x01, 0x7c, 0xbb, 0x89, 0xb3,
+ 0x9e, 0x0f, 0x67, 0xe2
+};
+static uint8_t P3[] = {
+ 0xc3, 0xb3, 0xc4, 0x1f, 0x11, 0x3a, 0x31, 0xb7,
+ 0x3d, 0x9a, 0x5c, 0xd4, 0x32, 0x10, 0x30, 0x69
+};
+static uint8_t A3[] = {
+ 0x24, 0x82, 0x56, 0x02, 0xbd, 0x12, 0xa9, 0x84,
+ 0xe0, 0x09, 0x2d, 0x3e, 0x44, 0x8e, 0xda, 0x5f
+};
+#define A3_len sizeof(A3)
+static uint8_t C3[] = {
+ 0x93, 0xfe, 0x7d, 0x9e, 0x9b, 0xfd, 0x10, 0x34,
+ 0x8a, 0x56, 0x06, 0xe5, 0xca, 0xfa, 0x73, 0x54
+};
+static uint8_t T3[] = {
+ 0x00, 0x32, 0xa1, 0xdc, 0x85, 0xf1, 0xc9, 0x78,
+ 0x69, 0x25, 0xa2, 0xe7, 0x1d, 0x82, 0x72, 0xdd
+};
+
+/*
+ * http://csrc.nist.gov/groups/STM/cavp/gcmtestvectors.zip
+ * gcmEncryptExtIV128.rsp
+ *
+ * [Keylen = 128]
+ * [IVlen = 96]
+ * [PTlen = 256]
+ * [AADlen = 128]
+ * [Taglen = 128]
+ * Count = 0
+ * K = 298efa1ccf29cf62ae6824bfc19557fc
+ * IV = 6f58a93fe1d207fae4ed2f6d
+ * P = cc38bccd6bc536ad919b1395f5d63801f99f8068d65ca5ac63872daf16b93901
+ * AAD = 021fafd238463973ffe80256e5b1c6b1
+ * C = dfce4e9cd291103d7fe4e63351d9e79d3dfd391e3267104658212da96521b7db
+ * T = 542465ef599316f73a7a560509a2d9f2
+ */
+static uint8_t K4[] = {
+ 0x29, 0x8e, 0xfa, 0x1c, 0xcf, 0x29, 0xcf, 0x62,
+ 0xae, 0x68, 0x24, 0xbf, 0xc1, 0x95, 0x57, 0xfc
+};
+static uint8_t IV4[] = {
+ 0x6f, 0x58, 0xa9, 0x3f, 0xe1, 0xd2, 0x07, 0xfa,
+ 0xe4, 0xed, 0x2f, 0x6d
+};
+static uint8_t P4[] = {
+ 0xcc, 0x38, 0xbc, 0xcd, 0x6b, 0xc5, 0x36, 0xad,
+ 0x91, 0x9b, 0x13, 0x95, 0xf5, 0xd6, 0x38, 0x01,
+ 0xf9, 0x9f, 0x80, 0x68, 0xd6, 0x5c, 0xa5, 0xac,
+ 0x63, 0x87, 0x2d, 0xaf, 0x16, 0xb9, 0x39, 0x01
+};
+static uint8_t A4[] = {
+ 0x02, 0x1f, 0xaf, 0xd2, 0x38, 0x46, 0x39, 0x73,
+ 0xff, 0xe8, 0x02, 0x56, 0xe5, 0xb1, 0xc6, 0xb1
+};
+#define A4_len sizeof(A4)
+static uint8_t C4[] = {
+ 0xdf, 0xce, 0x4e, 0x9c, 0xd2, 0x91, 0x10, 0x3d,
+ 0x7f, 0xe4, 0xe6, 0x33, 0x51, 0xd9, 0xe7, 0x9d,
+ 0x3d, 0xfd, 0x39, 0x1e, 0x32, 0x67, 0x10, 0x46,
+ 0x58, 0x21, 0x2d, 0xa9, 0x65, 0x21, 0xb7, 0xdb
+};
+static uint8_t T4[] = {
+ 0x54, 0x24, 0x65, 0xef, 0x59, 0x93, 0x16, 0xf7,
+ 0x3a, 0x7a, 0x56, 0x05, 0x09, 0xa2, 0xd9, 0xf2
+};
+
+/*
+ * http://csrc.nist.gov/groups/STM/cavp/gcmtestvectors.zip
+ * gcmEncryptExtIV128.rsp
+ *
+ * [Keylen = 128]
+ * [IVlen = 96]
+ * [PTlen = 256]
+ * [AADlen = 128]
+ * [Taglen = 128]
+ * Count = 0
+ * K = 298efa1ccf29cf62ae6824bfc19557fc
+ * IV = 6f58a93fe1d207fae4ed2f6d
+ * P = cc38bccd6bc536ad919b1395f5d63801f99f8068d65ca5ac63872daf16b93901
+ * AAD = 021fafd238463973ffe80256e5b1c6b1
+ * C = dfce4e9cd291103d7fe4e63351d9e79d3dfd391e3267104658212da96521b7db
+ * T = 542465ef599316f73a7a560509a2d9f2
+ */
+static uint8_t K5[] = {
+ 0x29, 0x8e, 0xfa, 0x1c, 0xcf, 0x29, 0xcf, 0x62,
+ 0xae, 0x68, 0x24, 0xbf, 0xc1, 0x95, 0x57, 0xfc
+};
+static uint8_t IV5[] = {
+ 0x6f, 0x58, 0xa9, 0x3f, 0xe1, 0xd2, 0x07, 0xfa,
+ 0xe4, 0xed, 0x2f, 0x6d
+};
+static uint8_t P5[] = {
+ 0xcc, 0x38, 0xbc, 0xcd, 0x6b, 0xc5, 0x36, 0xad,
+ 0x91, 0x9b, 0x13, 0x95, 0xf5, 0xd6, 0x38, 0x01,
+ 0xf9, 0x9f, 0x80, 0x68, 0xd6, 0x5c, 0xa5, 0xac,
+ 0x63, 0x87, 0x2d, 0xaf, 0x16, 0xb9, 0x39, 0x01
+};
+static uint8_t A5[] = {
+ 0x02, 0x1f, 0xaf, 0xd2, 0x38, 0x46, 0x39, 0x73,
+ 0xff, 0xe8, 0x02, 0x56, 0xe5, 0xb1, 0xc6, 0xb1
+};
+#define A5_len sizeof(A5)
+static uint8_t C5[] = {
+ 0xdf, 0xce, 0x4e, 0x9c, 0xd2, 0x91, 0x10, 0x3d,
+ 0x7f, 0xe4, 0xe6, 0x33, 0x51, 0xd9, 0xe7, 0x9d,
+ 0x3d, 0xfd, 0x39, 0x1e, 0x32, 0x67, 0x10, 0x46,
+ 0x58, 0x21, 0x2d, 0xa9, 0x65, 0x21, 0xb7, 0xdb
+};
+static uint8_t T5[] = {
+ 0x54, 0x24, 0x65, 0xef, 0x59, 0x93, 0x16, 0xf7,
+ 0x3a, 0x7a, 0x56, 0x05, 0x09, 0xa2, 0xd9, 0xf2
+};
+
+/*
+ * http://csrc.nist.gov/groups/ST/toolkit/BCM/documents/
+ * proposedmodes/gcm/gcm-revised-spec.pdf
+ *
+ * Test Case 2
+ * K: 00000000000000000000000000000000
+ * P: 00000000000000000000000000000000
+ * IV: 000000000000000000000000
+ * C: 0388dace60b6a392f328c2b971b2fe78
+ * T: ab6e47d42cec13bdf53a67b21257bddf
+ * H: 66e94bd4ef8a2c3b884cfa59ca342b2e
+ */
+static uint8_t K6[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+static uint8_t P6[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+static uint8_t IV6[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00
+};
+static uint8_t A6[] = {0};
+#define A6_len 0
+static uint8_t C6[] = {
+ 0x03, 0x88, 0xda, 0xce, 0x60, 0xb6, 0xa3, 0x92,
+ 0xf3, 0x28, 0xc2, 0xb9, 0x71, 0xb2, 0xfe, 0x78
+};
+static uint8_t T6[] = {
+ 0xab, 0x6e, 0x47, 0xd4, 0x2c, 0xec, 0x13, 0xbd,
+ 0xf5, 0x3a, 0x67, 0xb2, 0x12, 0x57, 0xbd, 0xdf
+};
+
+/*
+ * http://csrc.nist.gov/groups/ST/toolkit/BCM/
+ * documents/proposedmodes/gcm/gcm-revised-spec.pdf
+ *
+ * Test Case 3
+ * K: feffe9928665731c6d6a8f9467308308
+ * P: d9313225f88406e5a55909c5aff5269a
+ * 86a7a9531534f7da2e4c303d8a318a72
+ * 1c3c0c95956809532fcf0e2449a6b525
+ * b16aedf5aa0de657ba637b391aafd255
+ * IV: cafebabefacedbaddecaf888
+ * H: b83b533708bf535d0aa6e52980d53b78
+ * C: 42831ec2217774244b7221b784d0d49c
+ * e3aa212f2c02a4e035c17e2329aca12e
+ * 21d514b25466931c7d8f6a5aac84aa05
+ * 1ba30b396a0aac973d58e091473f5985
+ * T: 4d5c2af327cd64a62cf35abd2ba6fab4
+ */
+static uint8_t K7[] = {
+ 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
+ 0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08
+};
+static uint8_t P7[] = {
+ 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55
+};
+static uint8_t IV7[] = {
+ 0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
+ 0xde, 0xca, 0xf8, 0x88
+};
+static uint8_t A7[] = {0};
+#define A7_len 0
+static uint8_t C7[] = {
+ 0x42, 0x83, 0x1e, 0xc2, 0x21, 0x77, 0x74, 0x24,
+ 0x4b, 0x72, 0x21, 0xb7, 0x84, 0xd0, 0xd4, 0x9c,
+ 0xe3, 0xaa, 0x21, 0x2f, 0x2c, 0x02, 0xa4, 0xe0,
+ 0x35, 0xc1, 0x7e, 0x23, 0x29, 0xac, 0xa1, 0x2e,
+ 0x21, 0xd5, 0x14, 0xb2, 0x54, 0x66, 0x93, 0x1c,
+ 0x7d, 0x8f, 0x6a, 0x5a, 0xac, 0x84, 0xaa, 0x05,
+ 0x1b, 0xa3, 0x0b, 0x39, 0x6a, 0x0a, 0xac, 0x97,
+ 0x3d, 0x58, 0xe0, 0x91, 0x47, 0x3f, 0x59, 0x85
+};
+static uint8_t T7[] = {
+ 0x4d, 0x5c, 0x2a, 0xf3, 0x27, 0xcd, 0x64, 0xa6,
+ 0x2c, 0xf3, 0x5a, 0xbd, 0x2b, 0xa6, 0xfa, 0xb4
+};
+
+/*
+ * http://csrc.nist.gov/groups/ST/toolkit/BCM/
+ * documents/proposedmodes/gcm/gcm-revised-spec.pdf
+ *
+ * Test Case 4
+ * K: feffe9928665731c6d6a8f9467308308
+ * P: d9313225f88406e5a55909c5aff5269a
+ * 86a7a9531534f7da2e4c303d8a318a72
+ * 1c3c0c95956809532fcf0e2449a6b525
+ * b16aedf5aa0de657ba637b39
+ * A: feedfacedeadbeeffeedfacedeadbeef
+ * abaddad2
+ * IV: cafebabefacedbaddecaf888
+ * H: b83b533708bf535d0aa6e52980d53b78
+ * C: 42831ec2217774244b7221b784d0d49c
+ * e3aa212f2c02a4e035c17e2329aca12e
+ * 21d514b25466931c7d8f6a5aac84aa05
+ * 1ba30b396a0aac973d58e091
+ * T: 5bc94fbc3221a5db94fae95ae7121a47
+ */
+static uint8_t K8[] = {
+ 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
+ 0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08
+};
+static uint8_t P8[] = {
+ 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39
+};
+static uint8_t A8[] = {
+ 0xfe, 0xed, 0xfa, 0xce, 0xde, 0xad, 0xbe, 0xef,
+ 0xfe, 0xed, 0xfa, 0xce, 0xde, 0xad, 0xbe, 0xef,
+ 0xab, 0xad, 0xda, 0xd2
+};
+#define A8_len sizeof(A8)
+static uint8_t IV8[] = {
+ 0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
+ 0xde, 0xca, 0xf8, 0x88
+};
+static uint8_t C8[] = {
+ 0x42, 0x83, 0x1e, 0xc2, 0x21, 0x77, 0x74, 0x24,
+ 0x4b, 0x72, 0x21, 0xb7, 0x84, 0xd0, 0xd4, 0x9c,
+ 0xe3, 0xaa, 0x21, 0x2f, 0x2c, 0x02, 0xa4, 0xe0,
+ 0x35, 0xc1, 0x7e, 0x23, 0x29, 0xac, 0xa1, 0x2e,
+ 0x21, 0xd5, 0x14, 0xb2, 0x54, 0x66, 0x93, 0x1c,
+ 0x7d, 0x8f, 0x6a, 0x5a, 0xac, 0x84, 0xaa, 0x05,
+ 0x1b, 0xa3, 0x0b, 0x39, 0x6a, 0x0a, 0xac, 0x97,
+ 0x3d, 0x58, 0xe0, 0x91, 0x47, 0x3f, 0x59, 0x85
+};
+static uint8_t T8[] = {
+ 0x5b, 0xc9, 0x4f, 0xbc, 0x32, 0x21, 0xa5, 0xdb,
+ 0x94, 0xfa, 0xe9, 0x5a, 0xe7, 0x12, 0x1a, 0x47
+};
+
+/*
+ * http://csrc.nist.gov/groups/ST/toolkit/BCM/
+ * documents/proposedmodes/gcm/gcm-revised-spec.pdf
+ *
+ * Test Case 14
+ * K: 00000000000000000000000000000000
+ * 00000000000000000000000000000000
+ * P: 00000000000000000000000000000000
+ * A:
+ * IV: 000000000000000000000000
+ * H: dc95c078a2408989ad48a21492842087
+ * C: cea7403d4d606b6e074ec5d3baf39d18
+ * T: d0d1c8a799996bf0265b98b5d48ab919
+ */
+static uint8_t K9[] = {
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0
+};
+static uint8_t P9[] = {
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0
+};
+static uint8_t A9[] = {0};
+#define A9_len 0
+static uint8_t IV9[] = {
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0
+};
+static uint8_t C9[] = {
+ 0xce, 0xa7, 0x40, 0x3d, 0x4d, 0x60, 0x6b, 0x6e,
+ 0x07, 0x4e, 0xc5, 0xd3, 0xba, 0xf3, 0x9d, 0x18
+};
+static uint8_t T9[] = {
+ 0xd0, 0xd1, 0xc8, 0xa7, 0x99, 0x99, 0x6b, 0xf0,
+ 0x26, 0x5b, 0x98, 0xb5, 0xd4, 0x8a, 0xb9, 0x19
+};
+
+/*
+ * http://csrc.nist.gov/groups/ST/toolkit/BCM/documents/
+ * proposedmodes/gcm/gcm-revised-spec.pdf
+ *
+ * Test Case 15
+ * K: feffe9928665731c6d6a8f9467308308
+ * feffe9928665731c6d6a8f9467308308
+ * P: d9313225f88406e5a55909c5aff5269a
+ * 86a7a9531534f7da2e4c303d8a318a72
+ * 1c3c0c95956809532fcf0e2449a6b525
+ * b16aedf5aa0de657ba637b391aafd255
+ * A:
+ * IV: cafebabefacedbaddecaf888
+ * H: acbef20579b4b8ebce889bac8732dad7
+ * C: 522dc1f099567d07f47f37a32a84427d
+ * 643a8cdcbfe5c0c97598a2bd2555d1aa
+ * 8cb08e48590dbb3da7b08b1056828838
+ * c5f61e6393ba7a0abcc9f662898015ad
+ * T: b094dac5d93471bdec1a502270e3cc6c
+ */
+static uint8_t K10[] = {
+ 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
+ 0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08,
+ 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
+ 0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08
+};
+static uint8_t P10[] = {
+ 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55
+};
+static uint8_t A10[] = {0};
+#define A10_len 0
+static uint8_t IV10[] = {
+ 0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
+ 0xde, 0xca, 0xf8, 0x88
+};
+static uint8_t C10[] = {
+ 0x52, 0x2d, 0xc1, 0xf0, 0x99, 0x56, 0x7d, 0x07,
+ 0xf4, 0x7f, 0x37, 0xa3, 0x2a, 0x84, 0x42, 0x7d,
+ 0x64, 0x3a, 0x8c, 0xdc, 0xbf, 0xe5, 0xc0, 0xc9,
+ 0x75, 0x98, 0xa2, 0xbd, 0x25, 0x55, 0xd1, 0xaa,
+ 0x8c, 0xb0, 0x8e, 0x48, 0x59, 0x0d, 0xbb, 0x3d,
+ 0xa7, 0xb0, 0x8b, 0x10, 0x56, 0x82, 0x88, 0x38,
+ 0xc5, 0xf6, 0x1e, 0x63, 0x93, 0xba, 0x7a, 0x0a,
+ 0xbc, 0xc9, 0xf6, 0x62, 0x89, 0x80, 0x15, 0xad
+};
+static uint8_t T10[] = {
+ 0xb0, 0x94, 0xda, 0xc5, 0xd9, 0x34, 0x71, 0xbd,
+ 0xec, 0x1a, 0x50, 0x22, 0x70, 0xe3, 0xcc, 0x6c
+};
+
+/*
+ * http://csrc.nist.gov/groups/ST/toolkit/BCM/documents/
+ * proposedmodes/gcm/gcm-revised-spec.pdf
+ *
+ * Test Case 16
+ * K: feffe9928665731c6d6a8f9467308308
+ * feffe9928665731c6d6a8f9467308308
+ * P: d9313225f88406e5a55909c5aff5269a
+ * 86a7a9531534f7da2e4c303d8a318a72
+ * 1c3c0c95956809532fcf0e2449a6b525
+ * b16aedf5aa0de657ba637b39
+ * A: feedfacedeadbeeffeedfacedeadbeef
+ * abaddad2
+ * IV: cafebabefacedbaddecaf888
+ * H: acbef20579b4b8ebce889bac8732dad7
+ * C: 522dc1f099567d07f47f37a32a84427d
+ * 643a8cdcbfe5c0c97598a2bd2555d1aa
+ * 8cb08e48590dbb3da7b08b1056828838
+ * c5f61e6393ba7a0abcc9f662
+ * T: 76fc6ece0f4e1768cddf8853bb2d551b
+ */
+static uint8_t K11[] = {
+ 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
+ 0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08,
+ 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
+ 0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08
+};
+static uint8_t P11[] = {
+ 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39
+};
+static uint8_t A11[] = {
+ 0xfe, 0xed, 0xfa, 0xce, 0xde, 0xad, 0xbe, 0xef,
+ 0xfe, 0xed, 0xfa, 0xce, 0xde, 0xad, 0xbe, 0xef,
+ 0xab, 0xad, 0xda, 0xd2
+};
+#define A11_len sizeof(A11)
+static uint8_t IV11[] = {
+ 0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
+ 0xde, 0xca, 0xf8, 0x88
+};
+static uint8_t C11[] = {
+ 0x52, 0x2d, 0xc1, 0xf0, 0x99, 0x56, 0x7d, 0x07,
+ 0xf4, 0x7f, 0x37, 0xa3, 0x2a, 0x84, 0x42, 0x7d,
+ 0x64, 0x3a, 0x8c, 0xdc, 0xbf, 0xe5, 0xc0, 0xc9,
+ 0x75, 0x98, 0xa2, 0xbd, 0x25, 0x55, 0xd1, 0xaa,
+ 0x8c, 0xb0, 0x8e, 0x48, 0x59, 0x0d, 0xbb, 0x3d,
+ 0xa7, 0xb0, 0x8b, 0x10, 0x56, 0x82, 0x88, 0x38,
+ 0xc5, 0xf6, 0x1e, 0x63, 0x93, 0xba, 0x7a, 0x0a,
+ 0xbc, 0xc9, 0xf6, 0x62
+};
+static uint8_t T11[] = {
+ 0x76, 0xfc, 0x6e, 0xce, 0x0f, 0x4e, 0x17, 0x68,
+ 0xcd, 0xdf, 0x88, 0x53, 0xbb, 0x2d, 0x55, 0x1b
+};
+
+/*
+ * http://csrc.nist.gov/groups/ST/toolkit/BCM/documents/
+ * proposedmodes/gcm/gcm-revised-spec.pdf
+ *
+ * Test Case 17 -- Not supported: IV length less than 12 bytes
+ * K: feffe9928665731c6d6a8f9467308308
+ * feffe9928665731c6d6a8f9467308308
+ * P: d9313225f88406e5a55909c5aff5269a
+ * 86a7a9531534f7da2e4c303d8a318a72
+ * 1c3c0c95956809532fcf0e2449a6b525
+ * b16aedf5aa0de657ba637b39
+ * A: feedfacedeadbeeffeedfacedeadbeef
+ * abaddad2
+ * IV: cafebabefacedbad
+ * H: acbef20579b4b8ebce889bac8732dad7
+ * C: c3762df1ca787d32ae47c13bf19844cb
+ * af1ae14d0b976afac52ff7d79bba9de0
+ * feb582d33934a4f0954cc2363bc73f78
+ * 62ac430e64abe499f47c9b1f
+ * T: 3a337dbf46a792c45e454913fe2ea8f2
+ */
+/* static uint8_t K12[] = { */
+/* 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c, */
+/* 0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08, */
+/* 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c, */
+/* 0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08 */
+/* }; */
+/* static uint8_t P12[] = { */
+/* 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5, */
+/* 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a, */
+/* 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda, */
+/* 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72, */
+/* 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53, */
+/* 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25, */
+/* 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57, */
+/* 0xba, 0x63, 0x7b, 0x39 */
+/* }; */
+/* static uint8_t A12[] = { */
+/* 0xfe, 0xed, 0xfa, 0xce, 0xde, 0xad, 0xbe, 0xef, */
+/* 0xfe, 0xed, 0xfa, 0xce, 0xde, 0xad, 0xbe, 0xef, */
+/* 0xab, 0xad, 0xda, 0xd2 */
+/* }; */
+/* static uint8_t IV12[] = { */
+/* 0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad */
+/* }; */
+/* static uint8_t H12[] = { */
+/* 0xac, 0xbe, 0xf2, 0x05, 0x79, 0xb4, 0xb8, 0xeb, */
+/* 0xce, 0x88, 0x9b, 0xac, 0x87, 0x32, 0xda, 0xd7 */
+/* }; */
+/* static uint8_t C12[] = { */
+/* 0xc3, 0x76, 0x2d, 0xf1, 0xca, 0x78, 0x7d, 0x32, */
+/* 0xae, 0x47, 0xc1, 0x3b, 0xf1, 0x98, 0x44, 0xcb, */
+/* 0xaf, 0x1a, 0xe1, 0x4d, 0x0b, 0x97, 0x6a, 0xfa, */
+/* 0xc5, 0x2f, 0xf7, 0xd7, 0x9b, 0xba, 0x9d, 0xe0, */
+/* 0xfe, 0xb5, 0x82, 0xd3, 0x39, 0x34, 0xa4, 0xf0, */
+/* 0x95, 0x4c, 0xc2, 0x36, 0x3b, 0xc7, 0x3f, 0x78, */
+/* 0x62, 0xac, 0x43, 0x0e, 0x64, 0xab, 0xe4, 0x99, */
+/* 0xf4, 0x7c, 0x9b, 0x1f */
+/* }; */
+/* static uint8_t T12[] = { */
+/* 0x3a, 0x33, 0x7d, 0xbf, 0x46, 0xa7, 0x92, 0xc4, */
+/* 0x5e, 0x45, 0x49, 0x13, 0xfe, 0x2e, 0xa8, 0xf2 */
+/* }; */
+
+/*
+ * http://csrc.nist.gov/groups/ST/toolkit/BCM/documents/
+ * proposedmodes/gcm/gcm-revised-spec.pdf
+ *
+ * Test Case 18 -- Not supported: IV length greater than 12 bytes
+ * K: feffe9928665731c6d6a8f9467308308
+ * feffe9928665731c6d6a8f9467308308
+ * P: d9313225f88406e5a55909c5aff5269a
+ * 86a7a9531534f7da2e4c303d8a318a72
+ * 1c3c0c95956809532fcf0e2449a6b525
+ * b16aedf5aa0de657ba637b39
+ * A: feedfacedeadbeeffeedfacedeadbeef
+ * abaddad2
+ * IV: 9313225df88406e555909c5aff5269aa
+ * 6a7a9538534f7da1e4c303d2a318a728
+ * c3c0c95156809539fcf0e2429a6b5254
+ * 16aedbf5a0de6a57a637b39b
+ * H: acbef20579b4b8ebce889bac8732dad7
+ * C: 5a8def2f0c9e53f1f75d7853659e2a20
+ * eeb2b22aafde6419a058ab4f6f746bf4
+ * 0fc0c3b780f244452da3ebf1c5d82cde
+ * a2418997200ef82e44ae7e3f
+ * T: a44a8266ee1c8eb0c8b5d4cf5ae9f19a
+ */
+
+/*
+ * https://tools.ietf.org/html/draft-mcgrew-gcm-test-01
+ * case #7
+ */
+/********************************************************
+ key = feffe9928665731c6d6a8f9467308308
+ feffe9928665731c
+ (24 octets)
+ spi = 0000a5f8
+ seq = 0000000a
+ (4 octets)
+ nonce = cafebabefacedbaddecaf888
+ plaintext = 45000028a4ad4000400678800a01038f
+ 0a010612802306b8cb712602dd6bb03e
+ 501016d075680001
+ (40 octets)
+ aad = 0000a5f80000000a
+ (8 octets)
+ ctext+tag = a5b1f8066029aea40e598b8122de0242
+ 0938b3ab33f828e687b8858b5bfbdbd0
+ 315b27452144cc7795457b9652037f53
+ 18027b5b4cd7a636
+ (56 octets)
+********************************************************/
+static uint8_t K13[] = {
+ 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
+ 0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08,
+ 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
+};
+static uint8_t IV13[] = {
+ 0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
+ 0xde, 0xca, 0xf8, 0x88,
+};
+static uint8_t A13[] = {
+ 0x00, 0x00, 0xa5, 0xf8, 0x00, 0x00, 0x00, 0x0a,
+};
+#define A13_len sizeof(A13)
+static uint8_t P13[] = {
+ 0x45, 0x00, 0x00, 0x28, 0xa4, 0xad, 0x40, 0x00,
+ 0x40, 0x06, 0x78, 0x80, 0x0a, 0x01, 0x03, 0x8f,
+ 0x0a, 0x01, 0x06, 0x12, 0x80, 0x23, 0x06, 0xb8,
+ 0xcb, 0x71, 0x26, 0x02, 0xdd, 0x6b, 0xb0, 0x3e,
+ 0x50, 0x10, 0x16, 0xd0, 0x75, 0x68, 0x00, 0x01,
+};
+static uint8_t T13[] = {
+ 0x95, 0x45, 0x7b, 0x96, 0x52, 0x03, 0x7f, 0x53,
+ 0x18, 0x02, 0x7b, 0x5b, 0x4c, 0xd7, 0xa6, 0x36,
+};
+static uint8_t C13[] = {
+ 0xa5, 0xb1, 0xf8, 0x06, 0x60, 0x29, 0xae, 0xa4,
+ 0x0e, 0x59, 0x8b, 0x81, 0x22, 0xde, 0x02, 0x42,
+ 0x09, 0x38, 0xb3, 0xab, 0x33, 0xf8, 0x28, 0xe6,
+ 0x87, 0xb8, 0x85, 0x8b, 0x5b, 0xfb, 0xdb, 0xd0,
+ 0x31, 0x5b, 0x27, 0x45, 0x21, 0x44, 0xcc, 0x77,
+};
+
+/*
+ * Test Case 14 -- Auth Tag (truncated GHASH output) length of 4 bytes
+ */
+#define K14 K11
+#define P14 P11
+#define A14 A11
+#define A14_len sizeof(A14)
+#define IV14 IV11
+#define C14 C11
+static uint8_t T14[] = {
+ 0x76, 0xfc, 0x6e, 0xce
+};
+
+/*
+ * Test Case 15 -- Auth Tag (truncated GHASH output) length of 8 bytes
+ */
+#define K15 K11
+#define P15 P11
+#define A15 A11
+#define A15_len sizeof(A15)
+#define IV15 IV11
+#define C15 C11
+static uint8_t T15[] = {
+ 0x76, 0xfc, 0x6e, 0xce, 0x0f, 0x4e, 0x17, 0x68
+};
+
+/*
+ * Test Case 16 -- Auth Tag (truncated GHASH output) length of 14 bytes
+ */
+#define K16 K11
+#define P16 P11
+#define A16 A11
+#define A16_len sizeof(A16)
+#define IV16 IV11
+#define C16 C11
+static uint8_t T16[] = {
+ 0x76, 0xfc, 0x6e, 0xce, 0x0f, 0x4e, 0x17, 0x68,
+ 0xcd, 0xdf, 0x88, 0x53, 0xbb, 0x2d
+};
+
+/*
+ * Test Case 17 -- Auth Tag (truncated GHASH output) length of 15 bytes
+ */
+#define K17 K11
+#define P17 P11
+#define A17 A11
+#define A17_len sizeof(A17)
+#define IV17 IV11
+#define C17 C11
+static uint8_t T17[] = {
+ 0x76, 0xfc, 0x6e, 0xce, 0x0f, 0x4e, 0x17, 0x68,
+ 0xcd, 0xdf, 0x88, 0x53, 0xbb, 0x2d, 0x55
+};
+
+/*
+ * Test Case 18 -- No plaintext
+ */
+static uint8_t K18[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
+};
+static uint8_t IV18[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B
+};
+static uint8_t A18[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+};
+
+#define P18 NULL
+#define C18 NULL
+#define P18_len 0
+#define A18_len sizeof(A18)
+
+static uint8_t T18[] = {
+ 0x8D, 0xF7, 0xD8, 0xED, 0xB9, 0x91, 0x65, 0xFA,
+ 0xAD, 0x1B, 0x03, 0x8C, 0x53, 0xB3, 0x20, 0xE8
+};
+
+/*
+ * Test Case 19 -- No AAD
+ */
+static uint8_t P19[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
+};
+static uint8_t C19[] = {
+ 0x93, 0x6D, 0xA5, 0xCD, 0x62, 0x1E, 0xF1, 0x53,
+ 0x43, 0xDB, 0x6B, 0x81, 0x3A, 0xAE, 0x7E, 0x07
+};
+
+#define K19 K18
+#define IV19 IV18
+#define P19_len sizeof(P19)
+#define A19 NULL
+#define A19_len 0
+
+static uint8_t T19[] = {
+ 0xFE, 0x8E, 0xC5, 0x55, 0x5F, 0x36, 0x08, 0xF7,
+ 0x0E, 0xBC, 0x7F, 0xCE, 0xE9, 0x59, 0x2E, 0x9B
+};
+
+/*
+ * Test Case 20 -- No plaintext, no AAD
+ */
+#define K20 K18
+#define IV20 IV18
+#define P20 NULL
+#define C20 NULL
+#define P20_len 0
+#define A20 NULL
+#define A20_len 0
+
+static uint8_t T20[] = {
+ 0x43, 0x5B, 0x9B, 0xA1, 0x2D, 0x75, 0xA4, 0xBE,
+ 0x8A, 0x97, 0x7E, 0xA3, 0xCD, 0x01, 0x18, 0x90
+};
+
+/*
+ * Test Case 21 -- Variable tag size (1 byte)
+ */
+
+#define K21 K1
+#define IV21 IV1
+#define P21 P1
+#define C21 C1
+#define A21 A1
+#define A21_len A1_len
+
+static uint8_t T21[] = {
+ 0x4F
+};
+
+static const struct gcm_ctr_vector gcm_vectors[] = {
+ /*
+ * field order {K, Klen, IV, IVlen, A, Alen, P, Plen, C, T, Tlen};
+ * original vector does not have a valid sub hash key
+ */
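+ /*
+ * Note: vector(n)/extra_vector(n) are assumed to expand the Kn, IVn,
+ * An, Pn, Cn and Tn definitions above into gcm_ctr_vector initializers;
+ * the macros themselves are defined outside this section.
+ */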
+ vector(1),
+ vector(2),
+ vector(3),
+ vector(4),
+ vector(5),
+ vector(6),
+ vector(7),
+ vector(8),
+ vector(9),
+ vector(10),
+ vector(11),
+ /* vector(12), -- IVs shorter than 12 bytes are not supported */
+ vector(13),
+ vector(14),
+ vector(15),
+ vector(16),
+ vector(17),
+ extra_vector(18),
+ extra_vector(19),
+ extra_vector(20),
+ vector(21),
+};
+
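+/*
+ * Common wrapper prototype shared by the raw, SGL and job API functions
+ * below, so that every vector can be run through each code path.
+ */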
+typedef void (*gcm_enc_dec_fn_t)(const struct gcm_key_data *,
+ struct gcm_context_data *,
+ uint8_t *, const uint8_t *, uint64_t,
+ const uint8_t *, const uint8_t *, uint64_t,
+ uint8_t *, uint64_t);
+
+static MB_MGR *p_gcm_mgr = NULL;
+
+static int check_data(const uint8_t *test, const uint8_t *expected,
+ uint64_t len, const char *data_name)
+{
+ int mismatch;
+ int is_error = 0;
+
+ mismatch = memcmp(test, expected, len);
+ if (mismatch) {
+ uint64_t a;
+
+ is_error = 1;
+ printf(" expected results don't match %s \t\t", data_name);
+ for (a = 0; a < len; a++) {
+ if (test[a] != expected[a]) {
+ printf(" '%x' != '%x' at %llx of %llx\n",
+ test[a], expected[a],
+ (unsigned long long) a,
+ (unsigned long long) len);
+ break;
+ }
+ }
+ }
+ return is_error;
+}
+
+/*****************************************************************************
+ * RAW API
+ *****************************************************************************/
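+/* Single-shot wrappers: one IMB_AESxxx_GCM_ENC/DEC call per operation. */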
+static void
+aes_gcm_enc_128(const struct gcm_key_data *key,
+ struct gcm_context_data *ctx,
+ uint8_t *out, const uint8_t *in, uint64_t len,
+ const uint8_t *iv, const uint8_t *aad, uint64_t aad_len,
+ uint8_t *auth_tag, uint64_t auth_tag_len)
+{
+ IMB_AES128_GCM_ENC(p_gcm_mgr, key, ctx, out, in, len, iv,
+ aad, aad_len, auth_tag, auth_tag_len);
+}
+
+static void
+aes_gcm_dec_128(const struct gcm_key_data *key,
+ struct gcm_context_data *ctx,
+ uint8_t *out, const uint8_t *in, uint64_t len,
+ const uint8_t *iv, const uint8_t *aad, uint64_t aad_len,
+ uint8_t *auth_tag, uint64_t auth_tag_len)
+{
+ IMB_AES128_GCM_DEC(p_gcm_mgr, key, ctx, out, in, len, iv,
+ aad, aad_len, auth_tag, auth_tag_len);
+}
+
+static void
+aes_gcm_enc_192(const struct gcm_key_data *key,
+ struct gcm_context_data *ctx,
+ uint8_t *out, const uint8_t *in, uint64_t len,
+ const uint8_t *iv, const uint8_t *aad, uint64_t aad_len,
+ uint8_t *auth_tag, uint64_t auth_tag_len)
+{
+ IMB_AES192_GCM_ENC(p_gcm_mgr, key, ctx, out, in, len, iv,
+ aad, aad_len, auth_tag, auth_tag_len);
+}
+
+static void
+aes_gcm_dec_192(const struct gcm_key_data *key,
+ struct gcm_context_data *ctx,
+ uint8_t *out, const uint8_t *in, uint64_t len,
+ const uint8_t *iv, const uint8_t *aad, uint64_t aad_len,
+ uint8_t *auth_tag, uint64_t auth_tag_len)
+{
+ IMB_AES192_GCM_DEC(p_gcm_mgr, key, ctx, out, in, len, iv,
+ aad, aad_len, auth_tag, auth_tag_len);
+}
+
+static void
+aes_gcm_enc_256(const struct gcm_key_data *key,
+ struct gcm_context_data *ctx,
+ uint8_t *out, const uint8_t *in, uint64_t len,
+ const uint8_t *iv, const uint8_t *aad, uint64_t aad_len,
+ uint8_t *auth_tag, uint64_t auth_tag_len)
+{
+ IMB_AES256_GCM_ENC(p_gcm_mgr, key, ctx, out, in, len, iv,
+ aad, aad_len, auth_tag, auth_tag_len);
+}
+
+static void
+aes_gcm_dec_256(const struct gcm_key_data *key,
+ struct gcm_context_data *ctx,
+ uint8_t *out, const uint8_t *in, uint64_t len,
+ const uint8_t *iv, const uint8_t *aad, uint64_t aad_len,
+ uint8_t *auth_tag, uint64_t auth_tag_len)
+{
+ IMB_AES256_GCM_DEC(p_gcm_mgr, key, ctx, out, in, len, iv,
+ aad, aad_len, auth_tag, auth_tag_len);
+}
+
+/*****************************************************************************
+ * RAW SGL API
+ *****************************************************************************/
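+/*
+ * Init/update/finalize wrappers; with the whole buffer passed in a single
+ * update call these are expected to match the single-shot API above.
+ */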
+static void
+sgl_aes_gcm_enc_128(const struct gcm_key_data *key,
+ struct gcm_context_data *ctx,
+ uint8_t *out, const uint8_t *in, uint64_t len,
+ const uint8_t *iv, const uint8_t *aad, uint64_t aad_len,
+ uint8_t *auth_tag, uint64_t auth_tag_len)
+{
+ IMB_AES128_GCM_INIT(p_gcm_mgr, key, ctx, iv, aad, aad_len);
+ IMB_AES128_GCM_ENC_UPDATE(p_gcm_mgr, key, ctx, out, in, len);
+ IMB_AES128_GCM_ENC_FINALIZE(p_gcm_mgr, key, ctx,
+ auth_tag, auth_tag_len);
+}
+
+static void
+sgl_aes_gcm_dec_128(const struct gcm_key_data *key,
+ struct gcm_context_data *ctx,
+ uint8_t *out, const uint8_t *in, uint64_t len,
+ const uint8_t *iv, const uint8_t *aad, uint64_t aad_len,
+ uint8_t *auth_tag, uint64_t auth_tag_len)
+{
+ IMB_AES128_GCM_INIT(p_gcm_mgr, key, ctx, iv, aad, aad_len);
+ IMB_AES128_GCM_DEC_UPDATE(p_gcm_mgr, key, ctx, out, in, len);
+ IMB_AES128_GCM_DEC_FINALIZE(p_gcm_mgr, key, ctx,
+ auth_tag, auth_tag_len);
+}
+
+static void
+sgl_aes_gcm_enc_192(const struct gcm_key_data *key,
+ struct gcm_context_data *ctx,
+ uint8_t *out, const uint8_t *in, uint64_t len,
+ const uint8_t *iv, const uint8_t *aad, uint64_t aad_len,
+ uint8_t *auth_tag, uint64_t auth_tag_len)
+{
+ IMB_AES192_GCM_INIT(p_gcm_mgr, key, ctx, iv, aad, aad_len);
+ IMB_AES192_GCM_ENC_UPDATE(p_gcm_mgr, key, ctx, out, in, len);
+ IMB_AES192_GCM_ENC_FINALIZE(p_gcm_mgr, key, ctx,
+ auth_tag, auth_tag_len);
+}
+
+static void
+sgl_aes_gcm_dec_192(const struct gcm_key_data *key,
+ struct gcm_context_data *ctx,
+ uint8_t *out, const uint8_t *in, uint64_t len,
+ const uint8_t *iv, const uint8_t *aad, uint64_t aad_len,
+ uint8_t *auth_tag, uint64_t auth_tag_len)
+{
+ IMB_AES192_GCM_INIT(p_gcm_mgr, key, ctx, iv, aad, aad_len);
+ IMB_AES192_GCM_DEC_UPDATE(p_gcm_mgr, key, ctx, out, in, len);
+ IMB_AES192_GCM_DEC_FINALIZE(p_gcm_mgr, key, ctx,
+ auth_tag, auth_tag_len);
+}
+
+static void
+sgl_aes_gcm_enc_256(const struct gcm_key_data *key,
+ struct gcm_context_data *ctx,
+ uint8_t *out, const uint8_t *in, uint64_t len,
+ const uint8_t *iv, const uint8_t *aad, uint64_t aad_len,
+ uint8_t *auth_tag, uint64_t auth_tag_len)
+{
+ IMB_AES256_GCM_INIT(p_gcm_mgr, key, ctx, iv, aad, aad_len);
+ IMB_AES256_GCM_ENC_UPDATE(p_gcm_mgr, key, ctx, out, in, len);
+ IMB_AES256_GCM_ENC_FINALIZE(p_gcm_mgr, key, ctx,
+ auth_tag, auth_tag_len);
+}
+
+static void
+sgl_aes_gcm_dec_256(const struct gcm_key_data *key,
+ struct gcm_context_data *ctx,
+ uint8_t *out, const uint8_t *in, uint64_t len,
+ const uint8_t *iv, const uint8_t *aad, uint64_t aad_len,
+ uint8_t *auth_tag, uint64_t auth_tag_len)
+{
+ IMB_AES256_GCM_INIT(p_gcm_mgr, key, ctx, iv, aad, aad_len);
+ IMB_AES256_GCM_DEC_UPDATE(p_gcm_mgr, key, ctx, out, in, len);
+ IMB_AES256_GCM_DEC_FINALIZE(p_gcm_mgr, key, ctx,
+ auth_tag, auth_tag_len);
+}
+
+/*****************************************************************************
+ * job API
+ *****************************************************************************/
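+/*
+ * Job API wrappers: each call builds a JOB_AES_HMAC descriptor with
+ * cipher_mode GCM and hash_alg AES_GMAC and pushes it through the
+ * multi-buffer manager.
+ */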
+static void
+aes_gcm_job(MB_MGR *mb_mgr,
+ JOB_CHAIN_ORDER order,
+ const struct gcm_key_data *key,
+ uint64_t key_len,
+ uint8_t *out, const uint8_t *in, uint64_t len,
+ const uint8_t *iv, const uint8_t *aad, uint64_t aad_len,
+ uint8_t *auth_tag, uint64_t auth_tag_len)
+{
+ JOB_AES_HMAC *job;
+
+ job = IMB_GET_NEXT_JOB(mb_mgr);
+ if (!job) {
+ fprintf(stderr, "failed to get job\n");
+ return;
+ }
+
+ job->cipher_mode = GCM;
+ job->hash_alg = AES_GMAC;
+ job->chain_order = order;
+ job->aes_enc_key_expanded = key;
+ job->aes_dec_key_expanded = key;
+ job->aes_key_len_in_bytes = key_len;
+ job->src = in;
+ job->dst = out;
+ job->msg_len_to_cipher_in_bytes = len;
+ job->cipher_start_src_offset_in_bytes = UINT64_C(0);
+ job->iv = iv;
+ job->iv_len_in_bytes = 12;
+ job->u.GCM.aad = aad;
+ job->u.GCM.aad_len_in_bytes = aad_len;
+ job->auth_tag_output = auth_tag;
+ job->auth_tag_output_len_in_bytes = auth_tag_len;
+ job->cipher_direction =
+ (order == CIPHER_HASH) ? ENCRYPT : DECRYPT;
+
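+ /*
+ * Submit may return a completed job immediately or NULL; the flush
+ * loop below forces out anything still queued.
+ */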
+ job = IMB_SUBMIT_JOB(mb_mgr);
+ while (job) {
+ if (job->status != STS_COMPLETED)
+ fprintf(stderr, "failed job, status:%d\n", job->status);
+ job = IMB_GET_COMPLETED_JOB(mb_mgr);
+ }
+ while ((job = IMB_FLUSH_JOB(mb_mgr)) != NULL) {
+ if (job->status != STS_COMPLETED)
+ fprintf(stderr, "failed job, status:%d\n", job->status);
+ }
+}
+
+static void
+job_aes_gcm_enc_128(const struct gcm_key_data *key,
+ struct gcm_context_data *ctx,
+ uint8_t *out, const uint8_t *in, uint64_t len,
+ const uint8_t *iv, const uint8_t *aad, uint64_t aad_len,
+ uint8_t *auth_tag, uint64_t auth_tag_len)
+{
+ (void) ctx; /* unused */
+ aes_gcm_job(p_gcm_mgr, CIPHER_HASH, key, AES_128_BYTES,
+ out, in, len,
+ iv, aad, aad_len,
+ auth_tag, auth_tag_len);
+}
+
+static void
+job_aes_gcm_dec_128(const struct gcm_key_data *key,
+ struct gcm_context_data *ctx,
+ uint8_t *out, const uint8_t *in, uint64_t len,
+ const uint8_t *iv, const uint8_t *aad, uint64_t aad_len,
+ uint8_t *auth_tag, uint64_t auth_tag_len)
+{
+ (void) ctx; /* unused */
+ aes_gcm_job(p_gcm_mgr, HASH_CIPHER, key, AES_128_BYTES,
+ out, in, len,
+ iv, aad, aad_len,
+ auth_tag, auth_tag_len);
+}
+
+static void
+job_aes_gcm_enc_192(const struct gcm_key_data *key,
+ struct gcm_context_data *ctx,
+ uint8_t *out, const uint8_t *in, uint64_t len,
+ const uint8_t *iv, const uint8_t *aad, uint64_t aad_len,
+ uint8_t *auth_tag, uint64_t auth_tag_len)
+{
+ (void) ctx; /* unused */
+ aes_gcm_job(p_gcm_mgr, CIPHER_HASH, key, AES_192_BYTES,
+ out, in, len,
+ iv, aad, aad_len,
+ auth_tag, auth_tag_len);
+}
+
+static void
+job_aes_gcm_dec_192(const struct gcm_key_data *key,
+ struct gcm_context_data *ctx,
+ uint8_t *out, const uint8_t *in, uint64_t len,
+ const uint8_t *iv, const uint8_t *aad, uint64_t aad_len,
+ uint8_t *auth_tag, uint64_t auth_tag_len)
+{
+ (void) ctx; /* unused */
+ aes_gcm_job(p_gcm_mgr, HASH_CIPHER, key, AES_192_BYTES,
+ out, in, len,
+ iv, aad, aad_len,
+ auth_tag, auth_tag_len);
+}
+
+static void
+job_aes_gcm_enc_256(const struct gcm_key_data *key,
+ struct gcm_context_data *ctx,
+ uint8_t *out, const uint8_t *in, uint64_t len,
+ const uint8_t *iv, const uint8_t *aad, uint64_t aad_len,
+ uint8_t *auth_tag, uint64_t auth_tag_len)
+{
+ (void) ctx; /* unused */
+ aes_gcm_job(p_gcm_mgr, CIPHER_HASH, key, AES_256_BYTES,
+ out, in, len,
+ iv, aad, aad_len,
+ auth_tag, auth_tag_len);
+}
+
+static void
+job_aes_gcm_dec_256(const struct gcm_key_data *key,
+ struct gcm_context_data *ctx,
+ uint8_t *out, const uint8_t *in, uint64_t len,
+ const uint8_t *iv, const uint8_t *aad, uint64_t aad_len,
+ uint8_t *auth_tag, uint64_t auth_tag_len)
+{
+ (void) ctx; /* unused */
+ aes_gcm_job(p_gcm_mgr, HASH_CIPHER, key, AES_256_BYTES,
+ out, in, len,
+ iv, aad, aad_len,
+ auth_tag, auth_tag_len);
+}
+
+/*****************************************************************************/
+
+static int
+test_gcm_vectors(struct gcm_ctr_vector const *vector,
+ gcm_enc_dec_fn_t encfn,
+ gcm_enc_dec_fn_t decfn)
+{
+ struct gcm_key_data gdata_key;
+ struct gcm_context_data gdata_ctx;
+ int is_error = 0;
+ /* Temporary array for the calculated vectors */
+ uint8_t *ct_test = NULL;
+ uint8_t *pt_test = NULL;
+ uint8_t *T_test = NULL;
+ uint8_t *T2_test = NULL;
+
+#ifdef DEBUG
+ printf("Testing GCM128 std vectors\n");
+#endif
+ if (vector->Plen != 0) {
+ /* Allocate space for the calculated ciphertext */
+ ct_test = malloc(vector->Plen);
+ if (ct_test == NULL) {
+ fprintf(stderr, "Can't allocate ciphertext memory\n");
+ is_error = 1;
+ goto test_gcm_vectors_exit;
+ }
+ /* Allocate space for the calculated plaintext */
+ pt_test = malloc(vector->Plen);
+ if (pt_test == NULL) {
+ fprintf(stderr, "Can't allocate plaintext memory\n");
+ is_error = 1;
+ goto test_gcm_vectors_exit;
+ }
+ }
+
+ T_test = malloc(vector->Tlen);
+ if (T_test == NULL) {
+ fprintf(stderr, "Can't allocate tag memory\n");
+ is_error = 1;
+ goto test_gcm_vectors_exit;
+ }
+ T2_test = malloc(vector->Tlen);
+ if (T2_test == NULL) {
+ fprintf(stderr, "Can't allocate tag(2) memory\n");
+ is_error = 1;
+ goto test_gcm_vectors_exit;
+ }
+ /* This is only required once for a given key */
+ switch (vector->Klen) {
+ case BITS_128:
+ IMB_AES128_GCM_PRE(p_gcm_mgr, vector->K, &gdata_key);
+ break;
+ case BITS_192:
+ IMB_AES192_GCM_PRE(p_gcm_mgr, vector->K, &gdata_key);
+ break;
+ case BITS_256:
+ default:
+ IMB_AES256_GCM_PRE(p_gcm_mgr, vector->K, &gdata_key);
+ break;
+ }
+
+ /*
+ * Encrypt
+ */
+ encfn(&gdata_key, &gdata_ctx,
+ ct_test, vector->P, vector->Plen,
+ vector->IV, vector->A, vector->Alen, T_test, vector->Tlen);
+ is_error |= check_data(ct_test, vector->C, vector->Plen,
+ "encrypted cypher text (C)");
+ is_error |= check_data(T_test, vector->T, vector->Tlen, "tag (T)");
+
+ /* test of in-place encrypt */
+ memcpy(pt_test, vector->P, vector->Plen);
+ encfn(&gdata_key, &gdata_ctx, pt_test, pt_test, vector->Plen,
+ vector->IV, vector->A, vector->Alen, T_test, vector->Tlen);
+ is_error |= check_data(pt_test, vector->C, vector->Plen,
+ "encrypted cypher text(in-place)");
+ memset(ct_test, 0, vector->Plen);
+ memset(T_test, 0, vector->Tlen);
+
+ /*
+ * Decrypt
+ */
+ decfn(&gdata_key, &gdata_ctx, pt_test, vector->C, vector->Plen,
+ vector->IV, vector->A, vector->Alen, T_test, vector->Tlen);
+ is_error |= check_data(pt_test, vector->P, vector->Plen,
+ "decrypted plain text (P)");
+ /*
+ * GCM decryption also produces an authentication tag which
+ * must be verified against the expected tag value
+ */
+ is_error |= check_data(T_test, vector->T, vector->Tlen,
+ "decrypted tag (T)");
+
+ /* test of in-place decrypt */
+ memcpy(ct_test, vector->C, vector->Plen);
+ decfn(&gdata_key, &gdata_ctx, ct_test, ct_test, vector->Plen,
+ vector->IV, vector->A, vector->Alen, T_test, vector->Tlen);
+ is_error |= check_data(ct_test, vector->P, vector->Plen,
+ "plain text (P) - in-place");
+ is_error |= check_data(T_test, vector->T, vector->Tlen,
+ "decrypted tag (T) - in-place");
+ /* enc -> dec */
+ encfn(&gdata_key, &gdata_ctx, ct_test, vector->P, vector->Plen,
+ vector->IV, vector->A, vector->Alen, T_test, vector->Tlen);
+ memset(pt_test, 0, vector->Plen);
+
+ decfn(&gdata_key, &gdata_ctx, pt_test, ct_test, vector->Plen,
+ vector->IV, vector->A, vector->Alen, T2_test, vector->Tlen);
+ is_error |= check_data(pt_test, vector->P, vector->Plen,
+ "self decrypted plain text (P)");
+ is_error |= check_data(T_test, T2_test, vector->Tlen,
+ "self decrypted tag (T)");
+
+ memset(pt_test, 0, vector->Plen);
+
+ test_gcm_vectors_exit:
+ if (NULL != ct_test)
+ free(ct_test);
+ if (NULL != pt_test)
+ free(pt_test);
+ if (NULL != T_test)
+ free(T_test);
+ if (NULL != T2_test)
+ free(T2_test);
+
+ return is_error;
+}
+
+static int test_gcm_std_vectors(void)
+{
+ int const vectors_cnt = sizeof(gcm_vectors) / sizeof(gcm_vectors[0]);
+ int vect;
+ int is_error = 0;
+
+ printf("AES-GCM standard test vectors:\n");
+ for (vect = 0; vect < vectors_cnt; vect++) {
+#ifdef DEBUG
+ printf("Standard vector %d/%d Keylen:%d IVlen:%d PTLen:%d "
+ "AADlen:%d Tlen:%d\n",
+ vect, vectors_cnt - 1,
+ (int) gcm_vectors[vect].Klen,
+ (int) gcm_vectors[vect].IVlen,
+ (int) gcm_vectors[vect].Plen,
+ (int) gcm_vectors[vect].Alen,
+ (int) gcm_vectors[vect].Tlen);
+#else
+ printf(".");
+#endif
+ switch (gcm_vectors[vect].Klen) {
+ case BITS_128:
+ is_error |= test_gcm_vectors(&gcm_vectors[vect],
+ aes_gcm_enc_128,
+ aes_gcm_dec_128);
+ is_error |= test_gcm_vectors(&gcm_vectors[vect],
+ sgl_aes_gcm_enc_128,
+ sgl_aes_gcm_dec_128);
+ is_error |= test_gcm_vectors(&gcm_vectors[vect],
+ job_aes_gcm_enc_128,
+ job_aes_gcm_dec_128);
+ break;
+ case BITS_192:
+ is_error |= test_gcm_vectors(&gcm_vectors[vect],
+ aes_gcm_enc_192,
+ aes_gcm_dec_192);
+ is_error |= test_gcm_vectors(&gcm_vectors[vect],
+ sgl_aes_gcm_enc_192,
+ sgl_aes_gcm_dec_192);
+ is_error |= test_gcm_vectors(&gcm_vectors[vect],
+ job_aes_gcm_enc_192,
+ job_aes_gcm_dec_192);
+ break;
+ case BITS_256:
+ is_error |= test_gcm_vectors(&gcm_vectors[vect],
+ aes_gcm_enc_256,
+ aes_gcm_dec_256);
+ is_error |= test_gcm_vectors(&gcm_vectors[vect],
+ sgl_aes_gcm_enc_256,
+ sgl_aes_gcm_dec_256);
+ is_error |= test_gcm_vectors(&gcm_vectors[vect],
+ job_aes_gcm_enc_256,
+ job_aes_gcm_dec_256);
+ break;
+ default:
+ is_error = -1;
+ break;
+ }
+ if (0 != is_error)
+ return is_error;
+ }
+ printf("\n");
+ return is_error;
+}
+
+int gcm_test(MB_MGR *p_mgr)
+{
+ int errors = 0;
+
+ p_gcm_mgr = p_mgr;
+
+ errors = test_gcm_std_vectors();
+
+ if (0 == errors)
+ printf("...Pass\n");
+ else
+ printf("...Fail\n");
+
+ return errors;
+}
diff --git a/src/spdk/intel-ipsec-mb/LibTestApp/gcm_vectors.h b/src/spdk/intel-ipsec-mb/LibTestApp/gcm_vectors.h
new file mode 100644
index 000000000..59f238ff3
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/LibTestApp/gcm_vectors.h
@@ -0,0 +1,38 @@
+/**********************************************************************
+ Copyright(c) 2011-2018 Intel Corporation All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+**********************************************************************/
+
+#ifndef AES_GCM_VECTORS_H_
+#define AES_GCM_VECTORS_H_
+
+#include <stdint.h>
+
+#include "gcm_std_vectors_test.h"
+
+
+#endif /* AES_GCM_VECTORS_H_ */
diff --git a/src/spdk/intel-ipsec-mb/LibTestApp/hmac_md5_test.c b/src/spdk/intel-ipsec-mb/LibTestApp/hmac_md5_test.c
new file mode 100644
index 000000000..ede7fe22c
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/LibTestApp/hmac_md5_test.c
@@ -0,0 +1,558 @@
+/*****************************************************************************
+ Copyright (c) 2018, Intel Corporation
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*****************************************************************************/
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <intel-ipsec-mb.h>
+#include "gcm_ctr_vectors_test.h"
+#include "utils.h"
+
+int hmac_md5_test(const enum arch_type arch, struct MB_MGR *mb_mgr);
+
+#define block_size 64
+#define digest_size 16
+#define digest96_size 12
+
+/*
+ * Test vectors from https://tools.ietf.org/html/rfc2202
+ */
+
+/*
+ * 2. Test Case 1
+ *
+ * Key = 0x0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b
+ *
+ * Key length = 16
+ *
+ * Data = "Hi There"
+ *
+ * Data length = 8
+ *
+ * Digest = 0x9294727a3638bb1c13f48ef8158bfc9d
+ *
+ * Digest96 = 0x9294727a3638bb1c13f48ef8
+ */
+#define test_case1 "1"
+#define test_case_l1 "1_long"
+#define key_len1 16
+#define data_len1 8
+#define digest_len1 digest96_size
+#define digest_len_l1 digest_size
+static const uint8_t key1[key_len1] = {
+ 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
+ 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b
+};
+static const char data1[] = "Hi There";
+static const uint8_t digest1[digest_len_l1] = {
+ 0x92, 0x94, 0x72, 0x7a, 0x36, 0x38, 0xbb, 0x1c,
+ 0x13, 0xf4, 0x8e, 0xf8, 0x15, 0x8b, 0xfc, 0x9d
+};
+
+/*
+ * 2. Test Case 2
+ *
+ * Key = "Jefe"
+ *
+ * Key length = 4
+ *
+ * Data = "what do ya want for nothing?"
+ *
+ * Data length = 28
+ *
+ * Digest = 0x750c783e6ab0b503eaa86e310a5db738
+ *
+ * Digest96 = 0x750c783e6ab0b503eaa86e31
+ */
+#define test_case2 "2"
+#define test_case_l2 "2_long"
+#define key_len2 4
+#define data_len2 28
+#define digest_len2 digest96_size
+#define digest_len_l2 digest_size
+static const char key2[] = "Jefe";
+static const char data2[] = "what do ya want for nothing?";
+static const uint8_t digest2[digest_len_l2] = {
+ 0x75, 0x0c, 0x78, 0x3e, 0x6a, 0xb0, 0xb5, 0x03,
+ 0xea, 0xa8, 0x6e, 0x31, 0x0a, 0x5d, 0xb7, 0x38
+};
+
+/*
+ * 2. Test Case 3
+ *
+ * Key = 0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+ *
+ * Key length = 16
+ *
+ * Data = 0xdd (repeated 50 times)
+ *
+ * Data length = 50
+ *
+ * Digest = 0x56be34521d144c88dbb8c733f0e8b3f6
+ *
+ * Digest96 = 0x56be34521d144c88dbb8c733
+ */
+#define test_case3 "3"
+#define test_case_l3 "3_long"
+#define key_len3 16
+#define data_len3 50
+#define digest_len3 digest96_size
+#define digest_len_l3 digest_size
+static const uint8_t key3[key_len3] = {
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa
+};
+static const uint8_t data3[data_len3] = {
+ 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+ 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+ 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+ 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+ 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+ 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+ 0xdd, 0xdd
+};
+static const uint8_t digest3[digest_len_l3] = {
+ 0x56, 0xbe, 0x34, 0x52, 0x1d, 0x14, 0x4c, 0x88,
+ 0xdb, 0xb8, 0xc7, 0x33, 0xf0, 0xe8, 0xb3, 0xf6
+};
+
+/*
+ * 2. Test Case 4
+ *
+ * Key = 0x0102030405060708090a0b0c0d0e0f10111213141516171819
+ *
+ * Key length = 25
+ *
+ * Data = 0xcd (repeated 50 times)
+ *
+ * Data length = 50
+ *
+ * Digest = 0x697eaf0aca3a3aea3a75164746ffaa79
+ *
+ * Digest96 = 0x697eaf0aca3a3aea3a751647
+ */
+#define test_case4 "4"
+#define test_case_l4 "4_long"
+#define key_len4 25
+#define data_len4 50
+#define digest_len4 digest96_size
+#define digest_len_l4 digest_size
+static const uint8_t key4[key_len4] = {
+ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+ 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10,
+ 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
+ 0x19
+};
+static const uint8_t data4[data_len4] = {
+ 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd,
+ 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd,
+ 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd,
+ 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd,
+ 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd,
+ 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd,
+ 0xcd, 0xcd
+};
+static const uint8_t digest4[digest_len_l4] = {
+ 0x69, 0x7e, 0xaf, 0x0a, 0xca, 0x3a, 0x3a, 0xea,
+ 0x3a, 0x75, 0x16, 0x47, 0x46, 0xff, 0xaa, 0x79
+};
+
+/*
+ * 2. Test Case 5
+ *
+ * Key = 0x0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c
+ *
+ * Key length = 16
+ *
+ * Data = "Test With Truncation"
+ *
+ * Data length = 20
+ *
+ * Digest = 0x56461ef2342edc00f9bab995690efd4c
+ *
+ * Digest96 = 0x56461ef2342edc00f9bab995
+ */
+#define test_case5 "5"
+#define test_case_l5 "5_long"
+#define key_len5 16
+#define data_len5 20
+#define digest_len5 digest96_size
+#define digest_len_l5 digest_size
+static const uint8_t key5[key_len5] = {
+ 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c
+};
+static const char data5[] = "Test With Truncation";
+static const uint8_t digest5[digest_len_l5] = {
+ 0x56, 0x46, 0x1e, 0xf2, 0x34, 0x2e, 0xdc, 0x00,
+ 0xf9, 0xba, 0xb9, 0x95, 0x69, 0x0e, 0xfd, 0x4c
+};
+
+/*
+ * 2. Test Case 6
+ *
+ * Key = 0xaa (repeated 80 times)
+ *
+ * Key length = 80
+ *
+ * Data = "Test Using Larger Than Block-Size Key - Hash Key First"
+ *
+ * Data length = 54
+ *
+ * Digest = 0x6b1ab7fe4bd7bf8f0b62e6ce61b9d0cd
+ *
+ * Digest96 = 0x6b1ab7fe4bd7bf8f0b62e6ce
+ */
+/* #define test_case6 "6" */
+/* #define key_len6 80 */
+/* #define data_len6 54 */
+/* #define digest_len6 digest96_size */
+/* static const uint8_t key6[key_len6] = { */
+/* 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, */
+/* 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, */
+/* 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, */
+/* 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, */
+/* 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, */
+/* 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, */
+/* 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, */
+/* 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, */
+/* 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, */
+/* 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa */
+/* }; */
+/* static const char data6[] = "Test Using Larger Than Block-Size " */
+/* "Key - Hash Key First"; */
+/* static const uint8_t digest6[digest_len6] = { */
+/* 0x6b, 0x1a, 0xb7, 0xfe, 0x4b, 0xd7, 0xbf, 0x8f, */
+/* 0x0b, 0x62, 0xe6, 0xce */
+/* }; */
+
+/*
+ * 2. Test Case 7
+ *
+ * Key = 0xaa (repeated 80 times)
+ *
+ * Key length = 80
+ *
+ * Data = "Test Using Larger Than Block-Size Key and Larger"
+ *
+ * Data length = 73
+ *
+ * Digest = 0x6f630fad67cda0ee1fb1f562db3aa53e
+ *
+ * Digest96 = 0x6f630fad67cda0ee1fb1f562
+ */
+/* #define test_case7 "7" */
+/* #define key_len7 80 */
+/* #define data_len7 73 */
+/* #define digest_len7 digest96_size */
+/* static const uint8_t key7[key_len7] = { */
+/* 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, */
+/* 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, */
+/* 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, */
+/* 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, */
+/* 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, */
+/* 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, */
+/* 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, */
+/* 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, */
+/* 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, */
+/* 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa */
+/* }; */
+/* static const char data7[] = "Test Using Larger Than Block-Size " */
+/* "Key and Larger Than One Block-Size Data"; */
+/* static const uint8_t digest7[digest_len7] = { */
+/* 0x6f, 0x63, 0x0f, 0xad, 0x67, 0xcd, 0xa0, 0xee, */
+/* 0x1f, 0xb1, 0xf5, 0x62 */
+/* }; */
+
+#define HMAC_MD5_TEST_VEC(num) \
+ { test_case##num, \
+ (const uint8_t *) key##num, key_len##num, \
+ (const uint8_t *) data##num, data_len##num, \
+ (const uint8_t *) digest##num, digest_len##num }
+#define HMAC_MD5_TEST_VEC_LONG(num) \
+ { test_case_l##num, \
+ (const uint8_t *) key##num, key_len##num, \
+ (const uint8_t *) data##num, data_len##num, \
+ (const uint8_t *) digest##num, digest_len_l##num }
+
+static const struct hmac_md5_rfc2202_vector {
+ const char *test_case;
+ const uint8_t *key;
+ size_t key_len;
+ const uint8_t *data;
+ size_t data_len;
+ const uint8_t *digest;
+ size_t digest_len;
+} hmac_md5_vectors[] = {
+ HMAC_MD5_TEST_VEC(1),
+ HMAC_MD5_TEST_VEC(2),
+ HMAC_MD5_TEST_VEC(3),
+ HMAC_MD5_TEST_VEC(4),
+ HMAC_MD5_TEST_VEC(5),
+ /* HMAC_MD5_TEST_VEC(6), */
+ /* HMAC_MD5_TEST_VEC(7), */
+ HMAC_MD5_TEST_VEC_LONG(1),
+ HMAC_MD5_TEST_VEC_LONG(2),
+ HMAC_MD5_TEST_VEC_LONG(3),
+ HMAC_MD5_TEST_VEC_LONG(4),
+ HMAC_MD5_TEST_VEC_LONG(5),
+};
+
+static int
+hmac_md5_job_ok(const struct hmac_md5_rfc2202_vector *vec,
+ const struct JOB_AES_HMAC *job,
+ const uint8_t *auth,
+ const uint8_t *padding,
+ const size_t sizeof_padding)
+{
+ if (job->status != STS_COMPLETED) {
+ printf("line:%d job error status:%d ", __LINE__, job->status);
+ return 0;
+ }
+
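+ /*
+ * The digest was written between two guard regions initialised to
+ * 0xff; make sure neither guard was overwritten.
+ */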
+ /* hash checks */
+ if (memcmp(padding, &auth[sizeof_padding + vec->digest_len],
+ sizeof_padding)) {
+ printf("hash overwrite tail\n");
+ hexdump(stderr, "Target",
+ &auth[sizeof_padding + vec->digest_len],
+ sizeof_padding);
+ return 0;
+ }
+
+ if (memcmp(padding, &auth[0], sizeof_padding)) {
+ printf("hash overwrite head\n");
+ hexdump(stderr, "Target", &auth[0], sizeof_padding);
+ return 0;
+ }
+
+ if (memcmp(vec->digest, &auth[sizeof_padding],
+ vec->digest_len)) {
+ printf("hash mismatched\n");
+ hexdump(stderr, "Received", &auth[sizeof_padding],
+ vec->digest_len);
+ hexdump(stderr, "Expected", vec->digest,
+ vec->digest_len);
+ return 0;
+ }
+ return 1;
+}
+
+static int
+test_hmac_md5(struct MB_MGR *mb_mgr,
+ const struct hmac_md5_rfc2202_vector *vec,
+ const int num_jobs)
+{
+ struct JOB_AES_HMAC *job;
+ uint8_t padding[16];
+ uint8_t **auths = malloc(num_jobs * sizeof(void *));
+ int i = 0, jobs_rx = 0, ret = -1;
+ uint8_t key[block_size];
+ uint8_t buf[block_size];
+ DECLARE_ALIGNED(uint8_t ipad_hash[digest_size], 16);
+ DECLARE_ALIGNED(uint8_t opad_hash[digest_size], 16);
+ int key_len = 0;
+
+ if (auths == NULL) {
+ fprintf(stderr, "Can't allocate buffer memory\n");
+ goto end2;
+ }
+
+ memset(padding, -1, sizeof(padding));
+ memset(auths, 0, num_jobs * sizeof(void *));
+
+ for (i = 0; i < num_jobs; i++) {
+ const size_t alloc_len =
+ vec->digest_len + (sizeof(padding) * 2);
+
+ auths[i] = malloc(alloc_len);
+ if (auths[i] == NULL) {
+ fprintf(stderr, "Can't allocate buffer memory\n");
+ goto end;
+ }
+ memset(auths[i], -1, alloc_len);
+ }
+
+ /* prepare the key */
+ memset(key, 0, sizeof(key));
+ if (vec->key_len <= block_size) {
+ memcpy(key, vec->key, vec->key_len);
+ key_len = (int) vec->key_len;
+ } else {
+ printf("Key length longer than block size is not supported "
+ "by MD5\n");
+ ret = 0;
+ goto end;
+ }
+
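+ /*
+ * HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)). The job API takes
+ * the one-block digests of (K ^ ipad) and (K ^ opad), so precompute
+ * them here with IMB_MD5_ONE_BLOCK.
+ */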
+ /* compute ipad hash */
+ memset(buf, 0x36, sizeof(buf));
+ for (i = 0; i < key_len; i++)
+ buf[i] ^= key[i];
+ IMB_MD5_ONE_BLOCK(mb_mgr, buf, ipad_hash);
+
+ /* compute opad hash */
+ memset(buf, 0x5c, sizeof(buf));
+ for (i = 0; i < key_len; i++)
+ buf[i] ^= key[i];
+ IMB_MD5_ONE_BLOCK(mb_mgr, buf, opad_hash);
+
+ /* empty the manager */
+ while ((job = IMB_FLUSH_JOB(mb_mgr)) != NULL)
+ ;
+
+ for (i = 0; i < num_jobs; i++) {
+ job = IMB_GET_NEXT_JOB(mb_mgr);
+ job->aes_enc_key_expanded = NULL;
+ job->aes_dec_key_expanded = NULL;
+ job->cipher_direction = ENCRYPT;
+ job->chain_order = HASH_CIPHER;
+ job->dst = NULL;
+ job->aes_key_len_in_bytes = 0;
+ job->auth_tag_output = auths[i] + sizeof(padding);
+ job->auth_tag_output_len_in_bytes = vec->digest_len;
+ job->iv = NULL;
+ job->iv_len_in_bytes = 0;
+ job->src = vec->data;
+ job->cipher_start_src_offset_in_bytes = 0;
+ job->msg_len_to_cipher_in_bytes = 0;
+ job->hash_start_src_offset_in_bytes = 0;
+ job->msg_len_to_hash_in_bytes = vec->data_len;
+ job->u.HMAC._hashed_auth_key_xor_ipad = ipad_hash;
+ job->u.HMAC._hashed_auth_key_xor_opad = opad_hash;
+ job->cipher_mode = NULL_CIPHER;
+ job->hash_alg = MD5;
+
+ job->user_data = auths[i];
+
+ job = IMB_SUBMIT_JOB(mb_mgr);
+ if (job) {
+ jobs_rx++;
+ /*
+ * HMAC-MD5 requires 8 submissions to get one back
+ */
+ if (num_jobs < 8) {
+ printf("%d Unexpected return from submit_job\n",
+ __LINE__);
+ goto end;
+ }
+ if (!hmac_md5_job_ok(vec, job, job->user_data,
+ padding, sizeof(padding)))
+ goto end;
+ }
+ }
+
+ while ((job = IMB_FLUSH_JOB(mb_mgr)) != NULL) {
+ jobs_rx++;
+ if (!hmac_md5_job_ok(vec, job, job->user_data,
+ padding, sizeof(padding)))
+ goto end;
+ }
+
+ if (jobs_rx != num_jobs) {
+ printf("Expected %d jobs, received %d\n", num_jobs, jobs_rx);
+ goto end;
+ }
+ ret = 0;
+
+ end:
+ /* empty the manager before next tests */
+ while ((job = IMB_FLUSH_JOB(mb_mgr)) != NULL)
+ ;
+
+ for (i = 0; i < num_jobs; i++) {
+ if (auths[i] != NULL)
+ free(auths[i]);
+ }
+
+ end2:
+ if (auths != NULL)
+ free(auths);
+
+ return ret;
+}
+
+static int
+test_hmac_md5_std_vectors(struct MB_MGR *mb_mgr, const int num_jobs)
+{
+ const int vectors_cnt =
+ sizeof(hmac_md5_vectors) / sizeof(hmac_md5_vectors[0]);
+ int vect;
+ int errors = 0;
+
+ printf("HMAC-MD5 standard test vectors (N jobs = %d):\n", num_jobs);
+ for (vect = 1; vect <= vectors_cnt; vect++) {
+ const int idx = vect - 1;
+#ifdef DEBUG
+ printf("[%d/%d] RFC2202 Test Case %s key_len:%d data_len:%d "
+ "digest_len:%d\n",
+ vect, vectors_cnt,
+ hmac_md5_vectors[idx].test_case,
+ (int) hmac_md5_vectors[idx].key_len,
+ (int) hmac_md5_vectors[idx].data_len,
+ (int) hmac_md5_vectors[idx].digest_len);
+#else
+ printf(".");
+#endif
+
+ if (test_hmac_md5(mb_mgr, &hmac_md5_vectors[idx], num_jobs)) {
+ printf("error #%d\n", vect);
+ errors++;
+ }
+ }
+ printf("\n");
+ return errors;
+}
+
+int
+hmac_md5_test(const enum arch_type arch,
+ struct MB_MGR *mb_mgr)
+{
+ int errors = 0;
+
+ (void) arch; /* unused */
+
+ errors += test_hmac_md5_std_vectors(mb_mgr, 1);
+ errors += test_hmac_md5_std_vectors(mb_mgr, 3);
+ errors += test_hmac_md5_std_vectors(mb_mgr, 4);
+ errors += test_hmac_md5_std_vectors(mb_mgr, 5);
+ errors += test_hmac_md5_std_vectors(mb_mgr, 7);
+ errors += test_hmac_md5_std_vectors(mb_mgr, 8);
+ errors += test_hmac_md5_std_vectors(mb_mgr, 9);
+ errors += test_hmac_md5_std_vectors(mb_mgr, 15);
+ errors += test_hmac_md5_std_vectors(mb_mgr, 16);
+ errors += test_hmac_md5_std_vectors(mb_mgr, 17);
+
+ if (0 == errors)
+ printf("...Pass\n");
+ else
+ printf("...Fail\n");
+
+ return errors;
+}
diff --git a/src/spdk/intel-ipsec-mb/LibTestApp/hmac_sha1_test.c b/src/spdk/intel-ipsec-mb/LibTestApp/hmac_sha1_test.c
new file mode 100644
index 000000000..fcb81ea1b
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/LibTestApp/hmac_sha1_test.c
@@ -0,0 +1,537 @@
+/*****************************************************************************
+ Copyright (c) 2018, Intel Corporation
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*****************************************************************************/
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <intel-ipsec-mb.h>
+#include "gcm_ctr_vectors_test.h"
+#include "utils.h"
+
+int hmac_sha1_test(const enum arch_type arch, struct MB_MGR *mb_mgr);
+
+#define block_size 64
+#define digest_size 20
+#define digest96_size 12
+
+/*
+ * Test vectors from https://tools.ietf.org/html/rfc2202
+ */
+
+/*
+ * test_case = 1
+ * key = 0x0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b
+ * key_len = 20
+ * data = "Hi There"
+ * data_len = 8
+ * digest = 0xb617318655057264e28bc0b6fb378c8ef146be00
+ */
+#define test_case1 "1"
+#define key_len1 20
+#define data_len1 8
+#define digest_len1 digest_size
+static const uint8_t key1[key_len1] = {
+ 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
+ 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
+ 0x0b, 0x0b, 0x0b, 0x0b
+};
+static const char data1[] = "Hi There";
+static const uint8_t digest1[digest_len1] = {
+ 0xb6, 0x17, 0x31, 0x86, 0x55, 0x05, 0x72, 0x64,
+ 0xe2, 0x8b, 0xc0, 0xb6, 0xfb, 0x37, 0x8c, 0x8e,
+ 0xf1, 0x46, 0xbe, 0x00
+};
+
+/*
+ * test_case = 2
+ * key = "Jefe"
+ * key_len = 4
+ * data = "what do ya want for nothing?"
+ * data_len = 28
+ * digest = 0xeffcdf6ae5eb2fa2d27416d5f184df9c259a7c79
+ */
+#define test_case2 "2"
+#define key_len2 4
+#define data_len2 28
+#define digest_len2 digest_size
+static const char key2[] = "Jefe";
+static const char data2[] = "what do ya want for nothing?";
+static const uint8_t digest2[digest_len2] = {
+ 0xef, 0xfc, 0xdf, 0x6a, 0xe5, 0xeb, 0x2f, 0xa2,
+ 0xd2, 0x74, 0x16, 0xd5, 0xf1, 0x84, 0xdf, 0x9c,
+ 0x25, 0x9a, 0x7c, 0x79
+};
+
+/*
+ * test_case = 3
+ * key = 0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+ * key_len = 20
+ * data = 0xdd repeated 50 times
+ * data_len = 50
+ * digest = 0x125d7342b9ac11cd91a39af48aa17b4f63f175d3
+ */
+#define test_case3 "3"
+#define key_len3 20
+#define data_len3 50
+#define digest_len3 digest_size
+static const uint8_t key3[key_len3] = {
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa
+};
+static const uint8_t data3[data_len3] = {
+ 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+ 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+ 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+ 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+ 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+ 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+ 0xdd, 0xdd
+};
+static const uint8_t digest3[digest_len3] = {
+ 0x12, 0x5d, 0x73, 0x42, 0xb9, 0xac, 0x11, 0xcd,
+ 0x91, 0xa3, 0x9a, 0xf4, 0x8a, 0xa1, 0x7b, 0x4f,
+ 0x63, 0xf1, 0x75, 0xd3
+};
+
+/*
+ * test_case = 4
+ * key = 0x0102030405060708090a0b0c0d0e0f10111213141516171819
+ * key_len = 25
+ * data = 0xcd repeated 50 times
+ * data_len = 50
+ * digest = 0x4c9007f4026250c6bc8414f9bf50c86c2d7235da
+ */
+#define test_case4 "4"
+#define key_len4 25
+#define data_len4 50
+#define digest_len4 digest_size
+static const uint8_t key4[key_len4] = {
+ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+ 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10,
+ 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
+ 0x19
+};
+static const uint8_t data4[data_len4] = {
+ 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd,
+ 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd,
+ 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd,
+ 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd,
+ 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd,
+ 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd,
+ 0xcd, 0xcd
+};
+static const uint8_t digest4[digest_len4] = {
+ 0x4c, 0x90, 0x07, 0xf4, 0x02, 0x62, 0x50, 0xc6,
+ 0xbc, 0x84, 0x14, 0xf9, 0xbf, 0x50, 0xc8, 0x6c,
+ 0x2d, 0x72, 0x35, 0xda
+};
+
+/*
+ * test_case = 5
+ * key = 0x0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c
+ * key_len = 20
+ * data = "Test With Truncation"
+ * data_len = 20
+ * digest = 0x4c1a03424b55e07fe7f27be1d58bb9324a9a5a04
+ * digest-96 = 0x4c1a03424b55e07fe7f27be1
+ */
+#define test_case5 "5"
+#define key_len5 20
+#define data_len5 20
+#define digest_len5 digest_size
+static const uint8_t key5[key_len5] = {
+ 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x0c, 0x0c
+};
+static const char data5[] = "Test With Truncation";
+static const uint8_t digest5[digest_len5] = {
+ 0x4c, 0x1a, 0x03, 0x42, 0x4b, 0x55, 0xe0, 0x7f,
+ 0xe7, 0xf2, 0x7b, 0xe1, 0xd5, 0x8b, 0xb9, 0x32,
+ 0x4a, 0x9a, 0x5a, 0x04
+};
+
+#define test_case5_96 "5-96"
+#define key_len5_96 key_len5
+#define data_len5_96 data_len5
+#define digest_len5_96 digest96_size
+#define key5_96 key5
+#define data5_96 data5
+static const uint8_t digest5_96[digest_len5_96] = {
+ 0x4c, 0x1a, 0x03, 0x42, 0x4b, 0x55, 0xe0, 0x7f,
+ 0xe7, 0xf2, 0x7b, 0xe1
+};
+
+/*
+ * test_case = 6
+ * key = 0xaa repeated 80 times
+ * key_len = 80
+ * data = "Test Using Larger Than Block-Size Key - Hash Key First"
+ * data_len = 54
+ * digest = 0xaa4ae5e15272d00e95705637ce8a3b55ed402112
+ */
+#define test_case6 "6"
+#define key_len6 80
+#define data_len6 54
+#define digest_len6 digest_size
+static const uint8_t key6[key_len6] = {
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+};
+static const char data6[] =
+ "Test Using Larger Than Block-Size Key - Hash Key First";
+static const uint8_t digest6[digest_len6] = {
+ 0xaa, 0x4a, 0xe5, 0xe1, 0x52, 0x72, 0xd0, 0x0e,
+ 0x95, 0x70, 0x56, 0x37, 0xce, 0x8a, 0x3b, 0x55,
+ 0xed, 0x40, 0x21, 0x12
+};
+
+/*
+ * test_case = 7
+ * key = 0xaa repeated 80 times
+ * key_len = 80
+ * data = "Test Using Larger Than Block-Size Key and Larger
+ * Than One Block-Size Data"
+ * data_len = 73
+ * digest = 0xe8e99d0f45237d786d6bbaa7965c7808bbff1a91
+ */
+#define test_case7 "7"
+#define key_len7 80
+#define data_len7 73
+#define digest_len7 digest_size
+static const uint8_t key7[key_len7] = {
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+};
+static const char data7[] =
+ "Test Using Larger Than Block-Size Key and "
+ "Larger Than One Block-Size Data";
+static const uint8_t digest7[digest_len7] = {
+ 0xe8, 0xe9, 0x9d, 0x0f, 0x45, 0x23, 0x7d, 0x78,
+ 0x6d, 0x6b, 0xba, 0xa7, 0x96, 0x5c, 0x78, 0x08,
+ 0xbb, 0xff, 0x1a, 0x91
+};
+
+/*
+ * Test vector from https://csrc.nist.gov/csrc/media/publications/fips/198/
+ * archive/2002-03-06/documents/fips-198a.pdf
+ */
+#define test_case8 "8"
+#define key_len8 49
+#define data_len8 9
+#define digest_len8 digest96_size
+static const uint8_t key8[key_len8] = {
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
+ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
+ 0xa0
+};
+static const char data8[] = "Sample #4";
+static const uint8_t digest8[digest_len8] = {
+ 0x9e, 0xa8, 0x86, 0xef, 0xe2, 0x68, 0xdb, 0xec,
+ 0xce, 0x42, 0x0c, 0x75
+};
+
+#define HMAC_SHA1_TEST_VEC(num) \
+ { test_case##num, \
+ (const uint8_t *) key##num, key_len##num, \
+ (const uint8_t *) data##num, data_len##num, \
+ (const uint8_t *) digest##num, digest_len##num }
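+
+/*
+ * Each invocation builds one vector entry by token pasting, e.g.
+ * HMAC_SHA1_TEST_VEC(5_96) expands (roughly) to
+ * { test_case5_96, key5_96, key_len5_96, data5_96, data_len5_96,
+ *   digest5_96, digest_len5_96 }.
+ */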
+
+static const struct hmac_sha1_rfc2202_vector {
+ const char *test_case;
+ const uint8_t *key;
+ size_t key_len;
+ const uint8_t *data;
+ size_t data_len;
+ const uint8_t *digest;
+ size_t digest_len;
+} hmac_sha1_vectors[] = {
+ HMAC_SHA1_TEST_VEC(1),
+ HMAC_SHA1_TEST_VEC(2),
+ HMAC_SHA1_TEST_VEC(3),
+ HMAC_SHA1_TEST_VEC(4),
+ HMAC_SHA1_TEST_VEC(5),
+ HMAC_SHA1_TEST_VEC(5_96),
+ HMAC_SHA1_TEST_VEC(6),
+ HMAC_SHA1_TEST_VEC(7),
+ HMAC_SHA1_TEST_VEC(8)
+};
+
+static int
+hmac_sha1_job_ok(const struct hmac_sha1_rfc2202_vector *vec,
+ const struct JOB_AES_HMAC *job,
+ const uint8_t *auth,
+ const uint8_t *padding,
+ const size_t sizeof_padding)
+{
+ if (job->status != STS_COMPLETED) {
+ printf("line:%d job error status:%d ", __LINE__, job->status);
+ return 0;
+ }
+
+ /* hash checks */
+ if (memcmp(padding, &auth[sizeof_padding + vec->digest_len],
+ sizeof_padding)) {
+ printf("hash overwrite tail\n");
+ hexdump(stderr, "Target",
+ &auth[sizeof_padding + vec->digest_len],
+ sizeof_padding);
+ return 0;
+ }
+
+ if (memcmp(padding, &auth[0], sizeof_padding)) {
+ printf("hash overwrite head\n");
+ hexdump(stderr, "Target", &auth[0], sizeof_padding);
+ return 0;
+ }
+
+ if (memcmp(vec->digest, &auth[sizeof_padding],
+ vec->digest_len)) {
+ printf("hash mismatched\n");
+ hexdump(stderr, "Received", &auth[sizeof_padding],
+ vec->digest_len);
+ hexdump(stderr, "Expected", vec->digest,
+ vec->digest_len);
+ return 0;
+ }
+ return 1;
+}
+
+static int
+test_hmac_sha1(struct MB_MGR *mb_mgr,
+ const struct hmac_sha1_rfc2202_vector *vec,
+ const int num_jobs)
+{
+ struct JOB_AES_HMAC *job;
+ uint8_t padding[16];
+ uint8_t **auths = malloc(num_jobs * sizeof(void *));
+ int i = 0, jobs_rx = 0, ret = -1;
+ uint8_t key[block_size];
+ uint8_t buf[block_size];
+ DECLARE_ALIGNED(uint8_t ipad_hash[digest_size], 16);
+ DECLARE_ALIGNED(uint8_t opad_hash[digest_size], 16);
+ int key_len = 0;
+
+ if (auths == NULL) {
+ fprintf(stderr, "Can't allocate buffer memory\n");
+ goto end2;
+ }
+
+ memset(padding, -1, sizeof(padding));
+ memset(auths, 0, num_jobs * sizeof(void *));
+
+ for (i = 0; i < num_jobs; i++) {
+ const size_t alloc_len =
+ vec->digest_len + (sizeof(padding) * 2);
+
+ auths[i] = malloc(alloc_len);
+ if (auths[i] == NULL) {
+ fprintf(stderr, "Can't allocate buffer memory\n");
+ goto end;
+ }
+ memset(auths[i], -1, alloc_len);
+ }
+
+ /* prepare the key */
+ memset(key, 0, sizeof(key));
+ if (vec->key_len <= block_size) {
+ memcpy(key, vec->key, vec->key_len);
+ key_len = (int) vec->key_len;
+ } else {
+ IMB_SHA1(mb_mgr, vec->key, vec->key_len, key);
+ key_len = digest_size;
+ }
+
+ /* compute ipad hash */
+ memset(buf, 0x36, sizeof(buf));
+ for (i = 0; i < key_len; i++)
+ buf[i] ^= key[i];
+ IMB_SHA1_ONE_BLOCK(mb_mgr, buf, ipad_hash);
+
+ /* compute opad hash */
+ memset(buf, 0x5c, sizeof(buf));
+ for (i = 0; i < key_len; i++)
+ buf[i] ^= key[i];
+ IMB_SHA1_ONE_BLOCK(mb_mgr, buf, opad_hash);
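+
+/*
+ * The two one-block hashes above are the usual HMAC pre-computation:
+ * informally, HMAC(K, m) = H((K' ^ opad) || H((K' ^ ipad) || m)), where K'
+ * is the key zero-padded (or hashed first, if longer than a block) to the
+ * block size. The jobs below hand these intermediate states to the manager
+ * through the _hashed_auth_key_xor_ipad/_hashed_auth_key_xor_opad fields.
+ */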
+
+ /* empty the manager */
+ while ((job = IMB_FLUSH_JOB(mb_mgr)) != NULL)
+ ;
+
+ for (i = 0; i < num_jobs; i++) {
+ job = IMB_GET_NEXT_JOB(mb_mgr);
+ job->aes_enc_key_expanded = NULL;
+ job->aes_dec_key_expanded = NULL;
+ job->cipher_direction = ENCRYPT;
+ job->chain_order = HASH_CIPHER;
+ job->dst = NULL;
+ job->aes_key_len_in_bytes = 0;
+ job->auth_tag_output = auths[i] + sizeof(padding);
+ job->auth_tag_output_len_in_bytes = vec->digest_len;
+ job->iv = NULL;
+ job->iv_len_in_bytes = 0;
+ job->src = vec->data;
+ job->cipher_start_src_offset_in_bytes = 0;
+ job->msg_len_to_cipher_in_bytes = 0;
+ job->hash_start_src_offset_in_bytes = 0;
+ job->msg_len_to_hash_in_bytes = vec->data_len;
+ job->u.HMAC._hashed_auth_key_xor_ipad = ipad_hash;
+ job->u.HMAC._hashed_auth_key_xor_opad = opad_hash;
+ job->cipher_mode = NULL_CIPHER;
+ job->hash_alg = SHA1;
+
+ job->user_data = auths[i];
+
+ job = IMB_SUBMIT_JOB(mb_mgr);
+ if (job) {
+ jobs_rx++;
+ /*
+ * SHANI HMAC-SHA implementation can return a completed
+ * job after 2nd submission
+ */
+ if (num_jobs < 2) {
+ printf("%d Unexpected return from submit_job\n",
+ __LINE__);
+ goto end;
+ }
+ if (!hmac_sha1_job_ok(vec, job, job->user_data,
+ padding, sizeof(padding)))
+ goto end;
+ }
+ }
+
+ while ((job = IMB_FLUSH_JOB(mb_mgr)) != NULL) {
+ jobs_rx++;
+ if (!hmac_sha1_job_ok(vec, job, job->user_data,
+ padding, sizeof(padding)))
+ goto end;
+ }
+
+ if (jobs_rx != num_jobs) {
+ printf("Expected %d jobs, received %d\n", num_jobs, jobs_rx);
+ goto end;
+ }
+ ret = 0;
+
+ end:
+ /* empty the manager before next tests */
+ while ((job = IMB_FLUSH_JOB(mb_mgr)) != NULL)
+ ;
+
+ for (i = 0; i < num_jobs; i++) {
+ if (auths[i] != NULL)
+ free(auths[i]);
+ }
+
+ end2:
+ if (auths != NULL)
+ free(auths);
+
+ return ret;
+}
+
+static int
+test_hmac_sha1_std_vectors(struct MB_MGR *mb_mgr, const int num_jobs)
+{
+ const int vectors_cnt =
+ sizeof(hmac_sha1_vectors) / sizeof(hmac_sha1_vectors[0]);
+ int vect;
+ int errors = 0;
+
+ printf("HMAC-SHA1 standard test vectors (N jobs = %d):\n", num_jobs);
+ for (vect = 1; vect <= vectors_cnt; vect++) {
+ const int idx = vect - 1;
+#ifdef DEBUG
+ printf("[%d/%d] RFC2202 Test Case %s key_len:%d data_len:%d "
+ "digest_len:%d\n",
+ vect, vectors_cnt,
+ hmac_sha1_vectors[idx].test_case,
+ (int) hmac_sha1_vectors[idx].key_len,
+ (int) hmac_sha1_vectors[idx].data_len,
+ (int) hmac_sha1_vectors[idx].digest_len);
+#else
+ printf(".");
+#endif
+
+ if (test_hmac_sha1(mb_mgr, &hmac_sha1_vectors[idx], num_jobs)) {
+ printf("error #%d\n", vect);
+ errors++;
+ }
+ }
+ printf("\n");
+ return errors;
+}
+
+int
+hmac_sha1_test(const enum arch_type arch,
+ struct MB_MGR *mb_mgr)
+{
+ int errors = 0;
+
+ (void) arch; /* unused */
+
+ errors += test_hmac_sha1_std_vectors(mb_mgr, 1);
+ errors += test_hmac_sha1_std_vectors(mb_mgr, 3);
+ errors += test_hmac_sha1_std_vectors(mb_mgr, 4);
+ errors += test_hmac_sha1_std_vectors(mb_mgr, 5);
+ errors += test_hmac_sha1_std_vectors(mb_mgr, 7);
+ errors += test_hmac_sha1_std_vectors(mb_mgr, 8);
+ errors += test_hmac_sha1_std_vectors(mb_mgr, 9);
+ errors += test_hmac_sha1_std_vectors(mb_mgr, 15);
+ errors += test_hmac_sha1_std_vectors(mb_mgr, 16);
+ errors += test_hmac_sha1_std_vectors(mb_mgr, 17);
+
+ if (0 == errors)
+ printf("...Pass\n");
+ else
+ printf("...Fail\n");
+
+ return errors;
+}
diff --git a/src/spdk/intel-ipsec-mb/LibTestApp/hmac_sha256_sha512_test.c b/src/spdk/intel-ipsec-mb/LibTestApp/hmac_sha256_sha512_test.c
new file mode 100644
index 000000000..578e2aaed
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/LibTestApp/hmac_sha256_sha512_test.c
@@ -0,0 +1,1116 @@
+/*****************************************************************************
+ Copyright (c) 2018, Intel Corporation
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*****************************************************************************/
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <intel-ipsec-mb.h>
+#include "gcm_ctr_vectors_test.h"
+#include "utils.h"
+
+int hmac_sha256_sha512_test(const enum arch_type arch, struct MB_MGR *mb_mgr);
+
+/*
+ * Test vectors from https://tools.ietf.org/html/rfc4231
+ */
+
+/*
+ * 4.2. Test Case 1
+ *
+ * Key = 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b
+ * 0b0b0b0b (20 bytes)
+ * Data = 4869205468657265 ("Hi There")
+ *
+ * HMAC-SHA-224 = 896fb1128abbdf196832107cd49df33f
+ * 47b4b1169912ba4f53684b22
+ * HMAC-SHA-256 = b0344c61d8db38535ca8afceaf0bf12b
+ * 881dc200c9833da726e9376c2e32cff7
+ * HMAC-SHA-384 = afd03944d84895626b0825f4ab46907f
+ * 15f9dadbe4101ec682aa034c7cebc59c
+ * faea9ea9076ede7f4af152e8b2fa9cb6
+ * HMAC-SHA-512 = 87aa7cdea5ef619d4ff0b4241a1d6cb0
+ * 2379f4e2ce4ec2787ad0b30545e17cde
+ * daa833b7d6b8a702038b274eaea3f4e4
+ * be9d914eeb61f1702e696c203a126854
+ */
+static const uint8_t key_1[] = {
+ 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
+ 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
+ 0x0b, 0x0b, 0x0b, 0x0b
+};
+static const uint8_t data_1[] = {
+ 0x48, 0x69, 0x20, 0x54, 0x68, 0x65, 0x72, 0x65
+};
+static const uint8_t hmac_sha_224_1[] = {
+ 0x89, 0x6f, 0xb1, 0x12, 0x8a, 0xbb, 0xdf, 0x19,
+ 0x68, 0x32, 0x10, 0x7c, 0xd4, 0x9d, 0xf3, 0x3f,
+ 0x47, 0xb4, 0xb1, 0x16, 0x99, 0x12, 0xba, 0x4f,
+ 0x53, 0x68, 0x4b, 0x22
+};
+static const uint8_t hmac_sha_256_1[] = {
+ 0xb0, 0x34, 0x4c, 0x61, 0xd8, 0xdb, 0x38, 0x53,
+ 0x5c, 0xa8, 0xaf, 0xce, 0xaf, 0x0b, 0xf1, 0x2b,
+ 0x88, 0x1d, 0xc2, 0x00, 0xc9, 0x83, 0x3d, 0xa7,
+ 0x26, 0xe9, 0x37, 0x6c, 0x2e, 0x32, 0xcf, 0xf7
+};
+static const uint8_t hmac_sha_384_1[] = {
+ 0xaf, 0xd0, 0x39, 0x44, 0xd8, 0x48, 0x95, 0x62,
+ 0x6b, 0x08, 0x25, 0xf4, 0xab, 0x46, 0x90, 0x7f,
+ 0x15, 0xf9, 0xda, 0xdb, 0xe4, 0x10, 0x1e, 0xc6,
+ 0x82, 0xaa, 0x03, 0x4c, 0x7c, 0xeb, 0xc5, 0x9c,
+ 0xfa, 0xea, 0x9e, 0xa9, 0x07, 0x6e, 0xde, 0x7f,
+ 0x4a, 0xf1, 0x52, 0xe8, 0xb2, 0xfa, 0x9c, 0xb6
+};
+static const uint8_t hmac_sha_512_1[] = {
+ 0x87, 0xaa, 0x7c, 0xde, 0xa5, 0xef, 0x61, 0x9d,
+ 0x4f, 0xf0, 0xb4, 0x24, 0x1a, 0x1d, 0x6c, 0xb0,
+ 0x23, 0x79, 0xf4, 0xe2, 0xce, 0x4e, 0xc2, 0x78,
+ 0x7a, 0xd0, 0xb3, 0x05, 0x45, 0xe1, 0x7c, 0xde,
+ 0xda, 0xa8, 0x33, 0xb7, 0xd6, 0xb8, 0xa7, 0x02,
+ 0x03, 0x8b, 0x27, 0x4e, 0xae, 0xa3, 0xf4, 0xe4,
+ 0xbe, 0x9d, 0x91, 0x4e, 0xeb, 0x61, 0xf1, 0x70,
+ 0x2e, 0x69, 0x6c, 0x20, 0x3a, 0x12, 0x68, 0x54
+};
+
+/*
+ * 4.3. Test Case 2
+ *
+ * Test with a key shorter than the length of the HMAC output.
+ *
+ * Key = 4a656665 ("Jefe")
+ * Data = 7768617420646f2079612077616e7420 ("what do ya want ")
+ * 666f72206e6f7468696e673f ("for nothing?")
+ *
+ * HMAC-SHA-224 = a30e01098bc6dbbf45690f3a7e9e6d0f
+ * 8bbea2a39e6148008fd05e44
+ * HMAC-SHA-256 = 5bdcc146bf60754e6a042426089575c7
+ * 5a003f089d2739839dec58b964ec3843
+ * HMAC-SHA-384 = af45d2e376484031617f78d2b58a6b1b
+ * 9c7ef464f5a01b47e42ec3736322445e
+ * 8e2240ca5e69e2c78b3239ecfab21649
+ * HMAC-SHA-512 = 164b7a7bfcf819e2e395fbe73b56e0a3
+ * 87bd64222e831fd610270cd7ea250554
+ * 9758bf75c05a994a6d034f65f8f0e6fd
+ * caeab1a34d4a6b4b636e070a38bce737
+ */
+static const uint8_t key_2[] = {
+ 0x4a, 0x65, 0x66, 0x65
+};
+static const uint8_t data_2[] = {
+ 0x77, 0x68, 0x61, 0x74, 0x20, 0x64, 0x6f, 0x20,
+ 0x79, 0x61, 0x20, 0x77, 0x61, 0x6e, 0x74, 0x20,
+ 0x66, 0x6f, 0x72, 0x20, 0x6e, 0x6f, 0x74, 0x68,
+ 0x69, 0x6e, 0x67, 0x3f
+};
+static const uint8_t hmac_sha_224_2[] = {
+ 0xa3, 0x0e, 0x01, 0x09, 0x8b, 0xc6, 0xdb, 0xbf,
+ 0x45, 0x69, 0x0f, 0x3a, 0x7e, 0x9e, 0x6d, 0x0f,
+ 0x8b, 0xbe, 0xa2, 0xa3, 0x9e, 0x61, 0x48, 0x00,
+ 0x8f, 0xd0, 0x5e, 0x44
+};
+static const uint8_t hmac_sha_256_2[] = {
+ 0x5b, 0xdc, 0xc1, 0x46, 0xbf, 0x60, 0x75, 0x4e,
+ 0x6a, 0x04, 0x24, 0x26, 0x08, 0x95, 0x75, 0xc7,
+ 0x5a, 0x00, 0x3f, 0x08, 0x9d, 0x27, 0x39, 0x83,
+ 0x9d, 0xec, 0x58, 0xb9, 0x64, 0xec, 0x38, 0x43
+};
+static const uint8_t hmac_sha_384_2[] = {
+ 0xaf, 0x45, 0xd2, 0xe3, 0x76, 0x48, 0x40, 0x31,
+ 0x61, 0x7f, 0x78, 0xd2, 0xb5, 0x8a, 0x6b, 0x1b,
+ 0x9c, 0x7e, 0xf4, 0x64, 0xf5, 0xa0, 0x1b, 0x47,
+ 0xe4, 0x2e, 0xc3, 0x73, 0x63, 0x22, 0x44, 0x5e,
+ 0x8e, 0x22, 0x40, 0xca, 0x5e, 0x69, 0xe2, 0xc7,
+ 0x8b, 0x32, 0x39, 0xec, 0xfa, 0xb2, 0x16, 0x49
+};
+static const uint8_t hmac_sha_512_2[] = {
+ 0x16, 0x4b, 0x7a, 0x7b, 0xfc, 0xf8, 0x19, 0xe2,
+ 0xe3, 0x95, 0xfb, 0xe7, 0x3b, 0x56, 0xe0, 0xa3,
+ 0x87, 0xbd, 0x64, 0x22, 0x2e, 0x83, 0x1f, 0xd6,
+ 0x10, 0x27, 0x0c, 0xd7, 0xea, 0x25, 0x05, 0x54,
+ 0x97, 0x58, 0xbf, 0x75, 0xc0, 0x5a, 0x99, 0x4a,
+ 0x6d, 0x03, 0x4f, 0x65, 0xf8, 0xf0, 0xe6, 0xfd,
+ 0xca, 0xea, 0xb1, 0xa3, 0x4d, 0x4a, 0x6b, 0x4b,
+ 0x63, 0x6e, 0x07, 0x0a, 0x38, 0xbc, 0xe7, 0x37
+};
+
+/*
+ * 4.4. Test Case 3
+ *
+ * Test with a combined length of key and data that is larger than 64
+ * bytes (= block-size of SHA-224 and SHA-256).
+ *
+ * Key aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+ * aaaaaaaa (20 bytes)
+ * Data = dddddddddddddddddddddddddddddddd
+ * dddddddddddddddddddddddddddddddd
+ * dddddddddddddddddddddddddddddddd
+ * dddd (50 bytes)
+ *
+ * HMAC-SHA-224 = 7fb3cb3588c6c1f6ffa9694d7d6ad264
+ * 9365b0c1f65d69d1ec8333ea
+ * HMAC-SHA-256 = 773ea91e36800e46854db8ebd09181a7
+ * 2959098b3ef8c122d9635514ced565fe
+ * HMAC-SHA-384 = 88062608d3e6ad8a0aa2ace014c8a86f
+ * 0aa635d947ac9febe83ef4e55966144b
+ * 2a5ab39dc13814b94e3ab6e101a34f27
+ * HMAC-SHA-512 = fa73b0089d56a284efb0f0756c890be9
+ * b1b5dbdd8ee81a3655f83e33b2279d39
+ * bf3e848279a722c806b485a47e67c807
+ * b946a337bee8942674278859e13292fb
+ */
+static const uint8_t key_3[] = {
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa
+};
+static const uint8_t data_3[] = {
+ 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+ 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+ 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+ 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+ 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+ 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+ 0xdd, 0xdd
+};
+static const uint8_t hmac_sha_224_3[] = {
+ 0x7f, 0xb3, 0xcb, 0x35, 0x88, 0xc6, 0xc1, 0xf6,
+ 0xff, 0xa9, 0x69, 0x4d, 0x7d, 0x6a, 0xd2, 0x64,
+ 0x93, 0x65, 0xb0, 0xc1, 0xf6, 0x5d, 0x69, 0xd1,
+ 0xec, 0x83, 0x33, 0xea
+};
+static const uint8_t hmac_sha_256_3[] = {
+ 0x77, 0x3e, 0xa9, 0x1e, 0x36, 0x80, 0x0e, 0x46,
+ 0x85, 0x4d, 0xb8, 0xeb, 0xd0, 0x91, 0x81, 0xa7,
+ 0x29, 0x59, 0x09, 0x8b, 0x3e, 0xf8, 0xc1, 0x22,
+ 0xd9, 0x63, 0x55, 0x14, 0xce, 0xd5, 0x65, 0xfe
+};
+static const uint8_t hmac_sha_384_3[] = {
+ 0x88, 0x06, 0x26, 0x08, 0xd3, 0xe6, 0xad, 0x8a,
+ 0x0a, 0xa2, 0xac, 0xe0, 0x14, 0xc8, 0xa8, 0x6f,
+ 0x0a, 0xa6, 0x35, 0xd9, 0x47, 0xac, 0x9f, 0xeb,
+ 0xe8, 0x3e, 0xf4, 0xe5, 0x59, 0x66, 0x14, 0x4b,
+ 0x2a, 0x5a, 0xb3, 0x9d, 0xc1, 0x38, 0x14, 0xb9,
+ 0x4e, 0x3a, 0xb6, 0xe1, 0x01, 0xa3, 0x4f, 0x27
+};
+static const uint8_t hmac_sha_512_3[] = {
+ 0xfa, 0x73, 0xb0, 0x08, 0x9d, 0x56, 0xa2, 0x84,
+ 0xef, 0xb0, 0xf0, 0x75, 0x6c, 0x89, 0x0b, 0xe9,
+ 0xb1, 0xb5, 0xdb, 0xdd, 0x8e, 0xe8, 0x1a, 0x36,
+ 0x55, 0xf8, 0x3e, 0x33, 0xb2, 0x27, 0x9d, 0x39,
+ 0xbf, 0x3e, 0x84, 0x82, 0x79, 0xa7, 0x22, 0xc8,
+ 0x06, 0xb4, 0x85, 0xa4, 0x7e, 0x67, 0xc8, 0x07,
+ 0xb9, 0x46, 0xa3, 0x37, 0xbe, 0xe8, 0x94, 0x26,
+ 0x74, 0x27, 0x88, 0x59, 0xe1, 0x32, 0x92, 0xfb
+};
+
+/*
+ * 4.5. Test Case 4
+ *
+ * Test with a combined length of key and data that is larger than 64
+ * bytes (= block-size of SHA-224 and SHA-256).
+ *
+ * Key = 0102030405060708090a0b0c0d0e0f10
+ * 111213141516171819 (25 bytes)
+ * Data = cdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcd
+ * cdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcd
+ * cdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcd
+ * cdcd (50 bytes)
+ *
+ * HMAC-SHA-224 = 6c11506874013cac6a2abc1bb382627c
+ * ec6a90d86efc012de7afec5a
+ * HMAC-SHA-256 = 82558a389a443c0ea4cc819899f2083a
+ * 85f0faa3e578f8077a2e3ff46729665b
+ * HMAC-SHA-384 = 3e8a69b7783c25851933ab6290af6ca7
+ * 7a9981480850009cc5577c6e1f573b4e
+ * 6801dd23c4a7d679ccf8a386c674cffb
+ * HMAC-SHA-512 = b0ba465637458c6990e5a8c5f61d4af7
+ * e576d97ff94b872de76f8050361ee3db
+ * a91ca5c11aa25eb4d679275cc5788063
+ * a5f19741120c4f2de2adebeb10a298dd
+ */
+static const uint8_t key_4[] = {
+ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+ 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10,
+ 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
+ 0x19
+};
+static const uint8_t data_4[] = {
+ 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd,
+ 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd,
+ 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd,
+ 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd,
+ 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd,
+ 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd,
+ 0xcd, 0xcd
+};
+static const uint8_t hmac_sha_224_4[] = {
+ 0x6c, 0x11, 0x50, 0x68, 0x74, 0x01, 0x3c, 0xac,
+ 0x6a, 0x2a, 0xbc, 0x1b, 0xb3, 0x82, 0x62, 0x7c,
+ 0xec, 0x6a, 0x90, 0xd8, 0x6e, 0xfc, 0x01, 0x2d,
+ 0xe7, 0xaf, 0xec, 0x5a
+};
+static const uint8_t hmac_sha_256_4[] = {
+ 0x82, 0x55, 0x8a, 0x38, 0x9a, 0x44, 0x3c, 0x0e,
+ 0xa4, 0xcc, 0x81, 0x98, 0x99, 0xf2, 0x08, 0x3a,
+ 0x85, 0xf0, 0xfa, 0xa3, 0xe5, 0x78, 0xf8, 0x07,
+ 0x7a, 0x2e, 0x3f, 0xf4, 0x67, 0x29, 0x66, 0x5b
+};
+static const uint8_t hmac_sha_384_4[] = {
+ 0x3e, 0x8a, 0x69, 0xb7, 0x78, 0x3c, 0x25, 0x85,
+ 0x19, 0x33, 0xab, 0x62, 0x90, 0xaf, 0x6c, 0xa7,
+ 0x7a, 0x99, 0x81, 0x48, 0x08, 0x50, 0x00, 0x9c,
+ 0xc5, 0x57, 0x7c, 0x6e, 0x1f, 0x57, 0x3b, 0x4e,
+ 0x68, 0x01, 0xdd, 0x23, 0xc4, 0xa7, 0xd6, 0x79,
+ 0xcc, 0xf8, 0xa3, 0x86, 0xc6, 0x74, 0xcf, 0xfb
+};
+static const uint8_t hmac_sha_512_4[] = {
+ 0xb0, 0xba, 0x46, 0x56, 0x37, 0x45, 0x8c, 0x69,
+ 0x90, 0xe5, 0xa8, 0xc5, 0xf6, 0x1d, 0x4a, 0xf7,
+ 0xe5, 0x76, 0xd9, 0x7f, 0xf9, 0x4b, 0x87, 0x2d,
+ 0xe7, 0x6f, 0x80, 0x50, 0x36, 0x1e, 0xe3, 0xdb,
+ 0xa9, 0x1c, 0xa5, 0xc1, 0x1a, 0xa2, 0x5e, 0xb4,
+ 0xd6, 0x79, 0x27, 0x5c, 0xc5, 0x78, 0x80, 0x63,
+ 0xa5, 0xf1, 0x97, 0x41, 0x12, 0x0c, 0x4f, 0x2d,
+ 0xe2, 0xad, 0xeb, 0xeb, 0x10, 0xa2, 0x98, 0xdd
+};
+
+/*
+ *
+ * 4.6. Test Case 5
+ *
+ * Test with a truncation of output to 128 bits.
+ *
+ * Key = 0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c
+ * 0c0c0c0c (20 bytes)
+ * Data = 546573742057697468205472756e6361 ("Test With Trunca")
+ * 74696f6e ("tion")
+ *
+ * HMAC-SHA-224 = 0e2aea68a90c8d37c988bcdb9fca6fa8
+ * HMAC-SHA-256 = a3b6167473100ee06e0c796c2955552b
+ * HMAC-SHA-384 = 3abf34c3503b2a23a46efc619baef897
+ * HMAC-SHA-512 = 415fad6271580a531d4179bc891d87a6
+ */
+/* static const uint8_t key_5[] = { */
+/* 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, */
+/* 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, */
+/* 0x0c, 0x0c, 0x0c, 0x0c */
+/* }; */
+/* static const uint8_t data_5[] = { */
+/* 0x54, 0x65, 0x73, 0x74, 0x20, 0x57, 0x69, 0x74, */
+/* 0x68, 0x20, 0x54, 0x72, 0x75, 0x6e, 0x63, 0x61, */
+/* 0x74, 0x69, 0x6f, 0x6e */
+/* }; */
+/* static const uint8_t hmac_sha_224_5[] = { */
+/* 0x0e, 0x2a, 0xea, 0x68, 0xa9, 0x0c, 0x8d, 0x37, */
+/* 0xc9, 0x88, 0xbc, 0xdb, 0x9f, 0xca, 0x6f, 0xa8 */
+/* }; */
+/* static const uint8_t hmac_sha_256_5[] = { */
+/* 0xa3, 0xb6, 0x16, 0x74, 0x73, 0x10, 0x0e, 0xe0, */
+/* 0x6e, 0x0c, 0x79, 0x6c, 0x29, 0x55, 0x55, 0x2b */
+/* }; */
+/* static const uint8_t hmac_sha_384_5[] = { */
+/* 0x3a, 0xbf, 0x34, 0xc3, 0x50, 0x3b, 0x2a, 0x23, */
+/* 0xa4, 0x6e, 0xfc, 0x61, 0x9b, 0xae, 0xf8, 0x97 */
+/* }; */
+/* static const uint8_t hmac_sha_512_5[] = { */
+/* 0x41, 0x5f, 0xad, 0x62, 0x71, 0x58, 0x0a, 0x53, */
+/* 0x1d, 0x41, 0x79, 0xbc, 0x89, 0x1d, 0x87, 0xa6 */
+/* }; */
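+
+/*
+ * The 128-bit truncation vectors for test case 5 are kept commented out
+ * above; HMAC_SHA256_SHA512_TEST_VEC(5) is likewise commented out in the
+ * vector table further below.
+ */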
+
+/*
+ * 4.7. Test Case 6
+ *
+ * Test with a key larger than 128 bytes (= block-size of SHA-384 and
+ * SHA-512).
+ *
+ * Key = aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+ * aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+ * aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+ * aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+ * aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+ * aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+ * aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+ * aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+ * aaaaaa (131 bytes)
+ * Data = 54657374205573696e67204c61726765 ("Test Using Large")
+ * 72205468616e20426c6f636b2d53697a ("r Than Block-Siz")
+ * 65204b6579202d2048617368204b6579 ("e Key - Hash Key")
+ * 204669727374 (" First")
+ *
+ * HMAC-SHA-224 = 95e9a0db962095adaebe9b2d6f0dbce2
+ * d499f112f2d2b7273fa6870e
+ * HMAC-SHA-256 = 60e431591ee0b67f0d8a26aacbf5b77f
+ * 8e0bc6213728c5140546040f0ee37f54
+ * HMAC-SHA-384 = 4ece084485813e9088d2c63a041bc5b4
+ * 4f9ef1012a2b588f3cd11f05033ac4c6
+ * 0c2ef6ab4030fe8296248df163f44952
+ * HMAC-SHA-512 = 80b24263c7c1a3ebb71493c1dd7be8b4
+ * 9b46d1f41b4aeec1121b013783f8f352
+ * 6b56d037e05f2598bd0fd2215d6a1e52
+ * 95e64f73f63f0aec8b915a985d786598
+ */
+static const uint8_t key_6[] = {
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa
+};
+static const uint8_t data_6[] = {
+ 0x54, 0x65, 0x73, 0x74, 0x20, 0x55, 0x73, 0x69,
+ 0x6e, 0x67, 0x20, 0x4c, 0x61, 0x72, 0x67, 0x65,
+ 0x72, 0x20, 0x54, 0x68, 0x61, 0x6e, 0x20, 0x42,
+ 0x6c, 0x6f, 0x63, 0x6b, 0x2d, 0x53, 0x69, 0x7a,
+ 0x65, 0x20, 0x4b, 0x65, 0x79, 0x20, 0x2d, 0x20,
+ 0x48, 0x61, 0x73, 0x68, 0x20, 0x4b, 0x65, 0x79,
+ 0x20, 0x46, 0x69, 0x72, 0x73, 0x74
+};
+static const uint8_t hmac_sha_224_6[] = {
+ 0x95, 0xe9, 0xa0, 0xdb, 0x96, 0x20, 0x95, 0xad,
+ 0xae, 0xbe, 0x9b, 0x2d, 0x6f, 0x0d, 0xbc, 0xe2,
+ 0xd4, 0x99, 0xf1, 0x12, 0xf2, 0xd2, 0xb7, 0x27,
+ 0x3f, 0xa6, 0x87, 0x0e
+};
+static const uint8_t hmac_sha_256_6[] = {
+ 0x60, 0xe4, 0x31, 0x59, 0x1e, 0xe0, 0xb6, 0x7f,
+ 0x0d, 0x8a, 0x26, 0xaa, 0xcb, 0xf5, 0xb7, 0x7f,
+ 0x8e, 0x0b, 0xc6, 0x21, 0x37, 0x28, 0xc5, 0x14,
+ 0x05, 0x46, 0x04, 0x0f, 0x0e, 0xe3, 0x7f, 0x54
+};
+static const uint8_t hmac_sha_384_6[] = {
+ 0x4e, 0xce, 0x08, 0x44, 0x85, 0x81, 0x3e, 0x90,
+ 0x88, 0xd2, 0xc6, 0x3a, 0x04, 0x1b, 0xc5, 0xb4,
+ 0x4f, 0x9e, 0xf1, 0x01, 0x2a, 0x2b, 0x58, 0x8f,
+ 0x3c, 0xd1, 0x1f, 0x05, 0x03, 0x3a, 0xc4, 0xc6,
+ 0x0c, 0x2e, 0xf6, 0xab, 0x40, 0x30, 0xfe, 0x82,
+ 0x96, 0x24, 0x8d, 0xf1, 0x63, 0xf4, 0x49, 0x52
+};
+static const uint8_t hmac_sha_512_6[] = {
+ 0x80, 0xb2, 0x42, 0x63, 0xc7, 0xc1, 0xa3, 0xeb,
+ 0xb7, 0x14, 0x93, 0xc1, 0xdd, 0x7b, 0xe8, 0xb4,
+ 0x9b, 0x46, 0xd1, 0xf4, 0x1b, 0x4a, 0xee, 0xc1,
+ 0x12, 0x1b, 0x01, 0x37, 0x83, 0xf8, 0xf3, 0x52,
+ 0x6b, 0x56, 0xd0, 0x37, 0xe0, 0x5f, 0x25, 0x98,
+ 0xbd, 0x0f, 0xd2, 0x21, 0x5d, 0x6a, 0x1e, 0x52,
+ 0x95, 0xe6, 0x4f, 0x73, 0xf6, 0x3f, 0x0a, 0xec,
+ 0x8b, 0x91, 0x5a, 0x98, 0x5d, 0x78, 0x65, 0x98
+};
+
+/*
+ * 4.8. Test Case 7
+ *
+ * Test with a key and data that is larger than 128 bytes (= block-size
+ * of SHA-384 and SHA-512).
+ *
+ * Key = aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+ * aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+ * aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+ * aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+ * aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+ * aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+ * aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+ * aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+ * aaaaaa (131 bytes)
+ * Data = 54686973206973206120746573742075 ("This is a test u")
+ * 73696e672061206c6172676572207468 ("sing a larger th")
+ * 616e20626c6f636b2d73697a65206b65 ("an block-size ke")
+ * 7920616e642061206c61726765722074 ("y and a larger t")
+ * 68616e20626c6f636b2d73697a652064 ("han block-size d")
+ * 6174612e20546865206b6579206e6565 ("ata. The key nee")
+ * 647320746f2062652068617368656420 ("ds to be hashed ")
+ * 6265666f7265206265696e6720757365 ("before being use")
+ * 642062792074686520484d414320616c ("d by the HMAC al")
+ * 676f726974686d2e ("gorithm.")
+ *
+ * HMAC-SHA-224 = 3a854166ac5d9f023f54d517d0b39dbd
+ * 946770db9c2b95c9f6f565d1
+ * HMAC-SHA-256 = 9b09ffa71b942fcb27635fbcd5b0e944
+ * bfdc63644f0713938a7f51535c3a35e2
+ * HMAC-SHA-384 = 6617178e941f020d351e2f254e8fd32c
+ * 602420feb0b8fb9adccebb82461e99c5
+ * a678cc31e799176d3860e6110c46523e
+ * HMAC-SHA-512 = e37b6a775dc87dbaa4dfa9f96e5e3ffd
+ * debd71f8867289865df5a32d20cdc944
+ * b6022cac3c4982b10d5eeb55c3e4de15
+ * 134676fb6de0446065c97440fa8c6a58
+ */
+static const uint8_t key_7[] = {
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa
+};
+static const uint8_t data_7[] = {
+ 0x54, 0x68, 0x69, 0x73, 0x20, 0x69, 0x73, 0x20,
+ 0x61, 0x20, 0x74, 0x65, 0x73, 0x74, 0x20, 0x75,
+ 0x73, 0x69, 0x6e, 0x67, 0x20, 0x61, 0x20, 0x6c,
+ 0x61, 0x72, 0x67, 0x65, 0x72, 0x20, 0x74, 0x68,
+ 0x61, 0x6e, 0x20, 0x62, 0x6c, 0x6f, 0x63, 0x6b,
+ 0x2d, 0x73, 0x69, 0x7a, 0x65, 0x20, 0x6b, 0x65,
+ 0x79, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x61, 0x20,
+ 0x6c, 0x61, 0x72, 0x67, 0x65, 0x72, 0x20, 0x74,
+ 0x68, 0x61, 0x6e, 0x20, 0x62, 0x6c, 0x6f, 0x63,
+ 0x6b, 0x2d, 0x73, 0x69, 0x7a, 0x65, 0x20, 0x64,
+ 0x61, 0x74, 0x61, 0x2e, 0x20, 0x54, 0x68, 0x65,
+ 0x20, 0x6b, 0x65, 0x79, 0x20, 0x6e, 0x65, 0x65,
+ 0x64, 0x73, 0x20, 0x74, 0x6f, 0x20, 0x62, 0x65,
+ 0x20, 0x68, 0x61, 0x73, 0x68, 0x65, 0x64, 0x20,
+ 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x20, 0x62,
+ 0x65, 0x69, 0x6e, 0x67, 0x20, 0x75, 0x73, 0x65,
+ 0x64, 0x20, 0x62, 0x79, 0x20, 0x74, 0x68, 0x65,
+ 0x20, 0x48, 0x4d, 0x41, 0x43, 0x20, 0x61, 0x6c,
+ 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x2e
+};
+static const uint8_t hmac_sha_224_7[] = {
+ 0x3a, 0x85, 0x41, 0x66, 0xac, 0x5d, 0x9f, 0x02,
+ 0x3f, 0x54, 0xd5, 0x17, 0xd0, 0xb3, 0x9d, 0xbd,
+ 0x94, 0x67, 0x70, 0xdb, 0x9c, 0x2b, 0x95, 0xc9,
+ 0xf6, 0xf5, 0x65, 0xd1
+};
+static const uint8_t hmac_sha_256_7[] = {
+ 0x9b, 0x09, 0xff, 0xa7, 0x1b, 0x94, 0x2f, 0xcb,
+ 0x27, 0x63, 0x5f, 0xbc, 0xd5, 0xb0, 0xe9, 0x44,
+ 0xbf, 0xdc, 0x63, 0x64, 0x4f, 0x07, 0x13, 0x93,
+ 0x8a, 0x7f, 0x51, 0x53, 0x5c, 0x3a, 0x35, 0xe2
+};
+static const uint8_t hmac_sha_384_7[] = {
+ 0x66, 0x17, 0x17, 0x8e, 0x94, 0x1f, 0x02, 0x0d,
+ 0x35, 0x1e, 0x2f, 0x25, 0x4e, 0x8f, 0xd3, 0x2c,
+ 0x60, 0x24, 0x20, 0xfe, 0xb0, 0xb8, 0xfb, 0x9a,
+ 0xdc, 0xce, 0xbb, 0x82, 0x46, 0x1e, 0x99, 0xc5,
+ 0xa6, 0x78, 0xcc, 0x31, 0xe7, 0x99, 0x17, 0x6d,
+ 0x38, 0x60, 0xe6, 0x11, 0x0c, 0x46, 0x52, 0x3e
+};
+static const uint8_t hmac_sha_512_7[] = {
+ 0xe3, 0x7b, 0x6a, 0x77, 0x5d, 0xc8, 0x7d, 0xba,
+ 0xa4, 0xdf, 0xa9, 0xf9, 0x6e, 0x5e, 0x3f, 0xfd,
+ 0xde, 0xbd, 0x71, 0xf8, 0x86, 0x72, 0x89, 0x86,
+ 0x5d, 0xf5, 0xa3, 0x2d, 0x20, 0xcd, 0xc9, 0x44,
+ 0xb6, 0x02, 0x2c, 0xac, 0x3c, 0x49, 0x82, 0xb1,
+ 0x0d, 0x5e, 0xeb, 0x55, 0xc3, 0xe4, 0xde, 0x15,
+ 0x13, 0x46, 0x76, 0xfb, 0x6d, 0xe0, 0x44, 0x60,
+ 0x65, 0xc9, 0x74, 0x40, 0xfa, 0x8c, 0x6a, 0x58
+};
+
+/*
+ * Test Case 8
+ *
+ * Test vector from https://csrc.nist.gov/csrc/media/projects/
+ * cryptographic-standards-and-guidelines/documents/examples/hmac_sha224.pdf
+ */
+static const uint8_t key_8[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f
+};
+static const uint8_t data_8[] = {
+ 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x20, 0x6d,
+ 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x20, 0x66,
+ 0x6f, 0x72, 0x20, 0x6b, 0x65, 0x79, 0x6c, 0x65,
+ 0x6e, 0x3d, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x6c,
+ 0x65, 0x6e
+};
+static const uint8_t hmac_sha_224_8[] = {
+ 0xc7, 0x40, 0x5e, 0x3a, 0xe0, 0x58, 0xe8, 0xcd,
+ 0x30, 0xb0, 0x8b, 0x41, 0x40, 0x24, 0x85, 0x81,
+ 0xed, 0x17, 0x4c, 0xb3, 0x4e, 0x12, 0x24, 0xbc,
+ 0xc1, 0xef, 0xc8, 0x1b
+};
+
+/*
+ * Test Case 9
+ *
+ * Test vector from https://csrc.nist.gov/csrc/media/projects/
+ * cryptographic-standards-and-guidelines/documents/examples/hmac_sha256.pdf
+ */
+static const uint8_t key_9[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f
+};
+static const uint8_t data_9[] = {
+ 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x20, 0x6d,
+ 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x20, 0x66,
+ 0x6f, 0x72, 0x20, 0x6b, 0x65, 0x79, 0x6c, 0x65,
+ 0x6e, 0x3d, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x6c,
+ 0x65, 0x6e
+};
+static const uint8_t hmac_sha_256_9[] = {
+ 0x8b, 0xb9, 0xa1, 0xdb, 0x98, 0x06, 0xf2, 0x0d,
+ 0xf7, 0xf7, 0x7b, 0x82, 0x13, 0x8c, 0x79, 0x14,
+ 0xd1, 0x74, 0xd5, 0x9e, 0x13, 0xdc, 0x4d, 0x01,
+ 0x69, 0xc9, 0x05, 0x7b, 0x13, 0x3e, 0x1d, 0x62,
+};
+
+/*
+ * Test Case 10
+ *
+ * Test vector from https://csrc.nist.gov/csrc/media/projects/
+ * cryptographic-standards-and-guidelines/documents/examples/hmac_sha384.pdf
+ */
+static const uint8_t key_10[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
+ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
+ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
+ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
+ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f
+};
+static const uint8_t data_10[] = {
+ 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x20, 0x6d,
+ 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x20, 0x66,
+ 0x6f, 0x72, 0x20, 0x6b, 0x65, 0x79, 0x6c, 0x65,
+ 0x6e, 0x3d, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x6c,
+ 0x65, 0x6e
+};
+static const uint8_t hmac_sha_384_10[] = {
+ 0x63, 0xc5, 0xda, 0xa5, 0xe6, 0x51, 0x84, 0x7c,
+ 0xa8, 0x97, 0xc9, 0x58, 0x14, 0xab, 0x83, 0x0b,
+ 0xed, 0xed, 0xc7, 0xd2, 0x5e, 0x83, 0xee, 0xf9
+};
+
+/*
+ * Test Case 11
+ *
+ * Test vector from https://csrc.nist.gov/csrc/media/projects/
+ * cryptographic-standards-and-guidelines/documents/examples/hmac_sha512.pdf
+ */
+static const uint8_t key_11[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
+ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
+ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
+ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
+ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f
+};
+static const uint8_t data_11[] = {
+ 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x20, 0x6d,
+ 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x20, 0x66,
+ 0x6f, 0x72, 0x20, 0x6b, 0x65, 0x79, 0x6c, 0x65,
+ 0x6e, 0x3d, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x6c,
+ 0x65, 0x6e
+};
+static const uint8_t hmac_sha_512_11[] = {
+ 0xfc, 0x25, 0xe2, 0x40, 0x65, 0x8c, 0xa7, 0x85,
+ 0xb7, 0xa8, 0x11, 0xa8, 0xd3, 0xf7, 0xb4, 0xca,
+ 0x48, 0xcf, 0xa2, 0x6a, 0x8a, 0x36, 0x6b, 0xf2,
+ 0xcd, 0x1f, 0x83, 0x6b, 0x05, 0xfc, 0xb0, 0x24
+};
+
+#define HMAC_SHA256_SHA512_TEST_VEC(num) \
+ { num, \
+ key_##num, sizeof(key_##num), \
+ data_##num, sizeof(data_##num), \
+ hmac_sha_224_##num, sizeof(hmac_sha_224_##num), \
+ hmac_sha_256_##num, sizeof(hmac_sha_256_##num), \
+ hmac_sha_384_##num, sizeof(hmac_sha_384_##num), \
+ hmac_sha_512_##num, sizeof(hmac_sha_512_##num) }
+
+#define HMAC_SHA224_TEST_VEC(num) \
+ { num, \
+ key_##num, sizeof(key_##num), \
+ data_##num, sizeof(data_##num), \
+ hmac_sha_224_##num, sizeof(hmac_sha_224_##num), \
+ NULL, 0, \
+ NULL, 0, \
+ NULL, 0 }
+
+#define HMAC_SHA256_TEST_VEC(num) \
+ { num, \
+ key_##num, sizeof(key_##num), \
+ data_##num, sizeof(data_##num), \
+ NULL, 0, \
+ hmac_sha_256_##num, sizeof(hmac_sha_256_##num), \
+ NULL, 0, \
+ NULL, 0 }
+
+#define HMAC_SHA384_TEST_VEC(num) \
+ { num, \
+ key_##num, sizeof(key_##num), \
+ data_##num, sizeof(data_##num), \
+ NULL, 0, \
+ NULL, 0, \
+ hmac_sha_384_##num, sizeof(hmac_sha_384_##num), \
+ NULL, 0 }
+
+#define HMAC_SHA512_TEST_VEC(num) \
+ { num, \
+ key_##num, sizeof(key_##num), \
+ data_##num, sizeof(data_##num), \
+ NULL, 0, \
+ NULL, 0, \
+ NULL, 0, \
+ hmac_sha_512_##num, sizeof(hmac_sha_512_##num) }
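+
+/*
+ * The single-algorithm macros above leave the remaining digest slots as
+ * NULL with length 0; the test driver below skips a vector for a given
+ * HMAC-SHA variant when its expected digest pointer is NULL.
+ */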
+
+static const struct hmac_rfc4231_vector {
+ int test_case_num;
+ const uint8_t *key;
+ size_t key_len;
+ const uint8_t *data;
+ size_t data_len;
+ const uint8_t *hmac_sha224;
+ size_t hmac_sha224_len;
+ const uint8_t *hmac_sha256;
+ size_t hmac_sha256_len;
+ const uint8_t *hmac_sha384;
+ size_t hmac_sha384_len;
+ const uint8_t *hmac_sha512;
+ size_t hmac_sha512_len;
+} hmac_sha256_sha512_vectors[] = {
+ HMAC_SHA256_SHA512_TEST_VEC(1),
+ HMAC_SHA256_SHA512_TEST_VEC(2),
+ HMAC_SHA256_SHA512_TEST_VEC(3),
+ HMAC_SHA256_SHA512_TEST_VEC(4),
+ /* HMAC_SHA256_SHA512_TEST_VEC(5), */
+ HMAC_SHA256_SHA512_TEST_VEC(6),
+ HMAC_SHA256_SHA512_TEST_VEC(7),
+ HMAC_SHA224_TEST_VEC(8),
+ HMAC_SHA256_TEST_VEC(9),
+ HMAC_SHA384_TEST_VEC(10),
+ HMAC_SHA512_TEST_VEC(11),
+};
+
+static int
+hmac_shax_job_ok(const struct hmac_rfc4231_vector *vec,
+ const struct JOB_AES_HMAC *job,
+ const int sha_type,
+ const uint8_t *auth,
+ const uint8_t *padding,
+ const size_t sizeof_padding)
+{
+ const uint8_t *p_digest = NULL;
+ size_t digest_len = 0;
+
+ switch (sha_type) {
+ case 224:
+ p_digest = vec->hmac_sha224;
+ digest_len = vec->hmac_sha224_len;
+ break;
+ case 256:
+ p_digest = vec->hmac_sha256;
+ digest_len = vec->hmac_sha256_len;
+ break;
+ case 384:
+ p_digest = vec->hmac_sha384;
+ digest_len = vec->hmac_sha384_len;
+ break;
+ case 512:
+ p_digest = vec->hmac_sha512;
+ digest_len = vec->hmac_sha512_len;
+ break;
+ default:
+ printf("line:%d wrong SHA type 'SHA-%d' ", __LINE__, sha_type);
+ return 0;
+ break;
+ }
+
+ if (job->status != STS_COMPLETED) {
+ printf("line:%d job error status:%d ", __LINE__, job->status);
+ return 0;
+ }
+
+ /* hash checks */
+ if (memcmp(padding, &auth[sizeof_padding + digest_len],
+ sizeof_padding)) {
+ printf("hash overwrite tail\n");
+ hexdump(stderr, "Target",
+ &auth[sizeof_padding + digest_len],
+ sizeof_padding);
+ return 0;
+ }
+
+ if (memcmp(padding, &auth[0], sizeof_padding)) {
+ printf("hash overwrite head\n");
+ hexdump(stderr, "Target", &auth[0], sizeof_padding);
+ return 0;
+ }
+
+ if (memcmp(p_digest, &auth[sizeof_padding], digest_len)) {
+ printf("hash mismatched\n");
+ hexdump(stderr, "Received", &auth[sizeof_padding], digest_len);
+ hexdump(stderr, "Expected", p_digest, digest_len);
+ return 0;
+ }
+ return 1;
+}
+
+static int
+test_hmac_shax(struct MB_MGR *mb_mgr,
+ const struct hmac_rfc4231_vector *vec,
+ const int num_jobs,
+ const int sha_type)
+{
+ struct JOB_AES_HMAC *job;
+ uint8_t padding[16];
+ uint8_t **auths = malloc(num_jobs * sizeof(void *));
+ int i = 0, jobs_rx = 0, ret = -1;
+ uint8_t key[SHA_512_BLOCK_SIZE];
+ uint8_t buf[SHA_512_BLOCK_SIZE];
+ DECLARE_ALIGNED(uint8_t ipad_hash[SHA512_DIGEST_SIZE_IN_BYTES], 16);
+ DECLARE_ALIGNED(uint8_t opad_hash[SHA512_DIGEST_SIZE_IN_BYTES], 16);
+ int key_len = 0;
+ size_t digest_len = 0;
+ size_t block_size = 0;
+
+ if (auths == NULL) {
+ fprintf(stderr, "Can't allocate buffer memory\n");
+ goto end2;
+ }
+
+ switch (sha_type) {
+ case 224:
+ digest_len = vec->hmac_sha224_len;
+ block_size = SHA_256_BLOCK_SIZE;
+ break;
+ case 256:
+ digest_len = vec->hmac_sha256_len;
+ block_size = SHA_256_BLOCK_SIZE;
+ break;
+ case 384:
+ digest_len = vec->hmac_sha384_len;
+ block_size = SHA_384_BLOCK_SIZE;
+ break;
+ case 512:
+ digest_len = vec->hmac_sha512_len;
+ block_size = SHA_512_BLOCK_SIZE;
+ break;
+ default:
+ fprintf(stderr, "Wrong SHA type selection 'SHA-%d'!\n",
+ sha_type);
+ goto end2;
+ }
+
+ memset(padding, -1, sizeof(padding));
+ memset(auths, 0, num_jobs * sizeof(void *));
+
+ for (i = 0; i < num_jobs; i++) {
+ const size_t alloc_len =
+ digest_len + (sizeof(padding) * 2);
+
+ auths[i] = malloc(alloc_len);
+ if (auths[i] == NULL) {
+ fprintf(stderr, "Can't allocate buffer memory\n");
+ goto end;
+ }
+ memset(auths[i], -1, alloc_len);
+ }
+
+ /* prepare the key */
+ memset(key, 0, sizeof(key));
+ if (vec->key_len <= block_size) {
+ memcpy(key, vec->key, vec->key_len);
+ key_len = (int) vec->key_len;
+ } else {
+ switch (sha_type) {
+ case 224:
+ IMB_SHA224(mb_mgr, vec->key, vec->key_len, key);
+ key_len = SHA224_DIGEST_SIZE_IN_BYTES;
+ break;
+ case 256:
+ IMB_SHA256(mb_mgr, vec->key, vec->key_len, key);
+ key_len = SHA256_DIGEST_SIZE_IN_BYTES;
+ break;
+ case 384:
+ IMB_SHA384(mb_mgr, vec->key, vec->key_len, key);
+ key_len = SHA384_DIGEST_SIZE_IN_BYTES;
+ break;
+ case 512:
+ IMB_SHA512(mb_mgr, vec->key, vec->key_len, key);
+ key_len = SHA512_DIGEST_SIZE_IN_BYTES;
+ break;
+ default:
+ fprintf(stderr, "Wrong SHA type selection 'SHA-%d'!\n",
+ sha_type);
+ goto end;
+ }
+ }
+
+ /* compute ipad hash */
+ memset(buf, 0x36, sizeof(buf));
+ for (i = 0; i < key_len; i++)
+ buf[i] ^= key[i];
+
+ switch (sha_type) {
+ case 224:
+ IMB_SHA224_ONE_BLOCK(mb_mgr, buf, ipad_hash);
+ break;
+ case 256:
+ IMB_SHA256_ONE_BLOCK(mb_mgr, buf, ipad_hash);
+ break;
+ case 384:
+ IMB_SHA384_ONE_BLOCK(mb_mgr, buf, ipad_hash);
+ break;
+ case 512:
+ default:
+ IMB_SHA512_ONE_BLOCK(mb_mgr, buf, ipad_hash);
+ break;
+ }
+
+ /* compute opad hash */
+ memset(buf, 0x5c, sizeof(buf));
+ for (i = 0; i < key_len; i++)
+ buf[i] ^= key[i];
+
+ switch (sha_type) {
+ case 224:
+ IMB_SHA224_ONE_BLOCK(mb_mgr, buf, opad_hash);
+ break;
+ case 256:
+ IMB_SHA256_ONE_BLOCK(mb_mgr, buf, opad_hash);
+ break;
+ case 384:
+ IMB_SHA384_ONE_BLOCK(mb_mgr, buf, opad_hash);
+ break;
+ case 512:
+ default:
+ IMB_SHA512_ONE_BLOCK(mb_mgr, buf, opad_hash);
+ break;
+ }
+
+ /* empty the manager */
+ while ((job = IMB_FLUSH_JOB(mb_mgr)) != NULL)
+ ;
+
+ for (i = 0; i < num_jobs; i++) {
+ job = IMB_GET_NEXT_JOB(mb_mgr);
+ job->aes_enc_key_expanded = NULL;
+ job->aes_dec_key_expanded = NULL;
+ job->cipher_direction = ENCRYPT;
+ job->chain_order = HASH_CIPHER;
+ job->dst = NULL;
+ job->aes_key_len_in_bytes = 0;
+ job->auth_tag_output = auths[i] + sizeof(padding);
+ job->auth_tag_output_len_in_bytes = digest_len;
+ job->iv = NULL;
+ job->iv_len_in_bytes = 0;
+ job->src = vec->data;
+ job->cipher_start_src_offset_in_bytes = 0;
+ job->msg_len_to_cipher_in_bytes = 0;
+ job->hash_start_src_offset_in_bytes = 0;
+ job->msg_len_to_hash_in_bytes = vec->data_len;
+ job->u.HMAC._hashed_auth_key_xor_ipad = ipad_hash;
+ job->u.HMAC._hashed_auth_key_xor_opad = opad_hash;
+ job->cipher_mode = NULL_CIPHER;
+
+ switch (sha_type) {
+ case 224:
+ job->hash_alg = SHA_224;
+ break;
+ case 256:
+ job->hash_alg = SHA_256;
+ break;
+ case 384:
+ job->hash_alg = SHA_384;
+ break;
+ case 512:
+ default:
+ job->hash_alg = SHA_512;
+ break;
+ }
+
+ job->user_data = auths[i];
+
+ job = IMB_SUBMIT_JOB(mb_mgr);
+ if (job) {
+ jobs_rx++;
+ /*
+ * SHANI HMAC-SHA implementation can return a completed
+ * job after 2nd submission
+ */
+ if (num_jobs < 2) {
+ printf("%d Unexpected return from submit_job\n",
+ __LINE__);
+ goto end;
+ }
+ if (!hmac_shax_job_ok(vec, job, sha_type,
+ job->user_data,
+ padding, sizeof(padding)))
+ goto end;
+ }
+ }
+
+ while ((job = IMB_FLUSH_JOB(mb_mgr)) != NULL) {
+ jobs_rx++;
+ if (!hmac_shax_job_ok(vec, job, sha_type,
+ job->user_data,
+ padding, sizeof(padding)))
+ goto end;
+ }
+
+ if (jobs_rx != num_jobs) {
+ printf("Expected %d jobs, received %d\n", num_jobs, jobs_rx);
+ goto end;
+ }
+ ret = 0;
+
+ end:
+ for (i = 0; i < num_jobs; i++) {
+ if (auths[i] != NULL)
+ free(auths[i]);
+ }
+
+ end2:
+ if (auths != NULL)
+ free(auths);
+
+ return ret;
+}
+
+static int
+test_hmac_shax_std_vectors(struct MB_MGR *mb_mgr, const int sha_type,
+ const int num_jobs)
+{
+ const int vectors_cnt =
+ sizeof(hmac_sha256_sha512_vectors) /
+ sizeof(hmac_sha256_sha512_vectors[0]);
+ int vect;
+ int errors = 0;
+
+ printf("HMAC-SHA%d standard test vectors (N jobs = %d):\n",
+ sha_type, num_jobs);
+ for (vect = 1; vect <= vectors_cnt; vect++) {
+ const int idx = vect - 1;
+ const int flag = (sha_type == 224 &&
+ hmac_sha256_sha512_vectors[idx].hmac_sha224 == NULL) ||
+ (sha_type == 256 &&
+ hmac_sha256_sha512_vectors[idx].hmac_sha256 == NULL) ||
+ (sha_type == 384 &&
+ hmac_sha256_sha512_vectors[idx].hmac_sha384 == NULL) ||
+ (sha_type == 512 &&
+ hmac_sha256_sha512_vectors[idx].hmac_sha512 == NULL);
+#ifdef DEBUG
+ printf("[%d/%d] RFC4231 Test Case %d key_len:%d data_len:%d\n",
+ vect, vectors_cnt,
+ hmac_sha256_sha512_vectors[idx].test_case_num,
+ (int) hmac_sha256_sha512_vectors[idx].key_len,
+ (int) hmac_sha256_sha512_vectors[idx].data_len);
+#else
+ printf(".");
+#endif
+
+#ifdef DEBUG
+ if (flag)
+ printf("Skipped vector %d, N/A for HMAC-SHA%d\n",
+ vect, sha_type);
+#endif
+ if (flag)
+ continue;
+
+ if (test_hmac_shax(mb_mgr, &hmac_sha256_sha512_vectors[idx],
+ num_jobs, sha_type)) {
+ printf("error #%d\n", vect);
+ errors++;
+ }
+ }
+ printf("\n");
+ return errors;
+}
+
+int
+hmac_sha256_sha512_test(const enum arch_type arch,
+ struct MB_MGR *mb_mgr)
+{
+ const int sha_types_tab[] = {
+ 224, 256, 384, 512
+ };
+ const int num_jobs_tab[] = {
+ 1, 3, 4, 5, 7, 8, 9, 15, 16, 17
+ };
+ unsigned i, j;
+ int errors = 0;
+
+ (void) arch; /* unused */
+
+ for (i = 0; i < DIM(sha_types_tab); i++)
+ for (j = 0; j < DIM(num_jobs_tab); j++)
+ errors += test_hmac_shax_std_vectors(mb_mgr,
+ sha_types_tab[i],
+ num_jobs_tab[j]);
+ if (0 == errors)
+ printf("...Pass\n");
+ else
+ printf("...Fail\n");
+
+ return errors;
+}
diff --git a/src/spdk/intel-ipsec-mb/LibTestApp/ipsec_xvalid.c b/src/spdk/intel-ipsec-mb/LibTestApp/ipsec_xvalid.c
new file mode 100644
index 000000000..c1f0467a1
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/LibTestApp/ipsec_xvalid.c
@@ -0,0 +1,2055 @@
+/**********************************************************************
+ Copyright(c) 2019, Intel Corporation All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+**********************************************************************/
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <inttypes.h>
+#include <string.h>
+#include <errno.h>
+#include <malloc.h> /* memalign() or _aligned_malloc()/aligned_free() */
+#include "misc.h"
+
+#ifdef _WIN32
+#include <intrin.h>
+#define strdup _strdup
+#define __forceinline static __forceinline
+#define BSWAP64 _byteswap_uint64
+#else
+#include <x86intrin.h>
+#define __forceinline static inline __attribute__((always_inline))
+#define BSWAP64 __builtin_bswap64
+#endif
+
+#include <intel-ipsec-mb.h>
+
+/* maximum size of a test buffer */
+#define JOB_SIZE_TOP (16 * 1024)
+/* min size of a buffer when testing range of buffers */
+#define DEFAULT_JOB_SIZE_MIN 16
+/* max size of a buffer when testing range of buffers */
+#define DEFAULT_JOB_SIZE_MAX (2 * 1024)
+/* number of bytes to increase buffer size when testing range of buffers */
+#define DEFAULT_JOB_SIZE_STEP 16
+
+#define DEFAULT_JOB_ITER 10
+
+#define AAD_SIZE 12
+#define MAX_IV_SIZE 16
+
+/* Maximum key and digest sizes, sized for SHA-512 (the largest supported) */
+#define MAX_KEY_SIZE SHA_512_BLOCK_SIZE
+#define MAX_DIGEST_SIZE SHA512_DIGEST_SIZE_IN_BYTES
+
+#define DIM(x) (sizeof(x)/sizeof(x[0]))
+
+#define SEED 0xdeadcafe
+#define PT_PATTERN 0x44444444
+#define KEY_PATTERN 0x66666666
+#define TAG_PATTERN 0x77777777
+#define STACK_DEPTH 8192
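+
+/*
+ * The fill patterns above are 4-byte repeating markers used for plaintext,
+ * key and tag data so that any copies left behind in memory can later be
+ * spotted by search_patterns().
+ */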
+
+enum arch_type_e {
+ ARCH_SSE = 0,
+ ARCH_AESNI_EMU,
+ ARCH_AVX,
+ ARCH_AVX2,
+ ARCH_AVX512,
+ NUM_ARCHS
+};
+
+/* Struct storing cipher parameters */
+struct params_s {
+ JOB_CIPHER_MODE cipher_mode; /* CBC, CNTR, DES, GCM etc. */
+ JOB_HASH_ALG hash_alg; /* SHA-1 or others... */
+ uint32_t key_size;
+ uint32_t buf_size;
+ uint64_t aad_size;
+ uint32_t num_sizes;
+};
+
+/* Struct storing all expanded keys */
+struct cipher_auth_keys {
+ uint8_t temp_buf[SHA_512_BLOCK_SIZE];
+ DECLARE_ALIGNED(uint32_t dust[15 * 4], 16);
+ uint8_t ipad[SHA512_DIGEST_SIZE_IN_BYTES];
+ uint8_t opad[SHA512_DIGEST_SIZE_IN_BYTES];
+ DECLARE_ALIGNED(uint32_t k1_expanded[15 * 4], 16);
+ DECLARE_ALIGNED(uint8_t k2[16], 16);
+ DECLARE_ALIGNED(uint8_t k3[16], 16);
+ DECLARE_ALIGNED(uint32_t enc_keys[15 * 4], 16);
+ DECLARE_ALIGNED(uint32_t dec_keys[15 * 4], 16);
+ DECLARE_ALIGNED(struct gcm_key_data gdata_key, 64);
+};
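+
+/*
+ * The 15 * 4 uint32_t key schedules above are sized for the largest case,
+ * AES-256: 14 rounds plus the initial key give 15 round keys of four
+ * 32-bit words each (240 bytes).
+ */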
+
+/* Struct storing all necessary data for crypto operations */
+struct data {
+ uint8_t test_buf[JOB_SIZE_TOP];
+ uint8_t src_dst_buf[JOB_SIZE_TOP];
+ uint8_t aad[AAD_SIZE];
+ uint8_t in_digest[MAX_DIGEST_SIZE];
+ uint8_t out_digest[MAX_DIGEST_SIZE];
+ uint8_t iv[MAX_IV_SIZE];
+ uint8_t key[MAX_KEY_SIZE];
+ struct cipher_auth_keys enc_keys;
+ struct cipher_auth_keys dec_keys;
+};
+
+struct custom_job_params {
+ JOB_CIPHER_MODE cipher_mode; /* CBC, CNTR, DES, GCM etc. */
+ JOB_HASH_ALG hash_alg; /* SHA-1 or others... */
+ uint32_t key_size;
+};
+
+union params {
+ enum arch_type_e arch_type;
+ struct custom_job_params job_params;
+};
+
+struct str_value_mapping {
+ const char *name;
+ union params values;
+};
+
+struct str_value_mapping arch_str_map[] = {
+ {.name = "SSE", .values.arch_type = ARCH_SSE },
+ {.name = "AESNI_EMU", .values.arch_type = ARCH_AESNI_EMU },
+ {.name = "AVX", .values.arch_type = ARCH_AVX },
+ {.name = "AVX2", .values.arch_type = ARCH_AVX2 },
+ {.name = "AVX512", .values.arch_type = ARCH_AVX512 }
+};
+
+struct str_value_mapping cipher_algo_str_map[] = {
+ {
+ .name = "aes-cbc-128",
+ .values.job_params = {
+ .cipher_mode = CBC,
+ .key_size = AES_128_BYTES
+ }
+ },
+ {
+ .name = "aes-cbc-192",
+ .values.job_params = {
+ .cipher_mode = CBC,
+ .key_size = AES_192_BYTES
+ }
+ },
+ {
+ .name = "aes-cbc-256",
+ .values.job_params = {
+ .cipher_mode = CBC,
+ .key_size = AES_256_BYTES
+ }
+ },
+ {
+ .name = "aes-ctr-128",
+ .values.job_params = {
+ .cipher_mode = CNTR,
+ .key_size = AES_128_BYTES
+ }
+ },
+ {
+ .name = "aes-ctr-192",
+ .values.job_params = {
+ .cipher_mode = CNTR,
+ .key_size = AES_192_BYTES
+ }
+ },
+ {
+ .name = "aes-ctr-256",
+ .values.job_params = {
+ .cipher_mode = CNTR,
+ .key_size = AES_256_BYTES
+ }
+ },
+ {
+ .name = "aes-ctr-bit-128",
+ .values.job_params = {
+ .cipher_mode = CNTR_BITLEN,
+ .key_size = AES_128_BYTES
+ }
+ },
+ {
+ .name = "aes-ctr-bit-192",
+ .values.job_params = {
+ .cipher_mode = CNTR_BITLEN,
+ .key_size = AES_192_BYTES
+ }
+ },
+ {
+ .name = "aes-ctr-bit-256",
+ .values.job_params = {
+ .cipher_mode = CNTR_BITLEN,
+ .key_size = AES_256_BYTES
+ }
+ },
+ {
+ .name = "aes-ecb-128",
+ .values.job_params = {
+ .cipher_mode = ECB,
+ .key_size = AES_128_BYTES
+ }
+ },
+ {
+ .name = "aes-ecb-192",
+ .values.job_params = {
+ .cipher_mode = ECB,
+ .key_size = AES_192_BYTES
+ }
+ },
+ {
+ .name = "aes-ecb-256",
+ .values.job_params = {
+ .cipher_mode = ECB,
+ .key_size = AES_256_BYTES
+ }
+ },
+ {
+ .name = "aes-docsis",
+ .values.job_params = {
+ .cipher_mode = DOCSIS_SEC_BPI,
+ .key_size = AES_128_BYTES
+ }
+ },
+ {
+ .name = "des-docsis",
+ .values.job_params = {
+ .cipher_mode = DOCSIS_DES,
+ .key_size = 8
+ }
+ },
+ {
+ .name = "des-cbc",
+ .values.job_params = {
+ .cipher_mode = DES,
+ .key_size = 8
+ }
+ },
+ {
+ .name = "3des-cbc",
+ .values.job_params = {
+ .cipher_mode = DES3,
+ .key_size = 24
+ }
+ },
+ {
+ .name = "null",
+ .values.job_params = {
+ .cipher_mode = NULL_CIPHER,
+ .key_size = 0
+ }
+ }
+};
+
+struct str_value_mapping hash_algo_str_map[] = {
+ {
+ .name = "sha1-hmac",
+ .values.job_params = {
+ .hash_alg = SHA1
+ }
+ },
+ {
+ .name = "sha224-hmac",
+ .values.job_params = {
+ .hash_alg = SHA_224
+ }
+ },
+ {
+ .name = "sha256-hmac",
+ .values.job_params = {
+ .hash_alg = SHA_256
+ }
+ },
+ {
+ .name = "sha384-hmac",
+ .values.job_params = {
+ .hash_alg = SHA_384
+ }
+ },
+ {
+ .name = "sha512-hmac",
+ .values.job_params = {
+ .hash_alg = SHA_512
+ }
+ },
+ {
+ .name = "aes-xcbc",
+ .values.job_params = {
+ .hash_alg = AES_XCBC
+ }
+ },
+ {
+ .name = "md5-hmac",
+ .values.job_params = {
+ .hash_alg = MD5
+ }
+ },
+ {
+ .name = "aes-cmac",
+ .values.job_params = {
+ .hash_alg = AES_CMAC
+ }
+ },
+ {
+ .name = "null",
+ .values.job_params = {
+ .hash_alg = NULL_HASH
+ }
+ },
+ {
+ .name = "aes-cmac-bitlen",
+ .values.job_params = {
+ .hash_alg = AES_CMAC_BITLEN
+ }
+ },
+ {
+ .name = "sha1",
+ .values.job_params = {
+ .hash_alg = PLAIN_SHA1
+ }
+ },
+ {
+ .name = "sha224",
+ .values.job_params = {
+ .hash_alg = PLAIN_SHA_224
+ }
+ },
+ {
+ .name = "sha256",
+ .values.job_params = {
+ .hash_alg = PLAIN_SHA_256
+ }
+ },
+ {
+ .name = "sha384",
+ .values.job_params = {
+ .hash_alg = PLAIN_SHA_384
+ }
+ },
+ {
+ .name = "sha512",
+ .values.job_params = {
+ .hash_alg = PLAIN_SHA_512
+ }
+ },
+};
+
+struct str_value_mapping aead_algo_str_map[] = {
+ {
+ .name = "aes-gcm-128",
+ .values.job_params = {
+ .cipher_mode = GCM,
+ .hash_alg = AES_GMAC,
+ .key_size = AES_128_BYTES
+ }
+ },
+ {
+ .name = "aes-gcm-192",
+ .values.job_params = {
+ .cipher_mode = GCM,
+ .hash_alg = AES_GMAC,
+ .key_size = AES_192_BYTES
+ }
+ },
+ {
+ .name = "aes-gcm-256",
+ .values.job_params = {
+ .cipher_mode = GCM,
+ .hash_alg = AES_GMAC,
+ .key_size = AES_256_BYTES
+ }
+ },
+ {
+ .name = "aes-ccm-128",
+ .values.job_params = {
+ .cipher_mode = CCM,
+ .hash_alg = AES_CCM,
+ .key_size = AES_128_BYTES
+ }
+ },
+ {
+ .name = "pon-128",
+ .values.job_params = {
+ .cipher_mode = PON_AES_CNTR,
+ .hash_alg = PON_CRC_BIP,
+ .key_size = AES_128_BYTES
+ }
+ },
+ {
+ .name = "pon-128-no-ctr",
+ .values.job_params = {
+ .cipher_mode = PON_AES_CNTR,
+ .hash_alg = PON_CRC_BIP,
+ .key_size = 0
+ }
+ },
+};
+
+/* This struct stores all information about a performed test case */
+struct variant_s {
+ uint32_t arch;
+ struct params_s params;
+ uint64_t *avg_times;
+};
+
+const uint8_t auth_tag_length_bytes[19] = {
+ 12, /* SHA1 */
+ 14, /* SHA_224 */
+ 16, /* SHA_256 */
+ 24, /* SHA_384 */
+ 32, /* SHA_512 */
+ 12, /* AES_XCBC */
+ 12, /* MD5 */
+ 0, /* NULL_HASH */
+#ifndef NO_GCM
+ 16, /* AES_GMAC */
+#endif
+ 0, /* CUSTOM HASH */
+ 16, /* AES_CCM */
+ 16, /* AES_CMAC */
+ 20, /* PLAIN_SHA1 */
+ 28, /* PLAIN_SHA_224 */
+ 32, /* PLAIN_SHA_256 */
+ 48, /* PLAIN_SHA_384 */
+ 64, /* PLAIN_SHA_512 */
+ 4, /* AES_CMAC_BITLEN (3GPP) */
+ 8, /* PON */
+};
+
+/* Minimum, maximum and step values of key sizes */
+const uint8_t key_sizes[13][3] = {
+ {16, 32, 8}, /* CBC */
+ {16, 32, 8}, /* CNTR */
+ {0, 0, 1}, /* NULL */
+ {16, 16, 1}, /* DOCSIS_SEC_BPI */
+#ifndef NO_GCM
+ {16, 32, 8}, /* GCM */
+#endif
+ {0, 0, 1}, /* CUSTOM_CIPHER */
+ {8, 8, 1}, /* DES */
+ {8, 8, 1}, /* DOCSIS_DES */
+ {16, 16, 1}, /* CCM */
+ {24, 24, 1}, /* DES3 */
+ {16, 16, 1}, /* PON_AES_CNTR */
+ {16, 32, 8}, /* ECB */
+ {16, 32, 8}, /* CNTR_BITLEN */
+};
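+
+/*
+ * Each row above is {min, max, step} in bytes for one cipher mode, e.g.
+ * {16, 32, 8} for CBC covers 16, 24 and 32 byte AES keys.
+ */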
+
+uint8_t custom_test = 0;
+uint8_t verbose = 0;
+
+enum range {
+ RANGE_MIN = 0,
+ RANGE_STEP,
+ RANGE_MAX,
+ NUM_RANGE
+};
+
+uint32_t job_sizes[NUM_RANGE] = {DEFAULT_JOB_SIZE_MIN,
+ DEFAULT_JOB_SIZE_STEP,
+ DEFAULT_JOB_SIZE_MAX};
+uint32_t job_iter = DEFAULT_JOB_ITER;
+
+struct custom_job_params custom_job_params = {
+ .cipher_mode = NULL_CIPHER,
+ .hash_alg = NULL_HASH,
+ .key_size = 0
+};
+
+/* AESNI_EMU disabled by default */
+uint8_t enc_archs[NUM_ARCHS] = {1, 0, 1, 1, 1};
+uint8_t dec_archs[NUM_ARCHS] = {1, 0, 1, 1, 1};
+
+uint64_t flags = 0; /* flags passed to alloc_mb_mgr() */
+
+/** Generate random buffer */
+static void
+generate_random_buf(uint8_t *buf, const uint32_t length)
+{
+ uint32_t i;
+
+ for (i = 0; i < length; i++)
+ buf[i] = (uint8_t) rand();
+}
+
+/*
+ * Searches a block of memory for any of the known patterns
+ * (indicating that some sensitive data was left behind)
+ *
+ * Returns 0 if a pattern is present or -1 if not present
+ */
+static int
+search_patterns(const void *ptr, const size_t mem_size)
+{
+ const uint8_t *ptr8 = (const uint8_t *) ptr;
+ size_t i;
+
+ if (mem_size < 4)
+ return -1;
+
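+	/* Advance one byte at a time so that unaligned occurrences of the
+	 * 32-bit patterns are also detected */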
+ for (i = 0; i <= (mem_size - 4); i++) {
+ const uint32_t string = ((const uint32_t *) ptr8)[0];
+ int ret = -1;
+
+ if (string == KEY_PATTERN) {
+ fprintf(stderr, "Part of KEY is present\n");
+ ret = 0;
+ }
+ if (string == TAG_PATTERN) {
+ fprintf(stderr, "Part of TAG is present\n");
+ ret = 0;
+ }
+ if (string == PT_PATTERN) {
+ fprintf(stderr,
+ "Part of plain/ciphertext is present\n");
+ ret = 0;
+ }
+ if (ret == 0) {
+ fprintf(stderr, "Offset = %zu\n", i);
+ return 0;
+ }
+ ptr8++;
+ }
+
+ return -1;
+}
+
+static void
+byte_hexdump(const char *message, const uint8_t *ptr, const uint32_t len)
+{
+ uint32_t ctr;
+
+ printf("%s:\n", message);
+ for (ctr = 0; ctr < len; ctr++) {
+ printf("0x%02X ", ptr[ctr] & 0xff);
+ if (!((ctr + 1) % 16))
+ printf("\n");
+ }
+ printf("\n");
+ printf("\n");
+}
+
+static void
+print_algo_info(const struct params_s *params)
+{
+ struct custom_job_params *job_params;
+ uint32_t i;
+
+ for (i = 0; i < DIM(aead_algo_str_map); i++) {
+ job_params = &aead_algo_str_map[i].values.job_params;
+ if (job_params->cipher_mode == params->cipher_mode &&
+ job_params->hash_alg == params->hash_alg &&
+ job_params->key_size == params->key_size) {
+ printf("AEAD algo = %s\n", aead_algo_str_map[i].name);
+ return;
+ }
+ }
+
+ for (i = 0; i < DIM(cipher_algo_str_map); i++) {
+ job_params = &cipher_algo_str_map[i].values.job_params;
+ if (job_params->cipher_mode == params->cipher_mode &&
+ job_params->key_size == params->key_size) {
+ printf("Cipher algo = %s ",
+ cipher_algo_str_map[i].name);
+ break;
+ }
+ }
+ for (i = 0; i < DIM(hash_algo_str_map); i++) {
+ job_params = &hash_algo_str_map[i].values.job_params;
+ if (job_params->hash_alg == params->hash_alg) {
+ printf("Hash algo = %s\n", hash_algo_str_map[i].name);
+ break;
+ }
+ }
+}
+
+static void
+print_arch_info(const enum arch_type_e arch)
+{
+ uint32_t i;
+
+ for (i = 0; i < DIM(arch_str_map); i++) {
+ if (arch_str_map[i].values.arch_type == arch)
+ printf("Architecture = %s\n",
+ arch_str_map[i].name);
+ }
+}
+
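+/*
+ * Fills the multi-buffer job structure with the cipher/hash parameters,
+ * keys, offsets and buffers for a single in-place operation.
+ * Returns 0 on success, -1 on an unsupported algorithm.
+ */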
+static int
+fill_job(JOB_AES_HMAC *job, const struct params_s *params,
+ uint8_t *buf, uint8_t *digest, const uint8_t *aad,
+ const uint32_t buf_size, const uint8_t tag_size,
+ JOB_CIPHER_DIRECTION cipher_dir,
+ struct cipher_auth_keys *keys, uint8_t *iv)
+{
+ static const void *ks_ptr[3];
+ uint32_t *k1_expanded = keys->k1_expanded;
+ uint8_t *k2 = keys->k2;
+ uint8_t *k3 = keys->k3;
+ uint32_t *enc_keys = keys->enc_keys;
+ uint32_t *dec_keys = keys->dec_keys;
+ uint8_t *ipad = keys->ipad;
+ uint8_t *opad = keys->opad;
+ struct gcm_key_data *gdata_key = &keys->gdata_key;
+
+	/* Force a partial byte by subtracting 3 bits from the full length */
+ if (params->cipher_mode == CNTR_BITLEN)
+ job->msg_len_to_cipher_in_bits = buf_size * 8 - 3;
+ else
+ job->msg_len_to_cipher_in_bytes = buf_size;
+
+ job->msg_len_to_hash_in_bytes = buf_size;
+ job->hash_start_src_offset_in_bytes = 0;
+ job->cipher_start_src_offset_in_bytes = 0;
+ job->iv = iv;
+
+ if (params->cipher_mode == PON_AES_CNTR) {
+		/* Subtract the XGEM header */
+ job->msg_len_to_cipher_in_bytes -= 8;
+ job->cipher_start_src_offset_in_bytes = 8;
+ /* If no crypto needed, set msg_len_to_cipher to 0 */
+ if (params->key_size == 0)
+ job->msg_len_to_cipher_in_bytes = 0;
+ }
+
+ /* In-place operation */
+ job->src = buf;
+ job->dst = buf + job->cipher_start_src_offset_in_bytes;
+ job->auth_tag_output = digest;
+
+ job->hash_alg = params->hash_alg;
+ switch (params->hash_alg) {
+ case AES_XCBC:
+ job->u.XCBC._k1_expanded = k1_expanded;
+ job->u.XCBC._k2 = k2;
+ job->u.XCBC._k3 = k3;
+ break;
+ case AES_CMAC:
+ job->u.CMAC._key_expanded = k1_expanded;
+ job->u.CMAC._skey1 = k2;
+ job->u.CMAC._skey2 = k3;
+ break;
+ case AES_CMAC_BITLEN:
+ job->u.CMAC._key_expanded = k1_expanded;
+ job->u.CMAC._skey1 = k2;
+ job->u.CMAC._skey2 = k3;
+ /*
+ * CMAC bit level version is done in bits (length is
+ * converted to bits and it is decreased by 4 bits,
+ * to force the CMAC bitlen path)
+ */
+ job->msg_len_to_hash_in_bits =
+ (job->msg_len_to_hash_in_bytes * 8) - 4;
+ break;
+ case SHA1:
+ case SHA_224:
+ case SHA_256:
+ case SHA_384:
+ case SHA_512:
+ case MD5:
+		/* HMAC variants: SHA1/224/256/384/512 and MD5 */
+ job->u.HMAC._hashed_auth_key_xor_ipad =
+ (uint8_t *) ipad;
+ job->u.HMAC._hashed_auth_key_xor_opad =
+ (uint8_t *) opad;
+ break;
+ case PON_CRC_BIP:
+ case NULL_HASH:
+ case AES_GMAC:
+ case AES_CCM:
+ case PLAIN_SHA1:
+ case PLAIN_SHA_224:
+ case PLAIN_SHA_256:
+ case PLAIN_SHA_384:
+ case PLAIN_SHA_512:
+ /* No operation needed */
+ break;
+ default:
+ printf("Unsupported hash algorithm\n");
+ return -1;
+ }
+
+ job->auth_tag_output_len_in_bytes = tag_size;
+
+ job->cipher_direction = cipher_dir;
+
+ if (params->cipher_mode == NULL_CIPHER) {
+ job->chain_order = HASH_CIPHER;
+ } else if (params->cipher_mode == CCM) {
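+		/*
+		 * CCM computes the authentication tag over the plaintext,
+		 * so hashing runs before encryption and after decryption
+		 */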
+ if (job->cipher_direction == ENCRYPT)
+ job->chain_order = HASH_CIPHER;
+ else
+ job->chain_order = CIPHER_HASH;
+ } else {
+ if (job->cipher_direction == ENCRYPT)
+ job->chain_order = CIPHER_HASH;
+ else
+ job->chain_order = HASH_CIPHER;
+ }
+
+	/* Set the cipher mode and key length expected by the API */
+ job->cipher_mode = params->cipher_mode;
+ job->aes_key_len_in_bytes = params->key_size;
+
+ switch (job->cipher_mode) {
+ case CBC:
+ case DOCSIS_SEC_BPI:
+ job->aes_enc_key_expanded = enc_keys;
+ job->aes_dec_key_expanded = dec_keys;
+ job->iv_len_in_bytes = 16;
+ break;
+ case PON_AES_CNTR:
+ case CNTR:
+ case CNTR_BITLEN:
+ job->aes_enc_key_expanded = enc_keys;
+ job->aes_dec_key_expanded = enc_keys;
+ job->iv_len_in_bytes = 16;
+ break;
+ case GCM:
+ job->aes_enc_key_expanded = gdata_key;
+ job->aes_dec_key_expanded = gdata_key;
+ job->u.GCM.aad_len_in_bytes = params->aad_size;
+ job->u.GCM.aad = aad;
+ job->iv_len_in_bytes = 12;
+ break;
+ case CCM:
+ job->msg_len_to_cipher_in_bytes = buf_size;
+ job->msg_len_to_hash_in_bytes = buf_size;
+ job->hash_start_src_offset_in_bytes = 0;
+ job->cipher_start_src_offset_in_bytes = 0;
+ job->u.CCM.aad_len_in_bytes = params->aad_size;
+ job->u.CCM.aad = aad;
+ job->aes_enc_key_expanded = enc_keys;
+ job->aes_dec_key_expanded = enc_keys;
+ job->iv_len_in_bytes = 13;
+ break;
+ case DES:
+ case DOCSIS_DES:
+ job->aes_enc_key_expanded = enc_keys;
+ job->aes_dec_key_expanded = enc_keys;
+ job->iv_len_in_bytes = 8;
+ break;
+ case DES3:
+ ks_ptr[0] = ks_ptr[1] = ks_ptr[2] = enc_keys;
+ job->aes_enc_key_expanded = ks_ptr;
+ job->aes_dec_key_expanded = ks_ptr;
+ job->iv_len_in_bytes = 8;
+ break;
+ case ECB:
+ job->aes_enc_key_expanded = enc_keys;
+ job->aes_dec_key_expanded = dec_keys;
+ job->iv_len_in_bytes = 0;
+ break;
+ case NULL_CIPHER:
+ /* No operation needed */
+ break;
+ default:
+ printf("Unsupported cipher mode\n");
+ return -1;
+ }
+
+ return 0;
+}
+
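+/*
+ * Expands/schedules the cipher and authentication keys.
+ * When "force_pattern" is set, the expanded key areas are filled with
+ * KEY_PATTERN instead, so they can be searched for later by the
+ * safe-data checks.
+ * Returns 0 on success, -1 on an unsupported algorithm.
+ */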
+static int
+prepare_keys(MB_MGR *mb_mgr, struct cipher_auth_keys *keys,
+ const uint8_t *key, const struct params_s *params,
+ const unsigned int force_pattern)
+{
+ uint8_t *buf = keys->temp_buf;
+ uint32_t *dust = keys->dust;
+ uint32_t *k1_expanded = keys->k1_expanded;
+ uint8_t *k2 = keys->k2;
+ uint8_t *k3 = keys->k3;
+ uint32_t *enc_keys = keys->enc_keys;
+ uint32_t *dec_keys = keys->dec_keys;
+ uint8_t *ipad = keys->ipad;
+ uint8_t *opad = keys->opad;
+ struct gcm_key_data *gdata_key = &keys->gdata_key;
+ uint8_t i;
+
+ /* Set all expanded keys to KEY_PATTERN if flag is set */
+ if (force_pattern) {
+ switch (params->hash_alg) {
+ case AES_XCBC:
+ memset(k1_expanded, KEY_PATTERN,
+ sizeof(keys->k1_expanded));
+ break;
+ case AES_CMAC:
+ case AES_CMAC_BITLEN:
+ memset(k1_expanded, KEY_PATTERN,
+ sizeof(keys->k1_expanded));
+ memset(k2, KEY_PATTERN, sizeof(keys->k2));
+ memset(k3, KEY_PATTERN, sizeof(keys->k3));
+ break;
+ case SHA1:
+ case SHA_224:
+ case SHA_256:
+ case SHA_384:
+ case SHA_512:
+ case MD5:
+ memset(ipad, KEY_PATTERN, sizeof(keys->ipad));
+ memset(opad, KEY_PATTERN, sizeof(keys->opad));
+ break;
+ case AES_CCM:
+ case AES_GMAC:
+ case NULL_HASH:
+ case PLAIN_SHA1:
+ case PLAIN_SHA_224:
+ case PLAIN_SHA_256:
+ case PLAIN_SHA_384:
+ case PLAIN_SHA_512:
+ case PON_CRC_BIP:
+ /* No operation needed */
+ break;
+ default:
+ fprintf(stderr, "Unsupported hash algo\n");
+ return -1;
+ }
+
+ switch (params->cipher_mode) {
+ case GCM:
+ memset(gdata_key, KEY_PATTERN, sizeof(keys->gdata_key));
+ break;
+ case PON_AES_CNTR:
+ case CBC:
+ case CCM:
+ case CNTR:
+ case CNTR_BITLEN:
+ case DOCSIS_SEC_BPI:
+ case ECB:
+ memset(enc_keys, KEY_PATTERN, sizeof(keys->enc_keys));
+ memset(dec_keys, KEY_PATTERN, sizeof(keys->dec_keys));
+ break;
+ case DES:
+ case DES3:
+ case DOCSIS_DES:
+ memset(enc_keys, KEY_PATTERN, sizeof(keys->enc_keys));
+ break;
+ case NULL_CIPHER:
+ /* No operation needed */
+ break;
+ default:
+ fprintf(stderr, "Unsupported cipher mode\n");
+ return -1;
+ }
+
+ return 0;
+ }
+
+ switch (params->hash_alg) {
+ case AES_XCBC:
+ IMB_AES_XCBC_KEYEXP(mb_mgr, key, k1_expanded, k2, k3);
+ break;
+ case AES_CMAC:
+ case AES_CMAC_BITLEN:
+ IMB_AES_KEYEXP_128(mb_mgr, key, k1_expanded, dust);
+ IMB_AES_CMAC_SUBKEY_GEN_128(mb_mgr, k1_expanded, k2, k3);
+ break;
+ case SHA1:
+ /* compute ipad hash */
+ memset(buf, 0x36, SHA1_BLOCK_SIZE);
+ for (i = 0; i < SHA1_BLOCK_SIZE; i++)
+ buf[i] ^= key[i];
+ IMB_SHA1_ONE_BLOCK(mb_mgr, buf, ipad);
+
+ /* compute opad hash */
+ memset(buf, 0x5c, SHA1_BLOCK_SIZE);
+ for (i = 0; i < SHA1_BLOCK_SIZE; i++)
+ buf[i] ^= key[i];
+ IMB_SHA1_ONE_BLOCK(mb_mgr, buf, opad);
+
+ break;
+ case SHA_224:
+ /* compute ipad hash */
+ memset(buf, 0x36, SHA_256_BLOCK_SIZE);
+ for (i = 0; i < SHA_256_BLOCK_SIZE; i++)
+ buf[i] ^= key[i];
+ IMB_SHA224_ONE_BLOCK(mb_mgr, buf, ipad);
+
+ /* compute opad hash */
+ memset(buf, 0x5c, SHA_256_BLOCK_SIZE);
+ for (i = 0; i < SHA_256_BLOCK_SIZE; i++)
+ buf[i] ^= key[i];
+ IMB_SHA224_ONE_BLOCK(mb_mgr, buf, opad);
+
+ break;
+ case SHA_256:
+ /* compute ipad hash */
+ memset(buf, 0x36, SHA_256_BLOCK_SIZE);
+ for (i = 0; i < SHA_256_BLOCK_SIZE; i++)
+ buf[i] ^= key[i];
+ IMB_SHA256_ONE_BLOCK(mb_mgr, buf, ipad);
+
+ /* compute opad hash */
+ memset(buf, 0x5c, SHA_256_BLOCK_SIZE);
+ for (i = 0; i < SHA_256_BLOCK_SIZE; i++)
+ buf[i] ^= key[i];
+ IMB_SHA256_ONE_BLOCK(mb_mgr, buf, opad);
+
+ break;
+ case SHA_384:
+ /* compute ipad hash */
+ memset(buf, 0x36, SHA_384_BLOCK_SIZE);
+ for (i = 0; i < SHA_384_BLOCK_SIZE; i++)
+ buf[i] ^= key[i];
+ IMB_SHA384_ONE_BLOCK(mb_mgr, buf, ipad);
+
+ /* compute opad hash */
+ memset(buf, 0x5c, SHA_384_BLOCK_SIZE);
+ for (i = 0; i < SHA_384_BLOCK_SIZE; i++)
+ buf[i] ^= key[i];
+ IMB_SHA384_ONE_BLOCK(mb_mgr, buf, opad);
+
+ break;
+ case SHA_512:
+ /* compute ipad hash */
+ memset(buf, 0x36, SHA_512_BLOCK_SIZE);
+ for (i = 0; i < SHA_512_BLOCK_SIZE; i++)
+ buf[i] ^= key[i];
+ IMB_SHA512_ONE_BLOCK(mb_mgr, buf, ipad);
+
+ /* compute opad hash */
+ memset(buf, 0x5c, SHA_512_BLOCK_SIZE);
+ for (i = 0; i < SHA_512_BLOCK_SIZE; i++)
+ buf[i] ^= key[i];
+ IMB_SHA512_ONE_BLOCK(mb_mgr, buf, opad);
+
+ break;
+ case MD5:
+ /* compute ipad hash */
+ memset(buf, 0x36, 64);
+ for (i = 0; i < 64; i++)
+ buf[i] ^= key[i];
+ IMB_MD5_ONE_BLOCK(mb_mgr, buf, ipad);
+
+ /* compute opad hash */
+ memset(buf, 0x5c, 64);
+ for (i = 0; i < 64; i++)
+ buf[i] ^= key[i];
+ IMB_MD5_ONE_BLOCK(mb_mgr, buf, opad);
+
+ break;
+ case AES_CCM:
+ case AES_GMAC:
+ case NULL_HASH:
+ case PLAIN_SHA1:
+ case PLAIN_SHA_224:
+ case PLAIN_SHA_256:
+ case PLAIN_SHA_384:
+ case PLAIN_SHA_512:
+ case PON_CRC_BIP:
+ /* No operation needed */
+ break;
+ default:
+ fprintf(stderr, "Unsupported hash algo\n");
+ return -1;
+ }
+
+ switch (params->cipher_mode) {
+ case GCM:
+ switch (params->key_size) {
+ case AES_128_BYTES:
+ IMB_AES128_GCM_PRE(mb_mgr, key, gdata_key);
+ break;
+ case AES_192_BYTES:
+ IMB_AES192_GCM_PRE(mb_mgr, key, gdata_key);
+ break;
+ case AES_256_BYTES:
+ IMB_AES256_GCM_PRE(mb_mgr, key, gdata_key);
+ break;
+ default:
+ fprintf(stderr, "Wrong key size\n");
+ return -1;
+ }
+ break;
+ case PON_AES_CNTR:
+ switch (params->key_size) {
+ case 16:
+ IMB_AES_KEYEXP_128(mb_mgr, key, enc_keys, dec_keys);
+ break;
+ case 0:
+ break;
+ default:
+ fprintf(stderr, "Wrong key size\n");
+ return -1;
+ }
+ break;
+ case CBC:
+ case CCM:
+ case CNTR:
+ case CNTR_BITLEN:
+ case DOCSIS_SEC_BPI:
+ case ECB:
+ switch (params->key_size) {
+ case AES_128_BYTES:
+ IMB_AES_KEYEXP_128(mb_mgr, key, enc_keys, dec_keys);
+ break;
+ case AES_192_BYTES:
+ IMB_AES_KEYEXP_192(mb_mgr, key, enc_keys, dec_keys);
+ break;
+ case AES_256_BYTES:
+ IMB_AES_KEYEXP_256(mb_mgr, key, enc_keys, dec_keys);
+ break;
+ default:
+ fprintf(stderr, "Wrong key size\n");
+ return -1;
+ }
+ break;
+ case DES:
+ case DES3:
+ case DOCSIS_DES:
+ des_key_schedule((uint64_t *) enc_keys, key);
+ break;
+ case NULL_CIPHER:
+ /* No operation needed */
+ break;
+ default:
+ fprintf(stderr, "Unsupported cipher mode\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+/* Modify the test buffer to set the HEC value and CRC, so the final
+ * decrypted message can be compared against the test buffer */
+static int
+modify_pon_test_buf(uint8_t *test_buf, const struct params_s *params,
+ const JOB_AES_HMAC *job, const uint64_t xgem_hdr)
+{
+ /* Set plaintext CRC in test buffer for PON */
+ uint32_t *buf32 = (uint32_t *) &test_buf[8 + params->buf_size - 4];
+ uint64_t *buf64 = (uint64_t *) test_buf;
+ const uint32_t *tag32 = (uint32_t *) job->auth_tag_output;
+ const uint64_t hec_mask = BSWAP64(0xfffffffffffe000);
+ const uint64_t xgem_hdr_out = ((const uint64_t *)job->src)[0];
+
+ if (params->buf_size >= 5)
+ buf32[0] = tag32[1];
+
+ /* Check if any bits apart from HEC are modified */
+ if ((xgem_hdr_out & hec_mask) != (xgem_hdr & hec_mask)) {
+ fprintf(stderr, "XGEM header overwritten outside HEC\n");
+ fprintf(stderr, "Original XGEM header: %"PRIx64"\n",
+ xgem_hdr & hec_mask );
+ fprintf(stderr, "Output XGEM header: %"PRIx64"\n",
+ xgem_hdr_out & hec_mask);
+ return -1;
+ }
+
+ /* Modify original XGEM header to include calculated HEC */
+ buf64[0] = xgem_hdr_out;
+
+ return 0;
+}
+
+/*
+ * Checks for sensitive information in registers, stack and MB_MGR
+ * (in this order, so that the checks themselves pollute as little as
+ * possible of the data left over after job completion).
+ *
+ * Returns -1 if sensitive information was found or 0 if not.
+ */
+static int
+perform_safe_checks(MB_MGR *mgr, const enum arch_type_e arch,
+ const char *dir)
+{
+ uint8_t *rsp_ptr;
+ uint32_t simd_size = 0;
+
+ dump_gps();
+ switch (arch) {
+ case ARCH_SSE:
+ case ARCH_AESNI_EMU:
+ dump_xmms_sse();
+ simd_size = XMM_MEM_SIZE;
+ break;
+ case ARCH_AVX:
+ dump_xmms_avx();
+ simd_size = XMM_MEM_SIZE;
+ break;
+ case ARCH_AVX2:
+ dump_ymms();
+ simd_size = YMM_MEM_SIZE;
+ break;
+ case ARCH_AVX512:
+ dump_zmms();
+ simd_size = ZMM_MEM_SIZE;
+ break;
+ default:
+ fprintf(stderr,
+ "Error getting the architecture\n");
+ return -1;
+ }
+ if (search_patterns(gps, GP_MEM_SIZE) == 0) {
+ fprintf(stderr, "Pattern found in GP registers "
+ "after %s data\n", dir);
+ return -1;
+ }
+ if (search_patterns(simd_regs, simd_size) == 0) {
+ fprintf(stderr, "Pattern found in SIMD "
+ "registers after %s data\n", dir);
+ return -1;
+ }
+ rsp_ptr = rdrsp();
+ if (search_patterns((rsp_ptr - STACK_DEPTH),
+ STACK_DEPTH) == 0) {
+ fprintf(stderr, "Pattern found in stack after "
+ "%s data\n", dir);
+ return -1;
+ }
+ if (search_patterns(mgr, sizeof(MB_MGR)) == 0) {
+ fprintf(stderr, "Pattern found in MB_MGR after "
+ "%s data\n", dir);
+ return -1;
+ }
+
+ return 0;
+}
+
+static void
+clear_scratch_simd(const enum arch_type_e arch)
+{
+ switch (arch) {
+ case ARCH_SSE:
+ case ARCH_AESNI_EMU:
+ clear_scratch_xmms_sse();
+ break;
+ case ARCH_AVX:
+ clear_scratch_xmms_avx();
+ break;
+ case ARCH_AVX2:
+ clear_scratch_ymms();
+ break;
+ case ARCH_AVX512:
+ clear_scratch_zmms();
+ break;
+ default:
+ fprintf(stderr, "Invalid architecture\n");
+ exit(EXIT_FAILURE);
+ }
+}
+
+/* Performs a single cross-architecture validation test for the given parameters */
+static int
+do_test(MB_MGR *enc_mb_mgr, const enum arch_type_e enc_arch,
+ MB_MGR *dec_mb_mgr, const enum arch_type_e dec_arch,
+ const struct params_s *params, struct data *data,
+ const unsigned safe_check)
+{
+ JOB_AES_HMAC *job;
+ uint32_t i;
+ int ret = -1;
+ uint32_t buf_size = params->buf_size;
+ uint8_t tag_size = auth_tag_length_bytes[params->hash_alg - 1];
+ uint64_t xgem_hdr = 0;
+	uint8_t tag_size_to_check = tag_size;
+ struct cipher_auth_keys *enc_keys = &data->enc_keys;
+ struct cipher_auth_keys *dec_keys = &data->dec_keys;
+ uint8_t *aad = data->aad;
+ uint8_t *iv = data->iv;
+ uint8_t *in_digest = data->in_digest;
+ uint8_t *out_digest = data->out_digest;
+ uint8_t *test_buf = data->test_buf;
+ uint8_t *src_dst_buf = data->src_dst_buf;
+ uint8_t *key = data->key;
+
+ if (params->hash_alg == PON_CRC_BIP) {
+		/* Buffer size is the XGEM payload, including CRC;
+		 * allocate space for the XGEM header and padding */
+ buf_size = buf_size + 8;
+ if (buf_size % 8)
+ buf_size = (buf_size + 8) & 0xfffffff8;
+ /* Only first 4 bytes are checked, corresponding to BIP */
+ tag_size_to_check = 4;
+ }
+
+ /* If performing a test searching for sensitive information,
+ * set keys and plaintext to known values,
+ * so they can be searched later on in the MB_MGR structure and stack.
+ * Otherwise, just randomize the data */
+ if (safe_check) {
+ memset(test_buf, PT_PATTERN, buf_size);
+ memset(key, KEY_PATTERN, MAX_KEY_SIZE);
+ } else {
+ generate_random_buf(test_buf, buf_size);
+ generate_random_buf(key, MAX_KEY_SIZE);
+ generate_random_buf(iv, MAX_IV_SIZE);
+ generate_random_buf(aad, AAD_SIZE);
+ }
+
+ /* For PON, construct the XGEM header, setting valid PLI */
+ if (params->hash_alg == PON_CRC_BIP) {
+ /* create XGEM header template */
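+		/*
+		 * PLI (payload length) occupies the top 14 bits of the
+		 * first 16-bit word of the XGEM header; the byte swap
+		 * below stores that word in big endian order
+		 */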
+ const uint64_t pli = ((params->buf_size) << 2) & 0xffff;
+ uint64_t *p_src = (uint64_t *)test_buf;
+
+ xgem_hdr = ((pli >> 8) & 0xff) | ((pli & 0xff) << 8);
+ p_src[0] = xgem_hdr;
+ }
+
+ /*
+ * Expand/schedule keys.
+ * If checking for sensitive information, first use actual
+ * key expansion functions and check the stack for left over
+ * information and then set a pattern in the expanded key memory
+ * to search for later on.
+ * If not checking for sensitive information, just use the key
+ * expansion functions.
+ */
+ if (safe_check) {
+ uint8_t *rsp_ptr;
+
+ /* Clear scratch registers before expanding keys to prevent
+ * other functions from storing sensitive data in stack */
+ clear_scratch_simd(enc_arch);
+ if (prepare_keys(enc_mb_mgr, enc_keys, key, params, 0) < 0)
+ goto exit;
+
+ rsp_ptr = rdrsp();
+ if (search_patterns((rsp_ptr - STACK_DEPTH),
+ STACK_DEPTH) == 0) {
+ fprintf(stderr, "Pattern found in stack after "
+ "expanding encryption keys\n");
+ goto exit;
+ }
+
+ if (prepare_keys(dec_mb_mgr, dec_keys, key, params, 0) < 0)
+ goto exit;
+
+ rsp_ptr = rdrsp();
+ if (search_patterns((rsp_ptr - STACK_DEPTH),
+ STACK_DEPTH) == 0) {
+ fprintf(stderr, "Pattern found in stack after "
+ "expanding decryption keys\n");
+ goto exit;
+ }
+
+ if (prepare_keys(enc_mb_mgr, enc_keys, key, params, 1) < 0)
+ goto exit;
+
+ if (prepare_keys(enc_mb_mgr, dec_keys, key, params, 1) < 0)
+ goto exit;
+ } else {
+ if (prepare_keys(enc_mb_mgr, enc_keys, key, params, 0) < 0)
+ goto exit;
+
+ if (prepare_keys(enc_mb_mgr, dec_keys, key, params, 0) < 0)
+ goto exit;
+ }
+
+ for (i = 0; i < job_iter; i++) {
+ job = IMB_GET_NEXT_JOB(enc_mb_mgr);
+ /*
+ * Encrypt + generate digest from encrypted message
+ * using architecture under test
+ */
+ memcpy(src_dst_buf, test_buf, buf_size);
+ if (fill_job(job, params, src_dst_buf, in_digest, aad,
+ buf_size, tag_size, ENCRYPT, enc_keys, iv) < 0)
+ goto exit;
+
+ /* Randomize memory for input digest */
+ generate_random_buf(in_digest, tag_size);
+
+ /* Clear scratch registers before submitting job to prevent
+ * other functions from storing sensitive data in stack */
+ if (safe_check)
+ clear_scratch_simd(enc_arch);
+ job = IMB_SUBMIT_JOB(enc_mb_mgr);
+
+ if (!job)
+ job = IMB_FLUSH_JOB(enc_mb_mgr);
+
+ if (!job) {
+ fprintf(stderr, "job not returned\n");
+ goto exit;
+ }
+
+ /* Check that the registers, stack and MB_MGR do not contain any
+ * sensitive information after job is returned */
+ if (safe_check)
+ if (perform_safe_checks(enc_mb_mgr, enc_arch,
+ "encrypting") < 0)
+ goto exit;
+
+ if (job->status != STS_COMPLETED) {
+ fprintf(stderr, "failed job, status:%d\n",
+ job->status);
+ goto exit;
+ }
+
+ if (params->hash_alg == PON_CRC_BIP) {
+ if (modify_pon_test_buf(test_buf, params, job,
+ xgem_hdr) < 0)
+ goto exit;
+ }
+
+ job = IMB_GET_NEXT_JOB(dec_mb_mgr);
+
+		/* Randomize memory for output digest */
+ generate_random_buf(out_digest, tag_size);
+
+ /*
+ * Generate digest from encrypted message and decrypt
+ * using reference architecture
+ */
+ if (fill_job(job, params, src_dst_buf, out_digest, aad,
+ buf_size, tag_size, DECRYPT, dec_keys, iv) < 0)
+ goto exit;
+
+ /* Clear scratch registers before submitting job to prevent
+ * other functions from storing sensitive data in stack */
+ if (safe_check)
+ clear_scratch_simd(dec_arch);
+ job = IMB_SUBMIT_JOB(dec_mb_mgr);
+
+ if (!job)
+ job = IMB_FLUSH_JOB(dec_mb_mgr);
+
+ /* Check that the registers, stack and MB_MGR do not contain any
+ * sensitive information after job is returned */
+ if (safe_check)
+ if (perform_safe_checks(dec_mb_mgr, dec_arch,
+ "decrypting") < 0)
+ goto exit;
+
+ if (!job) {
+ fprintf(stderr, "job not returned\n");
+ goto exit;
+ }
+
+ if (job->status != STS_COMPLETED) {
+ fprintf(stderr, "failed job, status:%d\n",
+ job->status);
+ goto exit;
+ }
+
+ if (params->hash_alg != NULL_HASH &&
+ memcmp(in_digest, out_digest, tag_size_to_check) != 0) {
+ fprintf(stderr,
+ "\nInput and output tags don't match\n");
+ byte_hexdump("Input digest", in_digest,
+ tag_size_to_check);
+ byte_hexdump("Output digest", out_digest,
+ tag_size_to_check);
+ goto exit;
+ }
+
+ if (params->cipher_mode != NULL_CIPHER &&
+ memcmp(src_dst_buf, test_buf, buf_size) != 0) {
+ fprintf(stderr,
+ "\nDecrypted text and plaintext don't match\n");
+ byte_hexdump("Plaintext (orig)", test_buf, buf_size);
+ byte_hexdump("Decrypted msg", src_dst_buf, buf_size);
+ goto exit;
+ }
+
+ if (params->hash_alg == PON_CRC_BIP && params->buf_size > 4) {
+ const uint64_t plen = params->buf_size - 4;
+
+ if (memcmp(src_dst_buf + 8 + plen,
+ out_digest + 4, 4) != 0) {
+ fprintf(stderr, "\nDecrypted CRC and calculated"
+ " CRC don't match\n");
+ byte_hexdump("Decrypted CRC",
+ src_dst_buf + 8 + plen, 4);
+ byte_hexdump("Calculated CRC",
+ out_digest + 4, 4);
+ goto exit;
+ }
+ }
+ }
+
+ ret = 0;
+
+exit:
+ if (ret < 0) {
+ printf("Failures in\n");
+ print_algo_info(params);
+ printf("Encrypting ");
+ print_arch_info(enc_arch);
+ printf("Decrypting ");
+ print_arch_info(dec_arch);
+ printf("Buffer size = %u\n", params->buf_size);
+ printf("Key size = %u\n", params->key_size);
+ printf("Tag size = %u\n", tag_size);
+ }
+
+ return ret;
+}
+
+/* Runs test for each buffer size */
+static void
+process_variant(MB_MGR *enc_mgr, const enum arch_type_e enc_arch,
+ MB_MGR *dec_mgr, const enum arch_type_e dec_arch,
+ struct params_s *params, struct data *variant_data,
+ const unsigned int safe_check)
+{
+ const uint32_t sizes = params->num_sizes;
+ uint32_t sz;
+
+ if (verbose) {
+ printf("Testing ");
+ print_algo_info(params);
+ }
+
+ /* Reset the variant data */
+ memset(variant_data, 0, sizeof(struct data));
+
+ for (sz = 0; sz < sizes; sz++) {
+ const uint32_t buf_size = job_sizes[RANGE_MIN] +
+ (sz * job_sizes[RANGE_STEP]);
+ params->aad_size = AAD_SIZE;
+
+ params->buf_size = buf_size;
+
+		/*
+		 * CBC and ECB operation modes do not support lengths that are
+		 * not a multiple of the block size
+		 */
+ if (params->cipher_mode == CBC || params->cipher_mode == ECB)
+ if ((buf_size % AES_BLOCK_SIZE) != 0)
+ continue;
+
+ if (params->cipher_mode == DES || params->cipher_mode == DES3)
+ if ((buf_size % DES_BLOCK_SIZE) != 0)
+ continue;
+
+ /* Check for sensitive data first, then normal cross
+ * architecture validation */
+ if (safe_check && do_test(enc_mgr, enc_arch, dec_mgr, dec_arch,
+ params, variant_data, 1) < 0)
+ exit(EXIT_FAILURE);
+
+ if (do_test(enc_mgr, enc_arch, dec_mgr, dec_arch,
+ params, variant_data, 0) < 0)
+ exit(EXIT_FAILURE);
+
+ }
+}
+
+/* Allocates the MB managers and runs the tests for each cipher/hash/key size combination */
+static void
+run_test(const enum arch_type_e enc_arch, const enum arch_type_e dec_arch,
+ struct params_s *params, struct data *variant_data,
+ const unsigned int safe_check)
+{
+ MB_MGR *enc_mgr = NULL;
+ MB_MGR *dec_mgr = NULL;
+
+ if (enc_arch == ARCH_AESNI_EMU)
+ enc_mgr = alloc_mb_mgr(flags | IMB_FLAG_AESNI_OFF);
+ else
+ enc_mgr = alloc_mb_mgr(flags);
+
+ if (enc_mgr == NULL) {
+ fprintf(stderr, "MB MGR could not be allocated\n");
+ exit(EXIT_FAILURE);
+ }
+
+ /* Reset the MB MGR structure in case it is allocated with
+ * memory containing the patterns that will be searched later on */
+ if (safe_check)
+ memset(enc_mgr, 0, sizeof(MB_MGR));
+
+ switch (enc_arch) {
+ case ARCH_SSE:
+ case ARCH_AESNI_EMU:
+ init_mb_mgr_sse(enc_mgr);
+ break;
+ case ARCH_AVX:
+ init_mb_mgr_avx(enc_mgr);
+ break;
+ case ARCH_AVX2:
+ init_mb_mgr_avx2(enc_mgr);
+ break;
+ case ARCH_AVX512:
+ init_mb_mgr_avx512(enc_mgr);
+ break;
+ default:
+ fprintf(stderr, "Invalid architecture\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (dec_arch == ARCH_AESNI_EMU)
+ dec_mgr = alloc_mb_mgr(flags | IMB_FLAG_AESNI_OFF);
+ else
+ dec_mgr = alloc_mb_mgr(flags);
+
+ if (dec_mgr == NULL) {
+ fprintf(stderr, "MB MGR could not be allocated\n");
+ exit(EXIT_FAILURE);
+ }
+
+ /* Reset the MB MGR structure in case it is allocated with
+ * memory containing the patterns that will be searched later on */
+ if (safe_check)
+ memset(dec_mgr, 0, sizeof(MB_MGR));
+
+ switch (dec_arch) {
+ case ARCH_SSE:
+ case ARCH_AESNI_EMU:
+ init_mb_mgr_sse(dec_mgr);
+ break;
+ case ARCH_AVX:
+ init_mb_mgr_avx(dec_mgr);
+ break;
+ case ARCH_AVX2:
+ init_mb_mgr_avx2(dec_mgr);
+ break;
+ case ARCH_AVX512:
+ init_mb_mgr_avx512(dec_mgr);
+ break;
+ default:
+ fprintf(stderr, "Invalid architecture\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (custom_test) {
+ params->key_size = custom_job_params.key_size;
+ params->cipher_mode = custom_job_params.cipher_mode;
+ params->hash_alg = custom_job_params.hash_alg;
+ process_variant(enc_mgr, enc_arch, dec_mgr, dec_arch, params,
+ variant_data, safe_check);
+ goto exit;
+ }
+
+ JOB_HASH_ALG hash_alg;
+ JOB_CIPHER_MODE c_mode;
+
+ for (c_mode = CBC; c_mode <= CNTR_BITLEN; c_mode++) {
+ /* Skip CUSTOM_CIPHER */
+ if (c_mode == CUSTOM_CIPHER)
+ continue;
+ params->cipher_mode = c_mode;
+ uint8_t min_sz = key_sizes[c_mode - 1][0];
+ uint8_t max_sz = key_sizes[c_mode - 1][1];
+ uint8_t step_sz = key_sizes[c_mode - 1][2];
+ uint8_t key_sz;
+
+ for (key_sz = min_sz; key_sz <= max_sz; key_sz += step_sz) {
+ params->key_size = key_sz;
+ for (hash_alg = SHA1; hash_alg <= PON_CRC_BIP;
+ hash_alg++) {
+ /* Skip CUSTOM_HASH */
+ if (hash_alg == CUSTOM_HASH)
+ continue;
+
+ /* Skip not supported combinations */
+ if ((c_mode == GCM && hash_alg != AES_GMAC) ||
+ (c_mode != GCM && hash_alg == AES_GMAC))
+ continue;
+ if ((c_mode == CCM && hash_alg != AES_CCM) ||
+ (c_mode != CCM && hash_alg == AES_CCM))
+ continue;
+ if ((c_mode == PON_AES_CNTR &&
+ hash_alg != PON_CRC_BIP) ||
+ (c_mode != PON_AES_CNTR &&
+ hash_alg == PON_CRC_BIP))
+ continue;
+
+ params->hash_alg = hash_alg;
+ process_variant(enc_mgr, enc_arch, dec_mgr,
+ dec_arch, params, variant_data,
+ safe_check);
+ }
+ }
+ }
+
+exit:
+ free_mb_mgr(enc_mgr);
+ free_mb_mgr(dec_mgr);
+}
+
+/* Prepares the data structure used for the test variants
+ * and sets the test configuration
+ */
+static void
+run_tests(const unsigned int safe_check)
+{
+ struct params_s params;
+ struct data *variant_data = NULL;
+ enum arch_type_e enc_arch, dec_arch;
+ const uint32_t min_size = job_sizes[RANGE_MIN];
+ const uint32_t max_size = job_sizes[RANGE_MAX];
+ const uint32_t step_size = job_sizes[RANGE_STEP];
+
+ params.num_sizes = ((max_size - min_size) / step_size) + 1;
+
+ variant_data = malloc(sizeof(struct data));
+
+ if (variant_data == NULL) {
+ fprintf(stderr, "Test data could not be allocated\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (verbose) {
+ if (min_size == max_size)
+ printf("Testing buffer size = %u bytes\n", min_size);
+ else
+ printf("Testing buffer sizes from %u to %u "
+ "in steps of %u bytes\n",
+ min_size, max_size, step_size);
+ }
+ /* Performing tests for each selected architecture */
+ for (enc_arch = ARCH_SSE; enc_arch < NUM_ARCHS; enc_arch++) {
+ if (enc_archs[enc_arch] == 0)
+ continue;
+ printf("\nEncrypting with ");
+ print_arch_info(enc_arch);
+
+ for (dec_arch = ARCH_SSE; dec_arch < NUM_ARCHS; dec_arch++) {
+ if (dec_archs[dec_arch] == 0)
+ continue;
+ printf("\tDecrypting with ");
+ print_arch_info(dec_arch);
+ run_test(enc_arch, dec_arch, &params, variant_data,
+ safe_check);
+ }
+
+ } /* end for run */
+
+ free(variant_data);
+}
+
+static void usage(void)
+{
+ fprintf(stderr, "Usage: exhaustive_test [args], "
+ "where args are zero or more\n"
+ "-h: print this message\n"
+ "-v: verbose, prints extra information\n"
+ "--enc-arch: encrypting with architecture "
+ "(AESNI_EMU/SSE/AVX/AVX2/AVX512)\n"
+ "--dec-arch: decrypting with architecture "
+ "(AESNI_EMU/SSE/AVX/AVX2/AVX512)\n"
+ "--cipher-algo: Select cipher algorithm to run on the custom "
+ "test\n"
+ "--hash-algo: Select hash algorithm to run on the custom test\n"
+ "--aead-algo: Select AEAD algorithm to run on the custom test\n"
+ "--no-avx512: Don't do AVX512\n"
+ "--no-avx2: Don't do AVX2\n"
+ "--no-avx: Don't do AVX\n"
+ "--no-sse: Don't do SSE\n"
+ "--aesni-emu: Do AESNI_EMU (disabled by default)\n"
+ "--shani-on: use SHA extensions, default: auto-detect\n"
+ "--shani-off: don't use SHA extensions\n"
+ "--job-size: size of the cipher & MAC job in bytes. "
+ "It can be:\n"
+ " - single value: test single size\n"
+ " - range: test multiple sizes with following format"
+ " min:step:max (e.g. 16:16:256)\n"
+ "--job-iter: number of tests iterations for each job size\n"
+ "--safe-check: check if keys, IVs, plaintext or tags "
+ "get cleared from MB_MGR upon job completion (off by default; "
+ "requires library compiled with SAFE_DATA)\n");
+}
+
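+/*
+ * Parses the numeric argument following argv[index] and stores it in
+ * "dst" (width given by "dst_size"). Returns the index of the consumed
+ * argument.
+ */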
+static int
+get_next_num_arg(const char * const *argv, const int index, const int argc,
+ void *dst, const size_t dst_size)
+{
+ char *endptr = NULL;
+ uint64_t val;
+
+ if (dst == NULL || argv == NULL || index < 0 || argc < 0) {
+ fprintf(stderr, "%s() internal error!\n", __func__);
+ exit(EXIT_FAILURE);
+ }
+
+ if (index >= (argc - 1)) {
+ fprintf(stderr, "'%s' requires an argument!\n", argv[index]);
+ exit(EXIT_FAILURE);
+ }
+
+#ifdef _WIN32
+ val = _strtoui64(argv[index + 1], &endptr, 0);
+#else
+ val = strtoull(argv[index + 1], &endptr, 0);
+#endif
+ if (endptr == argv[index + 1] || (endptr != NULL && *endptr != '\0')) {
+ fprintf(stderr, "Error converting '%s' as value for '%s'!\n",
+ argv[index + 1], argv[index]);
+ exit(EXIT_FAILURE);
+ }
+
+ switch (dst_size) {
+ case (sizeof(uint8_t)):
+ *((uint8_t *)dst) = (uint8_t) val;
+ break;
+ case (sizeof(uint16_t)):
+ *((uint16_t *)dst) = (uint16_t) val;
+ break;
+ case (sizeof(uint32_t)):
+ *((uint32_t *)dst) = (uint32_t) val;
+ break;
+ case (sizeof(uint64_t)):
+ *((uint64_t *)dst) = val;
+ break;
+ default:
+ fprintf(stderr, "%s() invalid dst_size %u!\n",
+ __func__, (unsigned) dst_size);
+ exit(EXIT_FAILURE);
+ break;
+ }
+
+ return index + 1;
+}
+
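+/*
+ * Detects which architectures are supported on the current CPU,
+ * based on the MB_MGR feature flags.
+ */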
+static int
+detect_arch(unsigned int arch_support[NUM_ARCHS])
+{
+ const uint64_t detect_sse =
+ IMB_FEATURE_SSE4_2 | IMB_FEATURE_CMOV | IMB_FEATURE_AESNI;
+ const uint64_t detect_avx =
+ IMB_FEATURE_AVX | IMB_FEATURE_CMOV | IMB_FEATURE_AESNI;
+ const uint64_t detect_avx2 = IMB_FEATURE_AVX2 | detect_avx;
+ const uint64_t detect_avx512 = IMB_FEATURE_AVX512_SKX | detect_avx2;
+ MB_MGR *p_mgr = NULL;
+ enum arch_type_e arch_id;
+
+ if (arch_support == NULL) {
+ fprintf(stderr, "Array not passed correctly\n");
+ return -1;
+ }
+
+ for (arch_id = ARCH_SSE; arch_id < NUM_ARCHS; arch_id++)
+ arch_support[arch_id] = 1;
+
+ p_mgr = alloc_mb_mgr(0);
+ if (p_mgr == NULL) {
+ fprintf(stderr, "Architecture detect error!\n");
+ return -1;
+ }
+
+ if ((p_mgr->features & detect_avx512) != detect_avx512)
+ arch_support[ARCH_AVX512] = 0;
+
+ if ((p_mgr->features & detect_avx2) != detect_avx2)
+ arch_support[ARCH_AVX2] = 0;
+
+ if ((p_mgr->features & detect_avx) != detect_avx)
+ arch_support[ARCH_AVX] = 0;
+
+ if ((p_mgr->features & detect_sse) != detect_sse) {
+ arch_support[ARCH_SSE] = 0;
+ arch_support[ARCH_AESNI_EMU] = 0;
+ }
+
+ free_mb_mgr(p_mgr);
+
+ return 0;
+}
+
+/*
+ * Check if a string argument is supported and, if it is, return the
+ * values associated with it.
+ */
+static const union params *
+check_string_arg(const char *param, const char *arg,
+ const struct str_value_mapping *map,
+ const unsigned int num_avail_opts)
+{
+ unsigned int i;
+
+ if (arg == NULL) {
+ fprintf(stderr, "%s requires an argument\n", param);
+ goto exit;
+ }
+
+ for (i = 0; i < num_avail_opts; i++)
+ if (strcmp(arg, map[i].name) == 0)
+ return &(map[i].values);
+
+ /* Argument is not listed in the available options */
+ fprintf(stderr, "Invalid argument for %s\n", param);
+exit:
+ fprintf(stderr, "Accepted arguments: ");
+ for (i = 0; i < num_avail_opts; i++)
+ fprintf(stderr, "%s ", map[i].name);
+ fprintf(stderr, "\n");
+
+ return NULL;
+}
+
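+/*
+ * Parses a "min:step:max" range argument; falls back to parsing a
+ * single value (min == max) if no range is given.
+ */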
+static int
+parse_range(const char * const *argv, const int index, const int argc,
+ uint32_t range_values[NUM_RANGE])
+{
+ char *token;
+ uint32_t number;
+ unsigned int i;
+
+
+ if (range_values == NULL || argv == NULL || index < 0 || argc < 0) {
+ fprintf(stderr, "%s() internal error!\n", __func__);
+ exit(EXIT_FAILURE);
+ }
+
+ if (index >= (argc - 1)) {
+ fprintf(stderr, "'%s' requires an argument!\n", argv[index]);
+ exit(EXIT_FAILURE);
+ }
+
+ char *copy_arg = strdup(argv[index + 1]);
+
+ if (copy_arg == NULL) {
+ fprintf(stderr, "%s() internal error!\n", __func__);
+ exit(EXIT_FAILURE);
+ }
+
+ errno = 0;
+ token = strtok(copy_arg, ":");
+
+ /* Try parsing range (minimum, step and maximum values) */
+ for (i = 0; i < NUM_RANGE; i++) {
+ if (token == NULL)
+ goto no_range;
+
+ number = strtoul(token, NULL, 10);
+
+ if (errno != 0)
+ goto no_range;
+
+ range_values[i] = number;
+ token = strtok(NULL, ":");
+ }
+
+ if (token != NULL)
+ goto no_range;
+
+ if (range_values[RANGE_MAX] < range_values[RANGE_MIN]) {
+ fprintf(stderr, "Maximum value of range cannot be lower "
+ "than minimum value\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (range_values[RANGE_STEP] == 0) {
+ fprintf(stderr, "Step value in range cannot be 0\n");
+ exit(EXIT_FAILURE);
+ }
+
+ goto end_range;
+no_range:
+ /* Try parsing as single value */
+ get_next_num_arg(argv, index, argc, &job_sizes[RANGE_MIN],
+ sizeof(job_sizes[RANGE_MIN]));
+
+ job_sizes[RANGE_MAX] = job_sizes[RANGE_MIN];
+
+end_range:
+ free(copy_arg);
+ return (index + 1);
+
+}
+
+int main(int argc, char *argv[])
+{
+ int i;
+ unsigned int arch_id;
+ unsigned int arch_support[NUM_ARCHS];
+ const union params *values;
+ unsigned int cipher_algo_set = 0;
+ unsigned int hash_algo_set = 0;
+ unsigned int aead_algo_set = 0;
+ unsigned int safe_check = 0;
+
+ for (i = 1; i < argc; i++)
+ if (strcmp(argv[i], "-h") == 0) {
+ usage();
+ return EXIT_SUCCESS;
+ } else if (strcmp(argv[i], "-v") == 0) {
+ verbose = 1;
+ } else if (strcmp(argv[i], "--no-avx512") == 0) {
+ enc_archs[ARCH_AVX512] = 0;
+ dec_archs[ARCH_AVX512] = 0;
+ } else if (strcmp(argv[i], "--no-avx2") == 0) {
+ enc_archs[ARCH_AVX2] = 0;
+ dec_archs[ARCH_AVX2] = 0;
+ } else if (strcmp(argv[i], "--no-avx") == 0) {
+ enc_archs[ARCH_AVX] = 0;
+ dec_archs[ARCH_AVX] = 0;
+ } else if (strcmp(argv[i], "--no-sse") == 0) {
+ enc_archs[ARCH_SSE] = 0;
+ dec_archs[ARCH_SSE] = 0;
+ } else if (strcmp(argv[i], "--aesni-emu") == 0) {
+ enc_archs[ARCH_AESNI_EMU] = 1;
+ dec_archs[ARCH_AESNI_EMU] = 1;
+ } else if (strcmp(argv[i], "--shani-on") == 0) {
+ flags &= (~IMB_FLAG_SHANI_OFF);
+ } else if (strcmp(argv[i], "--shani-off") == 0) {
+ flags |= IMB_FLAG_SHANI_OFF;
+ } else if (strcmp(argv[i], "--enc-arch") == 0) {
+ values = check_string_arg(argv[i], argv[i+1],
+ arch_str_map,
+ DIM(arch_str_map));
+ if (values == NULL)
+ return EXIT_FAILURE;
+
+ /*
+ * Disable all the other architectures
+ * and enable only the specified
+ */
+ memset(enc_archs, 0, sizeof(enc_archs));
+ enc_archs[values->arch_type] = 1;
+ i++;
+ } else if (strcmp(argv[i], "--dec-arch") == 0) {
+ values = check_string_arg(argv[i], argv[i+1],
+ arch_str_map,
+ DIM(arch_str_map));
+ if (values == NULL)
+ return EXIT_FAILURE;
+
+ /*
+ * Disable all the other architectures
+ * and enable only the specified
+ */
+ memset(dec_archs, 0, sizeof(dec_archs));
+ dec_archs[values->arch_type] = 1;
+ i++;
+ } else if (strcmp(argv[i], "--cipher-algo") == 0) {
+ values = check_string_arg(argv[i], argv[i+1],
+ cipher_algo_str_map,
+ DIM(cipher_algo_str_map));
+ if (values == NULL)
+ return EXIT_FAILURE;
+
+ custom_job_params.cipher_mode =
+ values->job_params.cipher_mode;
+ custom_job_params.key_size =
+ values->job_params.key_size;
+ custom_test = 1;
+ cipher_algo_set = 1;
+ i++;
+ } else if (strcmp(argv[i], "--hash-algo") == 0) {
+ values = check_string_arg(argv[i], argv[i+1],
+ hash_algo_str_map,
+ DIM(hash_algo_str_map));
+ if (values == NULL)
+ return EXIT_FAILURE;
+
+ custom_job_params.hash_alg =
+ values->job_params.hash_alg;
+ custom_test = 1;
+ hash_algo_set = 1;
+ i++;
+ } else if (strcmp(argv[i], "--aead-algo") == 0) {
+ values = check_string_arg(argv[i], argv[i+1],
+ aead_algo_str_map,
+ DIM(aead_algo_str_map));
+ if (values == NULL)
+ return EXIT_FAILURE;
+
+ custom_job_params.cipher_mode =
+ values->job_params.cipher_mode;
+ custom_job_params.key_size =
+ values->job_params.key_size;
+ custom_job_params.hash_alg =
+ values->job_params.hash_alg;
+ custom_test = 1;
+ aead_algo_set = 1;
+ i++;
+ } else if (strcmp(argv[i], "--job-size") == 0) {
+ /* Try parsing the argument as a range first */
+ i = parse_range((const char * const *)argv, i, argc,
+ job_sizes);
+ if (job_sizes[RANGE_MAX] > JOB_SIZE_TOP) {
+ fprintf(stderr,
+ "Invalid job size %u (max %u)\n",
+ (unsigned) job_sizes[RANGE_MAX],
+ JOB_SIZE_TOP);
+ return EXIT_FAILURE;
+ }
+ } else if (strcmp(argv[i], "--job-iter") == 0) {
+ i = get_next_num_arg((const char * const *)argv, i,
+ argc, &job_iter, sizeof(job_iter));
+ } else if (strcmp(argv[i], "--safe-check") == 0) {
+ safe_check = 1;
+ } else {
+ usage();
+ return EXIT_FAILURE;
+ }
+
+ if (custom_test) {
+ if (aead_algo_set && (cipher_algo_set || hash_algo_set)) {
+ fprintf(stderr, "AEAD algorithm cannot be used "
+ "combined with another cipher/hash "
+ "algorithm\n");
+ return EXIT_FAILURE;
+ }
+ }
+
+ if (job_sizes[RANGE_MIN] == 0) {
+ fprintf(stderr, "Buffer size cannot be 0 unless only "
+ "an AEAD algorithm is tested\n");
+ return EXIT_FAILURE;
+ }
+
+ if (detect_arch(arch_support) < 0)
+ return EXIT_FAILURE;
+
+ /* disable tests depending on instruction sets supported */
+ for (arch_id = 0; arch_id < NUM_ARCHS; arch_id++) {
+ if (arch_support[arch_id] == 0) {
+ enc_archs[arch_id] = 0;
+ dec_archs[arch_id] = 0;
+ fprintf(stderr,
+ "%s not supported. Disabling %s tests\n",
+ arch_str_map[arch_id].name,
+ arch_str_map[arch_id].name);
+ }
+ }
+
+ MB_MGR *p_mgr = alloc_mb_mgr(flags);
+ if (p_mgr == NULL) {
+ fprintf(stderr, "Error allocating MB_MGR structure!\n");
+ return EXIT_FAILURE;
+ }
+
+ if (safe_check && ((p_mgr->features & IMB_FEATURE_SAFE_DATA) == 0)) {
+ fprintf(stderr, "Library needs to be compiled with SAFE_DATA "
+ "if --safe-check is enabled\n");
+ free_mb_mgr(p_mgr);
+ return EXIT_FAILURE;
+ }
+ if (enc_archs[ARCH_SSE] || dec_archs[ARCH_SSE]) {
+ init_mb_mgr_sse(p_mgr);
+ fprintf(stderr, "%s SHA extensions (shani) for SSE arch\n",
+ (p_mgr->features & IMB_FEATURE_SHANI) ?
+ "Using" : "Not using");
+ }
+ free_mb_mgr(p_mgr);
+
+ srand(SEED);
+
+ run_tests(safe_check);
+
+ return EXIT_SUCCESS;
+}
diff --git a/src/spdk/intel-ipsec-mb/LibTestApp/kasumi_test.c b/src/spdk/intel-ipsec-mb/LibTestApp/kasumi_test.c
new file mode 100644
index 000000000..a6d95c769
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/LibTestApp/kasumi_test.c
@@ -0,0 +1,1327 @@
+/*****************************************************************************
+ Copyright (c) 2009-2019, Intel Corporation
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*****************************************************************************/
+
+/*-----------------------------------------------------------------------
+* KASUMI functional test
+*-----------------------------------------------------------------------*/
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <limits.h>
+
+#include <intel-ipsec-mb.h>
+
+#include "gcm_ctr_vectors_test.h"
+#include "kasumi_test_vectors.h"
+
+#define KASUMIIVLEN 8
+#define PAD_LEN 16
+
+cipher_test_vector_t *vecList[MAX_DATA_LEN];
+
+int kasumi_test(const enum arch_type arch, struct MB_MGR *mb_mgr);
+
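+/*
+ * Compares two buffers at bit level, starting "bitoffset" bits into
+ * each buffer and spanning "bitlength" bits.
+ * Returns 0 if the selected bits match, non-zero otherwise.
+ */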
+static int membitcmp(const uint8_t *input, const uint8_t *output,
+ const uint32_t bitoffset, const uint32_t bitlength)
+{
+ uint32_t bitresoffset;
+ uint8_t bitresMask = (uint8_t)-1 >> (bitoffset % CHAR_BIT);
+ uint32_t res;
+ uint32_t bytelengthfl = bitlength / CHAR_BIT;
+ const uint8_t *srcfl = input + bitoffset / CHAR_BIT;
+ const uint8_t *dstfl = output + bitoffset / CHAR_BIT;
+ int index = 1;
+
+ if (bitoffset % CHAR_BIT) {
+ if ((*srcfl ^ *dstfl) & bitresMask)
+ return 1;
+ else {
+ bytelengthfl--;
+ srcfl++;
+ dstfl++;
+ }
+ }
+ bitresoffset = (bitlength + bitoffset) % CHAR_BIT;
+ while (bytelengthfl--) {
+ res = *srcfl ^ *dstfl;
+ if (res) {
+ if (bytelengthfl != 1)
+ return index;
+ else if (bitresoffset < CHAR_BIT) {
+ if (res & ~((uint8_t)-1 << bitresoffset))
+ return index;
+ else
+ res = 0;
+ } else {
+ srcfl++;
+ dstfl++;
+ index++;
+ }
+ } else {
+ srcfl++;
+ dstfl++;
+ index++;
+ }
+ }
+ if (bitresoffset > CHAR_BIT)
+ res = (*srcfl ^ *dstfl) &
+ ~((uint8_t)-1 >> (bitresoffset % CHAR_BIT));
+ else if (bitresoffset == CHAR_BIT)
+ res = (*srcfl ^ *dstfl) &
+ ~((uint8_t)-1 >> (bitoffset % CHAR_BIT));
+ else
+ res = 0;
+
+ return res;
+}
+
+static inline void hexdump(const char *message, const uint8_t *ptr, int len)
+{
+ int ctr;
+
+ printf("%s:\n", message);
+ for (ctr = 0; ctr < len; ctr++) {
+ printf("0x%02X ", ptr[ctr] & 0xff);
+ if (!((ctr + 1) % 16))
+ printf("\n");
+ }
+ printf("\n");
+ printf("\n");
+}
+
+static int validate_kasumi_f8_1_block(MB_MGR *mgr)
+{
+ int numKasumiTestVectors, i = 0;
+ uint8_t *pKey = NULL;
+ int keyLen = MAX_KEY_LEN;
+ uint8_t srcBuff[MAX_DATA_LEN];
+ uint8_t dstBuff[MAX_DATA_LEN];
+ uint64_t IV;
+ kasumi_key_sched_t *pKeySched = NULL;
+ cipher_test_vector_t *kasumi_test_vectors = NULL;
+
+ kasumi_test_vectors = kasumi_f8_vectors;
+ numKasumiTestVectors = numCipherTestVectors[0];
+
+ if (!numKasumiTestVectors) {
+ printf("No Kasumi vectors found !\n");
+ return 1;
+ }
+ pKey = malloc(keyLen);
+ if (!pKey) {
+ printf("malloc(pKey):failed !\n");
+ return 1;
+ }
+ pKeySched = malloc(IMB_KASUMI_KEY_SCHED_SIZE(mgr));
+ if (!pKeySched) {
+ printf("malloc(IMB_KASUMI_KEY_SCHED_SIZE()): failed !\n");
+ free(pKey);
+ return 1;
+ }
+
+	/* Copy the data for the Kasumi_f8 1 packet version */
+ for (i = 0; i < numKasumiTestVectors; i++) {
+ memcpy(pKey, kasumi_test_vectors[i].key,
+ kasumi_test_vectors[i].keyLenInBytes);
+ memcpy(srcBuff, kasumi_test_vectors[i].plaintext,
+ kasumi_test_vectors[i].dataLenInBytes);
+ memcpy(dstBuff, kasumi_test_vectors[i].ciphertext,
+ kasumi_test_vectors[i].dataLenInBytes);
+ memcpy((uint8_t *)&IV, kasumi_test_vectors[i].iv,
+ kasumi_test_vectors[i].ivLenInBytes);
+
+ /*setup the keysched to be used*/
+ if (IMB_KASUMI_INIT_F8_KEY_SCHED(mgr, pKey, pKeySched)) {
+ printf("IMB_KASUMI_INIT_F8_KEY_SCHED() error\n");
+ free(pKey);
+ free(pKeySched);
+ return 1;
+ }
+
+		/* Validate Encrypt */
+ IMB_KASUMI_F8_1_BUFFER(mgr, pKeySched, IV, srcBuff, srcBuff,
+ kasumi_test_vectors[i].dataLenInBytes);
+
+		/* Check the ciphertext in the vector against the
+		 * encrypted plaintext */
+ if (memcmp(srcBuff, dstBuff,
+ kasumi_test_vectors[i].dataLenInBytes) != 0) {
+ printf("kasumi_f8_1_block(Enc) vector:%d\n", i);
+ hexdump("Actual:", srcBuff,
+ kasumi_test_vectors[i].dataLenInBytes);
+ hexdump("Expected:", dstBuff,
+ kasumi_test_vectors[i].dataLenInBytes);
+ free(pKey);
+ free(pKeySched);
+ return 1;
+ }
+
+ memcpy(dstBuff, kasumi_test_vectors[i].plaintext,
+ kasumi_test_vectors[i].dataLenInBytes);
+
+		/* Validate Decrypt */
+ IMB_KASUMI_F8_1_BUFFER(mgr, pKeySched, IV, srcBuff, srcBuff,
+ kasumi_test_vectors[i].dataLenInBytes);
+
+ if (memcmp(srcBuff, dstBuff,
+ kasumi_test_vectors[i].dataLenInBytes) != 0) {
+ printf("kasumi_f8_1_block(Dec) vector:%d\n", i);
+ hexdump("Actual:", srcBuff,
+ kasumi_test_vectors[i].dataLenInBytes);
+ hexdump("Expected:", dstBuff,
+ kasumi_test_vectors[i].dataLenInBytes);
+ free(pKey);
+ free(pKeySched);
+ return 1;
+ }
+ }
+
+ free(pKey);
+ free(pKeySched);
+ printf("[%s]: PASS, for %d single buffers.\n", __FUNCTION__, i);
+ return 0;
+}
+
+/* Shift right a buffer by "offset" bits, "offset" < 8 */
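+/*
+ * Example, shifting a 16-bit buffer right by 4 bits (0xff padding is
+ * shifted in from the left): {0xAB, 0xCD, 0xFF} becomes {0xFA, 0xBC, 0xDF}
+ */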
+static void buffer_shift_right(uint8_t *buffer, uint32_t length, uint8_t offset)
+{
+ uint8_t curr_byte, prev_byte;
+ const uint32_t length_in_bytes = (length + offset + 7) / CHAR_BIT;
+ const uint8_t lower_byte_mask = (1 << offset) - 1;
+ uint32_t i;
+
+ /* Padding */
+ prev_byte = 0xff;
+
+ for (i = 0; i < length_in_bytes; i++) {
+ curr_byte = buffer[i];
+ buffer[i] = ((prev_byte & lower_byte_mask) << (8 - offset)) |
+ (curr_byte >> offset);
+ prev_byte = curr_byte;
+ }
+}
+
+static void copy_test_bufs(uint8_t *plainBuff, uint8_t *wrkBuff,
+ uint8_t *ciphBuff, const uint8_t *src_test,
+ const uint8_t *dst_test, const uint32_t byte_len)
+{
+ /* Reset all buffers to -1 (for padding check) and copy test vectors */
+ memset(wrkBuff, -1, (byte_len + PAD_LEN * 2));
+ memset(plainBuff, -1, (byte_len + PAD_LEN * 2));
+ memset(ciphBuff, -1, (byte_len + PAD_LEN * 2));
+ memcpy(plainBuff + PAD_LEN, src_test, byte_len);
+ memcpy(ciphBuff + PAD_LEN, dst_test, byte_len);
+}
+
+static int validate_kasumi_f8_1_bitblock(MB_MGR *mgr)
+{
+ int numKasumiTestVectors, i = 0;
+ kasumi_key_sched_t *pKeySched = NULL;
+ const cipherbit_test_vector_t *kasumi_bit_vectors = NULL;
+
+ kasumi_bit_vectors = kasumi_f8_bitvectors;
+ numKasumiTestVectors = numCipherTestVectors[1];
+
+ uint8_t *pKey = NULL;
+ int keyLen = MAX_KEY_LEN;
+ uint8_t plainBuff[MAX_DATA_LEN];
+ uint8_t ciphBuff[MAX_DATA_LEN];
+ uint8_t wrkBuff[MAX_DATA_LEN];
+ /* Adding extra byte for offset tests (shifting 4 bits) */
+ uint8_t padding[PAD_LEN + 1];
+ uint64_t IV;
+ int ret = 1;
+
+ memset(padding, -1, PAD_LEN + 1);
+
+ if (!numKasumiTestVectors) {
+ printf("No Kasumi vectors found !\n");
+ return 1;
+ }
+ pKey = malloc(keyLen);
+ if (!pKey) {
+ printf("malloc(pKey):failed !\n");
+ return 1;
+ }
+ pKeySched = malloc(IMB_KASUMI_KEY_SCHED_SIZE(mgr));
+ if (!pKeySched) {
+ printf("malloc(IMB_KASUMI_KEY_SCHED_SIZE()): failed !\n");
+ free(pKey);
+ return 1;
+ }
+
+	/* Copy the data for the Kasumi_f8 1 packet version */
+ for (i = 0; i < numKasumiTestVectors; i++) {
+ uint8_t *wrkBufBefPad = wrkBuff;
+ uint8_t *wrkBufAftPad = wrkBuff + PAD_LEN;
+ uint8_t *plainBufBefPad = plainBuff;
+ uint8_t *plainBufAftPad = plainBuff + PAD_LEN;
+ uint8_t *ciphBufBefPad = ciphBuff;
+ uint8_t *ciphBufAftPad = ciphBuff + PAD_LEN;
+
+ const uint32_t byte_len =
+ (kasumi_bit_vectors[i].LenInBits + 7) / CHAR_BIT;
+ const uint32_t bit_len = kasumi_bit_vectors[i].LenInBits;
+
+ memcpy(pKey, kasumi_bit_vectors[i].key,
+ kasumi_bit_vectors[i].keyLenInBytes);
+ memcpy((uint8_t *)&IV, kasumi_bit_vectors[i].iv,
+ kasumi_bit_vectors[i].ivLenInBytes);
+ copy_test_bufs(plainBufBefPad, wrkBufBefPad, ciphBufBefPad,
+ kasumi_bit_vectors[i].plaintext,
+ kasumi_bit_vectors[i].ciphertext,
+ byte_len);
+
+ /* Setup the keysched to be used */
+ if (IMB_KASUMI_INIT_F8_KEY_SCHED(mgr, pKey, pKeySched)) {
+ printf("IMB_KASUMI_INIT_F8_KEY_SCHED() error\n");
+ goto end;
+ }
+
+ /* Validate Encrypt */
+ IMB_KASUMI_F8_1_BUFFER_BIT(mgr, pKeySched, IV, plainBufAftPad,
+ wrkBufAftPad, bit_len, 0);
+
+ /* Check the ciphertext in the vector against the
+ * encrypted plaintext */
+ if (membitcmp(wrkBufAftPad, ciphBufAftPad, 0, bit_len) != 0) {
+ printf("kasumi_f8_1_block(Enc) offset=0 vector:%d\n",
+ i);
+ hexdump("Actual:", wrkBufAftPad, byte_len);
+ hexdump("Expected:", ciphBufAftPad, byte_len);
+ goto end;
+ }
+ /* Check that data not to be ciphered was not overwritten */
+ if (memcmp(wrkBufBefPad, ciphBufBefPad, PAD_LEN)) {
+ printf("overwrite head\n");
+ hexdump("Head", wrkBufBefPad, PAD_LEN);
+ goto end;
+ }
+ if (memcmp(wrkBufAftPad + byte_len - 1,
+ ciphBufAftPad + byte_len - 1,
+ PAD_LEN + 1)) {
+ printf("overwrite tail\n");
+ hexdump("Tail", wrkBufAftPad + byte_len - 1,
+ PAD_LEN + 1);
+ goto end;
+ }
+ /* Validate Decrypt */
+ IMB_KASUMI_F8_1_BUFFER_BIT(mgr, pKeySched, IV, ciphBufAftPad,
+ wrkBufAftPad, bit_len, 0);
+
+ if (membitcmp(wrkBufAftPad, plainBufAftPad, 0,
+ kasumi_bit_vectors[i].LenInBits) != 0) {
+ printf("kasumi_f8_1_block(Dec) offset=0 vector:%d\n",
+ i);
+ hexdump("Actual:", wrkBufAftPad, byte_len);
+ hexdump("Expected:", plainBufAftPad, byte_len);
+ goto end;
+ }
+ copy_test_bufs(plainBufBefPad, wrkBufBefPad, ciphBufBefPad,
+ kasumi_bit_vectors[i].plaintext,
+ kasumi_bit_vectors[i].ciphertext,
+ byte_len);
+ buffer_shift_right(plainBufBefPad, (byte_len + PAD_LEN * 2) * 8,
+ 4);
+ buffer_shift_right(ciphBufBefPad, (byte_len + PAD_LEN * 2) * 8,
+ 4);
+
+ /* Validate Encrypt */
+ IMB_KASUMI_F8_1_BUFFER_BIT(mgr, pKeySched, IV, plainBufAftPad,
+ wrkBufAftPad, bit_len, 4);
+
+ /* Check the ciphertext in the vector against the
+ * encrypted plaintext */
+ if (membitcmp(wrkBufAftPad, ciphBufAftPad, 4, bit_len) != 0) {
+ printf("kasumi_f8_1_block(Enc) offset=4 vector:%d\n",
+ i);
+ hexdump("Actual:", wrkBufAftPad, byte_len);
+ hexdump("Expected:", ciphBufAftPad, byte_len);
+ goto end;
+ }
+ /*Validate Decrypt*/
+ IMB_KASUMI_F8_1_BUFFER_BIT(mgr, pKeySched, IV, ciphBufAftPad,
+ wrkBufAftPad, bit_len, 4);
+
+		if (membitcmp(wrkBufAftPad, plainBufAftPad, 4,
+			      bit_len) != 0) {
+ printf("kasumi_f8_1_block(Dec) offset=4 vector:%d\n",
+ i);
+ hexdump("Actual:", wrkBufAftPad, byte_len);
+ hexdump("Expected:", plainBufAftPad, byte_len);
+ goto end;
+ }
+ }
+
+ ret = 0;
+ printf("[%s]: PASS, for %d single buffers.\n", __FUNCTION__, i);
+end:
+ free(pKey);
+ free(pKeySched);
+ return ret;
+}
+
+static int validate_kasumi_f8_1_bitblock_offset(MB_MGR *mgr)
+{
+ int numKasumiTestVectors, i = 0;
+ kasumi_key_sched_t *pKeySched = NULL;
+ const cipherbit_test_linear_vector_t *kasumi_bit_vectors = NULL;
+
+ kasumi_bit_vectors = &kasumi_f8_linear_bitvectors;
+ numKasumiTestVectors = numCipherTestVectors[1];
+
+ uint8_t *pKey = NULL;
+ int keyLen = MAX_KEY_LEN;
+ uint8_t srcBuff[MAX_DATA_LEN];
+ uint8_t dstBuff[MAX_DATA_LEN];
+ uint64_t IV;
+ uint32_t bufferbytesize = 0;
+ uint8_t wrkbuf[MAX_DATA_LEN];
+ uint32_t offset = 0, byteoffset = 0, ret;
+
+ if (!numKasumiTestVectors) {
+ printf("No Kasumi vectors found !\n");
+ return 1;
+ }
+ pKey = malloc(keyLen);
+ if (!pKey) {
+ printf("malloc(pKey):failed !\n");
+ return 1;
+ }
+ pKeySched = malloc(IMB_KASUMI_KEY_SCHED_SIZE(mgr));
+ if (!pKeySched) {
+ printf("malloc(IMB_KASUMI_KEY_SCHED_SIZE()): failed !\n");
+ free(pKey);
+ return 1;
+ }
+ for (i = 0; i < numKasumiTestVectors; i++)
+ bufferbytesize += kasumi_bit_vectors->LenInBits[i];
+
+ bufferbytesize = (bufferbytesize + 7) / CHAR_BIT;
+ memcpy(srcBuff, kasumi_bit_vectors->plaintext, bufferbytesize);
+ memcpy(dstBuff, kasumi_bit_vectors->ciphertext, bufferbytesize);
+
+	/* Copy the data for the Kasumi_f8 1 packet version */
+ for (i = 0, offset = 0, byteoffset = 0; i < numKasumiTestVectors; i++) {
+
+ memcpy(pKey, &kasumi_bit_vectors->key[i][0],
+ kasumi_bit_vectors->keyLenInBytes);
+ memcpy((uint8_t *)&IV, &kasumi_bit_vectors->iv[i][0],
+ kasumi_bit_vectors->ivLenInBytes);
+
+ /* Setup the keysched to be used */
+ if (IMB_KASUMI_INIT_F8_KEY_SCHED(mgr, pKey, pKeySched)) {
+ printf("IMB_KASUMI_INIT_F8_KEY_SCHED() error\n");
+ free(pKey);
+ free(pKeySched);
+ return 1;
+ }
+
+ /* Validate Encrypt */
+ IMB_KASUMI_F8_1_BUFFER_BIT(mgr, pKeySched, IV, srcBuff, wrkbuf,
+ kasumi_bit_vectors->LenInBits[i],
+ offset);
+
+		/* Check the ciphertext in the vector against the
+		 * encrypted plaintext */
+ ret = membitcmp(wrkbuf, dstBuff, offset,
+ kasumi_bit_vectors->LenInBits[i]);
+ if (ret != 0) {
+ printf("kasumi_f8_1_block_linear(Enc) vector:%d, "
+ "index:%d\n",
+ i, ret);
+ hexdump("Actual:", &wrkbuf[byteoffset],
+ (kasumi_bit_vectors->LenInBits[i] + 7) /
+ CHAR_BIT);
+ hexdump("Expected:", &dstBuff[byteoffset],
+ (kasumi_bit_vectors->LenInBits[i] + 7) /
+ CHAR_BIT);
+ free(pKey);
+ free(pKeySched);
+ return 1;
+ }
+ offset += kasumi_bit_vectors->LenInBits[i];
+ byteoffset = offset / CHAR_BIT;
+ }
+ for (i = 0, offset = 0, byteoffset = 0; i < numKasumiTestVectors; i++) {
+ memcpy(pKey, &kasumi_bit_vectors->key[i][0],
+ kasumi_bit_vectors->keyLenInBytes);
+ memcpy((uint8_t *)&IV, &kasumi_bit_vectors->iv[i][0],
+ kasumi_bit_vectors->ivLenInBytes);
+
+ /* Setup the keysched to be used */
+ if (IMB_KASUMI_INIT_F8_KEY_SCHED(mgr, pKey, pKeySched)) {
+ printf("IMB_KASUMI_INIT_F8_KEY_SCHED() error\n");
+ free(pKey);
+ free(pKeySched);
+ return 1;
+ }
+
+ /* Validate Decrypt */
+ IMB_KASUMI_F8_1_BUFFER_BIT(mgr, pKeySched, IV, dstBuff, wrkbuf,
+ kasumi_bit_vectors->LenInBits[i],
+ offset);
+
+ ret = membitcmp(wrkbuf, srcBuff, offset,
+ kasumi_bit_vectors->LenInBits[i]);
+ if (ret != 0) {
+ printf("kasumi_f8_1_block_linear(Dec) "
+ "vector:%d,index:%d\n",
+ i, ret);
+ hexdump("Actual:", &wrkbuf[byteoffset],
+ (kasumi_bit_vectors->LenInBits[i] + 7) /
+ CHAR_BIT);
+ hexdump("Expected:", &srcBuff[byteoffset],
+ (kasumi_bit_vectors->LenInBits[i] + 7) /
+ CHAR_BIT);
+ free(pKey);
+ free(pKeySched);
+ return 1;
+ }
+ offset += kasumi_bit_vectors->LenInBits[i];
+ byteoffset = offset / CHAR_BIT;
+ }
+
+ free(pKey);
+ free(pKeySched);
+ printf("[%s]: PASS, for %d single buffers.\n", __FUNCTION__, i);
+ return 0;
+}
+
+static int validate_kasumi_f8_2_blocks(MB_MGR *mgr)
+{
+
+ int numKasumiTestVectors, i = 0, numPackets = 2;
+ const cipher_test_vector_t *kasumi_test_vectors = NULL;
+ kasumi_key_sched_t *keySched = NULL;
+
+ kasumi_test_vectors = cipher_test_vectors[0];
+ numKasumiTestVectors = numCipherTestVectors[0];
+
+ uint8_t *key = NULL;
+ int keyLen = MAX_KEY_LEN;
+ uint64_t iv[3];
+ uint8_t *srcBuff[3] = {NULL};
+ uint8_t *dstBuff[3] = {NULL};
+ uint32_t packetLen[3];
+ int ret = 1;
+
+ if (!numKasumiTestVectors) {
+ printf("No Kasumi vectors found !\n");
+ goto exit;
+ }
+
+ key = malloc(keyLen);
+ if (!key) {
+ printf("malloc(key):failed !\n");
+ goto exit;
+ }
+
+ keySched = malloc(IMB_KASUMI_KEY_SCHED_SIZE(mgr));
+ if (!keySched) {
+ printf("malloc(IMB_KASUMI_KEY_SCHED_SIZE()): failed !\n");
+ goto exit;
+ }
+
+ /* Create test Data for num Packets + 1 */
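+ /* The extra buffer is used later for the non-adjacent buffer pair test */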
+ for (i = 0; i < numPackets + 1; i++) {
+ packetLen[i] = kasumi_test_vectors[i].dataLenInBytes;
+ srcBuff[i] = malloc(packetLen[i]);
+ if (!srcBuff[i]) {
+ printf("malloc(srcBuff[%u]:failed !\n", i);
+ goto exit;
+ }
+ dstBuff[i] = malloc(packetLen[i]);
+ if (!dstBuff[i]) {
+ printf("malloc(dstBuff[%u]:failed !\n", i);
+ goto exit;
+ }
+
+ memcpy(key, kasumi_test_vectors[i].key,
+ kasumi_test_vectors[i].keyLenInBytes);
+
+ memcpy(srcBuff[i], kasumi_test_vectors[i].plaintext,
+ kasumi_test_vectors[i].dataLenInBytes);
+
+ memcpy(dstBuff[i], kasumi_test_vectors[i].ciphertext,
+ kasumi_test_vectors[i].dataLenInBytes);
+
+ memcpy(&iv[i], kasumi_test_vectors[i].iv,
+ kasumi_test_vectors[i].ivLenInBytes);
+ }
+ /* Only 1 key is needed for kasumi 2 blocks */
+ if (IMB_KASUMI_INIT_F8_KEY_SCHED(mgr, key, keySched)) {
+ printf("IMB_KASUMI_INIT_F8_KEY_SCHED() error\n");
+ goto exit;
+ }
+ /* Test the encrypt */
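+ /* In-place operation: each source pointer doubles as its own destination */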
+ IMB_KASUMI_F8_2_BUFFER(mgr, keySched, iv[0], iv[1], srcBuff[0],
+ srcBuff[0], packetLen[0], srcBuff[1], srcBuff[1],
+ packetLen[1]);
+
+ /* Compare the ciphertext with the encrypted plaintext */
+ for (i = 0; i < numPackets; i++) {
+ if (memcmp(srcBuff[i], kasumi_test_vectors[i].ciphertext,
+ packetLen[i]) != 0) {
+ printf("kasumi_f8_2_buffer(Enc) vector:%d\n", i);
+ hexdump("Actual:", srcBuff[i], packetLen[i]);
+ hexdump("Expected:", kasumi_test_vectors[i].ciphertext,
+ packetLen[i]);
+ goto exit;
+ }
+ }
+ for (i = 0; i < numPackets; i++)
+ memcpy(srcBuff[i], kasumi_test_vectors[i].plaintext,
+ kasumi_test_vectors[i].dataLenInBytes);
+
+ /* Test the encrypt reverse order */
+ IMB_KASUMI_F8_2_BUFFER(mgr, keySched, iv[0], iv[1], srcBuff[1],
+ srcBuff[1], packetLen[1], srcBuff[0], srcBuff[0],
+ packetLen[0]);
+
+ /* Compare the ciphertext with the encrypted plaintext */
+ for (i = 0; i < numPackets; i++) {
+ if (memcmp(srcBuff[i], kasumi_test_vectors[i].ciphertext,
+ packetLen[i]) != 0) {
+ printf("kasumi_f8_2_buffer(Enc) vector:%d\n", i);
+ hexdump("Actual:", srcBuff[i], packetLen[i]);
+ hexdump("Expected:", kasumi_test_vectors[i].ciphertext,
+ packetLen[i]);
+ goto exit;
+ }
+ }
+ for (i = 0; i < numPackets + 1; i++)
+ memcpy(srcBuff[i], kasumi_test_vectors[i].plaintext,
+ kasumi_test_vectors[i].dataLenInBytes);
+
+ /* Test the encrypt with a non-adjacent buffer pair (buffers 0 and 2) */
+ IMB_KASUMI_F8_2_BUFFER(mgr, keySched, iv[0], iv[1], srcBuff[0],
+ srcBuff[0], packetLen[0], srcBuff[2], srcBuff[2],
+ packetLen[2]);
+
+ /* Compare the ciphertext with the encrypted plaintext*/
+ for (i = 0; i < numPackets + 1; i++) {
+ if (i == 1)
+ continue;
+ if (memcmp(srcBuff[i], kasumi_test_vectors[i].ciphertext,
+ packetLen[i]) != 0) {
+ printf("kasumi_f8_2_buffer(Enc) vector:%d\n", i);
+ hexdump("Actual:", srcBuff[i], packetLen[i]);
+ hexdump("Expected:", kasumi_test_vectors[i].ciphertext,
+ packetLen[i]);
+ goto exit;
+ }
+ }
+
+ /* Test the decrypt */
+ IMB_KASUMI_F8_2_BUFFER(mgr, keySched, iv[0], iv[1], dstBuff[0],
+ dstBuff[0], packetLen[0], dstBuff[1], dstBuff[1],
+ packetLen[1]);
+
+ /* Compare the plaintext with the decrypted ciphertext */
+ for (i = 0; i < numPackets; i++) {
+ if (memcmp(dstBuff[i], kasumi_test_vectors[i].plaintext,
+ packetLen[i]) != 0) {
+ printf("kasumi_f8_2_buffer(Dec) vector:%d\n", i);
+ hexdump("Actual:", dstBuff[i], packetLen[i]);
+ hexdump("Expected:", kasumi_test_vectors[i].plaintext,
+ packetLen[i]);
+ goto exit;
+ }
+ }
+ /* Test the decrypt reverse order */
+ for (i = 0; i < numPackets; i++)
+ memcpy(dstBuff[i], kasumi_test_vectors[i].ciphertext,
+ kasumi_test_vectors[i].dataLenInBytes);
+
+ IMB_KASUMI_F8_2_BUFFER(mgr, keySched, iv[0], iv[1], dstBuff[1],
+ dstBuff[1], packetLen[1], dstBuff[0], dstBuff[0],
+ packetLen[0]);
+
+ /* Compare the plaintext with the decrypted ciphertext */
+ for (i = 0; i < numPackets; i++) {
+ if (memcmp(dstBuff[i], kasumi_test_vectors[i].plaintext,
+ packetLen[i]) != 0) {
+ printf("kasumi_f8_2_buffer(Dec) vector:%d\n", i);
+ hexdump("Actual:", dstBuff[i], packetLen[i]);
+ hexdump("Expected:", kasumi_test_vectors[i].plaintext,
+ packetLen[i]);
+ goto exit;
+ }
+ }
+
+ ret = 0;
+
+ printf("[%s]: PASS.\n", __FUNCTION__);
+exit:
+ free(key);
+ free(keySched);
+ for (i = 0; i < numPackets + 1; i++) {
+ free(srcBuff[i]);
+ free(dstBuff[i]);
+ }
+ return ret;
+}
+
+static int validate_kasumi_f8_3_blocks(MB_MGR *mgr)
+{
+ int numKasumiTestVectors, i = 0, numPackets = 3;
+ const cipher_test_vector_t *kasumi_test_vectors = NULL;
+ kasumi_key_sched_t *keySched = NULL;
+
+ kasumi_test_vectors = cipher_test_vectors[0];
+ numKasumiTestVectors = numCipherTestVectors[0];
+
+ uint8_t *key = NULL;
+ int keyLen = MAX_KEY_LEN;
+ uint64_t iv[3];
+ uint8_t *srcBuff[3] = {NULL};
+ uint8_t *dstBuff[3] = {NULL};
+ uint32_t packetLen;
+ int ret = 1;
+
+ if (!numKasumiTestVectors) {
+ printf("No Kasumi vectors found !\n");
+ goto exit;
+ }
+
+ key = malloc(keyLen);
+ if (!key) {
+ printf("malloc(key):failed !\n");
+ goto exit;
+ }
+
+ keySched = malloc(IMB_KASUMI_KEY_SCHED_SIZE(mgr));
+ if (!keySched) {
+ printf("malloc(IMB_KASUMI_KEY_SCHED_SIZE()): failed !\n");
+ goto exit;
+ }
+
+ packetLen = kasumi_test_vectors[0].dataLenInBytes;
+
+ /* Create test Data for num Packets */
+ for (i = 0; i < numPackets; i++) {
+ srcBuff[i] = malloc(packetLen);
+ if (!srcBuff[i]) {
+ printf("malloc(srcBuff[%u]:failed !\n", i);
+ goto exit;
+ }
+ dstBuff[i] = malloc(packetLen);
+ if (!dstBuff[i]) {
+ printf("malloc(dstBuff[%u]:failed !\n", i);
+ goto exit;
+ }
+
+ memcpy(key, kasumi_test_vectors[0].key,
+ kasumi_test_vectors[0].keyLenInBytes);
+
+ memcpy(srcBuff[i], kasumi_test_vectors[0].plaintext,
+ kasumi_test_vectors[0].dataLenInBytes);
+
+ memcpy(dstBuff[i], kasumi_test_vectors[0].ciphertext,
+ kasumi_test_vectors[0].dataLenInBytes);
+
+ memcpy(&iv[i], kasumi_test_vectors[0].iv,
+ kasumi_test_vectors[0].ivLenInBytes);
+ }
+
+ /* Only 1 key is needed for kasumi 3 blocks */
+ if (IMB_KASUMI_INIT_F8_KEY_SCHED(mgr, key, keySched)) {
+ printf("IMB_KASUMI_INIT_F8_KEY_SCHED() error\n");
+ goto exit;
+ }
+
+ /* Test the encrypt */
+ IMB_KASUMI_F8_3_BUFFER(mgr, keySched, iv[0], iv[1], iv[2], srcBuff[0],
+ srcBuff[0], srcBuff[1], srcBuff[1], srcBuff[2],
+ srcBuff[2], packetLen);
+
+ /* Compare the ciphertext with the encrypted plaintext */
+ for (i = 0; i < numPackets; i++) {
+ if (memcmp(srcBuff[i], kasumi_test_vectors[0].ciphertext,
+ packetLen) != 0) {
+ printf("kasumi_f8_3_buffer(Enc) vector:%d\n", i);
+ hexdump("Actual:", srcBuff[i], packetLen);
+ hexdump("Expected:", kasumi_test_vectors[0].ciphertext,
+ packetLen);
+ goto exit;
+ }
+ }
+
+ /* Test the decrypt */
+ IMB_KASUMI_F8_3_BUFFER(mgr, keySched, iv[0], iv[1], iv[2], dstBuff[0],
+ dstBuff[0], dstBuff[1], dstBuff[1], dstBuff[2],
+ dstBuff[2], packetLen);
+
+ /* Compare the plaintext with the decrypted ciphertext */
+ for (i = 0; i < numPackets; i++) {
+ if (memcmp(dstBuff[i], kasumi_test_vectors[0].plaintext,
+ packetLen) != 0) {
+ printf("kasumi_f8_3_buffer(Dec) vector:%d\n", i);
+ hexdump("Actual:", dstBuff[i], packetLen);
+ hexdump("Expected:", kasumi_test_vectors[0].plaintext,
+ packetLen);
+ goto exit;
+ }
+ }
+
+ ret = 0;
+ printf("[%s]: PASS.\n", __FUNCTION__);
+exit:
+ free(key);
+ free(keySched);
+ for (i = 0; i < numPackets; i++) {
+ free(srcBuff[i]);
+ free(dstBuff[i]);
+ }
+ return ret;
+}
+
+static int validate_kasumi_f8_4_blocks(MB_MGR *mgr)
+{
+ int numKasumiTestVectors, i = 0, numPackets = 4;
+ const cipher_test_vector_t *kasumi_test_vectors = NULL;
+ kasumi_key_sched_t *keySched = NULL;
+
+ kasumi_test_vectors = cipher_test_vectors[0];
+ numKasumiTestVectors = numCipherTestVectors[0];
+
+ uint8_t *key = NULL;
+ int keyLen = MAX_KEY_LEN;
+ uint64_t iv[4];
+ uint8_t *srcBuff[4] = {NULL};
+ uint8_t *dstBuff[4] = {NULL};
+ uint32_t packetLen;
+ int ret = 1;
+
+ if (!numKasumiTestVectors) {
+ printf("No Kasumi vectors found !\n");
+ goto exit;
+ }
+
+ key = malloc(keyLen);
+ if (!key) {
+ printf("malloc(key):failed !\n");
+ goto exit;
+ }
+
+ keySched = malloc(IMB_KASUMI_KEY_SCHED_SIZE(mgr));
+ if (!keySched) {
+ printf("malloc(IMB_KASUMI_KEY_SCHED_SIZE()): failed !\n");
+ goto exit;
+ }
+
+ packetLen = kasumi_test_vectors[0].dataLenInBytes;
+
+ /* Create test Data for num Packets */
+ for (i = 0; i < numPackets; i++) {
+ srcBuff[i] = malloc(packetLen);
+ if (!srcBuff[i]) {
+ printf("malloc(srcBuff[%u]:failed !\n", i);
+ goto exit;
+ }
+ dstBuff[i] = malloc(packetLen);
+ if (!dstBuff[i]) {
+ printf("malloc(dstBuff[%u]:failed !\n", i);
+ goto exit;
+ }
+
+ memcpy(key, kasumi_test_vectors[0].key,
+ kasumi_test_vectors[0].keyLenInBytes);
+
+ memcpy(srcBuff[i], kasumi_test_vectors[0].plaintext,
+ kasumi_test_vectors[0].dataLenInBytes);
+
+ memcpy(dstBuff[i], kasumi_test_vectors[0].ciphertext,
+ kasumi_test_vectors[0].dataLenInBytes);
+
+ memcpy(&iv[i], kasumi_test_vectors[0].iv,
+ kasumi_test_vectors[0].ivLenInBytes);
+ }
+
+ /* Only 1 key is needed for kasumi 4 blocks */
+ if (IMB_KASUMI_INIT_F8_KEY_SCHED(mgr, key, keySched)) {
+ printf("IMB_KASUMI_INIT_F8_KEY_SCHED() error\n");
+ goto exit;
+ }
+
+ /* Test the encrypt */
+ IMB_KASUMI_F8_4_BUFFER(mgr, keySched, iv[0], iv[1], iv[2], iv[3],
+ srcBuff[0], srcBuff[0], srcBuff[1], srcBuff[1],
+ srcBuff[2], srcBuff[2], srcBuff[3], srcBuff[3],
+ packetLen);
+
+ /* Compare the ciphertext with the encrypted plaintext */
+ for (i = 0; i < numPackets; i++) {
+ if (memcmp(srcBuff[i], kasumi_test_vectors[0].ciphertext,
+ packetLen) != 0) {
+ printf("kasumi_f8_4_buffer(Enc) vector:%d\n", i);
+ hexdump("Actual:", srcBuff[i], packetLen);
+ hexdump("Expected:", kasumi_test_vectors[0].ciphertext,
+ packetLen);
+ goto exit;
+ }
+ }
+
+ /* Test the decrypt */
+ IMB_KASUMI_F8_4_BUFFER(mgr, keySched, iv[0], iv[1], iv[2], iv[3],
+ dstBuff[0], dstBuff[0], dstBuff[1], dstBuff[1],
+ dstBuff[2], dstBuff[2], dstBuff[3], dstBuff[3],
+ packetLen);
+
+ /* Compare the plaintext with the decrypted ciphertext */
+ for (i = 0; i < numPackets; i++) {
+ if (memcmp(dstBuff[i], kasumi_test_vectors[0].plaintext,
+ packetLen) != 0) {
+ printf("kasumi_f8_4_buffer(Dec) vector:%d\n", i);
+ hexdump("Actual:", dstBuff[i], packetLen);
+ hexdump("Expected:", kasumi_test_vectors[0].plaintext,
+ packetLen);
+ goto exit;
+ }
+ }
+
+ ret = 0;
+ printf("[%s]: PASS.\n", __FUNCTION__);
+exit:
+ free(key);
+ free(keySched);
+ for (i = 0; i < numPackets; i++) {
+ free(srcBuff[i]);
+ free(dstBuff[i]);
+ }
+ return ret;
+}
+
+static int validate_kasumi_f8_n_blocks(MB_MGR *mgr)
+{
+ kasumi_key_sched_t *pKeySched = NULL;
+ uint64_t IV[NUM_SUPPORTED_BUFFERS][NUM_SUPPORTED_BUFFERS];
+ uint32_t buffLenInBytes[NUM_SUPPORTED_BUFFERS];
+ uint8_t *srcBuff[NUM_SUPPORTED_BUFFERS][NUM_SUPPORTED_BUFFERS];
+ uint8_t *dstBuff[NUM_SUPPORTED_BUFFERS][NUM_SUPPORTED_BUFFERS];
+ uint8_t key[KASUMI_KEY_SIZE];
+ int i = 0, j = 0;
+ int ret = -1;
+
+ /* Only one key is used */
+ memset(key, 0xAA, KASUMI_KEY_SIZE);
+
+ pKeySched = malloc(IMB_KASUMI_KEY_SCHED_SIZE(mgr));
+ if (!pKeySched) {
+ printf("malloc(IMB_KASUMI_KEY_SCHED_SIZE()): failed !\n");
+ goto exit;
+ }
+
+ if (IMB_KASUMI_INIT_F8_KEY_SCHED(mgr, key, pKeySched)) {
+ printf("IMB_KASUMI_INIT_F8_KEY_SCHED() error\n");
+ goto exit;
+ }
+
+ /* Allocate memory for the buffers and fill them with data */
+ for (i = 0; i < NUM_SUPPORTED_BUFFERS; i++) {
+ for (j = 0; j < NUM_SUPPORTED_BUFFERS; j++) {
+ srcBuff[i][j] = malloc(MAX_DATA_LEN);
+ if (!srcBuff[i][j]) {
+ printf("malloc(srcBuff[%u][%u]:failed !\n",
+ i, j);
+ goto exit;
+ }
+ dstBuff[i][j] = malloc(MAX_DATA_LEN);
+ if (!dstBuff[i][j]) {
+ printf("malloc(dstBuff[%u][%u]:failed !\n",
+ i, j);
+ goto exit;
+ }
+
+ memset(srcBuff[i][j], i, MAX_DATA_LEN);
+ memset(dstBuff[i][j], i, MAX_DATA_LEN);
+
+ IV[i][j] = (uint64_t)i;
+ }
+ }
+
+ /* Testing multiple buffers of equal size */
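+ /* Each pass i submits i + 1 equally sized buffers sharing one key schedule, with one IV per buffer */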
+ for (i = 0; i < NUM_SUPPORTED_BUFFERS; i++) {
+ /* Testing buffer sizes of 128 bytes */
+ buffLenInBytes[i] = 128;
+
+ /* Test the encrypt */
+ IMB_KASUMI_F8_N_BUFFER(mgr, pKeySched, IV[i],
+ (const void * const *)srcBuff[i],
+ (void **)srcBuff[i],
+ buffLenInBytes, i + 1);
+ if (srcBuff[i][0] == NULL) {
+ printf("N buffer failure\n");
+ goto exit;
+ }
+
+ /* Test the Decrypt */
+ IMB_KASUMI_F8_N_BUFFER(mgr, pKeySched, IV[i],
+ (const void * const *)srcBuff[i],
+ (void **)srcBuff[i],
+ buffLenInBytes, i + 1);
+ if (srcBuff[i][0] == NULL) {
+ printf("N buffer failure\n");
+ goto exit;
+ }
+
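+ /* An in-place encrypt followed by decrypt must restore the original data kept in dstBuff */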
+ for (j = 0; j <= i; j++) {
+ if (memcmp(srcBuff[i][j], dstBuff[i][j],
+ buffLenInBytes[j]) != 0) {
+ printf("kasumi_f8_n_buffer equal sizes, "
+ "numBuffs:%d\n",
+ i);
+ hexdump("Actual:", srcBuff[i][j],
+ buffLenInBytes[j]);
+ hexdump("Expected:", dstBuff[i][j],
+ buffLenInBytes[j]);
+ goto exit;
+ }
+ }
+ }
+ printf("[%s]: PASS, 1 to %d buffers of equal size.\n", __FUNCTION__,
+ i);
+
+ /* Reset input buffers with test data */
+ for (i = 0; i < NUM_SUPPORTED_BUFFERS; i++) {
+ for (j = 0; j < NUM_SUPPORTED_BUFFERS; j++) {
+ memset(srcBuff[i][j], i, MAX_DATA_LEN);
+ memset(dstBuff[i][j], i, MAX_DATA_LEN);
+
+ IV[i][j] = (uint64_t)i;
+ }
+ }
+
+ /* Testing multiple buffers of increasing size */
+ for (i = 0; i < NUM_SUPPORTED_BUFFERS; i++) {
+
+ /* Testing increasing buffer sizes */
+ buffLenInBytes[i] = i + 131 * 8;
+
+ /* Test the encrypt */
+ IMB_KASUMI_F8_N_BUFFER(mgr, pKeySched, IV[i],
+ (const void * const *)srcBuff[i],
+ (void **)srcBuff[i],
+ buffLenInBytes, i + 1);
+ if (srcBuff[i][0] == NULL) {
+ printf("N buffer failure\n");
+ goto exit;
+ }
+
+ /* Test the Decrypt */
+ IMB_KASUMI_F8_N_BUFFER(mgr, pKeySched, IV[i],
+ (const void * const *)srcBuff[i],
+ (void **)srcBuff[i],
+ buffLenInBytes, i + 1);
+ if (srcBuff[i][0] == NULL) {
+ printf("N buffer failure\n");
+ goto exit;
+ }
+
+ for (j = 0; j <= i; j++) {
+ if (memcmp(srcBuff[i][j], dstBuff[i][j],
+ buffLenInBytes[j]) != 0) {
+ printf("kasumi_f8_n_buffer increasing sizes, "
+ "srcBuff[%d][%d]\n",
+ i, j);
+ hexdump("Actual:", srcBuff[i][j],
+ buffLenInBytes[j]);
+ hexdump("Expected:", dstBuff[i][j],
+ buffLenInBytes[j]);
+ goto exit;
+ }
+ }
+ }
+
+ printf("[%s]: PASS, 1 to %d buffers of increasing size.\n",
+ __FUNCTION__, i);
+
+ /* Reset input buffers with test data */
+ for (i = 0; i < NUM_SUPPORTED_BUFFERS; i++) {
+ for (j = 0; j < NUM_SUPPORTED_BUFFERS; j++) {
+ memset(srcBuff[i][j], i, MAX_DATA_LEN);
+ memset(dstBuff[i][j], i, MAX_DATA_LEN);
+
+ IV[i][j] = (uint64_t)i;
+ }
+ }
+
+ /* Testing multiple buffers of decreasing size */
+ for (i = 0; i < NUM_SUPPORTED_BUFFERS; i++) {
+
+ /* Testing Buffer sizes from 3048 to 190 */
+ buffLenInBytes[i] = MAX_DATA_LEN / (1 + i);
+
+ /* Test the encrypt */
+ IMB_KASUMI_F8_N_BUFFER(mgr, pKeySched, IV[i],
+ (const void * const *)srcBuff[i],
+ (void **)srcBuff[i],
+ buffLenInBytes, i + 1);
+
+ /* Test the Decrypt */
+ IMB_KASUMI_F8_N_BUFFER(mgr, pKeySched, IV[i],
+ (const void * const *)srcBuff[i],
+ (void **)srcBuff[i],
+ buffLenInBytes, i + 1);
+
+ for (j = 0; j <= i; j++) {
+ if (memcmp(srcBuff[i][j], dstBuff[i][j],
+ buffLenInBytes[j]) != 0) {
+ printf("kasumi_f8_n_buffer decreasing sizes, "
+ "numBuffs:%d\n",
+ i);
+ hexdump("Actual:", srcBuff[i][j],
+ buffLenInBytes[j]);
+ hexdump("Expected:", dstBuff[i][j],
+ buffLenInBytes[j]);
+ goto exit;
+ }
+ }
+ }
+
+ ret = 0;
+ printf("[%s]: PASS, 1 to %d buffers of decreasing size.\n",
+ __FUNCTION__, i);
+exit:
+ /* free up test buffers */
+ for (i = 0; i < NUM_SUPPORTED_BUFFERS; i++) {
+ for (j = 0; j < NUM_SUPPORTED_BUFFERS; j++) {
+ free(srcBuff[i][j]);
+ free(dstBuff[i][j]);
+ }
+ }
+
+ free(pKeySched);
+ return ret;
+}
+
+static int validate_kasumi_f9(MB_MGR *mgr)
+{
+ kasumi_key_sched_t *pKeySched = NULL;
+ uint8_t *pKey = NULL;
+ int keyLen = 16;
+ uint8_t srcBuff[MAX_DATA_LEN];
+ uint8_t digest[KASUMI_DIGEST_SIZE];
+ int numKasumiF9TestVectors, i;
+ hash_test_vector_t *kasumiF9_test_vectors = NULL;
+ int ret = 1;
+
+ kasumiF9_test_vectors = kasumi_f9_vectors;
+ numKasumiF9TestVectors = numHashTestVectors[0];
+
+ if (!numKasumiF9TestVectors) {
+ printf("No Kasumi vectors found !\n");
+ goto exit;
+ }
+ pKey = malloc(keyLen);
+ if (!pKey) {
+ printf("malloc(pkey):failed!\n");
+ goto exit;
+ }
+
+ pKeySched = malloc(IMB_KASUMI_KEY_SCHED_SIZE(mgr));
+ if (!pKeySched) {
+ printf("malloc (IMB_KASUMI_KEY_SCHED_SIZE()): failed !\n");
+ goto exit;
+ }
+
+ /* Create the test Data */
+ for (i = 0; i < numKasumiF9TestVectors; i++) {
+ memcpy(pKey, kasumiF9_test_vectors[i].key,
+ kasumiF9_test_vectors[i].keyLenInBytes);
+
+ memcpy(srcBuff, kasumiF9_test_vectors[i].input,
+ kasumiF9_test_vectors[i].lengthInBits);
+
+ memcpy(digest, kasumiF9_test_vectors[i].exp_out,
+ KASUMI_DIGEST_SIZE);
+
+ if (IMB_KASUMI_INIT_F9_KEY_SCHED(mgr, pKey, pKeySched)) {
+ printf("IMB_KASUMI_INIT_F9_KEY_SCHED()error\n");
+ goto exit;
+ }
+
+ /* Test F9 integrity */
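+ /* Compute the f9 MAC over lengthInBits bits of srcBuff into digest */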
+ IMB_KASUMI_F9_1_BUFFER(mgr, pKeySched, srcBuff,
+ kasumiF9_test_vectors[i].lengthInBits,
+ digest);
+
+ /* Compare the digest with the expected in the vectors */
+ if (memcmp(digest, kasumiF9_test_vectors[i].exp_out,
+ KASUMI_DIGEST_SIZE) != 0) {
+ hexdump("Actual", digest, KASUMI_DIGEST_SIZE);
+ hexdump("Expected", kasumiF9_test_vectors[i].exp_out,
+ KASUMI_DIGEST_SIZE);
+ printf("F9 integrity %d Failed\n", i);
+ goto exit;
+ }
+ }
+
+ ret = 0;
+ printf("[%s]: PASS, for %d single buffers.\n", __FUNCTION__,
+ numKasumiF9TestVectors);
+exit:
+ free(pKey);
+ free(pKeySched);
+ return ret;
+}
+
+static int validate_kasumi_f9_user(MB_MGR *mgr)
+{
+ int numKasumiF9IV_TestVectors = 0, i = 0;
+ hash_iv_test_vector_t *kasumiF9_vectors = NULL;
+
+ kasumiF9_vectors = kasumi_f9_IV_vectors;
+ numKasumiF9IV_TestVectors = numHashTestVectors[1];
+
+ kasumi_key_sched_t *pKeySched = NULL;
+ uint8_t *pKey = NULL;
+ int keyLen = MAX_KEY_LEN;
+
+ uint64_t iv[MAX_IV_LEN];
+ uint8_t srcBuff[MAX_DATA_LEN];
+ uint8_t digest[KASUMI_DIGEST_SIZE];
+ uint32_t direction;
+ int ret = 1;
+
+ if (!numKasumiF9IV_TestVectors) {
+ printf("No Kasumi vectors found !\n");
+ goto exit;
+ }
+
+ pKey = malloc(keyLen);
+ if (!pKey) {
+ printf("malloc(pkey):failed!\n");
+ goto exit;
+ }
+
+ pKeySched = malloc(IMB_KASUMI_KEY_SCHED_SIZE(mgr));
+ if (!pKeySched) {
+ printf("malloc (IMB_KASUMI_KEY_SCHED_SIZE()): failed !\n");
+ goto exit;
+ }
+
+ /* Create the test data */
+ for (i = 0; i < numKasumiF9IV_TestVectors; i++) {
+ memcpy(pKey, kasumiF9_vectors[i].key,
+ kasumiF9_vectors[i].keyLenInBytes);
+
+ memcpy(srcBuff, kasumiF9_vectors[i].input,
+ (kasumiF9_vectors[i].lengthInBits + 7) / CHAR_BIT);
+
+ memcpy(iv, kasumiF9_vectors[i].iv,
+ kasumiF9_vectors[i].ivLenInBytes);
+
+ direction = kasumiF9_vectors[i].direction;
+
+ /* Only 1 key sched is used */
+ if (IMB_KASUMI_INIT_F9_KEY_SCHED(mgr, pKey, pKeySched)) {
+ printf("IMB_KASUMI_INIT_F9_KEY_SCHED() error\n");
+ goto exit;
+ }
+ /* Test the integrity for f9_user with IV */
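+ /* The USER variant additionally takes the IV and the direction bit from the test vector */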
+ IMB_KASUMI_F9_1_BUFFER_USER(mgr, pKeySched, iv[0], srcBuff,
+ kasumiF9_vectors[i].lengthInBits,
+ digest, direction);
+
+ /* Compare the digest with the expected in the vectors */
+ if (memcmp(digest, kasumiF9_vectors[i].exp_out,
+ KASUMI_DIGEST_SIZE) != 0) {
+ hexdump("digest", digest, KASUMI_DIGEST_SIZE);
+ hexdump("exp_out", kasumiF9_vectors[i].exp_out,
+ KASUMI_DIGEST_SIZE);
+ printf("direction %d\n", direction);
+ printf("F9 integrity %d Failed\n", i);
+ goto exit;
+ }
+ }
+
+ ret = 0;
+ printf("[%s]: PASS, for %d single buffers.\n", __FUNCTION__, i);
+exit:
+ free(pKey);
+ free(pKeySched);
+ return ret;
+}
+
+int kasumi_test(const enum arch_type arch, struct MB_MGR *mb_mgr)
+{
+ int status = 0;
+
+ /* Do not run the tests for aesni emulation */
+ if (arch == ARCH_NO_AESNI)
+ return 0;
+
+ if (validate_kasumi_f8_1_block(mb_mgr)) {
+ printf("validate_kasumi_f8_1_block: FAIL\n");
+ status = 1;
+ }
+
+ if (validate_kasumi_f8_1_bitblock(mb_mgr)) {
+ printf("validate_kasumi_f8_1_bitblock: FAIL\n");
+ status = 1;
+ }
+ if (validate_kasumi_f8_1_bitblock_offset(mb_mgr)) {
+ printf("validate_kasumi_f8_1_bitblock_linear: FAIL\n");
+ status = 1;
+ }
+
+ if (validate_kasumi_f8_2_blocks(mb_mgr)) {
+ printf("validate_kasumi_f8_2_blocks: FAIL\n");
+ status = 1;
+ }
+ if (validate_kasumi_f8_3_blocks(mb_mgr)) {
+ printf("<F12>validate_kasumi_f8_3_blocks: FAIL\n");
+ status = 1;
+ }
+ if (validate_kasumi_f8_4_blocks(mb_mgr)) {
+ printf("validate_kasumi_f8_4_blocks: FAIL\n");
+ status = 1;
+ }
+
+ if (validate_kasumi_f8_n_blocks(mb_mgr)) {
+ printf("validate_kasumi_f8_n_blocks: FAIL\n");
+ status = 1;
+ }
+ if (validate_kasumi_f9(mb_mgr)) {
+ printf("validate_kasumi_f9: FAIL\n");
+ status = 1;
+ }
+ if (validate_kasumi_f9_user(mb_mgr)) {
+ printf("validate_kasumi_f9_user: FAIL\n");
+ status = 1;
+ }
+ if (!status)
+ printf("ALL TESTS PASSED.\n");
+ else
+ printf("WE HAVE TEST FAILURES !\n");
+
+ return status;
+}
diff --git a/src/spdk/intel-ipsec-mb/LibTestApp/kasumi_test_vectors.h b/src/spdk/intel-ipsec-mb/LibTestApp/kasumi_test_vectors.h
new file mode 100644
index 000000000..0785d2157
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/LibTestApp/kasumi_test_vectors.h
@@ -0,0 +1,1159 @@
+/*
+ * Copyright (c) 2009-2019, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __KASUMI_TEST_VECTORS_H__
+#define __KASUMI_TEST_VECTORS_H__
+
+#define MAX_DATA_LEN (3048)
+#define MAX_KEY_LEN (32)
+#define MAX_IV_LEN (16)
+#define NUM_SUPPORTED_BUFFERS (16)
+#define MAX_SIZE_IN_BYTES_1024 (1024)
+#define MAX_BIT_BUFFERS (6)
+
+typedef struct cipher_test_vector_s {
+ uint32_t dataLenInBytes;
+ uint32_t keyLenInBytes;
+ uint32_t ivLenInBytes;
+ uint8_t plaintext[MAX_DATA_LEN];
+ uint8_t ciphertext[MAX_DATA_LEN];
+ uint8_t key[MAX_KEY_LEN];
+ uint8_t iv[MAX_IV_LEN];
+} cipher_test_vector_t;
+typedef struct cipherbit_test_vector_s {
+ uint32_t LenInBits;
+ uint32_t keyLenInBytes;
+ uint32_t ivLenInBytes;
+ uint8_t plaintext[MAX_DATA_LEN];
+ uint8_t ciphertext[MAX_DATA_LEN];
+ uint8_t key[MAX_KEY_LEN];
+ uint8_t iv[MAX_IV_LEN];
+} cipherbit_test_vector_t;
+typedef struct cipherbit_test_linear_vector_s {
+ uint32_t LenInBits[MAX_BIT_BUFFERS];
+ uint32_t keyLenInBytes;
+ uint32_t ivLenInBytes;
+ uint8_t plaintext[MAX_DATA_LEN];
+ uint8_t ciphertext[MAX_DATA_LEN];
+ uint8_t key[MAX_BIT_BUFFERS][MAX_KEY_LEN];
+ uint8_t iv[MAX_BIT_BUFFERS][MAX_IV_LEN];
+} cipherbit_test_linear_vector_t;
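+/*
+ * A linear vector concatenates several bit-length buffers into a single
+ * plaintext/ciphertext block; a key, IV and bit length are kept per buffer.
+ */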
+
+typedef struct hash_test_vector_s {
+ uint8_t input[MAX_DATA_LEN];
+ uint32_t lengthInBits;
+ uint8_t key[MAX_DATA_LEN];
+ uint32_t keyLenInBytes;
+ uint8_t exp_out[KASUMI_BLOCK_SIZE];
+} hash_test_vector_t;
+
+typedef struct hash_iv_test_vector_s {
+ uint8_t input[MAX_DATA_LEN];
+ uint32_t lengthInBits;
+ uint8_t key[MAX_DATA_LEN];
+ uint32_t keyLenInBytes;
+ uint8_t exp_out[KASUMI_BLOCK_SIZE];
+ uint8_t iv[MAX_DATA_LEN];
+ uint32_t ivLenInBytes;
+ uint32_t direction;
+} hash_iv_test_vector_t;
+
+static const cipherbit_test_linear_vector_t kasumi_f8_linear_bitvectors = {
+
+ {798, 510, 253, 120, 837},
+ 16,
+ 8,
+ {0x7e, 0xc6, 0x12, 0x72, 0x74, 0x3b, 0xf1, 0x61, 0x47,
+ 0x26, 0x44, 0x6a, 0x6c, 0x38, 0xce, 0xd1, 0x66, 0xf6,
+ 0xca, 0x76, 0xeb, 0x54, 0x30, 0x4, 0x42, 0x86, 0x34,
+ 0x6c, 0xef, 0x13, 0xf, 0x92, 0x92, 0x2b, 0x3, 0x45,
+ 0xd, 0x3a, 0x99, 0x75, 0xe5, 0xbd, 0x2e, 0xa0, 0xeb,
+ 0x55, 0xad, 0x8e, 0x1b, 0x19, 0x9e, 0x3e, 0xc4, 0x31,
+ 0x60, 0x20, 0xe9, 0xa1, 0xb2, 0x85, 0xe7, 0x62, 0x79,
+ 0x53, 0x59, 0xb7, 0xbd, 0xfd, 0x39, 0xbe, 0xf4, 0xb2,
+ 0x48, 0x45, 0x83, 0xd5, 0xaf, 0xe0, 0x82, 0xae, 0xe6,
+ 0x38, 0xbf, 0x5f, 0xd5, 0xa6, 0x6, 0x19, 0x39, 0x1,
+ 0xa0, 0x8f, 0x4a, 0xb4, 0x1a, 0xab, 0x9b, 0x13, 0x48,
+ 0x80, 0x40, 0x44, 0x48, 0xc7, 0x81, 0x80, 0x94, 0xe9,
+ 0xf, 0xf4, 0xfd, 0x5f, 0x8d, 0xd8, 0x1e, 0xac, 0xa0,
+ 0x9e, 0xd6, 0x66, 0xda, 0xc6, 0xef, 0x68, 0xde, 0xa2,
+ 0xaf, 0x31, 0x6a, 0x31, 0x54, 0x34, 0x6f, 0xec, 0xbd,
+ 0x25, 0x18, 0x93, 0xed, 0x40, 0xd9, 0xfe, 0x8d, 0xb3,
+ 0x8e, 0xf1, 0xa3, 0xc4, 0x73, 0xe4, 0xec, 0x54, 0x40,
+ 0xdd, 0xac, 0x8, 0x4c, 0x3e, 0x4, 0xaa, 0x7e, 0x85,
+ 0xa7, 0x69, 0x81, 0xba, 0x68, 0x24, 0xc1, 0xbf, 0xb1,
+ 0xab, 0x48, 0x54, 0x72, 0x2, 0x9b, 0x71, 0xd8, 0x8,
+ 0xce, 0x33, 0xe2, 0xcc, 0x3c, 0xb, 0x5f, 0xc1, 0xf3,
+ 0xde, 0x8a, 0x6d, 0xc6, 0x6b, 0x1f, 0x56, 0xce, 0x22,
+ 0xf, 0xc4, 0x85, 0x9c, 0x62, 0x2b, 0xd2, 0x4e, 0xa1,
+ 0xa, 0x3, 0xf4, 0x20, 0x4c, 0xd, 0xd3, 0x41, 0x26,
+ 0xd, 0xfd, 0xa1, 0x43, 0x59, 0x4c, 0xbc, 0x1e, 0xd7,
+ 0xa2, 0x16, 0x4, 0xcf, 0xbd, 0x58, 0x7a, 0xc6, 0xae,
+ 0x47, 0x23, 0x58, 0x82, 0x78, 0x47, 0x80, 0xda, 0xd,
+ 0x5a, 0x42, 0xa3, 0x90, 0x14, 0xdb, 0x8e, 0x9b, 0x5e,
+ 0x8d, 0x1e, 0xc8, 0x6e, 0x1d, 0x20, 0xda, 0x36, 0xa8,
+ 0xb3, 0x95, 0x62, 0x64, 0xb3, 0x1d, 0x15, 0xf0, 0x31,
+ 0xed, 0x25, 0xe4, 0x69, 0x40, 0x46, 0x71, 0x9f, 0x16,
+ 0x66, 0x5f, 0xe3, 0x1a, 0x70, 0xd9, 0x2c, 0x83, 0x4,
+ 0x3b, 0x50, 0x7d, 0xda, 0xd2, 0x1b, 0xf5, 0xe6, 0x46,
+ 0x98, 0xe0, 0xcf, 0x22, 0x2a, 0x18, 0xc3, 0xa2, 0xf1,
+ 0xcc, 0x3b, 0x22, 0xcc, 0x3d, 0x4c, 0x37, 0x96, 0x58,
+ 0x0},
+ {0xd1, 0xe2, 0xde, 0x70, 0xee, 0xf8, 0x6c, 0x69, 0x64, 0xfb, 0x54, 0x2b,
+ 0xc2, 0xd4, 0x60, 0xaa, 0xbf, 0xaa, 0x10, 0xa4, 0xa0, 0x93, 0x26, 0x2b,
+ 0x7d, 0x19, 0x9e, 0x70, 0x6f, 0xc2, 0xd4, 0x89, 0x15, 0x53, 0x29, 0x69,
+ 0x10, 0xf3, 0xa9, 0x73, 0x1, 0x26, 0x82, 0xe4, 0x1c, 0x4e, 0x2b, 0x2,
+ 0xbe, 0x20, 0x17, 0xb7, 0x25, 0x3b, 0xbf, 0x93, 0x9, 0xde, 0x58, 0x19,
+ 0xcb, 0x42, 0xe8, 0x19, 0x56, 0xf4, 0xc9, 0x9b, 0xc9, 0x76, 0x5c, 0xaf,
+ 0x53, 0xb1, 0xd0, 0xbb, 0x82, 0x79, 0x82, 0x6a, 0xdb, 0xbc, 0x55, 0x22,
+ 0xe9, 0x15, 0xc1, 0x20, 0xa6, 0x18, 0xa5, 0xa7, 0xf5, 0xe8, 0x97, 0x8,
+ 0x93, 0x39, 0x65, 0xc, 0xf7, 0xab, 0x31, 0xf0, 0x56, 0x8, 0x72, 0xaa,
+ 0x27, 0xbb, 0x2b, 0x7a, 0x6d, 0x6f, 0x4d, 0x85, 0x2f, 0x43, 0x21, 0x6,
+ 0x75, 0xc4, 0xe, 0x17, 0x76, 0xf9, 0x61, 0x27, 0xbc, 0x6e, 0xb1, 0x6b,
+ 0xa2, 0xc5, 0x29, 0x6c, 0x29, 0x9d, 0x5, 0x48, 0x7a, 0xd3, 0x80, 0x2e,
+ 0xe7, 0xb3, 0xcf, 0xa7, 0xdf, 0x32, 0xe7, 0x2b, 0x9d, 0x5, 0x4b, 0x5f,
+ 0xd3, 0x8a, 0x80, 0xd2, 0xdb, 0xa8, 0x3, 0xb5, 0xbb, 0x94, 0x31, 0xbb,
+ 0x1e, 0x98, 0xbd, 0x11, 0xb9, 0x3d, 0xb7, 0xc3, 0xd4, 0x51, 0x36, 0x55,
+ 0x9b, 0xb8, 0x6a, 0x29, 0x5a, 0xa2, 0x4, 0xec, 0xbe, 0xbf, 0x6f, 0x7a,
+ 0x51, 0x1, 0x51, 0x4d, 0xe4, 0x96, 0x54, 0x1, 0xe3, 0x3d, 0x94, 0x50,
+ 0x8d, 0x25, 0xf7, 0x2d, 0x6, 0x12, 0xee, 0xd9, 0xb2, 0x6e, 0x95, 0x57,
+ 0x61, 0x26, 0xff, 0x94, 0x8c, 0xab, 0xdb, 0xc5, 0xd6, 0x83, 0x1d, 0x62,
+ 0xbc, 0xe6, 0xc8, 0x20, 0xdd, 0x5f, 0x44, 0xfe, 0x8c, 0xae, 0x2, 0xbc,
+ 0x65, 0xcf, 0xef, 0x21, 0xb, 0xab, 0x30, 0xbc, 0x69, 0x1, 0x20, 0x10,
+ 0x36, 0x83, 0xe7, 0xd3, 0xc, 0xf6, 0x2, 0xcf, 0xb1, 0xa8, 0xa2, 0x2c,
+ 0xfe, 0x8, 0x6a, 0x16, 0xe4, 0xc9, 0xa7, 0x2b, 0x75, 0xe0, 0x65, 0xe3,
+ 0x6, 0xa6, 0x96, 0xf8, 0xba, 0x3b, 0xa6, 0x5e, 0xe6, 0xae, 0xd2, 0x51,
+ 0xa8, 0x18, 0xbd, 0x3f, 0x89, 0x70, 0xca, 0x4a, 0x38, 0xfc, 0x51, 0x4a,
+ 0xf9, 0x39, 0x73, 0x47, 0xe0, 0x38, 0xac, 0xd8, 0x3e, 0xc7, 0x16, 0x93,
+ 0x72, 0x2c, 0xcc, 0x0},
+ {
+ {0x2B, 0xD6, 0x45, 0x9F, 0x82, 0xC5, 0xB3, 0x00, 0x95, 0x2C, 0x49, 0x10,
+ 0x48, 0x81, 0xFF, 0x48},
+ {0xEF, 0xA8, 0xB2, 0x22, 0x9E, 0x72, 0x0C, 0x2A, 0x7C, 0x36, 0xEA, 0x55,
+ 0xE9, 0x60, 0x56, 0x95},
+ {0xD3, 0xC5, 0xD5, 0x92, 0x32, 0x7F, 0xB1, 0x1C, 0x40, 0x35, 0xC6, 0x68,
+ 0x0A, 0xF8, 0xC6, 0xD1},
+ {0x5A, 0xCB, 0x1D, 0x64, 0x4C, 0x0D, 0x51, 0x20, 0x4E, 0xA5, 0xF1, 0x45,
+ 0x10, 0x10, 0xD8, 0x52},
+ {0x60, 0x90, 0xEA, 0xE0, 0x4C, 0x83, 0x70, 0x6E, 0xEC, 0xBF, 0x65, 0x2B,
+ 0xE8, 0xE3, 0x65, 0x66},
+ },
+ {
+ {0x72, 0xA4, 0xF2, 0x0F, 0x64, 0x00, 0x00, 0x00},
+ {0xE2, 0x8B, 0xCF, 0x7B, 0xC0, 0x00, 0x00, 0x00},
+ {0x39, 0x8A, 0x59, 0xB4, 0x2C, 0x00, 0x00, 0x00},
+ {0xFA, 0x55, 0x6B, 0x26, 0x1C, 0x00, 0x00, 0x00},
+ {0x72, 0xA4, 0xF2, 0x0F, 0x48, 0x00, 0x00, 0x00},
+ }
+
+};
+
+static const cipherbit_test_vector_t kasumi_f8_bitvectors[] = {
+ {798,
+ 16,
+ 8,
+ {0x7E, 0xC6, 0x12, 0x72, 0x74, 0x3B, 0xF1, 0x61, 0x47, 0x26, 0x44, 0x6A,
+ 0x6C, 0x38, 0xCE, 0xD1, 0x66, 0xF6, 0xCA, 0x76, 0xEB, 0x54, 0x30, 0x04,
+ 0x42, 0x86, 0x34, 0x6C, 0xEF, 0x13, 0x0F, 0x92, 0x92, 0x2B, 0x03, 0x45,
+ 0x0D, 0x3A, 0x99, 0x75, 0xE5, 0xBD, 0x2E, 0xA0, 0xEB, 0x55, 0xAD, 0x8E,
+ 0x1B, 0x19, 0x9E, 0x3E, 0xC4, 0x31, 0x60, 0x20, 0xE9, 0xA1, 0xB2, 0x85,
+ 0xE7, 0x62, 0x79, 0x53, 0x59, 0xB7, 0xBD, 0xFD, 0x39, 0xBE, 0xF4, 0xB2,
+ 0x48, 0x45, 0x83, 0xD5, 0xAF, 0xE0, 0x82, 0xAE, 0xE6, 0x38, 0xBF, 0x5F,
+ 0xD5, 0xA6, 0x06, 0x19, 0x39, 0x01, 0xA0, 0x8F, 0x4A, 0xB4, 0x1A, 0xAB,
+ 0x9B, 0x13, 0x48, 0x83},
+ {0xD1, 0xE2, 0xDE, 0x70, 0xEE, 0xF8, 0x6C, 0x69, 0x64, 0xFB, 0x54, 0x2B,
+ 0xC2, 0xD4, 0x60, 0xAA, 0xBF, 0xAA, 0x10, 0xA4, 0xA0, 0x93, 0x26, 0x2B,
+ 0x7D, 0x19, 0x9E, 0x70, 0x6F, 0xC2, 0xD4, 0x89, 0x15, 0x53, 0x29, 0x69,
+ 0x10, 0xF3, 0xA9, 0x73, 0x01, 0x26, 0x82, 0xE4, 0x1C, 0x4E, 0x2B, 0x02,
+ 0xBE, 0x20, 0x17, 0xB7, 0x25, 0x3B, 0xBF, 0x93, 0x09, 0xDE, 0x58, 0x19,
+ 0xCB, 0x42, 0xE8, 0x19, 0x56, 0xF4, 0xC9, 0x9B, 0xC9, 0x76, 0x5C, 0xAF,
+ 0x53, 0xB1, 0xD0, 0xBB, 0x82, 0x79, 0x82, 0x6A, 0xDB, 0xBC, 0x55, 0x22,
+ 0xE9, 0x15, 0xC1, 0x20, 0xA6, 0x18, 0xA5, 0xA7, 0xF5, 0xE8, 0x97, 0x08,
+ 0x93, 0x39, 0x65, 0x0F},
+ {0x2B, 0xD6, 0x45, 0x9F, 0x82, 0xC5, 0xB3, 0x00, 0x95, 0x2C, 0x49, 0x10,
+ 0x48, 0x81, 0xFF, 0x48},
+ {0x72, 0xA4, 0xF2, 0x0F, 0x64, 0x00, 0x00, 0x00}}, /*buffer0*/
+ {510,
+ 16,
+ 8,
+ {0x10, 0x11, 0x12, 0x31, 0xE0, 0x60, 0x25, 0x3A, 0x43, 0xFD, 0x3F,
+ 0x57, 0xE3, 0x76, 0x07, 0xAB, 0x28, 0x27, 0xB5, 0x99, 0xB6, 0xB1,
+ 0xBB, 0xDA, 0x37, 0xA8, 0xAB, 0xCC, 0x5A, 0x8C, 0x55, 0x0D, 0x1B,
+ 0xFB, 0x2F, 0x49, 0x46, 0x24, 0xFB, 0x50, 0x36, 0x7F, 0xA3, 0x6C,
+ 0xE3, 0xBC, 0x68, 0xF1, 0x1C, 0xF9, 0x3B, 0x15, 0x10, 0x37, 0x6B,
+ 0x02, 0x13, 0x0F, 0x81, 0x2A, 0x9F, 0xA1, 0x69, 0xDB},
+
+ {0x3D, 0xEA, 0xCC, 0x7C, 0x15, 0x82, 0x1C, 0xAA, 0x89, 0xEE, 0xCA,
+ 0xDE, 0x9B, 0x5B, 0xD3, 0x61, 0x4B, 0xD0, 0xC8, 0x41, 0x9D, 0x71,
+ 0x03, 0x85, 0xDD, 0xBE, 0x58, 0x49, 0xEF, 0x1B, 0xAC, 0x5A, 0xE8,
+ 0xB1, 0x4A, 0x5B, 0x0A, 0x67, 0x41, 0x52, 0x1E, 0xB4, 0xE0, 0x0B,
+ 0xB9, 0xEC, 0xF3, 0xE9, 0xF7, 0xCC, 0xB9, 0xCA, 0xE7, 0x41, 0x52,
+ 0xD7, 0xF4, 0xE2, 0xA0, 0x34, 0xB6, 0xEA, 0x00, 0xEF},
+
+ {0xEF, 0xA8, 0xB2, 0x22, 0x9E, 0x72, 0x0C, 0x2A, 0x7C, 0x36, 0xEA, 0x55,
+ 0xE9, 0x60, 0x56, 0x95},
+ {0xE2, 0x8B, 0xCF, 0x7B, 0xC0, 0x00, 0x00, 0x00}}, /*buffer1*/
+ {253,
+ 16,
+ 8,
+ {0x98, 0x1B, 0xA6, 0x82, 0x4C, 0x1B, 0xFB, 0x1A, 0xB4, 0x85, 0x47,
+ 0x20, 0x29, 0xB7, 0x1D, 0x80, 0x8C, 0xE3, 0x3E, 0x2C, 0xC3, 0xC0,
+ 0xB5, 0xFC, 0x1F, 0x3D, 0xE8, 0xA6, 0xDC, 0x66, 0xB1, 0xF7},
+ {0x5B, 0xB9, 0x43, 0x1B, 0xB1, 0xE9, 0x8B, 0xD1, 0x1B, 0x93, 0xDB,
+ 0x7C, 0x3D, 0x45, 0x13, 0x65, 0x59, 0xBB, 0x86, 0xA2, 0x95, 0xAA,
+ 0x20, 0x4E, 0xCB, 0xEB, 0xF6, 0xF7, 0xA5, 0x10, 0x15, 0x17},
+ {0xD3, 0xC5, 0xD5, 0x92, 0x32, 0x7F, 0xB1, 0x1C, 0x40, 0x35, 0xC6, 0x68,
+ 0x0A, 0xF8, 0xC6, 0xD1},
+ {0x39, 0x8A, 0x59, 0xB4, 0x2C, 0x00, 0x00, 0x00}}, /*buffer2*/
+ {120,
+ 16,
+ 8,
+ {0xAD, 0x9C, 0x44, 0x1F, 0x89, 0x0B, 0x38, 0xC4, 0x57, 0xA4, 0x9D, 0x42,
+ 0x14, 0x07, 0xE8},
+ {0x9B, 0xC9, 0x2C, 0xA8, 0x03, 0xC6, 0x7B, 0x28, 0xA1, 0x1A, 0x4B, 0xEE,
+ 0x5A, 0x0C, 0x25},
+ {0x5A, 0xCB, 0x1D, 0x64, 0x4C, 0x0D, 0x51, 0x20, 0x4E, 0xA5, 0xF1, 0x45,
+ 0x10, 0x10, 0xD8, 0x52},
+ {0xFA, 0x55, 0x6B, 0x26, 0x1C, 0x00, 0x00, 0x00}}, /*buffer3*/
+ {837,
+ 16,
+ 8,
+ {0x40, 0x98, 0x1B, 0xA6, 0x82, 0x4C, 0x1B, 0xFB, 0x42, 0x86, 0xB2, 0x99,
+ 0x78, 0x3D, 0xAF, 0x44, 0x2C, 0x09, 0x9F, 0x7A, 0xB0, 0xF5, 0x8D, 0x5C,
+ 0x8E, 0x46, 0xB1, 0x04, 0xF0, 0x8F, 0x01, 0xB4, 0x1A, 0xB4, 0x85, 0x47,
+ 0x20, 0x29, 0xB7, 0x1D, 0x36, 0xBD, 0x1A, 0x3D, 0x90, 0xDC, 0x3A, 0x41,
+ 0xB4, 0x6D, 0x51, 0x67, 0x2A, 0xC4, 0xC9, 0x66, 0x3A, 0x2B, 0xE0, 0x63,
+ 0xDA, 0x4B, 0xC8, 0xD2, 0x80, 0x8C, 0xE3, 0x3E, 0x2C, 0xCC, 0xBF, 0xC6,
+ 0x34, 0xE1, 0xB2, 0x59, 0x06, 0x08, 0x76, 0xA0, 0xFB, 0xB5, 0xA4, 0x37,
+ 0xEB, 0xCC, 0x8D, 0x31, 0xC1, 0x9E, 0x44, 0x54, 0x31, 0x87, 0x45, 0xE3,
+ 0x98, 0x76, 0x45, 0x98, 0x7A, 0x98, 0x6F, 0x2C, 0xB7},
+ {0xDD, 0xB3, 0x64, 0xDD, 0x2A, 0xAE, 0xC2, 0x4D, 0xFF, 0x29, 0x19,
+ 0x57, 0xB7, 0x8B, 0xAD, 0x06, 0x3A, 0xC5, 0x79, 0xCD, 0x90, 0x41,
+ 0xBA, 0xBE, 0x89, 0xFD, 0x19, 0x5C, 0x05, 0x78, 0xCB, 0x9F, 0xDE,
+ 0x42, 0x17, 0x56, 0x61, 0x78, 0xD2, 0x02, 0x40, 0x20, 0x6D, 0x07,
+ 0xCF, 0xA6, 0x19, 0xEC, 0x05, 0x9F, 0x63, 0x51, 0x44, 0x59, 0xFC,
+ 0x10, 0xD4, 0x2D, 0xC9, 0x93, 0x4E, 0x56, 0xEB, 0xC0, 0xCB, 0xC6,
+ 0x0D, 0x4D, 0x2D, 0xF1, 0x74, 0x77, 0x4C, 0xBD, 0xCD, 0x5D, 0xA4,
+ 0xA3, 0x50, 0x31, 0x7A, 0x7F, 0x12, 0xE1, 0x94, 0x94, 0x71, 0xF8,
+ 0xA2, 0x95, 0xF2, 0x72, 0xE6, 0x8F, 0xC0, 0x71, 0x59, 0xB0, 0x7D,
+ 0x8E, 0x2D, 0x26, 0xE4, 0x59,
+ 0x9F},
+ {0x60, 0x90, 0xEA, 0xE0, 0x4C, 0x83, 0x70, 0x6E, 0xEC, 0xBF, 0x65, 0x2B,
+ 0xE8, 0xE3, 0x65, 0x66},
+ {0x72, 0xA4, 0xF2, 0x0F, 0x48, 0x00, 0x00, 0x00}}, /*buffer4*/
+};
+
+static cipher_test_vector_t kasumi_f8_vectors[] = {
+ {100,
+ 16,
+ 8,
+ {0x7E, 0xC6, 0x12, 0x72, 0x74, 0x3B, 0xF1, 0x61, 0x47, 0x26, 0x44, 0x6A,
+ 0x6C, 0x38, 0xCE, 0xD1, 0x66, 0xF6, 0xCA, 0x76, 0xEB, 0x54, 0x30, 0x04,
+ 0x42, 0x86, 0x34, 0x6C, 0xEF, 0x13, 0x0F, 0x92, 0x92, 0x2B, 0x03, 0x45,
+ 0x0D, 0x3A, 0x99, 0x75, 0xE5, 0xBD, 0x2E, 0xA0, 0xEB, 0x55, 0xAD, 0x8E,
+ 0x1B, 0x19, 0x9E, 0x3E, 0xC4, 0x31, 0x60, 0x20, 0xE9, 0xA1, 0xB2, 0x85,
+ 0xE7, 0x62, 0x79, 0x53, 0x59, 0xB7, 0xBD, 0xFD, 0x39, 0xBE, 0xF4, 0xB2,
+ 0x48, 0x45, 0x83, 0xD5, 0xAF, 0xE0, 0x82, 0xAE, 0xE6, 0x38, 0xBF, 0x5F,
+ 0xD5, 0xA6, 0x06, 0x19, 0x39, 0x01, 0xA0, 0x8F, 0x4A, 0xB4, 0x1A, 0xAB,
+ 0x9B, 0x13, 0x48, 0x80},
+ {0xD1, 0xE2, 0xDE, 0x70, 0xEE, 0xF8, 0x6C, 0x69, 0x64, 0xFB, 0x54, 0x2B,
+ 0xC2, 0xD4, 0x60, 0xAA, 0xBF, 0xAA, 0x10, 0xA4, 0xA0, 0x93, 0x26, 0x2B,
+ 0x7D, 0x19, 0x9E, 0x70, 0x6F, 0xC2, 0xD4, 0x89, 0x15, 0x53, 0x29, 0x69,
+ 0x10, 0xF3, 0xA9, 0x73, 0x01, 0x26, 0x82, 0xE4, 0x1C, 0x4E, 0x2B, 0x02,
+ 0xBE, 0x20, 0x17, 0xB7, 0x25, 0x3B, 0xBF, 0x93, 0x09, 0xDE, 0x58, 0x19,
+ 0xCB, 0x42, 0xE8, 0x19, 0x56, 0xF4, 0xC9, 0x9B, 0xC9, 0x76, 0x5C, 0xAF,
+ 0x53, 0xB1, 0xD0, 0xBB, 0x82, 0x79, 0x82, 0x6A, 0xDB, 0xBC, 0x55, 0x22,
+ 0xE9, 0x15, 0xC1, 0x20, 0xA6, 0x18, 0xA5, 0xA7, 0xF5, 0xE8, 0x97, 0x08,
+ 0x93, 0x39, 0x65, 0x0F},
+ {0x2B, 0xD6, 0x45, 0x9F, 0x82, 0xC5, 0xB3, 0x00, 0x95, 0x2C, 0x49, 0x10,
+ 0x48, 0x81, 0xFF, 0x48},
+ {0x72, 0xA4, 0xF2, 0x0F, 0x64, 0x00, 0x00, 0x00}}, /*buffer0*/
+ {64,
+ 16,
+ 8,
+ {0x10, 0x11, 0x12, 0x31, 0xE0, 0x60, 0x25, 0x3A, 0x43, 0xFD, 0x3F,
+ 0x57, 0xE3, 0x76, 0x07, 0xAB, 0x28, 0x27, 0xB5, 0x99, 0xB6, 0xB1,
+ 0xBB, 0xDA, 0x37, 0xA8, 0xAB, 0xCC, 0x5A, 0x8C, 0x55, 0x0D, 0x1B,
+ 0xFB, 0x2F, 0x49, 0x46, 0x24, 0xFB, 0x50, 0x36, 0x7F, 0xA3, 0x6C,
+ 0xE3, 0xBC, 0x68, 0xF1, 0x1C, 0xF9, 0x3B, 0x15, 0x10, 0x37, 0x6B,
+ 0x02, 0x13, 0x0F, 0x81, 0x2A, 0x9F, 0xA1, 0x69, 0xD8},
+ {0xBF, 0x35, 0xDE, 0x33, 0x7A, 0xA3, 0xB8, 0x32, 0x60, 0x20, 0x2F,
+ 0x16, 0x4D, 0x9A, 0xA9, 0xD0, 0xF1, 0x7B, 0x6F, 0x4B, 0xFD, 0x76,
+ 0xAD, 0xF5, 0x08, 0x37, 0x01, 0xD0, 0xDA, 0x5D, 0x8E, 0x16, 0x9C,
+ 0x83, 0x05, 0x65, 0x5B, 0xED, 0xCB, 0x56, 0xD2, 0xE4, 0x0F, 0x28,
+ 0x14, 0xA7, 0xEE, 0x7D, 0xB9, 0xC0, 0xB2, 0x9C, 0xF1, 0x3D, 0xB4,
+ 0xB1, 0xF3, 0x70, 0x6B, 0xB6, 0xB3, 0x81, 0xF8, 0x92},
+ {0x2B, 0xD6, 0x45, 0x9F, 0x82, 0xC5, 0xB3, 0x00, 0x95, 0x2C, 0x49, 0x10,
+ 0x48, 0x81, 0xFF, 0x48},
+ {0x72, 0xA4, 0xF2, 0x0F, 0x64, 0x00, 0x00, 0x00}}, /*buffer1*/
+ {100,
+ 16,
+ 8,
+ {0x7E, 0xC6, 0x12, 0x72, 0x74, 0x3B, 0xF1, 0x61, 0x47, 0x26, 0x44, 0x6A,
+ 0x6C, 0x38, 0xCE, 0xD1, 0x66, 0xF6, 0xCA, 0x76, 0xEB, 0x54, 0x30, 0x04,
+ 0x42, 0x86, 0x34, 0x6C, 0xEF, 0x13, 0x0F, 0x92, 0x92, 0x2B, 0x03, 0x45,
+ 0x0D, 0x3A, 0x99, 0x75, 0xE5, 0xBD, 0x2E, 0xA0, 0xEB, 0x55, 0xAD, 0x8E,
+ 0x1B, 0x19, 0x9E, 0x3E, 0xC4, 0x31, 0x60, 0x20, 0xE9, 0xA1, 0xB2, 0x85,
+ 0xE7, 0x62, 0x79, 0x53, 0x59, 0xB7, 0xBD, 0xFD, 0x39, 0xBE, 0xF4, 0xB2,
+ 0x48, 0x45, 0x83, 0xD5, 0xAF, 0xE0, 0x82, 0xAE, 0xE6, 0x38, 0xBF, 0x5F,
+ 0xD5, 0xA6, 0x06, 0x19, 0x39, 0x01, 0xA0, 0x8F, 0x4A, 0xB4, 0x1A, 0xAB,
+ 0x9B, 0x13, 0x48, 0x80},
+ {0xD1, 0xE2, 0xDE, 0x70, 0xEE, 0xF8, 0x6C, 0x69, 0x64, 0xFB, 0x54, 0x2B,
+ 0xC2, 0xD4, 0x60, 0xAA, 0xBF, 0xAA, 0x10, 0xA4, 0xA0, 0x93, 0x26, 0x2B,
+ 0x7D, 0x19, 0x9E, 0x70, 0x6F, 0xC2, 0xD4, 0x89, 0x15, 0x53, 0x29, 0x69,
+ 0x10, 0xF3, 0xA9, 0x73, 0x01, 0x26, 0x82, 0xE4, 0x1C, 0x4E, 0x2B, 0x02,
+ 0xBE, 0x20, 0x17, 0xB7, 0x25, 0x3B, 0xBF, 0x93, 0x09, 0xDE, 0x58, 0x19,
+ 0xCB, 0x42, 0xE8, 0x19, 0x56, 0xF4, 0xC9, 0x9B, 0xC9, 0x76, 0x5C, 0xAF,
+ 0x53, 0xB1, 0xD0, 0xBB, 0x82, 0x79, 0x82, 0x6A, 0xDB, 0xBC, 0x55, 0x22,
+ 0xE9, 0x15, 0xC1, 0x20, 0xA6, 0x18, 0xA5, 0xA7, 0xF5, 0xE8, 0x97, 0x08,
+ 0x93, 0x39, 0x65, 0x0F},
+ {0x2B, 0xD6, 0x45, 0x9F, 0x82, 0xC5, 0xB3, 0x00, 0x95, 0x2C, 0x49, 0x10,
+ 0x48, 0x81, 0xFF, 0x48},
+ {0x72, 0xA4, 0xF2, 0x0F, 0x64, 0x00, 0x00, 0x00}}, /*buffer2*/
+ {64,
+ 16,
+ 8,
+ {0x10, 0x11, 0x12, 0x31, 0xE0, 0x60, 0x25, 0x3A, 0x43, 0xFD, 0x3F,
+ 0x57, 0xE3, 0x76, 0x07, 0xAB, 0x28, 0x27, 0xB5, 0x99, 0xB6, 0xB1,
+ 0xBB, 0xDA, 0x37, 0xA8, 0xAB, 0xCC, 0x5A, 0x8C, 0x55, 0x0D, 0x1B,
+ 0xFB, 0x2F, 0x49, 0x46, 0x24, 0xFB, 0x50, 0x36, 0x7F, 0xA3, 0x6C,
+ 0xE3, 0xBC, 0x68, 0xF1, 0x1C, 0xF9, 0x3B, 0x15, 0x10, 0x37, 0x6B,
+ 0x02, 0x13, 0x0F, 0x81, 0x2A, 0x9F, 0xA1, 0x69, 0xD8},
+ {0xBF, 0x35, 0xDE, 0x33, 0x7A, 0xA3, 0xB8, 0x32, 0x60, 0x20, 0x2F,
+ 0x16, 0x4D, 0x9A, 0xA9, 0xD0, 0xF1, 0x7B, 0x6F, 0x4B, 0xFD, 0x76,
+ 0xAD, 0xF5, 0x08, 0x37, 0x01, 0xD0, 0xDA, 0x5D, 0x8E, 0x16, 0x9C,
+ 0x83, 0x05, 0x65, 0x5B, 0xED, 0xCB, 0x56, 0xD2, 0xE4, 0x0F, 0x28,
+ 0x14, 0xA7, 0xEE, 0x7D, 0xB9, 0xC0, 0xB2, 0x9C, 0xF1, 0x3D, 0xB4,
+ 0xB1, 0xF3, 0x70, 0x6B, 0xB6, 0xB3, 0x81, 0xF8, 0x92},
+ {0x2B, 0xD6, 0x45, 0x9F, 0x82, 0xC5, 0xB3, 0x00, 0x95, 0x2C, 0x49, 0x10,
+ 0x48, 0x81, 0xFF, 0x48},
+ {0x72, 0xA4, 0xF2, 0x0F, 0x64, 0x00, 0x00, 0x00}}, /*buffer3*/
+ {100,
+ 16,
+ 8,
+ {0x7E, 0xC6, 0x12, 0x72, 0x74, 0x3B, 0xF1, 0x61, 0x47, 0x26, 0x44, 0x6A,
+ 0x6C, 0x38, 0xCE, 0xD1, 0x66, 0xF6, 0xCA, 0x76, 0xEB, 0x54, 0x30, 0x04,
+ 0x42, 0x86, 0x34, 0x6C, 0xEF, 0x13, 0x0F, 0x92, 0x92, 0x2B, 0x03, 0x45,
+ 0x0D, 0x3A, 0x99, 0x75, 0xE5, 0xBD, 0x2E, 0xA0, 0xEB, 0x55, 0xAD, 0x8E,
+ 0x1B, 0x19, 0x9E, 0x3E, 0xC4, 0x31, 0x60, 0x20, 0xE9, 0xA1, 0xB2, 0x85,
+ 0xE7, 0x62, 0x79, 0x53, 0x59, 0xB7, 0xBD, 0xFD, 0x39, 0xBE, 0xF4, 0xB2,
+ 0x48, 0x45, 0x83, 0xD5, 0xAF, 0xE0, 0x82, 0xAE, 0xE6, 0x38, 0xBF, 0x5F,
+ 0xD5, 0xA6, 0x06, 0x19, 0x39, 0x01, 0xA0, 0x8F, 0x4A, 0xB4, 0x1A, 0xAB,
+ 0x9B, 0x13, 0x48, 0x80},
+ {0xD1, 0xE2, 0xDE, 0x70, 0xEE, 0xF8, 0x6C, 0x69, 0x64, 0xFB, 0x54, 0x2B,
+ 0xC2, 0xD4, 0x60, 0xAA, 0xBF, 0xAA, 0x10, 0xA4, 0xA0, 0x93, 0x26, 0x2B,
+ 0x7D, 0x19, 0x9E, 0x70, 0x6F, 0xC2, 0xD4, 0x89, 0x15, 0x53, 0x29, 0x69,
+ 0x10, 0xF3, 0xA9, 0x73, 0x01, 0x26, 0x82, 0xE4, 0x1C, 0x4E, 0x2B, 0x02,
+ 0xBE, 0x20, 0x17, 0xB7, 0x25, 0x3B, 0xBF, 0x93, 0x09, 0xDE, 0x58, 0x19,
+ 0xCB, 0x42, 0xE8, 0x19, 0x56, 0xF4, 0xC9, 0x9B, 0xC9, 0x76, 0x5C, 0xAF,
+ 0x53, 0xB1, 0xD0, 0xBB, 0x82, 0x79, 0x82, 0x6A, 0xDB, 0xBC, 0x55, 0x22,
+ 0xE9, 0x15, 0xC1, 0x20, 0xA6, 0x18, 0xA5, 0xA7, 0xF5, 0xE8, 0x97, 0x08,
+ 0x93, 0x39, 0x65, 0x0F},
+ {0x2B, 0xD6, 0x45, 0x9F, 0x82, 0xC5, 0xB3, 0x00, 0x95, 0x2C, 0x49, 0x10,
+ 0x48, 0x81, 0xFF, 0x48},
+ {0x72, 0xA4, 0xF2, 0x0F, 0x64, 0x00, 0x00, 0x00}}, /*buffer4*/
+ {64,
+ 16,
+ 8,
+ {0x10, 0x11, 0x12, 0x31, 0xE0, 0x60, 0x25, 0x3A, 0x43, 0xFD, 0x3F,
+ 0x57, 0xE3, 0x76, 0x07, 0xAB, 0x28, 0x27, 0xB5, 0x99, 0xB6, 0xB1,
+ 0xBB, 0xDA, 0x37, 0xA8, 0xAB, 0xCC, 0x5A, 0x8C, 0x55, 0x0D, 0x1B,
+ 0xFB, 0x2F, 0x49, 0x46, 0x24, 0xFB, 0x50, 0x36, 0x7F, 0xA3, 0x6C,
+ 0xE3, 0xBC, 0x68, 0xF1, 0x1C, 0xF9, 0x3B, 0x15, 0x10, 0x37, 0x6B,
+ 0x02, 0x13, 0x0F, 0x81, 0x2A, 0x9F, 0xA1, 0x69, 0xD8},
+ {0xBF, 0x35, 0xDE, 0x33, 0x7A, 0xA3, 0xB8, 0x32, 0x60, 0x20, 0x2F,
+ 0x16, 0x4D, 0x9A, 0xA9, 0xD0, 0xF1, 0x7B, 0x6F, 0x4B, 0xFD, 0x76,
+ 0xAD, 0xF5, 0x08, 0x37, 0x01, 0xD0, 0xDA, 0x5D, 0x8E, 0x16, 0x9C,
+ 0x83, 0x05, 0x65, 0x5B, 0xED, 0xCB, 0x56, 0xD2, 0xE4, 0x0F, 0x28,
+ 0x14, 0xA7, 0xEE, 0x7D, 0xB9, 0xC0, 0xB2, 0x9C, 0xF1, 0x3D, 0xB4,
+ 0xB1, 0xF3, 0x70, 0x6B, 0xB6, 0xB3, 0x81, 0xF8, 0x92},
+ {0x2B, 0xD6, 0x45, 0x9F, 0x82, 0xC5, 0xB3, 0x00, 0x95, 0x2C, 0x49, 0x10,
+ 0x48, 0x81, 0xFF, 0x48},
+ {0x72, 0xA4, 0xF2, 0x0F, 0x64, 0x00, 0x00, 0x00}}, /*buffer5*/
+ {100,
+ 16,
+ 8,
+ {0x7E, 0xC6, 0x12, 0x72, 0x74, 0x3B, 0xF1, 0x61, 0x47, 0x26, 0x44, 0x6A,
+ 0x6C, 0x38, 0xCE, 0xD1, 0x66, 0xF6, 0xCA, 0x76, 0xEB, 0x54, 0x30, 0x04,
+ 0x42, 0x86, 0x34, 0x6C, 0xEF, 0x13, 0x0F, 0x92, 0x92, 0x2B, 0x03, 0x45,
+ 0x0D, 0x3A, 0x99, 0x75, 0xE5, 0xBD, 0x2E, 0xA0, 0xEB, 0x55, 0xAD, 0x8E,
+ 0x1B, 0x19, 0x9E, 0x3E, 0xC4, 0x31, 0x60, 0x20, 0xE9, 0xA1, 0xB2, 0x85,
+ 0xE7, 0x62, 0x79, 0x53, 0x59, 0xB7, 0xBD, 0xFD, 0x39, 0xBE, 0xF4, 0xB2,
+ 0x48, 0x45, 0x83, 0xD5, 0xAF, 0xE0, 0x82, 0xAE, 0xE6, 0x38, 0xBF, 0x5F,
+ 0xD5, 0xA6, 0x06, 0x19, 0x39, 0x01, 0xA0, 0x8F, 0x4A, 0xB4, 0x1A, 0xAB,
+ 0x9B, 0x13, 0x48, 0x80},
+ {0xD1, 0xE2, 0xDE, 0x70, 0xEE, 0xF8, 0x6C, 0x69, 0x64, 0xFB, 0x54, 0x2B,
+ 0xC2, 0xD4, 0x60, 0xAA, 0xBF, 0xAA, 0x10, 0xA4, 0xA0, 0x93, 0x26, 0x2B,
+ 0x7D, 0x19, 0x9E, 0x70, 0x6F, 0xC2, 0xD4, 0x89, 0x15, 0x53, 0x29, 0x69,
+ 0x10, 0xF3, 0xA9, 0x73, 0x01, 0x26, 0x82, 0xE4, 0x1C, 0x4E, 0x2B, 0x02,
+ 0xBE, 0x20, 0x17, 0xB7, 0x25, 0x3B, 0xBF, 0x93, 0x09, 0xDE, 0x58, 0x19,
+ 0xCB, 0x42, 0xE8, 0x19, 0x56, 0xF4, 0xC9, 0x9B, 0xC9, 0x76, 0x5C, 0xAF,
+ 0x53, 0xB1, 0xD0, 0xBB, 0x82, 0x79, 0x82, 0x6A, 0xDB, 0xBC, 0x55, 0x22,
+ 0xE9, 0x15, 0xC1, 0x20, 0xA6, 0x18, 0xA5, 0xA7, 0xF5, 0xE8, 0x97, 0x08,
+ 0x93, 0x39, 0x65, 0x0F},
+ {0x2B, 0xD6, 0x45, 0x9F, 0x82, 0xC5, 0xB3, 0x00, 0x95, 0x2C, 0x49, 0x10,
+ 0x48, 0x81, 0xFF, 0x48},
+ {0x72, 0xA4, 0xF2, 0x0F, 0x64, 0x00, 0x00, 0x00}}, /*buffer6*/
+ {64,
+ 16,
+ 8,
+ {0x10, 0x11, 0x12, 0x31, 0xE0, 0x60, 0x25, 0x3A, 0x43, 0xFD, 0x3F,
+ 0x57, 0xE3, 0x76, 0x07, 0xAB, 0x28, 0x27, 0xB5, 0x99, 0xB6, 0xB1,
+ 0xBB, 0xDA, 0x37, 0xA8, 0xAB, 0xCC, 0x5A, 0x8C, 0x55, 0x0D, 0x1B,
+ 0xFB, 0x2F, 0x49, 0x46, 0x24, 0xFB, 0x50, 0x36, 0x7F, 0xA3, 0x6C,
+ 0xE3, 0xBC, 0x68, 0xF1, 0x1C, 0xF9, 0x3B, 0x15, 0x10, 0x37, 0x6B,
+ 0x02, 0x13, 0x0F, 0x81, 0x2A, 0x9F, 0xA1, 0x69, 0xD8},
+ {0xBF, 0x35, 0xDE, 0x33, 0x7A, 0xA3, 0xB8, 0x32, 0x60, 0x20, 0x2F,
+ 0x16, 0x4D, 0x9A, 0xA9, 0xD0, 0xF1, 0x7B, 0x6F, 0x4B, 0xFD, 0x76,
+ 0xAD, 0xF5, 0x08, 0x37, 0x01, 0xD0, 0xDA, 0x5D, 0x8E, 0x16, 0x9C,
+ 0x83, 0x05, 0x65, 0x5B, 0xED, 0xCB, 0x56, 0xD2, 0xE4, 0x0F, 0x28,
+ 0x14, 0xA7, 0xEE, 0x7D, 0xB9, 0xC0, 0xB2, 0x9C, 0xF1, 0x3D, 0xB4,
+ 0xB1, 0xF3, 0x70, 0x6B, 0xB6, 0xB3, 0x81, 0xF8, 0x92},
+ {0x2B, 0xD6, 0x45, 0x9F, 0x82, 0xC5, 0xB3, 0x00, 0x95, 0x2C, 0x49, 0x10,
+ 0x48, 0x81, 0xFF, 0x48},
+ {0x72, 0xA4, 0xF2, 0x0F, 0x64, 0x00, 0x00, 0x00}}, /*buffer7*/
+ {100,
+ 16,
+ 8,
+ {0x7E, 0xC6, 0x12, 0x72, 0x74, 0x3B, 0xF1, 0x61, 0x47, 0x26, 0x44, 0x6A,
+ 0x6C, 0x38, 0xCE, 0xD1, 0x66, 0xF6, 0xCA, 0x76, 0xEB, 0x54, 0x30, 0x04,
+ 0x42, 0x86, 0x34, 0x6C, 0xEF, 0x13, 0x0F, 0x92, 0x92, 0x2B, 0x03, 0x45,
+ 0x0D, 0x3A, 0x99, 0x75, 0xE5, 0xBD, 0x2E, 0xA0, 0xEB, 0x55, 0xAD, 0x8E,
+ 0x1B, 0x19, 0x9E, 0x3E, 0xC4, 0x31, 0x60, 0x20, 0xE9, 0xA1, 0xB2, 0x85,
+ 0xE7, 0x62, 0x79, 0x53, 0x59, 0xB7, 0xBD, 0xFD, 0x39, 0xBE, 0xF4, 0xB2,
+ 0x48, 0x45, 0x83, 0xD5, 0xAF, 0xE0, 0x82, 0xAE, 0xE6, 0x38, 0xBF, 0x5F,
+ 0xD5, 0xA6, 0x06, 0x19, 0x39, 0x01, 0xA0, 0x8F, 0x4A, 0xB4, 0x1A, 0xAB,
+ 0x9B, 0x13, 0x48, 0x80},
+ {0xD1, 0xE2, 0xDE, 0x70, 0xEE, 0xF8, 0x6C, 0x69, 0x64, 0xFB, 0x54, 0x2B,
+ 0xC2, 0xD4, 0x60, 0xAA, 0xBF, 0xAA, 0x10, 0xA4, 0xA0, 0x93, 0x26, 0x2B,
+ 0x7D, 0x19, 0x9E, 0x70, 0x6F, 0xC2, 0xD4, 0x89, 0x15, 0x53, 0x29, 0x69,
+ 0x10, 0xF3, 0xA9, 0x73, 0x01, 0x26, 0x82, 0xE4, 0x1C, 0x4E, 0x2B, 0x02,
+ 0xBE, 0x20, 0x17, 0xB7, 0x25, 0x3B, 0xBF, 0x93, 0x09, 0xDE, 0x58, 0x19,
+ 0xCB, 0x42, 0xE8, 0x19, 0x56, 0xF4, 0xC9, 0x9B, 0xC9, 0x76, 0x5C, 0xAF,
+ 0x53, 0xB1, 0xD0, 0xBB, 0x82, 0x79, 0x82, 0x6A, 0xDB, 0xBC, 0x55, 0x22,
+ 0xE9, 0x15, 0xC1, 0x20, 0xA6, 0x18, 0xA5, 0xA7, 0xF5, 0xE8, 0x97, 0x08,
+ 0x93, 0x39, 0x65, 0x0F},
+ {0x2B, 0xD6, 0x45, 0x9F, 0x82, 0xC5, 0xB3, 0x00, 0x95, 0x2C, 0x49, 0x10,
+ 0x48, 0x81, 0xFF, 0x48},
+ {0x72, 0xA4, 0xF2, 0x0F, 0x64, 0x00, 0x00, 0x00}}, /*buffer8*/
+ {64,
+ 16,
+ 8,
+ {0x10, 0x11, 0x12, 0x31, 0xE0, 0x60, 0x25, 0x3A, 0x43, 0xFD, 0x3F,
+ 0x57, 0xE3, 0x76, 0x07, 0xAB, 0x28, 0x27, 0xB5, 0x99, 0xB6, 0xB1,
+ 0xBB, 0xDA, 0x37, 0xA8, 0xAB, 0xCC, 0x5A, 0x8C, 0x55, 0x0D, 0x1B,
+ 0xFB, 0x2F, 0x49, 0x46, 0x24, 0xFB, 0x50, 0x36, 0x7F, 0xA3, 0x6C,
+ 0xE3, 0xBC, 0x68, 0xF1, 0x1C, 0xF9, 0x3B, 0x15, 0x10, 0x37, 0x6B,
+ 0x02, 0x13, 0x0F, 0x81, 0x2A, 0x9F, 0xA1, 0x69, 0xD8},
+ {0xBF, 0x35, 0xDE, 0x33, 0x7A, 0xA3, 0xB8, 0x32, 0x60, 0x20, 0x2F,
+ 0x16, 0x4D, 0x9A, 0xA9, 0xD0, 0xF1, 0x7B, 0x6F, 0x4B, 0xFD, 0x76,
+ 0xAD, 0xF5, 0x08, 0x37, 0x01, 0xD0, 0xDA, 0x5D, 0x8E, 0x16, 0x9C,
+ 0x83, 0x05, 0x65, 0x5B, 0xED, 0xCB, 0x56, 0xD2, 0xE4, 0x0F, 0x28,
+ 0x14, 0xA7, 0xEE, 0x7D, 0xB9, 0xC0, 0xB2, 0x9C, 0xF1, 0x3D, 0xB4,
+ 0xB1, 0xF3, 0x70, 0x6B, 0xB6, 0xB3, 0x81, 0xF8, 0x92},
+ {0x2B, 0xD6, 0x45, 0x9F, 0x82, 0xC5, 0xB3, 0x00, 0x95, 0x2C, 0x49, 0x10,
+ 0x48, 0x81, 0xFF, 0x48},
+ {0x72, 0xA4, 0xF2, 0x0F, 0x64, 0x00, 0x00, 0x00}}, /*buffer9*/
+ {100,
+ 16,
+ 8,
+ {0x7E, 0xC6, 0x12, 0x72, 0x74, 0x3B, 0xF1, 0x61, 0x47, 0x26, 0x44, 0x6A,
+ 0x6C, 0x38, 0xCE, 0xD1, 0x66, 0xF6, 0xCA, 0x76, 0xEB, 0x54, 0x30, 0x04,
+ 0x42, 0x86, 0x34, 0x6C, 0xEF, 0x13, 0x0F, 0x92, 0x92, 0x2B, 0x03, 0x45,
+ 0x0D, 0x3A, 0x99, 0x75, 0xE5, 0xBD, 0x2E, 0xA0, 0xEB, 0x55, 0xAD, 0x8E,
+ 0x1B, 0x19, 0x9E, 0x3E, 0xC4, 0x31, 0x60, 0x20, 0xE9, 0xA1, 0xB2, 0x85,
+ 0xE7, 0x62, 0x79, 0x53, 0x59, 0xB7, 0xBD, 0xFD, 0x39, 0xBE, 0xF4, 0xB2,
+ 0x48, 0x45, 0x83, 0xD5, 0xAF, 0xE0, 0x82, 0xAE, 0xE6, 0x38, 0xBF, 0x5F,
+ 0xD5, 0xA6, 0x06, 0x19, 0x39, 0x01, 0xA0, 0x8F, 0x4A, 0xB4, 0x1A, 0xAB,
+ 0x9B, 0x13, 0x48, 0x80},
+ {0xD1, 0xE2, 0xDE, 0x70, 0xEE, 0xF8, 0x6C, 0x69, 0x64, 0xFB, 0x54, 0x2B,
+ 0xC2, 0xD4, 0x60, 0xAA, 0xBF, 0xAA, 0x10, 0xA4, 0xA0, 0x93, 0x26, 0x2B,
+ 0x7D, 0x19, 0x9E, 0x70, 0x6F, 0xC2, 0xD4, 0x89, 0x15, 0x53, 0x29, 0x69,
+ 0x10, 0xF3, 0xA9, 0x73, 0x01, 0x26, 0x82, 0xE4, 0x1C, 0x4E, 0x2B, 0x02,
+ 0xBE, 0x20, 0x17, 0xB7, 0x25, 0x3B, 0xBF, 0x93, 0x09, 0xDE, 0x58, 0x19,
+ 0xCB, 0x42, 0xE8, 0x19, 0x56, 0xF4, 0xC9, 0x9B, 0xC9, 0x76, 0x5C, 0xAF,
+ 0x53, 0xB1, 0xD0, 0xBB, 0x82, 0x79, 0x82, 0x6A, 0xDB, 0xBC, 0x55, 0x22,
+ 0xE9, 0x15, 0xC1, 0x20, 0xA6, 0x18, 0xA5, 0xA7, 0xF5, 0xE8, 0x97, 0x08,
+ 0x93, 0x39, 0x65, 0x0F},
+ {0x2B, 0xD6, 0x45, 0x9F, 0x82, 0xC5, 0xB3, 0x00, 0x95, 0x2C, 0x49, 0x10,
+ 0x48, 0x81, 0xFF, 0x48},
+ {0x72, 0xA4, 0xF2, 0x0F, 0x64, 0x00, 0x00, 0x00}}, /*buffer10*/
+ {64,
+ 16,
+ 8,
+ {0x10, 0x11, 0x12, 0x31, 0xE0, 0x60, 0x25, 0x3A, 0x43, 0xFD, 0x3F,
+ 0x57, 0xE3, 0x76, 0x07, 0xAB, 0x28, 0x27, 0xB5, 0x99, 0xB6, 0xB1,
+ 0xBB, 0xDA, 0x37, 0xA8, 0xAB, 0xCC, 0x5A, 0x8C, 0x55, 0x0D, 0x1B,
+ 0xFB, 0x2F, 0x49, 0x46, 0x24, 0xFB, 0x50, 0x36, 0x7F, 0xA3, 0x6C,
+ 0xE3, 0xBC, 0x68, 0xF1, 0x1C, 0xF9, 0x3B, 0x15, 0x10, 0x37, 0x6B,
+ 0x02, 0x13, 0x0F, 0x81, 0x2A, 0x9F, 0xA1, 0x69, 0xD8},
+ {0xBF, 0x35, 0xDE, 0x33, 0x7A, 0xA3, 0xB8, 0x32, 0x60, 0x20, 0x2F,
+ 0x16, 0x4D, 0x9A, 0xA9, 0xD0, 0xF1, 0x7B, 0x6F, 0x4B, 0xFD, 0x76,
+ 0xAD, 0xF5, 0x08, 0x37, 0x01, 0xD0, 0xDA, 0x5D, 0x8E, 0x16, 0x9C,
+ 0x83, 0x05, 0x65, 0x5B, 0xED, 0xCB, 0x56, 0xD2, 0xE4, 0x0F, 0x28,
+ 0x14, 0xA7, 0xEE, 0x7D, 0xB9, 0xC0, 0xB2, 0x9C, 0xF1, 0x3D, 0xB4,
+ 0xB1, 0xF3, 0x70, 0x6B, 0xB6, 0xB3, 0x81, 0xF8, 0x92},
+ {0x2B, 0xD6, 0x45, 0x9F, 0x82, 0xC5, 0xB3, 0x00, 0x95, 0x2C, 0x49, 0x10,
+ 0x48, 0x81, 0xFF, 0x48},
+ {0x72, 0xA4, 0xF2, 0x0F, 0x64, 0x00, 0x00, 0x00}}, /*buffer11*/
+ {100,
+ 16,
+ 8,
+ {0x7E, 0xC6, 0x12, 0x72, 0x74, 0x3B, 0xF1, 0x61, 0x47, 0x26, 0x44, 0x6A,
+ 0x6C, 0x38, 0xCE, 0xD1, 0x66, 0xF6, 0xCA, 0x76, 0xEB, 0x54, 0x30, 0x04,
+ 0x42, 0x86, 0x34, 0x6C, 0xEF, 0x13, 0x0F, 0x92, 0x92, 0x2B, 0x03, 0x45,
+ 0x0D, 0x3A, 0x99, 0x75, 0xE5, 0xBD, 0x2E, 0xA0, 0xEB, 0x55, 0xAD, 0x8E,
+ 0x1B, 0x19, 0x9E, 0x3E, 0xC4, 0x31, 0x60, 0x20, 0xE9, 0xA1, 0xB2, 0x85,
+ 0xE7, 0x62, 0x79, 0x53, 0x59, 0xB7, 0xBD, 0xFD, 0x39, 0xBE, 0xF4, 0xB2,
+ 0x48, 0x45, 0x83, 0xD5, 0xAF, 0xE0, 0x82, 0xAE, 0xE6, 0x38, 0xBF, 0x5F,
+ 0xD5, 0xA6, 0x06, 0x19, 0x39, 0x01, 0xA0, 0x8F, 0x4A, 0xB4, 0x1A, 0xAB,
+ 0x9B, 0x13, 0x48, 0x80},
+ {0xD1, 0xE2, 0xDE, 0x70, 0xEE, 0xF8, 0x6C, 0x69, 0x64, 0xFB, 0x54, 0x2B,
+ 0xC2, 0xD4, 0x60, 0xAA, 0xBF, 0xAA, 0x10, 0xA4, 0xA0, 0x93, 0x26, 0x2B,
+ 0x7D, 0x19, 0x9E, 0x70, 0x6F, 0xC2, 0xD4, 0x89, 0x15, 0x53, 0x29, 0x69,
+ 0x10, 0xF3, 0xA9, 0x73, 0x01, 0x26, 0x82, 0xE4, 0x1C, 0x4E, 0x2B, 0x02,
+ 0xBE, 0x20, 0x17, 0xB7, 0x25, 0x3B, 0xBF, 0x93, 0x09, 0xDE, 0x58, 0x19,
+ 0xCB, 0x42, 0xE8, 0x19, 0x56, 0xF4, 0xC9, 0x9B, 0xC9, 0x76, 0x5C, 0xAF,
+ 0x53, 0xB1, 0xD0, 0xBB, 0x82, 0x79, 0x82, 0x6A, 0xDB, 0xBC, 0x55, 0x22,
+ 0xE9, 0x15, 0xC1, 0x20, 0xA6, 0x18, 0xA5, 0xA7, 0xF5, 0xE8, 0x97, 0x08,
+ 0x93, 0x39, 0x65, 0x0F},
+ {0x2B, 0xD6, 0x45, 0x9F, 0x82, 0xC5, 0xB3, 0x00, 0x95, 0x2C, 0x49, 0x10,
+ 0x48, 0x81, 0xFF, 0x48},
+ {0x72, 0xA4, 0xF2, 0x0F, 0x64, 0x00, 0x00, 0x00}}, /*buffer12*/
+ {64,
+ 16,
+ 8,
+ {0x10, 0x11, 0x12, 0x31, 0xE0, 0x60, 0x25, 0x3A, 0x43, 0xFD, 0x3F,
+ 0x57, 0xE3, 0x76, 0x07, 0xAB, 0x28, 0x27, 0xB5, 0x99, 0xB6, 0xB1,
+ 0xBB, 0xDA, 0x37, 0xA8, 0xAB, 0xCC, 0x5A, 0x8C, 0x55, 0x0D, 0x1B,
+ 0xFB, 0x2F, 0x49, 0x46, 0x24, 0xFB, 0x50, 0x36, 0x7F, 0xA3, 0x6C,
+ 0xE3, 0xBC, 0x68, 0xF1, 0x1C, 0xF9, 0x3B, 0x15, 0x10, 0x37, 0x6B,
+ 0x02, 0x13, 0x0F, 0x81, 0x2A, 0x9F, 0xA1, 0x69, 0xD8},
+ {0xBF, 0x35, 0xDE, 0x33, 0x7A, 0xA3, 0xB8, 0x32, 0x60, 0x20, 0x2F,
+ 0x16, 0x4D, 0x9A, 0xA9, 0xD0, 0xF1, 0x7B, 0x6F, 0x4B, 0xFD, 0x76,
+ 0xAD, 0xF5, 0x08, 0x37, 0x01, 0xD0, 0xDA, 0x5D, 0x8E, 0x16, 0x9C,
+ 0x83, 0x05, 0x65, 0x5B, 0xED, 0xCB, 0x56, 0xD2, 0xE4, 0x0F, 0x28,
+ 0x14, 0xA7, 0xEE, 0x7D, 0xB9, 0xC0, 0xB2, 0x9C, 0xF1, 0x3D, 0xB4,
+ 0xB1, 0xF3, 0x70, 0x6B, 0xB6, 0xB3, 0x81, 0xF8, 0x92},
+ {0x2B, 0xD6, 0x45, 0x9F, 0x82, 0xC5, 0xB3, 0x00, 0x95, 0x2C, 0x49, 0x10,
+ 0x48, 0x81, 0xFF, 0x48},
+ {0x72, 0xA4, 0xF2, 0x0F, 0x64, 0x00, 0x00, 0x00}}, /*buffer13*/
+ {100,
+ 16,
+ 8,
+ {0x7E, 0xC6, 0x12, 0x72, 0x74, 0x3B, 0xF1, 0x61, 0x47, 0x26, 0x44, 0x6A,
+ 0x6C, 0x38, 0xCE, 0xD1, 0x66, 0xF6, 0xCA, 0x76, 0xEB, 0x54, 0x30, 0x04,
+ 0x42, 0x86, 0x34, 0x6C, 0xEF, 0x13, 0x0F, 0x92, 0x92, 0x2B, 0x03, 0x45,
+ 0x0D, 0x3A, 0x99, 0x75, 0xE5, 0xBD, 0x2E, 0xA0, 0xEB, 0x55, 0xAD, 0x8E,
+ 0x1B, 0x19, 0x9E, 0x3E, 0xC4, 0x31, 0x60, 0x20, 0xE9, 0xA1, 0xB2, 0x85,
+ 0xE7, 0x62, 0x79, 0x53, 0x59, 0xB7, 0xBD, 0xFD, 0x39, 0xBE, 0xF4, 0xB2,
+ 0x48, 0x45, 0x83, 0xD5, 0xAF, 0xE0, 0x82, 0xAE, 0xE6, 0x38, 0xBF, 0x5F,
+ 0xD5, 0xA6, 0x06, 0x19, 0x39, 0x01, 0xA0, 0x8F, 0x4A, 0xB4, 0x1A, 0xAB,
+ 0x9B, 0x13, 0x48, 0x80},
+ {0xD1, 0xE2, 0xDE, 0x70, 0xEE, 0xF8, 0x6C, 0x69, 0x64, 0xFB, 0x54, 0x2B,
+ 0xC2, 0xD4, 0x60, 0xAA, 0xBF, 0xAA, 0x10, 0xA4, 0xA0, 0x93, 0x26, 0x2B,
+ 0x7D, 0x19, 0x9E, 0x70, 0x6F, 0xC2, 0xD4, 0x89, 0x15, 0x53, 0x29, 0x69,
+ 0x10, 0xF3, 0xA9, 0x73, 0x01, 0x26, 0x82, 0xE4, 0x1C, 0x4E, 0x2B, 0x02,
+ 0xBE, 0x20, 0x17, 0xB7, 0x25, 0x3B, 0xBF, 0x93, 0x09, 0xDE, 0x58, 0x19,
+ 0xCB, 0x42, 0xE8, 0x19, 0x56, 0xF4, 0xC9, 0x9B, 0xC9, 0x76, 0x5C, 0xAF,
+ 0x53, 0xB1, 0xD0, 0xBB, 0x82, 0x79, 0x82, 0x6A, 0xDB, 0xBC, 0x55, 0x22,
+ 0xE9, 0x15, 0xC1, 0x20, 0xA6, 0x18, 0xA5, 0xA7, 0xF5, 0xE8, 0x97, 0x08,
+ 0x93, 0x39, 0x65, 0x0F},
+ {0x2B, 0xD6, 0x45, 0x9F, 0x82, 0xC5, 0xB3, 0x00, 0x95, 0x2C, 0x49, 0x10,
+ 0x48, 0x81, 0xFF, 0x48},
+ {0x72, 0xA4, 0xF2, 0x0F, 0x64, 0x00, 0x00, 0x00}}, /*buffer14*/
+ {64,
+ 16,
+ 8,
+ {0x10, 0x11, 0x12, 0x31, 0xE0, 0x60, 0x25, 0x3A, 0x43, 0xFD, 0x3F,
+ 0x57, 0xE3, 0x76, 0x07, 0xAB, 0x28, 0x27, 0xB5, 0x99, 0xB6, 0xB1,
+ 0xBB, 0xDA, 0x37, 0xA8, 0xAB, 0xCC, 0x5A, 0x8C, 0x55, 0x0D, 0x1B,
+ 0xFB, 0x2F, 0x49, 0x46, 0x24, 0xFB, 0x50, 0x36, 0x7F, 0xA3, 0x6C,
+ 0xE3, 0xBC, 0x68, 0xF1, 0x1C, 0xF9, 0x3B, 0x15, 0x10, 0x37, 0x6B,
+ 0x02, 0x13, 0x0F, 0x81, 0x2A, 0x9F, 0xA1, 0x69, 0xD8},
+ {0xBF, 0x35, 0xDE, 0x33, 0x7A, 0xA3, 0xB8, 0x32, 0x60, 0x20, 0x2F,
+ 0x16, 0x4D, 0x9A, 0xA9, 0xD0, 0xF1, 0x7B, 0x6F, 0x4B, 0xFD, 0x76,
+ 0xAD, 0xF5, 0x08, 0x37, 0x01, 0xD0, 0xDA, 0x5D, 0x8E, 0x16, 0x9C,
+ 0x83, 0x05, 0x65, 0x5B, 0xED, 0xCB, 0x56, 0xD2, 0xE4, 0x0F, 0x28,
+ 0x14, 0xA7, 0xEE, 0x7D, 0xB9, 0xC0, 0xB2, 0x9C, 0xF1, 0x3D, 0xB4,
+ 0xB1, 0xF3, 0x70, 0x6B, 0xB6, 0xB3, 0x81, 0xF8, 0x92},
+ {0x2B, 0xD6, 0x45, 0x9F, 0x82, 0xC5, 0xB3, 0x00, 0x95, 0x2C, 0x49, 0x10,
+ 0x48, 0x81, 0xFF, 0x48},
+ {0x72, 0xA4, 0xF2, 0x0F, 0x64, 0x00, 0x00, 0x00}}, /*buffer15*/
+ {100,
+ 16,
+ 8,
+ {0x7E, 0xC6, 0x12, 0x72, 0x74, 0x3B, 0xF1, 0x61, 0x47, 0x26, 0x44, 0x6A,
+ 0x6C, 0x38, 0xCE, 0xD1, 0x66, 0xF6, 0xCA, 0x76, 0xEB, 0x54, 0x30, 0x04,
+ 0x42, 0x86, 0x34, 0x6C, 0xEF, 0x13, 0x0F, 0x92, 0x92, 0x2B, 0x03, 0x45,
+ 0x0D, 0x3A, 0x99, 0x75, 0xE5, 0xBD, 0x2E, 0xA0, 0xEB, 0x55, 0xAD, 0x8E,
+ 0x1B, 0x19, 0x9E, 0x3E, 0xC4, 0x31, 0x60, 0x20, 0xE9, 0xA1, 0xB2, 0x85,
+ 0xE7, 0x62, 0x79, 0x53, 0x59, 0xB7, 0xBD, 0xFD, 0x39, 0xBE, 0xF4, 0xB2,
+ 0x48, 0x45, 0x83, 0xD5, 0xAF, 0xE0, 0x82, 0xAE, 0xE6, 0x38, 0xBF, 0x5F,
+ 0xD5, 0xA6, 0x06, 0x19, 0x39, 0x01, 0xA0, 0x8F, 0x4A, 0xB4, 0x1A, 0xAB,
+ 0x9B, 0x13, 0x48, 0x80},
+ {0xD1, 0xE2, 0xDE, 0x70, 0xEE, 0xF8, 0x6C, 0x69, 0x64, 0xFB, 0x54, 0x2B,
+ 0xC2, 0xD4, 0x60, 0xAA, 0xBF, 0xAA, 0x10, 0xA4, 0xA0, 0x93, 0x26, 0x2B,
+ 0x7D, 0x19, 0x9E, 0x70, 0x6F, 0xC2, 0xD4, 0x89, 0x15, 0x53, 0x29, 0x69,
+ 0x10, 0xF3, 0xA9, 0x73, 0x01, 0x26, 0x82, 0xE4, 0x1C, 0x4E, 0x2B, 0x02,
+ 0xBE, 0x20, 0x17, 0xB7, 0x25, 0x3B, 0xBF, 0x93, 0x09, 0xDE, 0x58, 0x19,
+ 0xCB, 0x42, 0xE8, 0x19, 0x56, 0xF4, 0xC9, 0x9B, 0xC9, 0x76, 0x5C, 0xAF,
+ 0x53, 0xB1, 0xD0, 0xBB, 0x82, 0x79, 0x82, 0x6A, 0xDB, 0xBC, 0x55, 0x22,
+ 0xE9, 0x15, 0xC1, 0x20, 0xA6, 0x18, 0xA5, 0xA7, 0xF5, 0xE8, 0x97, 0x08,
+ 0x93, 0x39, 0x65, 0x0F},
+ {0x2B, 0xD6, 0x45, 0x9F, 0x82, 0xC5, 0xB3, 0x00, 0x95, 0x2C, 0x49, 0x10,
+ 0x48, 0x81, 0xFF, 0x48},
+ {0x72, 0xA4, 0xF2, 0x0F, 0x64, 0x00, 0x00, 0x00}}, /*buffer16*/
+ {64,
+ 16,
+ 8,
+ {0x10, 0x11, 0x12, 0x31, 0xE0, 0x60, 0x25, 0x3A, 0x43, 0xFD, 0x3F,
+ 0x57, 0xE3, 0x76, 0x07, 0xAB, 0x28, 0x27, 0xB5, 0x99, 0xB6, 0xB1,
+ 0xBB, 0xDA, 0x37, 0xA8, 0xAB, 0xCC, 0x5A, 0x8C, 0x55, 0x0D, 0x1B,
+ 0xFB, 0x2F, 0x49, 0x46, 0x24, 0xFB, 0x50, 0x36, 0x7F, 0xA3, 0x6C,
+ 0xE3, 0xBC, 0x68, 0xF1, 0x1C, 0xF9, 0x3B, 0x15, 0x10, 0x37, 0x6B,
+ 0x02, 0x13, 0x0F, 0x81, 0x2A, 0x9F, 0xA1, 0x69, 0xD8},
+ {0xBF, 0x35, 0xDE, 0x33, 0x7A, 0xA3, 0xB8, 0x32, 0x60, 0x20, 0x2F,
+ 0x16, 0x4D, 0x9A, 0xA9, 0xD0, 0xF1, 0x7B, 0x6F, 0x4B, 0xFD, 0x76,
+ 0xAD, 0xF5, 0x08, 0x37, 0x01, 0xD0, 0xDA, 0x5D, 0x8E, 0x16, 0x9C,
+ 0x83, 0x05, 0x65, 0x5B, 0xED, 0xCB, 0x56, 0xD2, 0xE4, 0x0F, 0x28,
+ 0x14, 0xA7, 0xEE, 0x7D, 0xB9, 0xC0, 0xB2, 0x9C, 0xF1, 0x3D, 0xB4,
+ 0xB1, 0xF3, 0x70, 0x6B, 0xB6, 0xB3, 0x81, 0xF8, 0x92},
+ {0x2B, 0xD6, 0x45, 0x9F, 0x82, 0xC5, 0xB3, 0x00, 0x95, 0x2C, 0x49, 0x10,
+ 0x48, 0x81, 0xFF, 0x48},
+ {0x72, 0xA4, 0xF2, 0x0F, 0x64, 0x00, 0x00, 0x00}}, /*buffer17*/
+ {100,
+ 16,
+ 8,
+ {0x7E, 0xC6, 0x12, 0x72, 0x74, 0x3B, 0xF1, 0x61, 0x47, 0x26, 0x44, 0x6A,
+ 0x6C, 0x38, 0xCE, 0xD1, 0x66, 0xF6, 0xCA, 0x76, 0xEB, 0x54, 0x30, 0x04,
+ 0x42, 0x86, 0x34, 0x6C, 0xEF, 0x13, 0x0F, 0x92, 0x92, 0x2B, 0x03, 0x45,
+ 0x0D, 0x3A, 0x99, 0x75, 0xE5, 0xBD, 0x2E, 0xA0, 0xEB, 0x55, 0xAD, 0x8E,
+ 0x1B, 0x19, 0x9E, 0x3E, 0xC4, 0x31, 0x60, 0x20, 0xE9, 0xA1, 0xB2, 0x85,
+ 0xE7, 0x62, 0x79, 0x53, 0x59, 0xB7, 0xBD, 0xFD, 0x39, 0xBE, 0xF4, 0xB2,
+ 0x48, 0x45, 0x83, 0xD5, 0xAF, 0xE0, 0x82, 0xAE, 0xE6, 0x38, 0xBF, 0x5F,
+ 0xD5, 0xA6, 0x06, 0x19, 0x39, 0x01, 0xA0, 0x8F, 0x4A, 0xB4, 0x1A, 0xAB,
+ 0x9B, 0x13, 0x48, 0x80},
+ {0xD1, 0xE2, 0xDE, 0x70, 0xEE, 0xF8, 0x6C, 0x69, 0x64, 0xFB, 0x54, 0x2B,
+ 0xC2, 0xD4, 0x60, 0xAA, 0xBF, 0xAA, 0x10, 0xA4, 0xA0, 0x93, 0x26, 0x2B,
+ 0x7D, 0x19, 0x9E, 0x70, 0x6F, 0xC2, 0xD4, 0x89, 0x15, 0x53, 0x29, 0x69,
+ 0x10, 0xF3, 0xA9, 0x73, 0x01, 0x26, 0x82, 0xE4, 0x1C, 0x4E, 0x2B, 0x02,
+ 0xBE, 0x20, 0x17, 0xB7, 0x25, 0x3B, 0xBF, 0x93, 0x09, 0xDE, 0x58, 0x19,
+ 0xCB, 0x42, 0xE8, 0x19, 0x56, 0xF4, 0xC9, 0x9B, 0xC9, 0x76, 0x5C, 0xAF,
+ 0x53, 0xB1, 0xD0, 0xBB, 0x82, 0x79, 0x82, 0x6A, 0xDB, 0xBC, 0x55, 0x22,
+ 0xE9, 0x15, 0xC1, 0x20, 0xA6, 0x18, 0xA5, 0xA7, 0xF5, 0xE8, 0x97, 0x08,
+ 0x93, 0x39, 0x65, 0x0F},
+ {0x2B, 0xD6, 0x45, 0x9F, 0x82, 0xC5, 0xB3, 0x00, 0x95, 0x2C, 0x49, 0x10,
+ 0x48, 0x81, 0xFF, 0x48},
+ {0x72, 0xA4, 0xF2, 0x0F, 0x64, 0x00, 0x00, 0x00}}, /*buffer18*/
+ {64,
+ 16,
+ 8,
+ {0x10, 0x11, 0x12, 0x31, 0xE0, 0x60, 0x25, 0x3A, 0x43, 0xFD, 0x3F,
+ 0x57, 0xE3, 0x76, 0x07, 0xAB, 0x28, 0x27, 0xB5, 0x99, 0xB6, 0xB1,
+ 0xBB, 0xDA, 0x37, 0xA8, 0xAB, 0xCC, 0x5A, 0x8C, 0x55, 0x0D, 0x1B,
+ 0xFB, 0x2F, 0x49, 0x46, 0x24, 0xFB, 0x50, 0x36, 0x7F, 0xA3, 0x6C,
+ 0xE3, 0xBC, 0x68, 0xF1, 0x1C, 0xF9, 0x3B, 0x15, 0x10, 0x37, 0x6B,
+ 0x02, 0x13, 0x0F, 0x81, 0x2A, 0x9F, 0xA1, 0x69, 0xD8},
+ {0xBF, 0x35, 0xDE, 0x33, 0x7A, 0xA3, 0xB8, 0x32, 0x60, 0x20, 0x2F,
+ 0x16, 0x4D, 0x9A, 0xA9, 0xD0, 0xF1, 0x7B, 0x6F, 0x4B, 0xFD, 0x76,
+ 0xAD, 0xF5, 0x08, 0x37, 0x01, 0xD0, 0xDA, 0x5D, 0x8E, 0x16, 0x9C,
+ 0x83, 0x05, 0x65, 0x5B, 0xED, 0xCB, 0x56, 0xD2, 0xE4, 0x0F, 0x28,
+ 0x14, 0xA7, 0xEE, 0x7D, 0xB9, 0xC0, 0xB2, 0x9C, 0xF1, 0x3D, 0xB4,
+ 0xB1, 0xF3, 0x70, 0x6B, 0xB6, 0xB3, 0x81, 0xF8, 0x92},
+ {0x2B, 0xD6, 0x45, 0x9F, 0x82, 0xC5, 0xB3, 0x00, 0x95, 0x2C, 0x49, 0x10,
+ 0x48, 0x81, 0xFF, 0x48},
+ {0x72, 0xA4, 0xF2, 0x0F, 0x64, 0x00, 0x00, 0x00}}, /*buffer19*/
+ {100,
+ 16,
+ 8,
+ {0x7E, 0xC6, 0x12, 0x72, 0x74, 0x3B, 0xF1, 0x61, 0x47, 0x26, 0x44, 0x6A,
+ 0x6C, 0x38, 0xCE, 0xD1, 0x66, 0xF6, 0xCA, 0x76, 0xEB, 0x54, 0x30, 0x04,
+ 0x42, 0x86, 0x34, 0x6C, 0xEF, 0x13, 0x0F, 0x92, 0x92, 0x2B, 0x03, 0x45,
+ 0x0D, 0x3A, 0x99, 0x75, 0xE5, 0xBD, 0x2E, 0xA0, 0xEB, 0x55, 0xAD, 0x8E,
+ 0x1B, 0x19, 0x9E, 0x3E, 0xC4, 0x31, 0x60, 0x20, 0xE9, 0xA1, 0xB2, 0x85,
+ 0xE7, 0x62, 0x79, 0x53, 0x59, 0xB7, 0xBD, 0xFD, 0x39, 0xBE, 0xF4, 0xB2,
+ 0x48, 0x45, 0x83, 0xD5, 0xAF, 0xE0, 0x82, 0xAE, 0xE6, 0x38, 0xBF, 0x5F,
+ 0xD5, 0xA6, 0x06, 0x19, 0x39, 0x01, 0xA0, 0x8F, 0x4A, 0xB4, 0x1A, 0xAB,
+ 0x9B, 0x13, 0x48, 0x80},
+ {0xD1, 0xE2, 0xDE, 0x70, 0xEE, 0xF8, 0x6C, 0x69, 0x64, 0xFB, 0x54, 0x2B,
+ 0xC2, 0xD4, 0x60, 0xAA, 0xBF, 0xAA, 0x10, 0xA4, 0xA0, 0x93, 0x26, 0x2B,
+ 0x7D, 0x19, 0x9E, 0x70, 0x6F, 0xC2, 0xD4, 0x89, 0x15, 0x53, 0x29, 0x69,
+ 0x10, 0xF3, 0xA9, 0x73, 0x01, 0x26, 0x82, 0xE4, 0x1C, 0x4E, 0x2B, 0x02,
+ 0xBE, 0x20, 0x17, 0xB7, 0x25, 0x3B, 0xBF, 0x93, 0x09, 0xDE, 0x58, 0x19,
+ 0xCB, 0x42, 0xE8, 0x19, 0x56, 0xF4, 0xC9, 0x9B, 0xC9, 0x76, 0x5C, 0xAF,
+ 0x53, 0xB1, 0xD0, 0xBB, 0x82, 0x79, 0x82, 0x6A, 0xDB, 0xBC, 0x55, 0x22,
+ 0xE9, 0x15, 0xC1, 0x20, 0xA6, 0x18, 0xA5, 0xA7, 0xF5, 0xE8, 0x97, 0x08,
+ 0x93, 0x39, 0x65, 0x0F},
+ {0x2B, 0xD6, 0x45, 0x9F, 0x82, 0xC5, 0xB3, 0x00, 0x95, 0x2C, 0x49, 0x10,
+ 0x48, 0x81, 0xFF, 0x48},
+ {0x72, 0xA4, 0xF2, 0x0F, 0x64, 0x00, 0x00, 0x00}}, /*buffer20*/
+ {64,
+ 16,
+ 8,
+ {0x10, 0x11, 0x12, 0x31, 0xE0, 0x60, 0x25, 0x3A, 0x43, 0xFD, 0x3F,
+ 0x57, 0xE3, 0x76, 0x07, 0xAB, 0x28, 0x27, 0xB5, 0x99, 0xB6, 0xB1,
+ 0xBB, 0xDA, 0x37, 0xA8, 0xAB, 0xCC, 0x5A, 0x8C, 0x55, 0x0D, 0x1B,
+ 0xFB, 0x2F, 0x49, 0x46, 0x24, 0xFB, 0x50, 0x36, 0x7F, 0xA3, 0x6C,
+ 0xE3, 0xBC, 0x68, 0xF1, 0x1C, 0xF9, 0x3B, 0x15, 0x10, 0x37, 0x6B,
+ 0x02, 0x13, 0x0F, 0x81, 0x2A, 0x9F, 0xA1, 0x69, 0xD8},
+ {0xBF, 0x35, 0xDE, 0x33, 0x7A, 0xA3, 0xB8, 0x32, 0x60, 0x20, 0x2F,
+ 0x16, 0x4D, 0x9A, 0xA9, 0xD0, 0xF1, 0x7B, 0x6F, 0x4B, 0xFD, 0x76,
+ 0xAD, 0xF5, 0x08, 0x37, 0x01, 0xD0, 0xDA, 0x5D, 0x8E, 0x16, 0x9C,
+ 0x83, 0x05, 0x65, 0x5B, 0xED, 0xCB, 0x56, 0xD2, 0xE4, 0x0F, 0x28,
+ 0x14, 0xA7, 0xEE, 0x7D, 0xB9, 0xC0, 0xB2, 0x9C, 0xF1, 0x3D, 0xB4,
+ 0xB1, 0xF3, 0x70, 0x6B, 0xB6, 0xB3, 0x81, 0xF8, 0x92},
+ {0x2B, 0xD6, 0x45, 0x9F, 0x82, 0xC5, 0xB3, 0x00, 0x95, 0x2C, 0x49, 0x10,
+ 0x48, 0x81, 0xFF, 0x48},
+ {0x72, 0xA4, 0xF2, 0x0F, 0x64, 0x00, 0x00, 0x00}}, /*buffer21*/
+ {100,
+ 16,
+ 8,
+ {0x7E, 0xC6, 0x12, 0x72, 0x74, 0x3B, 0xF1, 0x61, 0x47, 0x26, 0x44, 0x6A,
+ 0x6C, 0x38, 0xCE, 0xD1, 0x66, 0xF6, 0xCA, 0x76, 0xEB, 0x54, 0x30, 0x04,
+ 0x42, 0x86, 0x34, 0x6C, 0xEF, 0x13, 0x0F, 0x92, 0x92, 0x2B, 0x03, 0x45,
+ 0x0D, 0x3A, 0x99, 0x75, 0xE5, 0xBD, 0x2E, 0xA0, 0xEB, 0x55, 0xAD, 0x8E,
+ 0x1B, 0x19, 0x9E, 0x3E, 0xC4, 0x31, 0x60, 0x20, 0xE9, 0xA1, 0xB2, 0x85,
+ 0xE7, 0x62, 0x79, 0x53, 0x59, 0xB7, 0xBD, 0xFD, 0x39, 0xBE, 0xF4, 0xB2,
+ 0x48, 0x45, 0x83, 0xD5, 0xAF, 0xE0, 0x82, 0xAE, 0xE6, 0x38, 0xBF, 0x5F,
+ 0xD5, 0xA6, 0x06, 0x19, 0x39, 0x01, 0xA0, 0x8F, 0x4A, 0xB4, 0x1A, 0xAB,
+ 0x9B, 0x13, 0x48, 0x80},
+ {0xD1, 0xE2, 0xDE, 0x70, 0xEE, 0xF8, 0x6C, 0x69, 0x64, 0xFB, 0x54, 0x2B,
+ 0xC2, 0xD4, 0x60, 0xAA, 0xBF, 0xAA, 0x10, 0xA4, 0xA0, 0x93, 0x26, 0x2B,
+ 0x7D, 0x19, 0x9E, 0x70, 0x6F, 0xC2, 0xD4, 0x89, 0x15, 0x53, 0x29, 0x69,
+ 0x10, 0xF3, 0xA9, 0x73, 0x01, 0x26, 0x82, 0xE4, 0x1C, 0x4E, 0x2B, 0x02,
+ 0xBE, 0x20, 0x17, 0xB7, 0x25, 0x3B, 0xBF, 0x93, 0x09, 0xDE, 0x58, 0x19,
+ 0xCB, 0x42, 0xE8, 0x19, 0x56, 0xF4, 0xC9, 0x9B, 0xC9, 0x76, 0x5C, 0xAF,
+ 0x53, 0xB1, 0xD0, 0xBB, 0x82, 0x79, 0x82, 0x6A, 0xDB, 0xBC, 0x55, 0x22,
+ 0xE9, 0x15, 0xC1, 0x20, 0xA6, 0x18, 0xA5, 0xA7, 0xF5, 0xE8, 0x97, 0x08,
+ 0x93, 0x39, 0x65, 0x0F},
+ {0x2B, 0xD6, 0x45, 0x9F, 0x82, 0xC5, 0xB3, 0x00, 0x95, 0x2C, 0x49, 0x10,
+ 0x48, 0x81, 0xFF, 0x48},
+ {0x72, 0xA4, 0xF2, 0x0F, 0x64, 0x00, 0x00, 0x00}}, /*buffer22*/
+ {64,
+ 16,
+ 8,
+ {0x10, 0x11, 0x12, 0x31, 0xE0, 0x60, 0x25, 0x3A, 0x43, 0xFD, 0x3F,
+ 0x57, 0xE3, 0x76, 0x07, 0xAB, 0x28, 0x27, 0xB5, 0x99, 0xB6, 0xB1,
+ 0xBB, 0xDA, 0x37, 0xA8, 0xAB, 0xCC, 0x5A, 0x8C, 0x55, 0x0D, 0x1B,
+ 0xFB, 0x2F, 0x49, 0x46, 0x24, 0xFB, 0x50, 0x36, 0x7F, 0xA3, 0x6C,
+ 0xE3, 0xBC, 0x68, 0xF1, 0x1C, 0xF9, 0x3B, 0x15, 0x10, 0x37, 0x6B,
+ 0x02, 0x13, 0x0F, 0x81, 0x2A, 0x9F, 0xA1, 0x69, 0xD8},
+ {0xBF, 0x35, 0xDE, 0x33, 0x7A, 0xA3, 0xB8, 0x32, 0x60, 0x20, 0x2F,
+ 0x16, 0x4D, 0x9A, 0xA9, 0xD0, 0xF1, 0x7B, 0x6F, 0x4B, 0xFD, 0x76,
+ 0xAD, 0xF5, 0x08, 0x37, 0x01, 0xD0, 0xDA, 0x5D, 0x8E, 0x16, 0x9C,
+ 0x83, 0x05, 0x65, 0x5B, 0xED, 0xCB, 0x56, 0xD2, 0xE4, 0x0F, 0x28,
+ 0x14, 0xA7, 0xEE, 0x7D, 0xB9, 0xC0, 0xB2, 0x9C, 0xF1, 0x3D, 0xB4,
+ 0xB1, 0xF3, 0x70, 0x6B, 0xB6, 0xB3, 0x81, 0xF8, 0x92},
+ {0x2B, 0xD6, 0x45, 0x9F, 0x82, 0xC5, 0xB3, 0x00, 0x95, 0x2C, 0x49, 0x10,
+ 0x48, 0x81, 0xFF, 0x48},
+ {0x72, 0xA4, 0xF2, 0x0F, 0x64, 0x00, 0x00, 0x00}}, /*buffer23*/
+ {100,
+ 16,
+ 8,
+ {0x7E, 0xC6, 0x12, 0x72, 0x74, 0x3B, 0xF1, 0x61, 0x47, 0x26, 0x44, 0x6A,
+ 0x6C, 0x38, 0xCE, 0xD1, 0x66, 0xF6, 0xCA, 0x76, 0xEB, 0x54, 0x30, 0x04,
+ 0x42, 0x86, 0x34, 0x6C, 0xEF, 0x13, 0x0F, 0x92, 0x92, 0x2B, 0x03, 0x45,
+ 0x0D, 0x3A, 0x99, 0x75, 0xE5, 0xBD, 0x2E, 0xA0, 0xEB, 0x55, 0xAD, 0x8E,
+ 0x1B, 0x19, 0x9E, 0x3E, 0xC4, 0x31, 0x60, 0x20, 0xE9, 0xA1, 0xB2, 0x85,
+ 0xE7, 0x62, 0x79, 0x53, 0x59, 0xB7, 0xBD, 0xFD, 0x39, 0xBE, 0xF4, 0xB2,
+ 0x48, 0x45, 0x83, 0xD5, 0xAF, 0xE0, 0x82, 0xAE, 0xE6, 0x38, 0xBF, 0x5F,
+ 0xD5, 0xA6, 0x06, 0x19, 0x39, 0x01, 0xA0, 0x8F, 0x4A, 0xB4, 0x1A, 0xAB,
+ 0x9B, 0x13, 0x48, 0x80},
+ {0xD1, 0xE2, 0xDE, 0x70, 0xEE, 0xF8, 0x6C, 0x69, 0x64, 0xFB, 0x54, 0x2B,
+ 0xC2, 0xD4, 0x60, 0xAA, 0xBF, 0xAA, 0x10, 0xA4, 0xA0, 0x93, 0x26, 0x2B,
+ 0x7D, 0x19, 0x9E, 0x70, 0x6F, 0xC2, 0xD4, 0x89, 0x15, 0x53, 0x29, 0x69,
+ 0x10, 0xF3, 0xA9, 0x73, 0x01, 0x26, 0x82, 0xE4, 0x1C, 0x4E, 0x2B, 0x02,
+ 0xBE, 0x20, 0x17, 0xB7, 0x25, 0x3B, 0xBF, 0x93, 0x09, 0xDE, 0x58, 0x19,
+ 0xCB, 0x42, 0xE8, 0x19, 0x56, 0xF4, 0xC9, 0x9B, 0xC9, 0x76, 0x5C, 0xAF,
+ 0x53, 0xB1, 0xD0, 0xBB, 0x82, 0x79, 0x82, 0x6A, 0xDB, 0xBC, 0x55, 0x22,
+ 0xE9, 0x15, 0xC1, 0x20, 0xA6, 0x18, 0xA5, 0xA7, 0xF5, 0xE8, 0x97, 0x08,
+ 0x93, 0x39, 0x65, 0x0F},
+ {0x2B, 0xD6, 0x45, 0x9F, 0x82, 0xC5, 0xB3, 0x00, 0x95, 0x2C, 0x49, 0x10,
+ 0x48, 0x81, 0xFF, 0x48},
+ {0x72, 0xA4, 0xF2, 0x0F, 0x64, 0x00, 0x00, 0x00}}, /*buffer24*/
+ {64,
+ 16,
+ 8,
+ {0x10, 0x11, 0x12, 0x31, 0xE0, 0x60, 0x25, 0x3A, 0x43, 0xFD, 0x3F,
+ 0x57, 0xE3, 0x76, 0x07, 0xAB, 0x28, 0x27, 0xB5, 0x99, 0xB6, 0xB1,
+ 0xBB, 0xDA, 0x37, 0xA8, 0xAB, 0xCC, 0x5A, 0x8C, 0x55, 0x0D, 0x1B,
+ 0xFB, 0x2F, 0x49, 0x46, 0x24, 0xFB, 0x50, 0x36, 0x7F, 0xA3, 0x6C,
+ 0xE3, 0xBC, 0x68, 0xF1, 0x1C, 0xF9, 0x3B, 0x15, 0x10, 0x37, 0x6B,
+ 0x02, 0x13, 0x0F, 0x81, 0x2A, 0x9F, 0xA1, 0x69, 0xD8},
+ {0xBF, 0x35, 0xDE, 0x33, 0x7A, 0xA3, 0xB8, 0x32, 0x60, 0x20, 0x2F,
+ 0x16, 0x4D, 0x9A, 0xA9, 0xD0, 0xF1, 0x7B, 0x6F, 0x4B, 0xFD, 0x76,
+ 0xAD, 0xF5, 0x08, 0x37, 0x01, 0xD0, 0xDA, 0x5D, 0x8E, 0x16, 0x9C,
+ 0x83, 0x05, 0x65, 0x5B, 0xED, 0xCB, 0x56, 0xD2, 0xE4, 0x0F, 0x28,
+ 0x14, 0xA7, 0xEE, 0x7D, 0xB9, 0xC0, 0xB2, 0x9C, 0xF1, 0x3D, 0xB4,
+ 0xB1, 0xF3, 0x70, 0x6B, 0xB6, 0xB3, 0x81, 0xF8, 0x92},
+ {0x2B, 0xD6, 0x45, 0x9F, 0x82, 0xC5, 0xB3, 0x00, 0x95, 0x2C, 0x49, 0x10,
+ 0x48, 0x81, 0xFF, 0x48},
+ {0x72, 0xA4, 0xF2, 0x0F, 0x64, 0x00, 0x00, 0x00}}, /*buffer25*/
+ {100,
+ 16,
+ 8,
+ {0x7E, 0xC6, 0x12, 0x72, 0x74, 0x3B, 0xF1, 0x61, 0x47, 0x26, 0x44, 0x6A,
+ 0x6C, 0x38, 0xCE, 0xD1, 0x66, 0xF6, 0xCA, 0x76, 0xEB, 0x54, 0x30, 0x04,
+ 0x42, 0x86, 0x34, 0x6C, 0xEF, 0x13, 0x0F, 0x92, 0x92, 0x2B, 0x03, 0x45,
+ 0x0D, 0x3A, 0x99, 0x75, 0xE5, 0xBD, 0x2E, 0xA0, 0xEB, 0x55, 0xAD, 0x8E,
+ 0x1B, 0x19, 0x9E, 0x3E, 0xC4, 0x31, 0x60, 0x20, 0xE9, 0xA1, 0xB2, 0x85,
+ 0xE7, 0x62, 0x79, 0x53, 0x59, 0xB7, 0xBD, 0xFD, 0x39, 0xBE, 0xF4, 0xB2,
+ 0x48, 0x45, 0x83, 0xD5, 0xAF, 0xE0, 0x82, 0xAE, 0xE6, 0x38, 0xBF, 0x5F,
+ 0xD5, 0xA6, 0x06, 0x19, 0x39, 0x01, 0xA0, 0x8F, 0x4A, 0xB4, 0x1A, 0xAB,
+ 0x9B, 0x13, 0x48, 0x80},
+ {0xD1, 0xE2, 0xDE, 0x70, 0xEE, 0xF8, 0x6C, 0x69, 0x64, 0xFB, 0x54, 0x2B,
+ 0xC2, 0xD4, 0x60, 0xAA, 0xBF, 0xAA, 0x10, 0xA4, 0xA0, 0x93, 0x26, 0x2B,
+ 0x7D, 0x19, 0x9E, 0x70, 0x6F, 0xC2, 0xD4, 0x89, 0x15, 0x53, 0x29, 0x69,
+ 0x10, 0xF3, 0xA9, 0x73, 0x01, 0x26, 0x82, 0xE4, 0x1C, 0x4E, 0x2B, 0x02,
+ 0xBE, 0x20, 0x17, 0xB7, 0x25, 0x3B, 0xBF, 0x93, 0x09, 0xDE, 0x58, 0x19,
+ 0xCB, 0x42, 0xE8, 0x19, 0x56, 0xF4, 0xC9, 0x9B, 0xC9, 0x76, 0x5C, 0xAF,
+ 0x53, 0xB1, 0xD0, 0xBB, 0x82, 0x79, 0x82, 0x6A, 0xDB, 0xBC, 0x55, 0x22,
+ 0xE9, 0x15, 0xC1, 0x20, 0xA6, 0x18, 0xA5, 0xA7, 0xF5, 0xE8, 0x97, 0x08,
+ 0x93, 0x39, 0x65, 0x0F},
+ {0x2B, 0xD6, 0x45, 0x9F, 0x82, 0xC5, 0xB3, 0x00, 0x95, 0x2C, 0x49, 0x10,
+ 0x48, 0x81, 0xFF, 0x48},
+ {0x72, 0xA4, 0xF2, 0x0F, 0x64, 0x00, 0x00, 0x00}}, /*buffer26*/
+ {64,
+ 16,
+ 8,
+ {0x10, 0x11, 0x12, 0x31, 0xE0, 0x60, 0x25, 0x3A, 0x43, 0xFD, 0x3F,
+ 0x57, 0xE3, 0x76, 0x07, 0xAB, 0x28, 0x27, 0xB5, 0x99, 0xB6, 0xB1,
+ 0xBB, 0xDA, 0x37, 0xA8, 0xAB, 0xCC, 0x5A, 0x8C, 0x55, 0x0D, 0x1B,
+ 0xFB, 0x2F, 0x49, 0x46, 0x24, 0xFB, 0x50, 0x36, 0x7F, 0xA3, 0x6C,
+ 0xE3, 0xBC, 0x68, 0xF1, 0x1C, 0xF9, 0x3B, 0x15, 0x10, 0x37, 0x6B,
+ 0x02, 0x13, 0x0F, 0x81, 0x2A, 0x9F, 0xA1, 0x69, 0xD8},
+ {0xBF, 0x35, 0xDE, 0x33, 0x7A, 0xA3, 0xB8, 0x32, 0x60, 0x20, 0x2F,
+ 0x16, 0x4D, 0x9A, 0xA9, 0xD0, 0xF1, 0x7B, 0x6F, 0x4B, 0xFD, 0x76,
+ 0xAD, 0xF5, 0x08, 0x37, 0x01, 0xD0, 0xDA, 0x5D, 0x8E, 0x16, 0x9C,
+ 0x83, 0x05, 0x65, 0x5B, 0xED, 0xCB, 0x56, 0xD2, 0xE4, 0x0F, 0x28,
+ 0x14, 0xA7, 0xEE, 0x7D, 0xB9, 0xC0, 0xB2, 0x9C, 0xF1, 0x3D, 0xB4,
+ 0xB1, 0xF3, 0x70, 0x6B, 0xB6, 0xB3, 0x81, 0xF8, 0x92},
+ {0x2B, 0xD6, 0x45, 0x9F, 0x82, 0xC5, 0xB3, 0x00, 0x95, 0x2C, 0x49, 0x10,
+ 0x48, 0x81, 0xFF, 0x48},
+ {0x72, 0xA4, 0xF2, 0x0F, 0x64, 0x00, 0x00, 0x00}}, /*buffer27*/
+ {100,
+ 16,
+ 8,
+ {0x7E, 0xC6, 0x12, 0x72, 0x74, 0x3B, 0xF1, 0x61, 0x47, 0x26, 0x44, 0x6A,
+ 0x6C, 0x38, 0xCE, 0xD1, 0x66, 0xF6, 0xCA, 0x76, 0xEB, 0x54, 0x30, 0x04,
+ 0x42, 0x86, 0x34, 0x6C, 0xEF, 0x13, 0x0F, 0x92, 0x92, 0x2B, 0x03, 0x45,
+ 0x0D, 0x3A, 0x99, 0x75, 0xE5, 0xBD, 0x2E, 0xA0, 0xEB, 0x55, 0xAD, 0x8E,
+ 0x1B, 0x19, 0x9E, 0x3E, 0xC4, 0x31, 0x60, 0x20, 0xE9, 0xA1, 0xB2, 0x85,
+ 0xE7, 0x62, 0x79, 0x53, 0x59, 0xB7, 0xBD, 0xFD, 0x39, 0xBE, 0xF4, 0xB2,
+ 0x48, 0x45, 0x83, 0xD5, 0xAF, 0xE0, 0x82, 0xAE, 0xE6, 0x38, 0xBF, 0x5F,
+ 0xD5, 0xA6, 0x06, 0x19, 0x39, 0x01, 0xA0, 0x8F, 0x4A, 0xB4, 0x1A, 0xAB,
+ 0x9B, 0x13, 0x48, 0x80},
+ {0xD1, 0xE2, 0xDE, 0x70, 0xEE, 0xF8, 0x6C, 0x69, 0x64, 0xFB, 0x54, 0x2B,
+ 0xC2, 0xD4, 0x60, 0xAA, 0xBF, 0xAA, 0x10, 0xA4, 0xA0, 0x93, 0x26, 0x2B,
+ 0x7D, 0x19, 0x9E, 0x70, 0x6F, 0xC2, 0xD4, 0x89, 0x15, 0x53, 0x29, 0x69,
+ 0x10, 0xF3, 0xA9, 0x73, 0x01, 0x26, 0x82, 0xE4, 0x1C, 0x4E, 0x2B, 0x02,
+ 0xBE, 0x20, 0x17, 0xB7, 0x25, 0x3B, 0xBF, 0x93, 0x09, 0xDE, 0x58, 0x19,
+ 0xCB, 0x42, 0xE8, 0x19, 0x56, 0xF4, 0xC9, 0x9B, 0xC9, 0x76, 0x5C, 0xAF,
+ 0x53, 0xB1, 0xD0, 0xBB, 0x82, 0x79, 0x82, 0x6A, 0xDB, 0xBC, 0x55, 0x22,
+ 0xE9, 0x15, 0xC1, 0x20, 0xA6, 0x18, 0xA5, 0xA7, 0xF5, 0xE8, 0x97, 0x08,
+ 0x93, 0x39, 0x65, 0x0F},
+ {0x2B, 0xD6, 0x45, 0x9F, 0x82, 0xC5, 0xB3, 0x00, 0x95, 0x2C, 0x49, 0x10,
+ 0x48, 0x81, 0xFF, 0x48},
+ {0x72, 0xA4, 0xF2, 0x0F, 0x64, 0x00, 0x00, 0x00}} /*buffer28*/
+};
+
+static hash_test_vector_t kasumi_f9_vectors[] = {
+ {/*input*/
+ {0x38, 0xA6, 0xF0, 0x56, 0x05, 0xD2, 0xEC, 0x49, 0x6B, 0x22, 0x77,
+ 0x37, 0x29, 0x6F, 0x39, 0x3C, 0x80, 0x79, 0x35, 0x3E, 0xDC, 0x87,
+ 0xE2, 0xE8, 0x05, 0xD2, 0xEC, 0x49, 0xA4, 0xF2, 0xD8, 0xE2},
+ /*inputlength*/
+ 32,
+ /*key*/
+ {0x2B, 0xD6, 0x45, 0x9F, 0x82, 0xC5, 0xB3, 0x00, 0x95, 0x2C, 0x49, 0x10,
+ 0x48, 0x81, 0xFF, 0x48},
+ /*keylength*/
+ 16,
+ /*expectedout*/
+ {0xF6, 0x3B, 0xD7, 0x2C}},
+ {{0x3E, 0xDC, 0x87, 0xE2, 0xA4, 0xF2, 0xD8, 0xE2, 0xB5, 0x92,
+ 0x43, 0x84, 0x32, 0x8A, 0x4A, 0xE0, 0x0B, 0x73, 0x71, 0x09,
+ 0xF8, 0xB6, 0xC8, 0xDD, 0x2B, 0x4D, 0xB6, 0x3D, 0xD5, 0x33,
+ 0x98, 0x1C, 0xEB, 0x19, 0xAA, 0xD5, 0x2A, 0x5B, 0x2B, 0xC3},
+ 40,
+ {0xD4, 0x2F, 0x68, 0x24, 0x28, 0x20, 0x1C, 0xAF, 0xCD, 0x9F, 0x97, 0x94,
+ 0x5E, 0x6D, 0xE7, 0xB7},
+ 16,
+ {0xA9, 0xDA, 0xF1, 0xFF}},
+ {{0x14, 0x79, 0x3E, 0x41, 0x03, 0x97, 0xE8, 0xFD, 0xD0, 0xA7, 0xD4, 0x63,
+ 0xDF, 0x9F, 0xB2, 0xB2, 0x78, 0x83, 0x3F, 0xA0, 0x2E, 0x23, 0x5A, 0xA1,
+ 0x72, 0xBD, 0x97, 0x0C, 0x14, 0x73, 0xE1, 0x29, 0x07, 0xFB, 0x64, 0x8B,
+ 0x65, 0x99, 0xAA, 0xA0, 0xB2, 0x4A, 0x03, 0x86, 0x65, 0x42, 0x2B, 0x20,
+ 0xA4, 0x99, 0x27, 0x6A, 0x50, 0x42, 0x70, 0x09, 0xC0},
+ 57,
+ {0xC7, 0x36, 0xC6, 0xAA, 0xB2, 0x2B, 0xFF, 0xF9, 0x1E, 0x26, 0x98, 0xD2,
+ 0xE2, 0x2A, 0xD5, 0x7E},
+ 16,
+ {0xDD, 0x7D, 0xFA, 0xDD}},
+ {{0x36, 0xAF, 0x61, 0x44, 0x98, 0x38, 0xF0, 0x3A, 0xB3, 0xD3,
+ 0xC9, 0x17, 0x0A, 0x4E, 0x16, 0x32, 0xF6, 0x0F, 0x86, 0x10,
+ 0x13, 0xD2, 0x2D, 0x84, 0xB7, 0x26, 0xB6, 0xA2, 0x78, 0xD8,
+ 0x02, 0xD1, 0xEE, 0xAF, 0x13, 0x21, 0xBA, 0x59, 0x29, 0xDF},
+ 40,
+ {0x7E, 0x5E, 0x94, 0x43, 0x1E, 0x11, 0xD7, 0x38, 0x28, 0xD7, 0x39, 0xCC,
+ 0x6C, 0xED, 0x45, 0x73},
+ 16,
+ {0x2B, 0xEE, 0xF3, 0xAC}
+
+ },
+ {{0x36, 0xAF, 0x61, 0x44, 0x98, 0x38, 0xF0, 0x3A, 0x59, 0x32,
+ 0xBC, 0x0A, 0xCE, 0x2B, 0x0A, 0xBA, 0x33, 0xD8, 0xAC, 0x18,
+ 0x8A, 0xC5, 0x4F, 0x34, 0x6F, 0xAD, 0x10, 0xBF, 0x9D, 0xEE,
+ 0x29, 0x20, 0xB4, 0x3B, 0xD0, 0xC5, 0x3A, 0x91, 0x5C, 0xB7,
+ 0xDF, 0x6C, 0xAA, 0x72, 0x05, 0x3A, 0xBF, 0xF3, 0x80},
+ 49,
+ {0xFD, 0xB9, 0xCF, 0xDF, 0x28, 0x93, 0x6C, 0xC4, 0x83, 0xA3, 0x18, 0x69,
+ 0xD8, 0x1B, 0x8F, 0xAB},
+ 16,
+ {0x15, 0x37, 0xD3, 0x16}},
+ {{0x36, 0xAF, 0x61, 0x44, 0x98, 0x38, 0xF0, 0x3A, 0xD3, 0xC5, 0x38, 0x39,
+ 0x62, 0x68, 0x20, 0x71, 0x77, 0x65, 0x66, 0x76, 0x20, 0x32, 0x38, 0x37,
+ 0x63, 0x62, 0x40, 0x98, 0x1B, 0xA6, 0x82, 0x4C, 0x1B, 0xFB, 0x1A, 0xB4,
+ 0x85, 0x47, 0x20, 0x29, 0xB7, 0x1D, 0x80, 0x8C, 0xE3, 0x3E, 0x2C, 0xC3,
+ 0xC0, 0xB5, 0xFC, 0x1F, 0x3D, 0xE8, 0xA6, 0xDC, 0x80},
+ 57,
+ {0x68, 0x32, 0xA6, 0x5C, 0xFF, 0x44, 0x73, 0x62, 0x1E, 0xBD, 0xD4, 0xBA,
+ 0x26, 0xA9, 0x21, 0xFE},
+ 16,
+ {0x8B, 0x2D, 0x57, 0x0F}},
+ {{0xC7, 0x59, 0x0E, 0xA9, 0x57, 0xD5, 0xDF, 0x7D, 0xBB, 0xB0, 0x57,
+ 0x03, 0x88, 0x09, 0x49, 0x6B, 0xCF, 0xF8, 0x6D, 0x6F, 0xBC, 0x8C,
+ 0xE5, 0xB1, 0x35, 0xA0, 0x6B, 0x16, 0x60, 0x54, 0xF2, 0xD5, 0x65,
+ 0xBE, 0x8A, 0xCE, 0x75, 0xDC, 0x85, 0x1E, 0x0B, 0xCD, 0xD8, 0xF0,
+ 0x71, 0x41, 0xC4, 0x95, 0x87, 0x2F, 0xB5, 0xD8, 0xC0, 0xC6, 0x6A,
+ 0x8B, 0x6D, 0xA5, 0x56, 0x66, 0x3E, 0x4E, 0x46, 0x12, 0x05, 0xD8,
+ 0x45, 0x80, 0xBE, 0xE5, 0xBC, 0x7E, 0x80},
+ 73,
+ {0xD3, 0x41, 0x9B, 0xE8, 0x21, 0x08, 0x7A, 0xCD, 0x02, 0x12, 0x3A, 0x92,
+ 0x48, 0x03, 0x33, 0x59},
+ 16,
+ {0x02, 0x15, 0x81, 0x70}},
+ {{0x36, 0xAF, 0x61, 0x44, 0x4F, 0x30, 0x2A, 0xD2, 0x35, 0xC6, 0x87, 0x16,
+ 0x63, 0x3C, 0x66, 0xFB, 0x75, 0x0C, 0x26, 0x68, 0x65, 0xD5, 0x3C, 0x11,
+ 0xEA, 0x05, 0xB1, 0xE9, 0xFA, 0x49, 0xC8, 0x39, 0x8D, 0x48, 0xE1, 0xEF,
+ 0xA5, 0x90, 0x9D, 0x39, 0x47, 0x90, 0x28, 0x37, 0xF5, 0xAE, 0x96, 0xD5,
+ 0xA0, 0x5B, 0xC8, 0xD6, 0x1C, 0xA8, 0xDB, 0xEF, 0x1B, 0x13, 0xA4, 0xB4,
+ 0xAB, 0xFE, 0x4F, 0xB1, 0x00, 0x60, 0x45, 0xB6, 0x74, 0xBB, 0x54, 0x72,
+ 0x93, 0x04, 0xC3, 0x82, 0xBE, 0x53, 0xA5, 0xAF, 0x05, 0x55, 0x61, 0x76,
+ 0xF6, 0xEA, 0xA2, 0xEF, 0x1D, 0x05, 0xE4, 0xB0, 0x83, 0x18, 0x1E, 0xE6,
+ 0x74, 0xCD, 0xA5, 0xA4, 0x85, 0xF7, 0x4D, 0x7A, 0xC0},
+ 105,
+ {0x83, 0xFD, 0x23, 0xA2, 0x44, 0xA7, 0x4C, 0xF3, 0x58, 0xDA, 0x30, 0x19,
+ 0xF1, 0x72, 0x26, 0x35},
+ 16,
+ {0x95, 0xAE, 0x41, 0xBA}},
+ {{0x29, 0x6F, 0x39, 0x3C, 0x6B, 0x22, 0x77, 0x37, 0x10, 0xBF, 0xFF, 0x83,
+ 0x9E, 0x0C, 0x71, 0x65, 0x8D, 0xBB, 0x2D, 0x17, 0x07, 0xE1, 0x45, 0x72,
+ 0x4F, 0x41, 0xC1, 0x6F, 0x48, 0xBF, 0x40, 0x3C, 0x3B, 0x18, 0xE3, 0x8F,
+ 0xD5, 0xD1, 0x66, 0x3B, 0x6F, 0x6D, 0x90, 0x01, 0x93, 0xE3, 0xCE, 0xA8,
+ 0xBB, 0x4F, 0x1B, 0x4F, 0x5B, 0xE8, 0x22, 0x03, 0x22, 0x32, 0xA7, 0x8D,
+ 0x7D, 0x75, 0x23, 0x8D, 0x5E, 0x6D, 0xAE, 0xCD, 0x3B, 0x43, 0x22, 0xCF,
+ 0x59, 0xBC, 0x7E, 0xA8, 0x4A, 0xB1, 0x88, 0x11, 0xB5, 0xBF, 0xB7, 0xBC,
+ 0x55, 0x3F, 0x4F, 0xE4, 0x44, 0x78, 0xCE, 0x28, 0x7A, 0x14, 0x87, 0x99,
+ 0x90, 0xD1, 0x8D, 0x12, 0xCA, 0x79, 0xD2, 0xC8, 0x55, 0x14, 0x90, 0x21,
+ 0xCD, 0x5C, 0xE8, 0xCA, 0x03, 0x71, 0xCA, 0x04, 0xFC, 0xCE, 0x14, 0x3E,
+ 0x3D, 0x7C, 0xFE, 0xE9, 0x45, 0x85, 0xB5, 0x88, 0x5C, 0xAC, 0x46, 0x06,
+ 0x8B, 0xC0},
+ 134,
+ {0xF4, 0xEB, 0xEC, 0x69, 0xE7, 0x3E, 0xAF, 0x2E, 0xB2, 0xCF, 0x6A, 0xF4,
+ 0xB3, 0x12, 0x0F, 0xFD},
+ 16,
+ {0xC3, 0x83, 0x83, 0x9D}},
+ {{0x78, 0x27, 0xFA, 0xB2, 0xA5, 0x6C, 0x6C, 0xA2, 0x70, 0xDE, 0xDF, 0x2D,
+ 0xC4, 0x2C, 0x5C, 0xBD, 0x3A, 0x96, 0xF8, 0xA0, 0xB1, 0x14, 0x18, 0xB3,
+ 0x60, 0x8D, 0x57, 0x33, 0x60, 0x4A, 0x2C, 0xD3, 0x6A, 0xAB, 0xC7, 0x0C,
+ 0xE3, 0x19, 0x3B, 0xB5, 0x15, 0x3B, 0xE2, 0xD3, 0xC0, 0x6D, 0xFD, 0xB2,
+ 0xD1, 0x6E, 0x9C, 0x35, 0x71, 0x58, 0xBE, 0x6A, 0x41, 0xD6, 0xB8, 0x61,
+ 0xE4, 0x91, 0xDB, 0x3F, 0xBF, 0xEB, 0x51, 0x8E, 0xFC, 0xF0, 0x48, 0xD7,
+ 0xD5, 0x89, 0x53, 0x73, 0x0F, 0xF3, 0x0C, 0x9E, 0xC4, 0x70, 0xFF, 0xCD,
+ 0x66, 0x3D, 0xC3, 0x42, 0x01, 0xC3, 0x6A, 0xDD, 0xC0, 0x11, 0x1C, 0x35,
+ 0xB3, 0x8A, 0xFE, 0xE7, 0xCF, 0xDB, 0x58, 0x2E, 0x37, 0x31, 0xF8, 0xB4,
+ 0xBA, 0xA8, 0xD1, 0xA8, 0x9C, 0x06, 0xE8, 0x11, 0x99, 0xA9, 0x71, 0x62,
+ 0x27, 0xBE, 0x34, 0x4E, 0xFC, 0xB4, 0x36, 0xDD, 0xD0, 0xF0, 0x96, 0xC0,
+ 0x64, 0xC3, 0xB5, 0xE2, 0xC3, 0x99, 0x99, 0x3F, 0xC7, 0x73, 0x94, 0xF9,
+ 0xE0, 0x97, 0x20, 0xA8, 0x11, 0x85, 0x0E, 0xF2, 0x3B, 0x2E, 0xE0, 0x5D,
+ 0x9E, 0x61, 0x73, 0x60, 0x9D, 0x86, 0xE1, 0xC0, 0xC1, 0x8E, 0xA5, 0x1A,
+ 0x01, 0x2A, 0x00, 0xBB, 0x41, 0x3B, 0x9C, 0xB8, 0x18, 0x8A, 0x70, 0x3C,
+ 0xD6, 0xBA, 0xE3, 0x1C, 0xC6, 0x7B, 0x34, 0xB1, 0xB0, 0x00, 0x19, 0xE6,
+ 0xA2, 0xB2, 0xA6, 0x90, 0xF0, 0x26, 0x71, 0xFE, 0x7C, 0x9E, 0xF8, 0xDE,
+ 0xC0, 0x09, 0x4E, 0x53, 0x37, 0x63, 0x47, 0x8D, 0x58, 0xD2, 0xC5, 0xF5,
+ 0xB8, 0x27, 0xA0, 0x14, 0x8C, 0x59, 0x48, 0xA9, 0x69, 0x31, 0xAC, 0xF8,
+ 0x4F, 0x46, 0x5A, 0x64, 0xE6, 0x2C, 0xE7, 0x40, 0x07, 0xE9, 0x91, 0xE3,
+ 0x7E, 0xA8, 0x23, 0xFA, 0x0F, 0xB2, 0x19, 0x23, 0xB7, 0x99, 0x05, 0xB7,
+ 0x33, 0xB6, 0x31, 0xE6, 0xC7, 0xD6, 0x86, 0x0A, 0x38, 0x31, 0xAC, 0x35,
+ 0x1A, 0x9C, 0x73, 0x0C, 0x52, 0xFF, 0x72, 0xD9, 0xD3, 0x08, 0xEE, 0xDB,
+ 0xAB, 0x21, 0xFD, 0xE1, 0x43, 0xA0, 0xEA, 0x17, 0xE2, 0x3E, 0xDC, 0x1F,
+ 0x74, 0xCB, 0xB3, 0x63, 0x8A, 0x20, 0x33, 0xAA, 0xA1, 0x54, 0x64, 0xEA,
+ 0xA7, 0x33, 0x38, 0x5D, 0xBB, 0xEB, 0x6F, 0xD7, 0x35, 0x09, 0xB8, 0x57,
+ 0xE6, 0xA4, 0x19, 0xDC, 0xA1, 0xD8, 0x90, 0x7A, 0xF9, 0x77, 0xFB, 0xAC,
+ 0x4D, 0xFA, 0x35, 0xEF},
+ 328,
+ {0x5D, 0x0A, 0x80, 0xD8, 0x13, 0x4A, 0xE1, 0x96, 0x77, 0x82, 0x4B, 0x67,
+ 0x1E, 0x83, 0x8A, 0xF4},
+ 16,
+ {0x3A, 0xE4, 0xBF, 0xF3}}};
+
+static hash_iv_test_vector_t kasumi_f9_IV_vectors[] = {
+ {
+ // 3GPP specs Test Set 1
+ /*input*/
+ {0x6B, 0x22, 0x77, 0x37, 0x29, 0x6F, 0x39, 0x3C,
+ 0x80, 0x79, 0x35, 0x3E, 0xDC, 0x87, 0xE2, 0xE8,
+ 0x05, 0xD2, 0xEC, 0x49, 0xA4, 0xF2, 0xD8, 0xE0},
+ /*length*/
+ 189,
+ /*key*/
+ {0x2B, 0xD6, 0x45, 0x9F, 0x82, 0xC5, 0xB3, 0x00, 0x95, 0x2C, 0x49, 0x10,
+ 0x48, 0x81, 0xFF, 0x48},
+ /*KeyLeninBytes*/
+ 16,
+ /*exp out*/
+ {0xF6, 0x3B, 0xD7, 0x2C},
+ /*iv*/
+ {0x38, 0xA6, 0xF0, 0x56, 0x05, 0xD2, 0xEC, 0x49},
+ /*ivLeninbytes*/
+ 8,
+ /*direction*/
+ 0,
+ },
+ {
+ // 3GPP specs Test Set 2
+ /*input*/
+ {0xB5, 0x92, 0x43, 0x84, 0x32, 0x8A, 0x4A, 0xE0, 0x0B, 0x73, 0x71,
+ 0x09, 0xF8, 0xB6, 0xC8, 0xDD, 0x2B, 0x4D, 0xB6, 0x3D, 0xD5, 0x33,
+ 0x98, 0x1C, 0xEB, 0x19, 0xAA, 0xD5, 0x2A, 0x5B, 0x2B, 0xC0},
+ /*length*/
+ 254,
+ /*key*/
+ {0xD4, 0x2F, 0x68, 0x24, 0x28, 0x20, 0x1C, 0xAF, 0xCD, 0x9F, 0x97, 0x94,
+ 0x5E, 0x6D, 0xE7, 0xB7},
+ /*KeyLeninBytes*/
+ 16,
+ /*exp out*/
+ {0xA9, 0xDA, 0xF1, 0xFF},
+ /*iv*/
+ {0x3E, 0xDC, 0x87, 0xE2, 0xA4, 0xF2, 0xD8, 0xE2},
+ /*ivLeninbytes*/
+ 8,
+ /*direction*/
+ 1,
+ },
+ {
+ // 3GPP specs Test Set 3
+ /*input*/
+ {0x59, 0x32, 0xBC, 0x0A, 0xCE, 0x2B, 0x0A, 0xBA, 0x33, 0xD8,
+ 0xAC, 0x18, 0x8A, 0xC5, 0x4F, 0x34, 0x6F, 0xAD, 0x10, 0xBF,
+ 0x9D, 0xEE, 0x29, 0x20, 0xB4, 0x3B, 0xD0, 0xC5, 0x3A, 0x91,
+ 0x5C, 0xB7, 0xDF, 0x6C, 0xAA, 0x72, 0x05, 0x3A, 0xBF, 0xF2},
+ /*length*/
+ 319,
+ /*key*/
+ {0xFD, 0xB9, 0xCF, 0xDF, 0x28, 0x93, 0x6C, 0xC4, 0x83, 0xA3, 0x18, 0x69,
+ 0xD8, 0x1B, 0x8F, 0xAB},
+ /*KeyLeninBytes*/
+ 16,
+ /*exp out*/
+ {0x15, 0x37, 0xD3, 0x16},
+ /*iv*/
+ {0x36, 0xAF, 0x61, 0x44, 0x98, 0x38, 0xF0, 0x3A},
+ /*ivLeninbytes*/
+ 8,
+ /*direction*/
+ 1,
+ },
+ {
+ // 3GPP specs Test Set 4
+ /*input*/
+ {0xD0, 0xA7, 0xD4, 0x63, 0xDF, 0x9F, 0xB2, 0xB2, 0x78, 0x83,
+ 0x3F, 0xA0, 0x2E, 0x23, 0x5A, 0xA1, 0x72, 0xBD, 0x97, 0x0C,
+ 0x14, 0x73, 0xE1, 0x29, 0x07, 0xFB, 0x64, 0x8B, 0x65, 0x99,
+ 0xAA, 0xA0, 0xB2, 0x4A, 0x03, 0x86, 0x65, 0x42, 0x2B, 0x20,
+ 0xA4, 0x99, 0x27, 0x6A, 0x50, 0x42, 0x70, 0x09},
+ /*length*/
+ 384,
+ /*key*/
+ {0xC7, 0x36, 0xC6, 0xAA, 0xB2, 0x2B, 0xFF, 0xF9, 0x1E, 0x26, 0x98, 0xD2,
+ 0xE2, 0x2A, 0xD5, 0x7E},
+ /*KeyLeninBytes*/
+ 16,
+ /*exp out*/
+ {0xDD, 0x7D, 0xFA, 0xDD},
+ /*iv*/
+ {0x14, 0x79, 0x3E, 0x41, 0x03, 0x97, 0xE8, 0xFD},
+ /*ivLeninbytes*/
+ 8,
+ /*direction*/
+ 1,
+ },
+ {
+ // 3GPP specs Test Set 5
+ /*input*/
+ {0x10, 0xBF, 0xFF, 0x83, 0x9E, 0x0C, 0x71, 0x65, 0x8D, 0xBB, 0x2D, 0x17,
+ 0x07, 0xE1, 0x45, 0x72, 0x4F, 0x41, 0xC1, 0x6F, 0x48, 0xBF, 0x40, 0x3C,
+ 0x3B, 0x18, 0xE3, 0x8F, 0xD5, 0xD1, 0x66, 0x3B, 0x6F, 0x6D, 0x90, 0x01,
+ 0x93, 0xE3, 0xCE, 0xA8, 0xBB, 0x4F, 0x1B, 0x4F, 0x5B, 0xE8, 0x22, 0x03,
+ 0x22, 0x32, 0xA7, 0x8D, 0x7D, 0x75, 0x23, 0x8D, 0x5E, 0x6D, 0xAE, 0xCD,
+ 0x3B, 0x43, 0x22, 0xCF, 0x59, 0xBC, 0x7E, 0xA8, 0x4A, 0xB1, 0x88, 0x11,
+ 0xB5, 0xBF, 0xB7, 0xBC, 0x55, 0x3F, 0x4F, 0xE4, 0x44, 0x78, 0xCE, 0x28,
+ 0x7A, 0x14, 0x87, 0x99, 0x90, 0xD1, 0x8D, 0x12, 0xCA, 0x79, 0xD2, 0xC8,
+ 0x55, 0x14, 0x90, 0x21, 0xCD, 0x5C, 0xE8, 0xCA, 0x03, 0x71, 0xCA, 0x04,
+ 0xFC, 0xCE, 0x14, 0x3E, 0x3D, 0x7C, 0xFE, 0xE9, 0x45, 0x85, 0xB5, 0x88,
+ 0x5C, 0xAC, 0x46, 0x06, 0x8B},
+ /*length*/
+ 1000,
+ /*key*/
+ {0xF4, 0xEB, 0xEC, 0x69, 0xE7, 0x3E, 0xAF, 0x2E, 0xB2, 0xCF, 0x6A, 0xF4,
+ 0xB3, 0x12, 0x0F, 0xFD},
+ /*KeyLeninBytes*/
+ 16,
+ /*exp out*/
+ {0xC3, 0x83, 0x83, 0x9D},
+ /*iv*/
+ {0x29, 0x6F, 0x39, 0x3C, 0x6B, 0x22, 0x77, 0x37},
+ /*ivLeninbytes*/
+ 8,
+ /*direction*/
+ 1,
+ }};
+
+const cipher_test_vector_t *cipher_test_vectors[] = {kasumi_f8_vectors};
+const uint32_t numCipherTestVectors[] = {
+ sizeof(kasumi_f8_vectors) / sizeof(cipher_test_vector_t),
+ sizeof(kasumi_f8_bitvectors) / sizeof(cipherbit_test_vector_t)};
+
+const uint32_t numHashTestVectors[] = {
+ sizeof(kasumi_f9_vectors) / sizeof(hash_test_vector_t),
+ sizeof(kasumi_f9_IV_vectors) / sizeof(hash_iv_test_vector_t)};
+
+#endif /*__KASUMI_TEST_VECTORS_H__*/
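The count arrays exported just above let the KASUMI test code (presumably kasumi_test.c, declared in main.c below) iterate the vector tables without hard-coding their sizes. A minimal sketch of that consumption pattern follows; it is illustrative only, and check_one_f9() is a hypothetical per-vector helper, not a function that exists in this tree.

    /* Sketch only: walk the KASUMI F9 vector table via the exported count.
     * check_one_f9() is a hypothetical stand-in for the per-vector check. */
    static int check_all_f9(struct MB_MGR *mgr)
    {
            const uint32_t n = numHashTestVectors[0];
            uint32_t i;
            int errors = 0;

            for (i = 0; i < n; i++)
                    errors += check_one_f9(mgr, &kasumi_f9_vectors[i]);

            return errors;
    }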
diff --git a/src/spdk/intel-ipsec-mb/LibTestApp/main.c b/src/spdk/intel-ipsec-mb/LibTestApp/main.c
new file mode 100644
index 000000000..bd941e593
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/LibTestApp/main.c
@@ -0,0 +1,314 @@
+/*****************************************************************************
+ Copyright (c) 2012-2019, Intel Corporation
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*****************************************************************************/
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <intel-ipsec-mb.h>
+
+#include "gcm_ctr_vectors_test.h"
+#include "customop_test.h"
+#include "utils.h"
+
+extern int des_test(const enum arch_type arch, struct MB_MGR *mb_mgr);
+extern int ccm_test(const enum arch_type arch, struct MB_MGR *mb_mgr);
+extern int cmac_test(const enum arch_type arch, struct MB_MGR *mb_mgr);
+extern int hmac_sha1_test(const enum arch_type arch, struct MB_MGR *mb_mgr);
+extern int hmac_sha256_sha512_test(const enum arch_type arch,
+ struct MB_MGR *mb_mgr);
+extern int hmac_md5_test(const enum arch_type arch, struct MB_MGR *mb_mgr);
+extern int aes_test(const enum arch_type arch, struct MB_MGR *mb_mgr);
+extern int ecb_test(const enum arch_type arch, struct MB_MGR *mb_mgr);
+extern int sha_test(const enum arch_type arch, struct MB_MGR *mb_mgr);
+extern int chained_test(const enum arch_type arch, struct MB_MGR *mb_mgr);
+extern int api_test(const enum arch_type arch, struct MB_MGR *mb_mgr);
+extern int pon_test(const enum arch_type arch, struct MB_MGR *mb_mgr);
+extern int zuc_test(const enum arch_type arch, struct MB_MGR *mb_mgr);
+extern int kasumi_test(const enum arch_type arch, struct MB_MGR *mb_mgr);
+extern int snow3g_test(const enum arch_type arch, struct MB_MGR *mb_mgr);
+extern int direct_api_test(const enum arch_type arch, struct MB_MGR *mb_mgr);
+
+#include "do_test.h"
+
+static void
+usage(const char *name)
+{
+ fprintf(stderr,
+ "Usage: %s [args], where args are zero or more\n"
+ "--no-aesni-emu: Don't do AESNI emulation\n"
+ "--no-avx512: Don't do AVX512\n"
+ "--no-avx2: Don't do AVX2\n"
+ "--no-avx: Don't do AVX\n"
+ "--no-sse: Don't do SSE\n"
+ "--no-gcm: Don't run GCM tests\n"
+ "--auto-detect: auto detects current architecture "
+ "to run the tests\n"
+ "--shani-on: use SHA extensions, default: auto-detect\n"
+ "--shani-off: don't use SHA extensions\n", name);
+}
+
+static void
+print_hw_features(void)
+{
+ const struct {
+ uint64_t feat_val;
+ const char *feat_name;
+ } feat_tab[] = {
+ { IMB_FEATURE_SHANI, "SHANI" },
+ { IMB_FEATURE_AESNI, "AESNI" },
+ { IMB_FEATURE_PCLMULQDQ, "PCLMULQDQ" },
+ { IMB_FEATURE_CMOV, "CMOV" },
+ { IMB_FEATURE_SSE4_2, "SSE4.2" },
+ { IMB_FEATURE_AVX, "AVX" },
+ { IMB_FEATURE_AVX2, "AVX2" },
+ { IMB_FEATURE_AVX512_SKX, "AVX512(SKX)" },
+ { IMB_FEATURE_VAES, "VAES" },
+ { IMB_FEATURE_VPCLMULQDQ, "VPCLMULQDQ" },
+ };
+ MB_MGR *p_mgr = NULL;
+ unsigned i;
+
+ printf("Detected hardware features:\n");
+
+ p_mgr = alloc_mb_mgr(0);
+ if (p_mgr == NULL) {
+ printf("\tERROR\n");
+ return;
+ }
+
+ for (i = 0; i < IMB_DIM(feat_tab); i++) {
+ const uint64_t val = feat_tab[i].feat_val;
+
+ printf("\t%-*.*s : %s\n", 12, 12, feat_tab[i].feat_name,
+ ((p_mgr->features & val) == val) ? "OK" : "n/a");
+ }
+
+ free_mb_mgr(p_mgr);
+}
+
+static void
+detect_arch(int *p_do_aesni_emu, int *p_do_sse, int *p_do_avx,
+ int *p_do_avx2, int *p_do_avx512, int *p_do_pclmulqdq)
+{
+ const uint64_t detect_sse =
+ IMB_FEATURE_SSE4_2 | IMB_FEATURE_CMOV | IMB_FEATURE_AESNI;
+ const uint64_t detect_avx =
+ IMB_FEATURE_AVX | IMB_FEATURE_CMOV | IMB_FEATURE_AESNI;
+ const uint64_t detect_avx2 = IMB_FEATURE_AVX2 | detect_avx;
+ const uint64_t detect_avx512 = IMB_FEATURE_AVX512_SKX | detect_avx2;
+ const uint64_t detect_pclmulqdq = IMB_FEATURE_PCLMULQDQ;
+ MB_MGR *p_mgr = NULL;
+
+ if (p_do_aesni_emu == NULL || p_do_sse == NULL ||
+ p_do_avx == NULL || p_do_avx2 == NULL ||
+ p_do_avx512 == NULL)
+ return;
+
+ *p_do_aesni_emu = 1;
+ *p_do_sse = 1;
+ *p_do_avx = 1;
+ *p_do_avx2 = 1;
+ *p_do_avx512 = 1;
+ *p_do_pclmulqdq = 1;
+
+ p_mgr = alloc_mb_mgr(0);
+ if (p_mgr == NULL) {
+ printf("Architecture auto detect error!\n");
+ return;
+ }
+
+ if ((p_mgr->features & detect_avx512) != detect_avx512)
+ *p_do_avx512 = 0;
+
+ if ((p_mgr->features & detect_avx2) != detect_avx2)
+ *p_do_avx2 = 0;
+
+ if ((p_mgr->features & detect_avx) != detect_avx)
+ *p_do_avx = 0;
+
+ if ((p_mgr->features & detect_sse) != detect_sse)
+ *p_do_sse = 0;
+
+ if ((p_mgr->features & detect_pclmulqdq) != detect_pclmulqdq)
+ *p_do_pclmulqdq = 0;
+
+ free_mb_mgr(p_mgr);
+}
+
+int
+main(int argc, char **argv)
+{
+ const char *arch_str_tab[ARCH_NUMOF] = {
+ "SSE", "AVX", "AVX2", "AVX512", "NO_AESNI"
+ };
+ enum arch_type arch_type_tab[ARCH_NUMOF] = {
+ ARCH_SSE, ARCH_AVX, ARCH_AVX2, ARCH_AVX512, ARCH_NO_AESNI
+ };
+ int i, do_sse = 1, do_avx = 1, do_avx2 = 1, do_avx512 = 1;
+ int do_aesni_emu = 1, do_gcm = 1;
+ int auto_detect = 0;
+ MB_MGR *p_mgr = NULL;
+ uint64_t flags = 0;
+ int errors = 0;
+
+ /* Check version number */
+ if (imb_get_version() < IMB_VERSION(0, 50, 0))
+ printf("Library version detection unsupported!\n");
+ else
+ printf("Detected library version: %s\n", imb_get_version_str());
+
+ /* Print available CPU features */
+ print_hw_features();
+
+ for (i = 1; i < argc; i++) {
+ if (strcmp(argv[i], "-h") == 0) {
+ usage(argv[0]);
+ return EXIT_SUCCESS;
+ } else if (strcmp(argv[i], "--no-aesni-emu") == 0) {
+ do_aesni_emu = 0;
+ } else if (strcmp(argv[i], "--no-avx512") == 0) {
+ do_avx512 = 0;
+ } else if (strcmp(argv[i], "--no-avx2") == 0) {
+ do_avx2 = 0;
+ } else if (strcmp(argv[i], "--no-avx") == 0) {
+ do_avx = 0;
+ } else if (strcmp(argv[i], "--no-sse") == 0) {
+ do_sse = 0;
+ } else if (strcmp(argv[i], "--shani-on") == 0) {
+ flags &= (~IMB_FLAG_SHANI_OFF);
+ } else if (strcmp(argv[i], "--shani-off") == 0) {
+ flags |= IMB_FLAG_SHANI_OFF;
+ } else if (strcmp(argv[i], "--no-gcm") == 0) {
+ do_gcm = 0;
+ } else if (strcmp(argv[i], "--auto-detect") == 0) {
+ auto_detect = 1;
+ } else {
+ usage(argv[0]);
+ return EXIT_FAILURE;
+ }
+ }
+
+ if (auto_detect)
+ detect_arch(&do_aesni_emu, &do_sse, &do_avx,
+ &do_avx2, &do_avx512, &do_gcm);
+
+ for (i = 0; i < ARCH_NUMOF; i++) {
+ const enum arch_type atype = arch_type_tab[i];
+
+ switch (atype) {
+ case ARCH_SSE:
+ if (!do_sse)
+ continue;
+ p_mgr = alloc_mb_mgr(flags);
+ if (p_mgr == NULL) {
+ printf("Error allocating MB_MGR structure!\n");
+ return EXIT_FAILURE;
+ }
+ init_mb_mgr_sse(p_mgr);
+ break;
+ case ARCH_AVX:
+ if (!do_avx)
+ continue;
+ p_mgr = alloc_mb_mgr(flags);
+ if (p_mgr == NULL) {
+ printf("Error allocating MB_MGR structure!\n");
+ return EXIT_FAILURE;
+ }
+ init_mb_mgr_avx(p_mgr);
+ break;
+ case ARCH_AVX2:
+ if (!do_avx2)
+ continue;
+ p_mgr = alloc_mb_mgr(flags);
+ if (p_mgr == NULL) {
+ printf("Error allocating MB_MGR structure!\n");
+ return EXIT_FAILURE;
+ }
+ init_mb_mgr_avx2(p_mgr);
+ break;
+ case ARCH_AVX512:
+ if (!do_avx512)
+ continue;
+ p_mgr = alloc_mb_mgr(flags);
+ if (p_mgr == NULL) {
+ printf("Error allocating MB_MGR structure!\n");
+ return EXIT_FAILURE;
+ }
+ init_mb_mgr_avx512(p_mgr);
+ break;
+ case ARCH_NO_AESNI:
+ if (!do_aesni_emu)
+ continue;
+ p_mgr = alloc_mb_mgr(flags | IMB_FLAG_AESNI_OFF);
+ if (p_mgr == NULL) {
+ printf("Error allocating MB_MGR structure!\n");
+ return EXIT_FAILURE;
+ }
+ init_mb_mgr_sse(p_mgr);
+ break;
+ default:
+ printf("Architecture type '%d' error!\n", (int) atype);
+ continue;
+ }
+
+ printf("Testing %s interface\n", arch_str_tab[i]);
+
+ errors += known_answer_test(p_mgr);
+ errors += do_test(p_mgr);
+ errors += ctr_test(atype, p_mgr);
+ errors += pon_test(atype, p_mgr);
+ if (do_gcm)
+ errors += gcm_test(p_mgr);
+ errors += customop_test(p_mgr);
+ errors += des_test(atype, p_mgr);
+ errors += ccm_test(atype, p_mgr);
+ errors += cmac_test(atype, p_mgr);
+ errors += zuc_test(atype, p_mgr);
+ errors += kasumi_test(atype, p_mgr);
+ errors += snow3g_test(atype, p_mgr);
+ errors += hmac_sha1_test(atype, p_mgr);
+ errors += hmac_sha256_sha512_test(atype, p_mgr);
+ errors += hmac_md5_test(atype, p_mgr);
+ errors += aes_test(atype, p_mgr);
+ errors += ecb_test(atype, p_mgr);
+ errors += sha_test(atype, p_mgr);
+ errors += chained_test(atype, p_mgr);
+ errors += api_test(atype, p_mgr);
+ errors += direct_api_test(atype, p_mgr);
+ free_mb_mgr(p_mgr);
+ }
+
+ if (errors) {
+ printf("Test completed: FAIL\n");
+ return EXIT_FAILURE;
+ }
+
+ printf("Test completed: PASS\n");
+
+ return EXIT_SUCCESS;
+}
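main() above follows the library's standard bring-up sequence for each architecture: alloc_mb_mgr() with optional flags, an architecture-specific init_mb_mgr_*() call, the test runs, then free_mb_mgr(). Condensed to a single SSE manager, the pattern looks roughly like the sketch below (job submission omitted; this is an illustration, not additional code from the test app).

    #include <intel-ipsec-mb.h>

    static int run_sse_once(void)
    {
            MB_MGR *mgr = alloc_mb_mgr(0);  /* or e.g. IMB_FLAG_SHANI_OFF */

            if (mgr == NULL)
                    return -1;

            init_mb_mgr_sse(mgr);
            /* ... IMB_GET_NEXT_JOB()/IMB_SUBMIT_JOB()/IMB_FLUSH_JOB() ... */
            free_mb_mgr(mgr);
            return 0;
    }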
diff --git a/src/spdk/intel-ipsec-mb/LibTestApp/misc.asm b/src/spdk/intel-ipsec-mb/LibTestApp/misc.asm
new file mode 100644
index 000000000..d4f06ca92
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/LibTestApp/misc.asm
@@ -0,0 +1,251 @@
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; Copyright(c) 2019, Intel Corporation All rights reserved.
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions
+; are met:
+; * Redistributions of source code must retain the above copyright
+; notice, this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright
+; notice, this list of conditions and the following disclaimer in
+; the documentation and/or other materials provided with the
+; distribution.
+; * Neither the name of Intel Corporation nor the names of its
+; contributors may be used to endorse or promote products derived
+; from this software without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+
+%ifdef LINUX
+;;; macro to declare global symbols
+;;; - name : symbol name
+;;; - type	: function or data
+;;; - scope : internal, private, default
+%define MKGLOBAL(name,type,scope) global name %+ : %+ type scope
+%endif
+
+%ifdef WIN_ABI
+;;; macro to declare global symbols
+;;; - name : symbol name
+;;; - type	: function or data
+;;; - scope : internal, private, default (ignored in win64 coff format)
+%define MKGLOBAL(name,type,scope) global name
+%endif
+
+section .bss
+default rel
+
+MKGLOBAL(gps,data,)
+gps: resq 14
+
+MKGLOBAL(simd_regs,data,)
+alignb 64
+simd_regs: resb 32*64
+
+section .text
+
+;; Returns RSP pointer with the value BEFORE the call, so 8 bytes need
+;; to be added
+MKGLOBAL(rdrsp,function,)
+rdrsp:
+ lea rax, [rsp + 8]
+ ret
+
+MKGLOBAL(dump_gps,function,)
+dump_gps:
+
+ mov [rel gps], rax
+ mov [rel gps + 8], rbx
+ mov [rel gps + 16], rcx
+ mov [rel gps + 24], rdx
+ mov [rel gps + 32], rdi
+ mov [rel gps + 40], rsi
+
+%assign i 8
+%assign j 0
+%rep 8
+ mov [rel gps + 48 + j], r %+i
+%assign i (i+1)
+%assign j (j+8)
+%endrep
+
+ ret
+
+MKGLOBAL(dump_xmms_sse,function,)
+dump_xmms_sse:
+
+%assign i 0
+%assign j 0
+%rep 16
+ movdqa [rel simd_regs + j], xmm %+i
+%assign i (i+1)
+%assign j (j+16)
+%endrep
+
+ ret
+
+MKGLOBAL(dump_xmms_avx,function,)
+dump_xmms_avx:
+
+%assign i 0
+%assign j 0
+%rep 16
+ vmovdqa [rel simd_regs + j], xmm %+i
+%assign i (i+1)
+%assign j (j+16)
+%endrep
+
+ ret
+
+MKGLOBAL(dump_ymms,function,)
+dump_ymms:
+
+%assign i 0
+%assign j 0
+%rep 16
+ vmovdqa [rel simd_regs + j], ymm %+i
+%assign i (i+1)
+%assign j (j+32)
+%endrep
+
+ ret
+
+MKGLOBAL(dump_zmms,function,)
+dump_zmms:
+
+%assign i 0
+%assign j 0
+%rep 32
+ vmovdqa64 [rel simd_regs + j], zmm %+i
+%assign i (i+1)
+%assign j (j+64)
+%endrep
+
+ ret
+
+;
+; This function clears all scratch XMM registers
+;
+; void clear_scratch_xmms_sse(void)
+MKGLOBAL(clear_scratch_xmms_sse,function,internal)
+clear_scratch_xmms_sse:
+
+%ifdef LINUX
+%assign i 0
+%rep 16
+ pxor xmm %+ i, xmm %+ i
+%assign i (i+1)
+%endrep
+; On Windows, XMM0-XMM5 registers are scratch registers
+%else
+%assign i 0
+%rep 6
+ pxor xmm %+ i, xmm %+ i
+%assign i (i+1)
+%endrep
+%endif ; LINUX
+
+ ret
+
+;
+; This function clears all scratch XMM registers
+;
+; It should be called before restoring the XMM registers
+; for Windows (XMM6-XMM15)
+;
+; void clear_scratch_xmms_avx(void)
+MKGLOBAL(clear_scratch_xmms_avx,function,internal)
+clear_scratch_xmms_avx:
+
+%ifdef LINUX
+ vzeroall
+; On Windows, XMM0-XMM5 registers are scratch registers
+%else
+%assign i 0
+%rep 6
+ vpxor xmm %+ i, xmm %+ i
+%assign i (i+1)
+%endrep
+%endif ; LINUX
+
+ ret
+
+;
+; This function clears all scratch YMM registers
+;
+; It should be called before restoring the XMM registers
+; for Windows (XMM6-XMM15)
+;
+; void clear_scratch_ymms(void)
+MKGLOBAL(clear_scratch_ymms,function,internal)
+clear_scratch_ymms:
+; On Linux, all YMM registers are scratch registers
+%ifdef LINUX
+ vzeroall
+; On Windows, YMM0-YMM5 registers are scratch registers.
+; YMM6-YMM15 upper 128 bits are scratch registers too, but
+; the lower 128 bits are to be restored after calling this function,
+; which clears the upper bits too.
+%else
+%assign i 0
+%rep 6
+ vpxor ymm %+ i, ymm %+ i
+%assign i (i+1)
+%endrep
+%endif ; LINUX
+
+ ret
+
+;
+; This function clears all scratch ZMM registers
+;
+; It should be called before restoring the XMM registers
+; for Windows (XMM6-XMM15). YMM registers are used
+; on purpose, since XOR'ing YMM registers is faster
+; than XOR'ing ZMM registers, and the operation also clears
+; the upper 256 bits
+;
+; void clear_scratch_zmms(void)
+MKGLOBAL(clear_scratch_zmms,function,internal)
+clear_scratch_zmms:
+
+; On Linux, all ZMM registers are scratch registers
+%ifdef LINUX
+ vzeroall
+ ;; vzeroall only clears the first 16 ZMM registers
+%assign i 16
+%rep 16
+ vpxorq ymm %+ i, ymm %+ i
+%assign i (i+1)
+%endrep
+; On Windows, ZMM0-ZMM5 and ZMM16-ZMM31 registers are scratch registers.
+; ZMM6-ZMM15 upper 384 bits are scratch registers too, but
+; the lower 128 bits are to be restored after calling this function,
+; which clears the upper bits too.
+%else
+%assign i 0
+%rep 6
+ vpxorq ymm %+ i, ymm %+ i
+%assign i (i+1)
+%endrep
+
+%assign i 16
+%rep 16
+ vpxorq ymm %+ i, ymm %+ i
+%assign i (i+1)
+%endrep
+%endif ; LINUX
+
+ ret
diff --git a/src/spdk/intel-ipsec-mb/LibTestApp/misc.h b/src/spdk/intel-ipsec-mb/LibTestApp/misc.h
new file mode 100644
index 000000000..2b1f68ea0
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/LibTestApp/misc.h
@@ -0,0 +1,58 @@
+/*****************************************************************************
+ Copyright (c) 2019, Intel Corporation
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*****************************************************************************/
+
+#ifndef XVALIDAPP_MISC_H
+#define XVALIDAPP_MISC_H
+
+/* RAX, RBX, RCX, RDX, RDI, RSI, R8-R15 */
+#define GP_MEM_SIZE 14*8
+
+#define XMM_MEM_SIZE 16*16
+#define YMM_MEM_SIZE 16*32
+#define ZMM_MEM_SIZE 32*64
+
+/* Memory allocated in BSS section in misc.asm */
+extern uint8_t gps[GP_MEM_SIZE];
+extern uint8_t simd_regs[ZMM_MEM_SIZE];
+
+/* Read RSP pointer */
+void *rdrsp(void);
+
+/* Functions to dump all registers into predefined memory */
+void dump_gps(void);
+void dump_xmms_sse(void);
+void dump_xmms_avx(void);
+void dump_ymms(void);
+void dump_zmms(void);
+
+/* Functions to clear all scratch SIMD registers */
+void clear_scratch_xmms_sse(void);
+void clear_scratch_xmms_avx(void);
+void clear_scratch_ymms(void);
+void clear_scratch_zmms(void);
+
+#endif /* XVALIDAPP_MISC_H */
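misc.h exposes the misc.asm helpers that snapshot CPU register state into the gps[] and simd_regs[] areas declared above. A small illustration of the call pattern is sketched below; the printing is purely illustrative and is not taken from the test application.

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>
    #include <inttypes.h>
    #include "misc.h"

    /* Illustration: capture the general purpose registers into gps[] and
     * read the caller's stack pointer via rdrsp(). Per misc.asm, dump_gps()
     * stores RAX at offset 0 of gps[]. */
    static void snapshot_demo(void)
    {
            uint64_t rax_snapshot;

            dump_gps();
            memcpy(&rax_snapshot, gps, sizeof(rax_snapshot));
            printf("caller RSP = %p\n", rdrsp());
            printf("saved RAX  = 0x%016" PRIx64 "\n", rax_snapshot);
    }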
diff --git a/src/spdk/intel-ipsec-mb/LibTestApp/pon_test.c b/src/spdk/intel-ipsec-mb/LibTestApp/pon_test.c
new file mode 100644
index 000000000..e5091a94e
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/LibTestApp/pon_test.c
@@ -0,0 +1,694 @@
+/*****************************************************************************
+ Copyright (c) 2019, Intel Corporation
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*****************************************************************************/
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <intel-ipsec-mb.h>
+#include "gcm_ctr_vectors_test.h"
+#include "utils.h"
+
+int pon_test(const enum arch_type arch, struct MB_MGR *mb_mgr);
+
+/* === vector 1 */
+
+static const uint8_t KEY1_PON[] = {
+ 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88,
+ 0x99, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF, 0x00
+};
+
+static const uint8_t IV1_PON[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04
+};
+
+static const uint8_t IN1_PON[] = {
+ 0x00, 0x20, 0x27, 0x11, 0x00, 0x00, 0x21, 0x23, /* XGEM header */
+ 0x01, 0x02, 0x03, 0x04, /* Ethernet frame */
+ 0xcd, 0xfb, 0x3c, 0xb6 /* CRC value */
+};
+
+static const uint8_t OUT1_PON[] = {
+ 0x00, 0x20, 0x27, 0x11, 0x00, 0x00, 0x21, 0x23, /* XGEM header */
+ 0xC7, 0x62, 0x82, 0xCA, /* Ethernet frame */
+ 0x3E, 0x92, 0xC8, 0x5A /* CRC value */
+};
+#define BIPOUT1_PON 0xA24CD0F9
+#define OFFSET1_PON 8
+#define LENBIP1_PON sizeof(IN1_PON)
+#define LENCIPH1_PON (LENBIP1_PON - OFFSET1_PON)
+
+/* === vector 2 */
+
+static const uint8_t IN2_PON[] = {
+ 0x00, 0x40, 0x27, 0x11, 0x00, 0x00, 0x29, 0x3C, /* XGEM header */
+ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x01, 0x01, /* Ethernet frame */
+ 0x01, 0x01, 0x01, 0x01,
+ 0x00, 0x14, 0xa9, 0x04 /* CRC value */
+};
+
+static const uint8_t OUT2_PON[] = {
+ 0x00, 0x40, 0x27, 0x11, 0x00, 0x00, 0x29, 0x3C, /* XGEM header */
+ 0xC7, 0x62, 0x82, 0xCA, 0xF6, 0x6F, 0xF5, 0xED,
+ 0xB7, 0x90, 0x1E, 0x02,
+ 0xEA, 0x38, 0xA1, 0x78
+};
+
+#define KEY2_PON KEY1_PON
+#define IV2_PON IV1_PON
+#define BIPOUT2_PON 0x70C6E56C
+#define OFFSET2_PON 8
+#define LENBIP2_PON sizeof(IN2_PON)
+#define LENCIPH2_PON (LENBIP2_PON - OFFSET2_PON)
+
+/* === vector 3 */
+
+static const uint8_t IN3_PON[] = {
+ 0x01, 0x00, 0x27, 0x11, 0x00, 0x00, 0x33, 0x0B, /* XGEM header */
+ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x01, 0x01, /* Ethernet frame */
+ 0x01, 0x01, 0x01, 0x01, 0x81, 0x00, 0x00, 0x01,
+ 0x08, 0x00, 0x45, 0x00, 0x00, 0x6A, 0xB0, 0x7E,
+ 0x00, 0x00, 0x04, 0x06, 0x83, 0xBD, 0xC0, 0xA8,
+ 0x00, 0x01, 0xC0, 0xA8, 0x01, 0x01, 0x04, 0xD2,
+ 0x16, 0x2E, 0x12, 0x34, 0x56, 0x78, 0x12, 0x34,
+ 0x56, 0x90, 0x50, 0x10, 0x20, 0x00, 0xA6, 0x33,
+ 0x00, 0x00, 0x30, 0x31,
+ 0x53, 0xc1, 0xe6, 0x0c /* CRC value */
+};
+
+static const uint8_t OUT3_PON[] = {
+ 0x01, 0x00, 0x27, 0x11, 0x00, 0x00, 0x33, 0x0B, /* XGEM header */
+ 0xC7, 0x62, 0x82, 0xCA, 0xF6, 0x6F, 0xF5, 0xED,
+ 0xB7, 0x90, 0x1E, 0x02, 0x6B, 0x2C, 0x08, 0x7D,
+ 0x3C, 0x90, 0xE8, 0x2C, 0x44, 0x30, 0x03, 0x29,
+ 0x5F, 0x88, 0xA9, 0xD6, 0x1E, 0xF9, 0xD1, 0xF1,
+ 0xD6, 0x16, 0x8C, 0x72, 0xA4, 0xCD, 0xD2, 0x8F,
+ 0x63, 0x26, 0xC9, 0x66, 0xB0, 0x65, 0x24, 0x9B,
+ 0x60, 0x5B, 0x18, 0x60, 0xBD, 0xD5, 0x06, 0x13,
+ 0x40, 0xC9, 0x60, 0x64,
+ 0x36, 0x5F, 0x86, 0x8C
+};
+#define KEY3_PON KEY1_PON
+#define IV3_PON IV1_PON
+#define BIPOUT3_PON 0xFBADE0DF
+#define OFFSET3_PON 8
+#define LENBIP3_PON sizeof(IN3_PON)
+#define LENCIPH3_PON (LENBIP3_PON - OFFSET3_PON)
+
+/* === vector 4 */
+
+static const uint8_t IN4_PON[] = {
+ 0x01, 0x10, 0x27, 0x11, 0x00, 0x00, 0x3C, 0x18, /* XGEM header */
+ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x01, 0x01, /* Ethernet frame */
+ 0x01, 0x01, 0x01, 0x01, 0x81, 0x00, 0x00, 0x01,
+ 0x08, 0x00, 0x45, 0x00, 0x00, 0x6A, 0x70, 0x63,
+ 0x00, 0x00, 0x04, 0x06, 0xC3, 0xD8, 0xC0, 0xA8,
+ 0x00, 0x01, 0xC0, 0xA8, 0x01, 0x01, 0x04, 0xD2,
+ 0x16, 0x2E, 0x12, 0x34, 0x56, 0x78, 0x12, 0x34,
+ 0x56, 0x90, 0x50, 0x10, 0x20, 0x00, 0xA6, 0x33,
+ 0x00, 0x00, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35,
+ 0x49, 0x0d, 0x52, 0xab /* CRC value */
+};
+
+static const uint8_t OUT4_PON[] = {
+ 0x01, 0x10, 0x27, 0x11, 0x00, 0x00, 0x3C, 0x18, /* XGEM header */
+ 0xC7, 0x62, 0x82, 0xCA, 0xF6, 0x6F, 0xF5, 0xED, /* Ethernet frame */
+ 0xB7, 0x90, 0x1E, 0x02, 0x6B, 0x2C, 0x08, 0x7D,
+ 0x3C, 0x90, 0xE8, 0x2C, 0x44, 0x30, 0xC3, 0x34,
+ 0x5F, 0x88, 0xA9, 0xD6, 0x5E, 0x9C, 0xD1, 0xF1,
+ 0xD6, 0x16, 0x8C, 0x72, 0xA4, 0xCD, 0xD2, 0x8F,
+ 0x63, 0x26, 0xC9, 0x66, 0xB0, 0x65, 0x24, 0x9B,
+ 0x60, 0x5B, 0x18, 0x60, 0xBD, 0xD5, 0x06, 0x13,
+ 0x40, 0xC9, 0x60, 0x64, 0x57, 0xAD, 0x54, 0xB5,
+ 0xD9, 0xEA, 0x01, 0xB2
+};
+#define KEY4_PON KEY1_PON
+#define IV4_PON IV1_PON
+#define BIPOUT4_PON 0x7EB18D27
+#define OFFSET4_PON 8
+#define LENBIP4_PON sizeof(IN4_PON)
+#define LENCIPH4_PON (LENBIP4_PON - OFFSET4_PON)
+
+/* Vectors with no encryption */
+static const uint8_t IN5_PON[] = {
+ 0x00, 0x20, 0x27, 0x11, 0x00, 0x00, 0x21, 0x23, /* XGEM header */
+ 0x01, 0x02, 0x03, 0x04, /* Ethernet frame */
+ 0xCD, 0xFB, 0x3C, 0xB6 /* CRC value */
+};
+
+static const uint8_t OUT5_PON[] = {
+ 0x00, 0x20, 0x27, 0x11, 0x00, 0x00, 0x21, 0x23, /* XGEM header */
+ 0x01, 0x02, 0x03, 0x04, /* Ethernet frame */
+ 0xCD, 0xFB, 0x3C, 0xB6 /* CRC value */
+};
+#define BIPOUT5_PON 0x8039D9CC
+#define OFFSET5_PON 8
+#define LENBIP5_PON sizeof(IN5_PON)
+#define LENCIPH5_PON (LENBIP5_PON - OFFSET5_PON)
+
+static const uint8_t IN6_PON[] = {
+ 0x00, 0x40, 0x27, 0x11, 0x00, 0x00, 0x29, 0x3C, /* XGEM header */
+ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x01, 0x01, /* Ethernet frame */
+ 0x01, 0x01, 0x01, 0x01,
+ 0x00, 0x14, 0xa9, 0x04 /* CRC value */
+};
+
+static const uint8_t OUT6_PON[] = {
+ 0x00, 0x40, 0x27, 0x11, 0x00, 0x00, 0x29, 0x3C, /* XGEM header */
+ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x01, 0x01, /* Ethernet frame */
+ 0x01, 0x01, 0x01, 0x01,
+ 0x00, 0x14, 0xa9, 0x04
+};
+
+#define BIPOUT6_PON 0x2DA45105
+#define OFFSET6_PON 8
+#define LENBIP6_PON sizeof(IN6_PON)
+#define LENCIPH6_PON (LENBIP6_PON - OFFSET6_PON)
+
+static const uint8_t IN7_PON[] = {
+ 0x01, 0x00, 0x27, 0x11, 0x00, 0x00, 0x33, 0x0B, /* XGEM header */
+ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x01, 0x01, /* ethernet frame */
+ 0x01, 0x01, 0x01, 0x01, 0x81, 0x00, 0x00, 0x01,
+ 0x08, 0x00, 0x45, 0x00, 0x00, 0x6a, 0xb0, 0x7e,
+ 0x00, 0x00, 0x04, 0x06, 0x83, 0xbd, 0xc0, 0xa8,
+ 0x00, 0x01, 0xc0, 0xa8, 0x01, 0x01, 0x04, 0xd2,
+ 0x16, 0x2e, 0x12, 0x34, 0x56, 0x78, 0x12, 0x34,
+ 0x56, 0x90, 0x50, 0x10, 0x20, 0x00, 0xa6, 0x33,
+ 0x00, 0x00, 0x30, 0x31,
+ 0x53, 0xC1, 0xE6, 0x0C /* CRC value */
+};
+
+static const uint8_t OUT7_PON[] = {
+ 0x01, 0x00, 0x27, 0x11, 0x00, 0x00, 0x33, 0x0B, /* XGEM header */
+ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x01, 0x01, /* ethernet frame */
+ 0x01, 0x01, 0x01, 0x01, 0x81, 0x00, 0x00, 0x01,
+ 0x08, 0x00, 0x45, 0x00, 0x00, 0x6a, 0xb0, 0x7e,
+ 0x00, 0x00, 0x04, 0x06, 0x83, 0xbd, 0xc0, 0xa8,
+ 0x00, 0x01, 0xc0, 0xa8, 0x01, 0x01, 0x04, 0xd2,
+ 0x16, 0x2e, 0x12, 0x34, 0x56, 0x78, 0x12, 0x34,
+ 0x56, 0x90, 0x50, 0x10, 0x20, 0x00, 0xa6, 0x33,
+ 0x00, 0x00, 0x30, 0x31,
+ 0x53, 0xC1, 0xE6, 0x0C
+};
+#define BIPOUT7_PON 0xABC2D56A
+#define OFFSET7_PON 8
+#define LENBIP7_PON sizeof(IN7_PON)
+#define LENCIPH7_PON (LENBIP7_PON - OFFSET7_PON)
+
+static const uint8_t IN8_PON[] = {
+ 0x01, 0x10, 0x27, 0x11, 0x00, 0x00, 0x3C, 0x18, /* XGEM header */
+ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x01, 0x01, /* Ethernet frame */
+ 0x01, 0x01, 0x01, 0x01, 0x81, 0x00, 0x00, 0x01,
+ 0x08, 0x00, 0x45, 0x00, 0x00, 0x6A, 0x70, 0x63,
+ 0x00, 0x00, 0x04, 0x06, 0xC3, 0xD8, 0xC0, 0xA8,
+ 0x00, 0x01, 0xC0, 0xA8, 0x01, 0x01, 0x04, 0xD2,
+ 0x16, 0x2E, 0x12, 0x34, 0x56, 0x78, 0x12, 0x34,
+ 0x56, 0x90, 0x50, 0x10, 0x20, 0x00, 0xA6, 0x33,
+ 0x00, 0x00, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35,
+ 0x49, 0x0D, 0x52, 0xAB /* CRC value */
+};
+
+static const uint8_t OUT8_PON[] = {
+ 0x01, 0x10, 0x27, 0x11, 0x00, 0x00, 0x3C, 0x18, /* XGEM header */
+ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x01, 0x01, /* Ethernet frame */
+ 0x01, 0x01, 0x01, 0x01, 0x81, 0x00, 0x00, 0x01,
+ 0x08, 0x00, 0x45, 0x00, 0x00, 0x6A, 0x70, 0x63,
+ 0x00, 0x00, 0x04, 0x06, 0xC3, 0xD8, 0xC0, 0xA8,
+ 0x00, 0x01, 0xC0, 0xA8, 0x01, 0x01, 0x04, 0xD2,
+ 0x16, 0x2E, 0x12, 0x34, 0x56, 0x78, 0x12, 0x34,
+ 0x56, 0x90, 0x50, 0x10, 0x20, 0x00, 0xA6, 0x33,
+ 0x00, 0x00, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35,
+ 0x49, 0x0D, 0x52, 0xAB /* CRC value */
+};
+#define BIPOUT8_PON 0x378D5F02
+#define OFFSET8_PON 8
+#define LENBIP8_PON sizeof(IN8_PON)
+#define LENCIPH8_PON (LENBIP8_PON - OFFSET8_PON)
+
+/* Vectors with encryption and with padding */
+/* === vector 9 */
+static const uint8_t IN9_PON[] = {
+ 0x00, 0x39, 0x03, 0xfd, 0x00, 0x00, 0xb3, 0x6a, /* XGEM header */
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* Ethernet frame */
+ 0x10, 0x11,
+ 0x8c, 0xd0, 0x9a, 0x8b, /* CRC value */
+ 0x55, 0x55 /* XGEM padding */
+};
+
+static const uint8_t OUT9_PON[] = {
+ 0x00, 0x39, 0x03, 0xfd, 0x00, 0x00, 0xb3, 0x6a, /* XGEM header */
+ 0x73, 0xe0, 0x5d, 0x5d, 0x32, 0x9c, 0x3b, 0xfa, /* Ethernet frame */
+ 0x6b, 0x66,
+ 0xf6, 0x8e, 0x5b, 0xd5, /* CRC value */
+ 0xab, 0xcd /* XGEM padding */
+};
+
+#define KEY9_PON KEY1_PON
+
+static const uint8_t IV9_PON[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+#define BIPOUT9_PON 0x738bf671
+#define OFFSET9_PON 8
+#define LENBIP9_PON sizeof(IN9_PON)
+#define LENCIPH9_PON (LENBIP9_PON - OFFSET9_PON)
+
+/* === vector 10 */
+
+/* This is a fragmented frame (1-byte payload + padding)
+ * - computed CRC will not match value in the message
+ * - on encrypt CRC should not be written into the message
+ */
+static const uint8_t IN10_PON[] = {
+ 0x00, 0x05, 0x03, 0xfd, 0x00, 0x00, 0xb9, 0xb4, /* XGEM header */
+ 0x08, /* Ethernet frame */
+ 0x55, 0x55, 0x55, 0x55, 0x55, 0x55, 0x55 /* XGEM padding */
+};
+
+static const uint8_t OUT10_PON[] = {
+ 0x00, 0x05, 0x03, 0xfd, 0x00, 0x00, 0xb9, 0xb4, /* XGEM header */
+ 0x73, /* Ethernet frame */
+ 0xbc, 0x02, 0x03, 0x6b, 0xc4, 0x60, 0xa0 /* XGEM padding */
+};
+
+#define KEY10_PON KEY1_PON
+#define IV10_PON IV9_PON
+#define BIPOUT10_PON 0xead87d18
+#define OFFSET10_PON 8
+#define LENBIP10_PON sizeof(IN10_PON)
+#define LENCIPH10_PON (LENBIP10_PON - OFFSET10_PON)
+
+/* Vectors with no encryption and with padding */
+/* === vector 11 */
+static const uint8_t IN11_PON[] = {
+ 0x00, 0x39, 0x03, 0xfd, 0x00, 0x00, 0xb3, 0x6a, /* XGEM header */
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* Ethernet frame */
+ 0x10, 0x11,
+ 0x8c, 0xd0, 0x9a, 0x8b, /* CRC value */
+ 0x55, 0x55 /* XGEM padding */
+};
+
+static const uint8_t OUT11_PON[] = {
+ 0x00, 0x39, 0x03, 0xfd, 0x00, 0x00, 0xb3, 0x6a, /* XGEM header */
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* Ethernet frame */
+ 0x10, 0x11,
+ 0x8c, 0xd0, 0x9a, 0x8b, /* CRC value */
+ 0x55, 0x55 /* XGEM padding */
+};
+
+#define KEY11_PON KEY1_PON
+#define BIPOUT11_PON 0x166da78e
+#define OFFSET11_PON 8
+#define LENBIP11_PON sizeof(IN11_PON)
+#define LENCIPH11_PON (LENBIP11_PON - OFFSET11_PON)
+
+/* === vector 12 */
+
+/* This is a fragmented frame (1-byte payload + padding)
+ * - computed CRC will not match value in the message
+ * - on encrypt CRC should not be written into the message
+ */
+static const uint8_t IN12_PON[] = {
+ 0x00, 0x05, 0x03, 0xfd, 0x00, 0x00, 0xb9, 0xb4, /* XGEM header */
+ 0x08, /* Ethernet frame */
+ 0x55, 0x55, 0x55, 0x55, 0x55, 0x55, 0x55 /* XGEM padding */
+};
+
+static const uint8_t OUT12_PON[] = {
+ 0x00, 0x05, 0x03, 0xfd, 0x00, 0x00, 0xb9, 0xb4, /* XGEM header */
+ 0x08, /* Ethernet frame */
+ 0x55, 0x55, 0x55, 0x55, 0x55, 0x55, 0x55 /* XGEM padding */
+};
+
+#define KEY12_PON KEY1_PON
+#define BIPOUT12_PON 0x49ba055d
+#define OFFSET12_PON 8
+#define LENBIP12_PON sizeof(IN12_PON)
+#define LENCIPH12_PON (LENBIP12_PON - OFFSET12_PON)
+
+/* === vector 13 */
+
+/* This is a fragmented frame (4-byte payload + padding)
+ * - computed CRC will not match value in the message
+ * - on encrypt CRC should not be written into the message
+ */
+static const uint8_t IN13_PON[] = {
+ 0x00, 0x11, 0x03, 0xfd, 0x00, 0x00, 0xbf, 0xff, /* XGEM header */
+ 0x08, 0x09, 0x0a, 0x0b, /* Ethernet frame */
+ 0x55, 0x55, 0x55, 0x55 /* XGEM padding */
+};
+
+static const uint8_t OUT13_PON[] = {
+ 0x00, 0x11, 0x03, 0xfd, 0x00, 0x00, 0xbf, 0xff, /* XGEM header */
+ 0x73, 0xe0, 0x5d, 0x5d, /* Ethernet frame */
+ 0x6b, 0xc4, 0x60, 0xa0 /* XGEM padding */
+};
+
+#define KEY13_PON KEY1_PON
+#define IV13_PON IV9_PON
+#define BIPOUT13_PON 0xff813518
+#define OFFSET13_PON 8
+#define LENBIP13_PON sizeof(IN13_PON)
+#define LENCIPH13_PON (LENBIP13_PON - OFFSET13_PON)
+
+
+#define ponvector(tname) \
+ { KEY ## tname, IV ## tname, IN ## tname, OUT ## tname, \
+ BIPOUT ## tname, LENBIP ## tname, \
+ LENCIPH ## tname, OFFSET ## tname }
+
+#define pon_no_ctr_vector(tname) \
+ { NULL, NULL, IN ## tname, OUT ## tname, \
+ BIPOUT ## tname, LENBIP ## tname, \
+ LENCIPH ## tname, OFFSET ## tname }
+
+
+static const struct pon_test_vector {
+ const uint8_t *key;
+ const uint8_t *iv;
+ const uint8_t *in;
+ const uint8_t *out;
+ const uint32_t bip_out;
+ size_t length_to_bip;
+ size_t length_to_cipher;
+ size_t offset_to_crc_cipher;
+} pon_vectors[] = {
+ ponvector(1_PON),
+ ponvector(2_PON),
+ ponvector(3_PON),
+ ponvector(4_PON),
+ pon_no_ctr_vector(5_PON),
+ pon_no_ctr_vector(6_PON),
+ pon_no_ctr_vector(7_PON),
+ pon_no_ctr_vector(8_PON),
+ ponvector(9_PON),
+ ponvector(10_PON),
+ pon_no_ctr_vector(11_PON),
+ pon_no_ctr_vector(12_PON),
+ ponvector(13_PON),
+};
+
+static int
+test_pon(struct MB_MGR *mb_mgr,
+ const void *expkey,
+ const void *iv,
+ const uint8_t *in_text,
+ const uint8_t *out_text,
+ const size_t len_to_cipher,
+ const size_t len_to_bip,
+ const size_t offset_to_cipher_crc,
+ const uint32_t bip_out,
+ const int dir,
+ const int order)
+{
+ struct JOB_AES_HMAC *job;
+ uint8_t padding[16];
+ uint8_t *target = malloc(len_to_bip + (sizeof(padding) * 2));
+ int ret = -1;
+ uint64_t tag_output = 0;
+ uint32_t bip_output = 0;
+ uint32_t crc_output = 0;
+
+ if (target == NULL) {
+ fprintf(stderr, "Can't allocate buffer memory\n");
+ return ret;
+ }
+
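+ /* Buffer layout: [padding][message][padding]; both padding areas are
+ * checked after the job completes to detect out-of-bounds writes.
+ */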
+ memset(target, -1, len_to_bip + (sizeof(padding) * 2));
+ memset(padding, -1, sizeof(padding));
+
+ if (dir == ENCRYPT) {
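+ /* PLI (payload length indication) is carried in the top 14 bits of
+ * the first two XGEM header bytes, hence the shift right by 2.
+ */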
+ const uint16_t pli = ((((uint16_t) in_text[0]) << 8) |
+ ((uint16_t) in_text[1])) >> 2;
+
+ memcpy(target + sizeof(padding), in_text, len_to_bip);
+
+ /* Corrupt the HEC in the encrypt direction.
+ * This is to make sure the HEC gets updated by the library.
+ */
+ target[sizeof(padding) + 7] ^= 0xff;
+
+ /* Corrupt the Ethernet FCS/CRC in the encrypt direction.
+ * This is to make sure the CRC gets updated by the library.
+ */
+ if (pli > 4) {
+ uint8_t *p_crc = &target[sizeof(padding) + 8 + pli - 4];
+
+ p_crc[0] ^= 0xff;
+ p_crc[1] ^= 0xff;
+ p_crc[2] ^= 0xff;
+ p_crc[3] ^= 0xff;
+ }
+ } else {
+ memcpy(target + sizeof(padding), out_text, len_to_bip);
+ }
+
+ while ((job = IMB_FLUSH_JOB(mb_mgr)) != NULL)
+ ;
+
+ job = IMB_GET_NEXT_JOB(mb_mgr);
+ job->cipher_direction = dir;
+ job->chain_order = order;
+ job->dst = target + sizeof(padding) + offset_to_cipher_crc;
+ job->src = target + sizeof(padding) /* in_text */;
+ job->cipher_mode = PON_AES_CNTR;
+ job->cipher_start_src_offset_in_bytes = (uint64_t) offset_to_cipher_crc;
+
+ /* If IV == NULL, no AES-CTR ciphering is done */
+ if (iv != NULL) {
+ job->aes_enc_key_expanded = expkey;
+ job->aes_dec_key_expanded = expkey;
+ job->aes_key_len_in_bytes = AES_128_BYTES;
+ job->iv = iv;
+ job->iv_len_in_bytes = 16;
+ job->msg_len_to_cipher_in_bytes = (uint64_t) len_to_cipher;
+ } else {
+ job->aes_enc_key_expanded = NULL;
+ job->aes_dec_key_expanded = NULL;
+ job->aes_key_len_in_bytes = 0;
+ job->iv = NULL;
+ job->iv_len_in_bytes = 0;
+ job->msg_len_to_cipher_in_bytes = 0;
+ }
+
+ job->hash_alg = PON_CRC_BIP;
+ job->hash_start_src_offset_in_bytes = 0;
+ job->msg_len_to_hash_in_bytes = (uint64_t) len_to_bip;
+ job->auth_tag_output = (void *) &tag_output;
+ job->auth_tag_output_len_in_bytes = (uint64_t) sizeof(tag_output);
+ job = IMB_SUBMIT_JOB(mb_mgr);
+
+ if (job == NULL) {
+ printf("%d NULL job after submit()", __LINE__);
+ goto end;
+ }
+
+ if (job->status != STS_COMPLETED) {
+ printf("%d Error status:%d", __LINE__, job->status);
+ goto end;
+ }
+
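+ /* The 64-bit PON_CRC_BIP tag holds BIP in the lower 32 bits and
+ * CRC in the upper 32 bits.
+ */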
+ bip_output = (uint32_t) tag_output;
+ crc_output = (uint32_t) (tag_output >> 32);
+
+#ifdef DEBUG
+ printf("CRC received 0x%08x\n", crc_output);
+ printf("BIP received 0x%08x\n", bip_output);
+#endif
+
+#ifdef DEBUG
+ int is_error = 0;
+#endif
+
+ if (dir == DECRYPT) {
+ const uint16_t pli = ((((uint16_t) in_text[0]) << 8) |
+ ((uint16_t) in_text[1])) >> 2;
+
+ if (pli > 4) {
+ const uint32_t crc_in_msg =
+ *((const uint32_t *)&in_text[8 + pli - 4]);
+ if (crc_in_msg != crc_output) {
+ printf("CRC mismatch on decrypt! "
+ "expected 0x%08x, received 0x%08x\n",
+ crc_in_msg, crc_output);
+#ifdef DEBUG
+ is_error = 1;
+#else
+ goto end;
+#endif
+ }
+ }
+ }
+
+ if (bip_output != bip_out) {
+ printf("BIP mismatch! expected 0x%08x, received 0x%08x\n",
+ bip_out, bip_output);
+#ifdef DEBUG
+ is_error = 1;
+#else
+ goto end;
+#endif
+ }
+
+ if (dir == ENCRYPT) {
+ if (memcmp(out_text, target + sizeof(padding), len_to_bip)) {
+ printf("output mismatch\n");
+ hexdump(stderr, "Target",
+ target, len_to_bip + (2 * sizeof(padding)));
+#ifdef DEBUG
+ is_error = 1;
+#else
+ goto end;
+#endif
+ }
+ } else {
+ if (memcmp(in_text, target + sizeof(padding), len_to_bip - 4)) {
+ printf("output mismatch\n");
+ hexdump(stderr, "Target", target,
+ len_to_bip + (2 * sizeof(padding)));
+#ifdef DEBUG
+ is_error = 1;
+#else
+ goto end;
+#endif
+ }
+ }
+
+ if (memcmp(padding, target, sizeof(padding))) {
+ printf("overwrite head\n");
+ hexdump(stderr, "Target", target,
+ len_to_bip + (2 * sizeof(padding)));
+#ifdef DEBUG
+ is_error = 1;
+#else
+ goto end;
+#endif
+ }
+
+ if (memcmp(padding, target + sizeof(padding) + len_to_bip,
+ sizeof(padding))) {
+ printf("overwrite tail\n");
+ hexdump(stderr, "Target", target,
+ len_to_bip + (2 * sizeof(padding)));
+#ifdef DEBUG
+ is_error = 1;
+#else
+ goto end;
+#endif
+ }
+
+#ifdef DEBUG
+ if (is_error)
+ goto end;
+#endif
+
+ /* all checks passed */
+ ret = 0;
+
+ while ((job = IMB_FLUSH_JOB(mb_mgr)) != NULL)
+ ;
+ end:
+ if (target != NULL)
+ free(target);
+
+ return ret;
+}
+
+static int
+test_pon_std_vectors(struct MB_MGR *mb_mgr)
+{
+ const int vectors_cnt = sizeof(pon_vectors) / sizeof(pon_vectors[0]);
+ int vect;
+ int errors = 0;
+ DECLARE_ALIGNED(uint32_t expkey[4*15], 16);
+ DECLARE_ALIGNED(uint32_t dust[4*15], 16);
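+ /* dust receives the decrypt key schedule from IMB_AES_KEYEXP_128;
+ * it is not used further, as test_pon() programs expkey for both
+ * directions.
+ */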
+
+ printf("PON (AES128-CTR/CRC/BIP) test vectors:\n");
+
+ for (vect = 0; vect < vectors_cnt; vect++) {
+#ifdef DEBUG
+ printf("Vector %d/%d CIPHLen:%d BIPLen:%d\n",
+ vect + 1, vectors_cnt,
+ (int) pon_vectors[vect].length_to_cipher,
+ (int) pon_vectors[vect].length_to_bip);
+#else
+ printf(".");
+#endif
+
+ if (pon_vectors[vect].key != NULL)
+ IMB_AES_KEYEXP_128(mb_mgr, pon_vectors[vect].key,
+ expkey, dust);
+
+ if (test_pon(mb_mgr,
+ expkey,
+ pon_vectors[vect].iv,
+ pon_vectors[vect].in,
+ pon_vectors[vect].out,
+ pon_vectors[vect].length_to_cipher,
+ pon_vectors[vect].length_to_bip,
+ pon_vectors[vect].offset_to_crc_cipher,
+ pon_vectors[vect].bip_out,
+ ENCRYPT, HASH_CIPHER)) {
+ printf("error #%d encrypt\n", vect + 1);
+ errors++;
+ }
+
+ if (test_pon(mb_mgr,
+ expkey,
+ pon_vectors[vect].iv,
+ pon_vectors[vect].in,
+ pon_vectors[vect].out,
+ pon_vectors[vect].length_to_cipher,
+ pon_vectors[vect].length_to_bip,
+ pon_vectors[vect].offset_to_crc_cipher,
+ pon_vectors[vect].bip_out,
+ DECRYPT, CIPHER_HASH)) {
+ printf("error #%d decrypt\n", vect + 1);
+ errors++;
+ }
+ }
+ printf("\n");
+ return errors;
+}
+
+int pon_test(const enum arch_type arch, struct MB_MGR *mb_mgr)
+{
+ int errors = 0;
+
+ if (arch != ARCH_NO_AESNI)
+ errors = test_pon_std_vectors(mb_mgr);
+
+ if (0 == errors)
+ printf("...Pass\n");
+ else
+ printf("...Fail\n");
+
+ return errors;
+}
diff --git a/src/spdk/intel-ipsec-mb/LibTestApp/sha_test.c b/src/spdk/intel-ipsec-mb/LibTestApp/sha_test.c
new file mode 100644
index 000000000..4d914a29b
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/LibTestApp/sha_test.c
@@ -0,0 +1,588 @@
+/*****************************************************************************
+ Copyright (c) 2018, Intel Corporation
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*****************************************************************************/
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <intel-ipsec-mb.h>
+#include "gcm_ctr_vectors_test.h"
+#include "utils.h"
+
+int sha_test(const enum arch_type arch, struct MB_MGR *mb_mgr);
+
+/*
+ * Test vectors come from this NIST document:
+ *
+ * https://csrc.nist.gov/csrc/media/projects/
+ * cryptographic-standards-and-guidelines/documents/examples/sha_all.pdf
+ */
+static const char message1[] = "abc";
+#define message1_len 3
+
+static const char message2[] = "";
+#define message2_len 0
+
+static const char message3[] =
+ "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq";
+#define message3_len 56
+
+static const char message4[] =
+ "abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmnhijklmnoijklmn"
+ "opjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu";
+#define message4_len 112
+
+/* macro converts one 32-bit word into four 8-bit words */
+#define CONVERT_UINT32_TO_4xUINT8(v) \
+ (((v) >> 24) & 0xff), (((v) >> 16) & 0xff), \
+ (((v) >> 8) & 0xff), (((v) >> 0) & 0xff)
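+/* e.g. CONVERT_UINT32_TO_4xUINT8(0xa9993e36) expands to 0xa9, 0x99, 0x3e, 0x36 */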
+
+/* macro converts one 64-bit word into eight 8-bit words */
+#define CONVERT_UINT64_TO_8xUINT8(v) \
+ (((v) >> 56) & 0xff), (((v) >> 48) & 0xff), \
+ (((v) >> 40) & 0xff), (((v) >> 32) & 0xff), \
+ (((v) >> 24) & 0xff), (((v) >> 16) & 0xff), \
+ (((v) >> 8) & 0xff), (((v) >> 0) & 0xff)
+
+static const char test_case1[] = "SHA-1 MSG1";
+#define data1 ((const uint8_t *)message1)
+#define data_len1 message1_len
+static const uint8_t digest1[] = {
+ /* a9993e36 4706816a ba3e2571 7850c26c 9cd0d89d */
+ CONVERT_UINT32_TO_4xUINT8(0xa9993e36),
+ CONVERT_UINT32_TO_4xUINT8(0x4706816a),
+ CONVERT_UINT32_TO_4xUINT8(0xba3e2571),
+ CONVERT_UINT32_TO_4xUINT8(0x7850c26c),
+ CONVERT_UINT32_TO_4xUINT8(0x9cd0d89d)
+};
+#define digest_len1 sizeof(digest1)
+
+static const char test_case2[] = "SHA-224 MSG1";
+#define data2 ((const uint8_t *)message1)
+#define data_len2 message1_len
+static const uint8_t digest2[] = {
+ /* 23097d22 3405d822 8642a477 bda255b3 */
+ /* 2aadbce4 bda0b3f7 e36c9da7 */
+ CONVERT_UINT32_TO_4xUINT8(0x23097d22),
+ CONVERT_UINT32_TO_4xUINT8(0x3405d822),
+ CONVERT_UINT32_TO_4xUINT8(0x8642a477),
+ CONVERT_UINT32_TO_4xUINT8(0xbda255b3),
+ CONVERT_UINT32_TO_4xUINT8(0x2aadbce4),
+ CONVERT_UINT32_TO_4xUINT8(0xbda0b3f7),
+ CONVERT_UINT32_TO_4xUINT8(0xe36c9da7)
+};
+#define digest_len2 sizeof(digest2)
+
+static const char test_case3[] = "SHA-256 MSG1";
+#define data3 ((const uint8_t *)message1)
+#define data_len3 message1_len
+static const uint8_t digest3[] = {
+ /* ba7816bf 8f01cfea 414140de 5dae2223 */
+ /* b00361a3 96177a9c b410ff61 f20015ad */
+ CONVERT_UINT32_TO_4xUINT8(0xba7816bf),
+ CONVERT_UINT32_TO_4xUINT8(0x8f01cfea),
+ CONVERT_UINT32_TO_4xUINT8(0x414140de),
+ CONVERT_UINT32_TO_4xUINT8(0x5dae2223),
+ CONVERT_UINT32_TO_4xUINT8(0xb00361a3),
+ CONVERT_UINT32_TO_4xUINT8(0x96177a9c),
+ CONVERT_UINT32_TO_4xUINT8(0xb410ff61),
+ CONVERT_UINT32_TO_4xUINT8(0xf20015ad)
+};
+#define digest_len3 sizeof(digest3)
+
+static const char test_case4[] = "SHA-384 MSG1";
+#define data4 ((const uint8_t *)message1)
+#define data_len4 message1_len
+static const uint8_t digest4[] = {
+ /* cb00753f45a35e8b b5a03d699ac65007 */
+ /* 272c32ab0eded163 1a8b605a43ff5bed */
+ /* 8086072ba1e7cc23 58baeca134c825a7 */
+ CONVERT_UINT64_TO_8xUINT8(0xcb00753f45a35e8b),
+ CONVERT_UINT64_TO_8xUINT8(0xb5a03d699ac65007),
+ CONVERT_UINT64_TO_8xUINT8(0x272c32ab0eded163),
+ CONVERT_UINT64_TO_8xUINT8(0x1a8b605a43ff5bed),
+ CONVERT_UINT64_TO_8xUINT8(0x8086072ba1e7cc23),
+ CONVERT_UINT64_TO_8xUINT8(0x58baeca134c825a7)
+};
+#define digest_len4 sizeof(digest4)
+
+static const char test_case5[] = "SHA-512 MSG1";
+#define data5 ((const uint8_t *)message1)
+#define data_len5 message1_len
+static const uint8_t digest5[] = {
+ /* ddaf35a193617aba cc417349ae204131 */
+ /* 12e6fa4e89a97ea2 0a9eeee64b55d39a */
+ /* 2192992a274fc1a8 36ba3c23a3feebbd */
+ /* 454d4423643ce80e 2a9ac94fa54ca49f */
+ CONVERT_UINT64_TO_8xUINT8(0xddaf35a193617aba),
+ CONVERT_UINT64_TO_8xUINT8(0xcc417349ae204131),
+ CONVERT_UINT64_TO_8xUINT8(0x12e6fa4e89a97ea2),
+ CONVERT_UINT64_TO_8xUINT8(0x0a9eeee64b55d39a),
+ CONVERT_UINT64_TO_8xUINT8(0x2192992a274fc1a8),
+ CONVERT_UINT64_TO_8xUINT8(0x36ba3c23a3feebbd),
+ CONVERT_UINT64_TO_8xUINT8(0x454d4423643ce80e),
+ CONVERT_UINT64_TO_8xUINT8(0x2a9ac94fa54ca49f)
+};
+#define digest_len5 sizeof(digest5)
+
+static const char test_case10[] = "SHA-1 MSG2";
+#define data10 ((const uint8_t *)message2)
+#define data_len10 message2_len
+static const uint8_t digest10[] = {
+ CONVERT_UINT32_TO_4xUINT8(0xda39a3ee),
+ CONVERT_UINT32_TO_4xUINT8(0x5e6b4b0d),
+ CONVERT_UINT32_TO_4xUINT8(0x3255bfef),
+ CONVERT_UINT32_TO_4xUINT8(0x95601890),
+ CONVERT_UINT32_TO_4xUINT8(0xafd80709)
+};
+#define digest_len10 sizeof(digest10)
+
+static const char test_case11[] = "SHA-224 MSG2";
+#define data11 ((const uint8_t *)message2)
+#define data_len11 message2_len
+static const uint8_t digest11[] = {
+ CONVERT_UINT32_TO_4xUINT8(0xd14a028c),
+ CONVERT_UINT32_TO_4xUINT8(0x2a3a2bc9),
+ CONVERT_UINT32_TO_4xUINT8(0x476102bb),
+ CONVERT_UINT32_TO_4xUINT8(0x288234c4),
+ CONVERT_UINT32_TO_4xUINT8(0x15a2b01f),
+ CONVERT_UINT32_TO_4xUINT8(0x828ea62a),
+ CONVERT_UINT32_TO_4xUINT8(0xc5b3e42f)
+};
+#define digest_len11 sizeof(digest11)
+
+static const char test_case12[] = "SHA-256 MSG2";
+#define data12 ((const uint8_t *)message2)
+#define data_len12 message2_len
+static const uint8_t digest12[] = {
+ CONVERT_UINT32_TO_4xUINT8(0xe3b0c442),
+ CONVERT_UINT32_TO_4xUINT8(0x98fc1c14),
+ CONVERT_UINT32_TO_4xUINT8(0x9afbf4c8),
+ CONVERT_UINT32_TO_4xUINT8(0x996fb924),
+ CONVERT_UINT32_TO_4xUINT8(0x27ae41e4),
+ CONVERT_UINT32_TO_4xUINT8(0x649b934c),
+ CONVERT_UINT32_TO_4xUINT8(0xa495991b),
+ CONVERT_UINT32_TO_4xUINT8(0x7852b855)
+};
+#define digest_len12 sizeof(digest12)
+
+static const char test_case13[] = "SHA-384 MSG2";
+#define data13 ((const uint8_t *)message2)
+#define data_len13 message2_len
+static const uint8_t digest13[] = {
+ CONVERT_UINT64_TO_8xUINT8(0x38b060a751ac9638),
+ CONVERT_UINT64_TO_8xUINT8(0x4cd9327eb1b1e36a),
+ CONVERT_UINT64_TO_8xUINT8(0x21fdb71114be0743),
+ CONVERT_UINT64_TO_8xUINT8(0x4c0cc7bf63f6e1da),
+ CONVERT_UINT64_TO_8xUINT8(0x274edebfe76f65fb),
+ CONVERT_UINT64_TO_8xUINT8(0xd51ad2f14898b95b)
+};
+#define digest_len13 sizeof(digest13)
+
+static const char test_case14[] = "SHA-512 MSG2";
+#define data14 ((const uint8_t *)message2)
+#define data_len14 message2_len
+static const uint8_t digest14[] = {
+ CONVERT_UINT64_TO_8xUINT8(0xcf83e1357eefb8bd),
+ CONVERT_UINT64_TO_8xUINT8(0xf1542850d66d8007),
+ CONVERT_UINT64_TO_8xUINT8(0xd620e4050b5715dc),
+ CONVERT_UINT64_TO_8xUINT8(0x83f4a921d36ce9ce),
+ CONVERT_UINT64_TO_8xUINT8(0x47d0d13c5d85f2b0),
+ CONVERT_UINT64_TO_8xUINT8(0xff8318d2877eec2f),
+ CONVERT_UINT64_TO_8xUINT8(0x63b931bd47417a81),
+ CONVERT_UINT64_TO_8xUINT8(0xa538327af927da3e)
+};
+#define digest_len14 sizeof(digest14)
+
+static const char test_case20[] = "SHA-1 MSG3";
+#define data20 ((const uint8_t *)message3)
+#define data_len20 message3_len
+static const uint8_t digest20[] = {
+ CONVERT_UINT32_TO_4xUINT8(0x84983e44),
+ CONVERT_UINT32_TO_4xUINT8(0x1c3bd26e),
+ CONVERT_UINT32_TO_4xUINT8(0xbaae4aa1),
+ CONVERT_UINT32_TO_4xUINT8(0xf95129e5),
+ CONVERT_UINT32_TO_4xUINT8(0xe54670f1)
+};
+#define digest_len20 sizeof(digest20)
+
+static const char test_case21[] = "SHA-224 MSG3";
+#define data21 ((const uint8_t *)message3)
+#define data_len21 message3_len
+static const uint8_t digest21[] = {
+ CONVERT_UINT32_TO_4xUINT8(0x75388b16),
+ CONVERT_UINT32_TO_4xUINT8(0x512776cc),
+ CONVERT_UINT32_TO_4xUINT8(0x5dba5da1),
+ CONVERT_UINT32_TO_4xUINT8(0xfd890150),
+ CONVERT_UINT32_TO_4xUINT8(0xb0c6455c),
+ CONVERT_UINT32_TO_4xUINT8(0xb4f58b19),
+ CONVERT_UINT32_TO_4xUINT8(0x52522525)
+};
+#define digest_len21 sizeof(digest21)
+
+static const char test_case22[] = "SHA-256 MSG3";
+#define data22 ((const uint8_t *)message3)
+#define data_len22 message3_len
+static const uint8_t digest22[] = {
+ CONVERT_UINT32_TO_4xUINT8(0x248d6a61),
+ CONVERT_UINT32_TO_4xUINT8(0xd20638b8),
+ CONVERT_UINT32_TO_4xUINT8(0xe5c02693),
+ CONVERT_UINT32_TO_4xUINT8(0x0c3e6039),
+ CONVERT_UINT32_TO_4xUINT8(0xa33ce459),
+ CONVERT_UINT32_TO_4xUINT8(0x64ff2167),
+ CONVERT_UINT32_TO_4xUINT8(0xf6ecedd4),
+ CONVERT_UINT32_TO_4xUINT8(0x19db06c1)
+};
+#define digest_len22 sizeof(digest22)
+
+static const char test_case23[] = "SHA-384 MSG3";
+#define data23 ((const uint8_t *)message3)
+#define data_len23 message3_len
+static const uint8_t digest23[] = {
+ CONVERT_UINT64_TO_8xUINT8(0x3391fdddfc8dc739),
+ CONVERT_UINT64_TO_8xUINT8(0x3707a65b1b470939),
+ CONVERT_UINT64_TO_8xUINT8(0x7cf8b1d162af05ab),
+ CONVERT_UINT64_TO_8xUINT8(0xfe8f450de5f36bc6),
+ CONVERT_UINT64_TO_8xUINT8(0xb0455a8520bc4e6f),
+ CONVERT_UINT64_TO_8xUINT8(0x5fe95b1fe3c8452b)
+};
+#define digest_len23 sizeof(digest23)
+
+static const char test_case24[] = "SHA-512 MSG3";
+#define data24 ((const uint8_t *)message3)
+#define data_len24 message3_len
+static const uint8_t digest24[] = {
+ CONVERT_UINT64_TO_8xUINT8(0x204a8fc6dda82f0a),
+ CONVERT_UINT64_TO_8xUINT8(0x0ced7beb8e08a416),
+ CONVERT_UINT64_TO_8xUINT8(0x57c16ef468b228a8),
+ CONVERT_UINT64_TO_8xUINT8(0x279be331a703c335),
+ CONVERT_UINT64_TO_8xUINT8(0x96fd15c13b1b07f9),
+ CONVERT_UINT64_TO_8xUINT8(0xaa1d3bea57789ca0),
+ CONVERT_UINT64_TO_8xUINT8(0x31ad85c7a71dd703),
+ CONVERT_UINT64_TO_8xUINT8(0x54ec631238ca3445)
+};
+#define digest_len24 sizeof(digest24)
+
+static const char test_case30[] = "SHA-1 MSG4";
+#define data30 ((const uint8_t *)message4)
+#define data_len30 message4_len
+static const uint8_t digest30[] = {
+ CONVERT_UINT32_TO_4xUINT8(0xa49b2446),
+ CONVERT_UINT32_TO_4xUINT8(0xa02c645b),
+ CONVERT_UINT32_TO_4xUINT8(0xf419f995),
+ CONVERT_UINT32_TO_4xUINT8(0xb6709125),
+ CONVERT_UINT32_TO_4xUINT8(0x3a04a259)
+};
+#define digest_len30 sizeof(digest30)
+
+static const char test_case31[] = "SHA-224 MSG4";
+#define data31 ((const uint8_t *)message4)
+#define data_len31 message4_len
+static const uint8_t digest31[] = {
+ CONVERT_UINT32_TO_4xUINT8(0xc97ca9a5),
+ CONVERT_UINT32_TO_4xUINT8(0x59850ce9),
+ CONVERT_UINT32_TO_4xUINT8(0x7a04a96d),
+ CONVERT_UINT32_TO_4xUINT8(0xef6d99a9),
+ CONVERT_UINT32_TO_4xUINT8(0xe0e0e2ab),
+ CONVERT_UINT32_TO_4xUINT8(0x14e6b8df),
+ CONVERT_UINT32_TO_4xUINT8(0x265fc0b3)
+};
+#define digest_len31 sizeof(digest31)
+
+static const char test_case32[] = "SHA-256 MSG4";
+#define data32 ((const uint8_t *)message4)
+#define data_len32 message4_len
+static const uint8_t digest32[] = {
+ CONVERT_UINT32_TO_4xUINT8(0xcf5b16a7),
+ CONVERT_UINT32_TO_4xUINT8(0x78af8380),
+ CONVERT_UINT32_TO_4xUINT8(0x036ce59e),
+ CONVERT_UINT32_TO_4xUINT8(0x7b049237),
+ CONVERT_UINT32_TO_4xUINT8(0x0b249b11),
+ CONVERT_UINT32_TO_4xUINT8(0xe8f07a51),
+ CONVERT_UINT32_TO_4xUINT8(0xafac4503),
+ CONVERT_UINT32_TO_4xUINT8(0x7afee9d1)
+};
+#define digest_len32 sizeof(digest32)
+
+static const char test_case33[] = "SHA-384 MSG4";
+#define data33 ((const uint8_t *)message4)
+#define data_len33 message4_len
+static const uint8_t digest33[] = {
+ CONVERT_UINT64_TO_8xUINT8(0x09330c33f71147e8),
+ CONVERT_UINT64_TO_8xUINT8(0x3d192fc782cd1b47),
+ CONVERT_UINT64_TO_8xUINT8(0x53111b173b3b05d2),
+ CONVERT_UINT64_TO_8xUINT8(0x2fa08086e3b0f712),
+ CONVERT_UINT64_TO_8xUINT8(0xfcc7c71a557e2db9),
+ CONVERT_UINT64_TO_8xUINT8(0x66c3e9fa91746039)
+};
+#define digest_len33 sizeof(digest33)
+
+static const char test_case34[] = "SHA-512 MSG4";
+#define data34 ((const uint8_t *)message4)
+#define data_len34 message4_len
+static const uint8_t digest34[] = {
+ CONVERT_UINT64_TO_8xUINT8(0x8e959b75dae313da),
+ CONVERT_UINT64_TO_8xUINT8(0x8cf4f72814fc143f),
+ CONVERT_UINT64_TO_8xUINT8(0x8f7779c6eb9f7fa1),
+ CONVERT_UINT64_TO_8xUINT8(0x7299aeadb6889018),
+ CONVERT_UINT64_TO_8xUINT8(0x501d289e4900f7e4),
+ CONVERT_UINT64_TO_8xUINT8(0x331b99dec4b5433a),
+ CONVERT_UINT64_TO_8xUINT8(0xc7d329eeb6dd2654),
+ CONVERT_UINT64_TO_8xUINT8(0x5e96e55b874be909)
+};
+#define digest_len34 sizeof(digest34)
+
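+/* num - test case number (matches the data/digest defines above),
+ * size - SHA variant: 1, 224, 256, 384 or 512
+ */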
+#define SHA_TEST_VEC(num, size) \
+ { test_case##num, size, \
+ (const uint8_t *) data##num, data_len##num, \
+ (const uint8_t *) digest##num, digest_len##num }
+
+static const struct sha_vector {
+ const char *test_case;
+ int sha_type; /* 1, 224, 256, 384 or 512 */
+ const uint8_t *data;
+ size_t data_len;
+ const uint8_t *digest;
+ size_t digest_len;
+} sha_vectors[] = {
+ SHA_TEST_VEC(1, 1),
+ SHA_TEST_VEC(2, 224),
+ SHA_TEST_VEC(3, 256),
+ SHA_TEST_VEC(4, 384),
+ SHA_TEST_VEC(5, 512),
+ SHA_TEST_VEC(10, 1),
+ SHA_TEST_VEC(11, 224),
+ SHA_TEST_VEC(12, 256),
+ SHA_TEST_VEC(13, 384),
+ SHA_TEST_VEC(14, 512),
+ SHA_TEST_VEC(20, 1),
+ SHA_TEST_VEC(21, 224),
+ SHA_TEST_VEC(22, 256),
+ SHA_TEST_VEC(23, 384),
+ SHA_TEST_VEC(24, 512),
+ SHA_TEST_VEC(30, 1),
+ SHA_TEST_VEC(31, 224),
+ SHA_TEST_VEC(32, 256),
+ SHA_TEST_VEC(33, 384),
+ SHA_TEST_VEC(34, 512)
+};
+
+static int
+sha_job_ok(const struct sha_vector *vec,
+ const struct JOB_AES_HMAC *job,
+ const uint8_t *auth,
+ const uint8_t *padding,
+ const size_t sizeof_padding)
+{
+ if (job->status != STS_COMPLETED) {
+ printf("line:%d job error status:%d ", __LINE__, job->status);
+ return 0;
+ }
+
+ /* hash checks */
+ if (memcmp(padding, &auth[sizeof_padding + vec->digest_len],
+ sizeof_padding)) {
+ printf("hash overwrite tail\n");
+ hexdump(stderr, "Target",
+ &auth[sizeof_padding + vec->digest_len],
+ sizeof_padding);
+ return 0;
+ }
+
+ if (memcmp(padding, &auth[0], sizeof_padding)) {
+ printf("hash overwrite head\n");
+ hexdump(stderr, "Target", &auth[0], sizeof_padding);
+ return 0;
+ }
+
+ if (memcmp(vec->digest, &auth[sizeof_padding],
+ vec->digest_len)) {
+ printf("hash mismatched\n");
+ hexdump(stderr, "Received", &auth[sizeof_padding],
+ vec->digest_len);
+ hexdump(stderr, "Expected", vec->digest,
+ vec->digest_len);
+ return 0;
+ }
+ return 1;
+}
+
+static int
+test_sha(struct MB_MGR *mb_mgr,
+ const struct sha_vector *vec,
+ const int num_jobs)
+{
+ struct JOB_AES_HMAC *job;
+ uint8_t padding[16];
+ uint8_t **auths = malloc(num_jobs * sizeof(void *));
+ int i = 0, jobs_rx = 0, ret = -1;
+
+ if (auths == NULL) {
+ fprintf(stderr, "Can't allocate buffer memory\n");
+ goto end2;
+ }
+
+ memset(padding, -1, sizeof(padding));
+ memset(auths, 0, num_jobs * sizeof(void *));
+
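+ /* Each auth buffer is laid out as [padding][digest][padding] so that
+ * sha_job_ok() can detect writes outside the digest area.
+ */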
+ for (i = 0; i < num_jobs; i++) {
+ const size_t alloc_len =
+ vec->digest_len + (sizeof(padding) * 2);
+
+ auths[i] = malloc(alloc_len);
+ if (auths[i] == NULL) {
+ fprintf(stderr, "Can't allocate buffer memory\n");
+ goto end;
+ }
+ memset(auths[i], -1, alloc_len);
+ }
+
+ /* empty the manager */
+ while ((job = IMB_FLUSH_JOB(mb_mgr)) != NULL)
+ ;
+
+ for (i = 0; i < num_jobs; i++) {
+ job = IMB_GET_NEXT_JOB(mb_mgr);
+
+ memset(job, 0, sizeof(*job));
+ job->cipher_direction = ENCRYPT;
+ job->chain_order = HASH_CIPHER;
+ job->auth_tag_output = auths[i] + sizeof(padding);
+ job->auth_tag_output_len_in_bytes = vec->digest_len;
+ job->src = vec->data;
+ job->msg_len_to_hash_in_bytes = vec->data_len;
+ job->cipher_mode = NULL_CIPHER;
+ switch (vec->sha_type) {
+ case 1:
+ job->hash_alg = PLAIN_SHA1;
+ break;
+ case 224:
+ job->hash_alg = PLAIN_SHA_224;
+ break;
+ case 256:
+ job->hash_alg = PLAIN_SHA_256;
+ break;
+ case 384:
+ job->hash_alg = PLAIN_SHA_384;
+ break;
+ case 512:
+ default:
+ job->hash_alg = PLAIN_SHA_512;
+ break;
+ }
+
+ job->user_data = auths[i];
+
+ job = IMB_SUBMIT_JOB(mb_mgr);
+ if (job) {
+ jobs_rx++;
+ if (!sha_job_ok(vec, job, job->user_data,
+ padding, sizeof(padding)))
+ goto end;
+ }
+ }
+
+ while ((job = IMB_FLUSH_JOB(mb_mgr)) != NULL) {
+ jobs_rx++;
+ if (!sha_job_ok(vec, job, job->user_data,
+ padding, sizeof(padding)))
+ goto end;
+ }
+
+ if (jobs_rx != num_jobs) {
+ printf("Expected %d jobs, received %d\n", num_jobs, jobs_rx);
+ goto end;
+ }
+ ret = 0;
+
+ end:
+ /* empty the manager before next tests */
+ while ((job = IMB_FLUSH_JOB(mb_mgr)) != NULL)
+ ;
+
+ for (i = 0; i < num_jobs; i++) {
+ if (auths[i] != NULL)
+ free(auths[i]);
+ }
+
+ end2:
+ if (auths != NULL)
+ free(auths);
+
+ return ret;
+}
+
+static int
+test_sha_vectors(struct MB_MGR *mb_mgr, const int num_jobs)
+{
+ const int vectors_cnt =
+ sizeof(sha_vectors) / sizeof(sha_vectors[0]);
+ int vect;
+ int errors = 0;
+
+ printf("SHA standard test vectors (N jobs = %d):\n", num_jobs);
+ for (vect = 1; vect <= vectors_cnt; vect++) {
+ const int idx = vect - 1;
+#ifdef DEBUG
+ printf("[%d/%d] SHA%d Test Case %s data_len:%d "
+ "digest_len:%d\n",
+ vect, vectors_cnt,
+ sha_vectors[idx].sha_type,
+ sha_vectors[idx].test_case,
+ (int) sha_vectors[idx].data_len,
+ (int) sha_vectors[idx].digest_len);
+#else
+ printf(".");
+#endif
+
+ if (test_sha(mb_mgr, &sha_vectors[idx], num_jobs)) {
+ printf("error #%d\n", vect);
+ errors++;
+ }
+ }
+ printf("\n");
+ return errors;
+}
+
+int
+sha_test(const enum arch_type arch,
+ struct MB_MGR *mb_mgr)
+{
+ int errors = 0;
+
+ (void) arch; /* unused */
+
+ errors += test_sha_vectors(mb_mgr, 1);
+
+ if (0 == errors)
+ printf("...Pass\n");
+ else
+ printf("...Fail\n");
+
+ return errors;
+}
diff --git a/src/spdk/intel-ipsec-mb/LibTestApp/snow3g_test.c b/src/spdk/intel-ipsec-mb/LibTestApp/snow3g_test.c
new file mode 100644
index 000000000..d2d113849
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/LibTestApp/snow3g_test.c
@@ -0,0 +1,1979 @@
+/*****************************************************************************
+ Copyright (c) 2009-2019, Intel Corporation
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*****************************************************************************/
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "intel-ipsec-mb.h"
+
+#include "gcm_ctr_vectors_test.h"
+#include "utils.h"
+
+#include "snow3g_test_vectors.h"
+
+#define SNOW3GIVLEN 8
+#define PAD_LEN 16
+cipher_test_vector_t *vecList[MAX_DATA_LEN];
+
+int snow3g_test(const enum arch_type arch, struct MB_MGR *mb_mgr);
+int validate_snow3g_f8_1_block(struct MB_MGR *mb_mgr);
+int validate_snow3g_f8_2_block(struct MB_MGR *mb_mgr);
+int validate_snow3g_f8_4_blocks(struct MB_MGR *mb_mgr);
+int validate_snow3g_f8_n_blocks(struct MB_MGR *mb_mgr);
+int validate_snow3g_f9(struct MB_MGR *mb_mgr);
+int membitcmp(const uint8_t *input, const uint8_t *output,
+ const uint32_t bitlength, const uint32_t offset);
+
+/******************************************************************************
+ * @description - utility function to dump test buffers
+ *
+ * @param message [IN] - debug message to print
+ * @param ptr [IN] - pointer to beginning of buffer.
+ * @param len [IN] - length of buffer.
+ ******************************************************************************/
+static inline void snow3g_hexdump(const char *message, uint8_t *ptr, int len)
+{
+ int ctr;
+
+ printf("%s:\n", message);
+ for (ctr = 0; ctr < len; ctr++) {
+ printf("0x%02X ", ptr[ctr] & 0xff);
+ if (!((ctr + 1) % 16))
+ printf("\n");
+ }
+ printf("\n");
+ printf("\n");
+}
+
+int validate_snow3g_f8_1_block(struct MB_MGR *mb_mgr)
+{
+ int numVectors, i, length;
+ size_t size = 0;
+ cipher_test_vector_t *testVectors = snow3g_cipher_test_vectors[1];
+ /* snow3g f8 test vectors are located at index 1 */
+ numVectors = numSnow3gCipherTestVectors[1];
+
+ snow3g_key_schedule_t *pKeySched = NULL;
+ uint8_t *pKey = NULL;
+ int keyLen = MAX_KEY_LEN;
+ uint8_t srcBuff[MAX_DATA_LEN];
+ uint8_t dstBuff[MAX_DATA_LEN];
+ uint8_t *pIV;
+ int ret = 1;
+
+ printf("Testing IMB_SNOW3G_F8_1_BUFFER:\n");
+
+ memset(srcBuff, 0, sizeof(srcBuff));
+ memset(dstBuff, 0, sizeof(dstBuff));
+
+ if (!numVectors) {
+ printf("No Snow3G test vectors found !\n");
+ return ret;
+ }
+
+ pIV = malloc(SNOW3G_IV_LEN_IN_BYTES);
+ if (!pIV) {
+ printf("malloc(pIV):failed !\n");
+ return ret;
+ }
+
+ pKey = malloc(keyLen);
+ if (!pKey) {
+ printf("malloc(pKey):failed !\n");
+ free(pIV);
+ return ret;
+ }
+ size = IMB_SNOW3G_KEY_SCHED_SIZE(mb_mgr);
+ if (!size) {
+ free(pIV);
+ free(pKey);
+ return ret;
+ }
+
+ pKeySched = malloc(size);
+ if (!pKeySched) {
+ printf("malloc(IMB_SNOW3G_KEY_SCHED_SIZE(mb_mgr)): failed ! "
+ "\n");
+ free(pIV);
+ free(pKey);
+ return ret;
+ }
+
+ /* Copy the data for the Snow3g 1-packet version */
+ for (i = 0; i < numVectors; i++) {
+
+ length = testVectors[i].dataLenInBytes;
+
+ memcpy(pKey, testVectors[i].key, testVectors[i].keyLenInBytes);
+ memcpy(srcBuff, testVectors[i].plaintext, length);
+
+ memcpy(dstBuff, testVectors[i].ciphertext, length);
+ memcpy(pIV, testVectors[i].iv, testVectors[i].ivLenInBytes);
+
+ /*setup the keysched to be used*/
+ if (IMB_SNOW3G_INIT_KEY_SCHED(mb_mgr, pKey, pKeySched) == -1) {
+ printf("CPU check failed\n");
+ goto snow3g_f8_1_buffer_exit;
+ }
+
+ /*Validate encrypt*/
+ IMB_SNOW3G_F8_1_BUFFER(mb_mgr, pKeySched, pIV, srcBuff, srcBuff,
+ length);
+
+ /* check the ciphertext in the vector against the
+ * encrypted plaintext */
+ if (memcmp(srcBuff, dstBuff, length) != 0) {
+ printf("IMB_SNOW3G_F8_1_BUFFER(Enc) vector:%d\n", i);
+ snow3g_hexdump("Actual:", srcBuff, length);
+ snow3g_hexdump("Expected:", dstBuff, length);
+ goto snow3g_f8_1_buffer_exit;
+ }
+ printf(".");
+
+ memcpy(dstBuff, testVectors[i].plaintext, length);
+
+ /*Validate Decrypt*/
+ IMB_SNOW3G_F8_1_BUFFER(mb_mgr, pKeySched, pIV, srcBuff, srcBuff,
+ length);
+
+ if (memcmp(srcBuff, dstBuff, length) != 0) {
+ printf("IMB_SNOW3G_F8_1_BUFFER(Dec) vector:%d\n", i);
+ snow3g_hexdump("Actual:", srcBuff, length);
+ snow3g_hexdump("Expected:", dstBuff, length);
+ goto snow3g_f8_1_buffer_exit;
+ }
+ printf(".");
+ } /* for numVectors */
+
+ /* no errors detected */
+ ret = 0;
+
+snow3g_f8_1_buffer_exit:
+ free(pIV);
+ free(pKey);
+ free(pKeySched);
+
+ printf("\n");
+
+ return ret;
+}
+
+/* Shift right a buffer by "offset" bits, "offset" < 8 */
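+/* e.g. with offset 4, the bytes {0xAB, 0xCD} become {0x0A, 0xBC} */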
+static void buffer_shift_right(uint8_t *buffer,
+ const uint32_t length,
+ const uint8_t offset)
+{
+ uint8_t prev_byte;
+ const uint32_t length_in_bytes = (length * 8 + offset + 7) / 8;
+ const uint8_t lower_byte_mask = (1 << offset) - 1;
+ uint32_t i;
+
+ prev_byte = buffer[0];
+ buffer[0] >>= offset;
+
+ for (i = 1; i < length_in_bytes; i++) {
+ const uint8_t curr_byte = buffer[i];
+
+ buffer[i] = ((prev_byte & lower_byte_mask) << (8 - offset)) |
+ (curr_byte >> offset);
+ prev_byte = curr_byte;
+ }
+}
+
+static void copy_test_bufs(uint8_t *plainBuff, uint8_t *wrkBuff,
+ uint8_t *ciphBuff, const uint8_t *src_test,
+ const uint8_t *dst_test, const uint32_t byte_len)
+{
+ /*
+ * Reset all buffers
+ * - plain and cipher buffers to 0
+ * - working buffer to -1 (for padding check)
+ * and copy test vectors
+ */
+ memset(wrkBuff, -1, (byte_len + PAD_LEN * 2));
+ memset(plainBuff, 0, (byte_len + PAD_LEN * 2));
+ memset(ciphBuff, 0, (byte_len + PAD_LEN * 2));
+ memcpy(plainBuff + PAD_LEN, src_test, byte_len);
+ memcpy(ciphBuff + PAD_LEN, dst_test, byte_len);
+}
+
+static int validate_snow3g_f8_1_bitblock(struct MB_MGR *mb_mgr)
+{
+ int numVectors, i, length;
+ size_t size = 0;
+ cipherbit_test_linear_vector_t *testVectors =
+ &snow3g_f8_linear_bitvectors /*snow3g_cipher_test_vectors[1]*/;
+ cipher_test_vector_t *testStandardVectors =
+ snow3g_f8_vectors; /* scipher_test_vectors[1]; */
+ /* snow3g f8 test vectors are located at index 1 */
+ numVectors = MAX_BIT_BUFFERS; /* numSnow3gCipherTestVectors[3]; */
+
+ snow3g_key_schedule_t *pKeySched = NULL;
+ uint8_t *pKey = NULL;
+ int keyLen = MAX_KEY_LEN;
+ uint8_t srcBuff[MAX_DATA_LEN];
+ uint8_t midBuff[MAX_DATA_LEN];
+ uint8_t dstBuff[MAX_DATA_LEN];
+ /* Adding extra byte for offset tests (shifting up to 7 bits) */
+ uint8_t padding[PAD_LEN + 1];
+ uint8_t *pIV;
+ int ret = 1;
+
+ printf("Testing IMB_SNOW3G_F8_1_BUFFER_BIT:\n");
+
+ memset(padding, -1, sizeof(padding));
+
+ if (!numVectors) {
+ printf("No Snow3G test vectors found !\n");
+ return ret;
+ }
+
+ pIV = malloc(SNOW3G_IV_LEN_IN_BYTES);
+ if (!pIV) {
+ printf("malloc(pIV):failed !\n");
+ return ret;
+ }
+
+ pKey = malloc(keyLen);
+ if (!pKey) {
+ printf("malloc(pKey):failed !\n");
+ free(pIV);
+ return ret;
+ }
+ size = IMB_SNOW3G_KEY_SCHED_SIZE(mb_mgr);
+ if (!size) {
+ free(pIV);
+ free(pKey);
+ return ret;
+ }
+
+ pKeySched = malloc(size);
+ if (!pKeySched) {
+ printf("malloc(IMB_SNOW3G_KEY_SCHED_SIZE(mb_mgr)): failed ! "
+ "\n");
+ free(pIV);
+ free(pKey);
+ return ret;
+ }
+
+ /* Copy the data for the Snow3g 1-packet version */
+ for (i = 0; i < numVectors; i++) {
+ uint8_t *midBufBefPad = midBuff;
+ uint8_t *midBufAftPad = midBuff + PAD_LEN;
+ uint8_t *srcBufBefPad = srcBuff;
+ uint8_t *srcBufAftPad = srcBuff + PAD_LEN;
+ uint8_t *dstBufBefPad = dstBuff;
+ uint8_t *dstBufAftPad = dstBuff + PAD_LEN;
+
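+ /* head_offset cycles through 0..7 so each vector also exercises a
+ * non-byte-aligned start of the bit buffer.
+ */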
+ const uint32_t byte_len =
+ (testVectors->dataLenInBits[i] + 7) / 8;
+ const uint32_t bit_len = testVectors->dataLenInBits[i];
+ const uint32_t head_offset = i % 8;
+ const uint32_t tail_offset = (head_offset + bit_len) % 8;
+ const uint32_t final_byte_offset = (bit_len + head_offset) / 8;
+ const uint32_t byte_len_with_offset =
+ (bit_len + head_offset + 7) / 8;
+
+ memcpy(pKey, testVectors->key[i], testVectors->keyLenInBytes);
+ memcpy(pIV, testVectors->iv[i], testVectors->ivLenInBytes);
+ copy_test_bufs(srcBufBefPad, midBufBefPad, dstBufBefPad,
+ testVectors->plaintext[i],
+ testVectors->ciphertext[i],
+ byte_len);
+
+ /* shift buffers by offset for this round */
+ buffer_shift_right(srcBufBefPad, (byte_len + PAD_LEN * 2) * 8,
+ head_offset);
+ buffer_shift_right(dstBufBefPad, (byte_len + PAD_LEN * 2) * 8,
+ head_offset);
+
+ /*setup the keysched to be used*/
+ if (IMB_SNOW3G_INIT_KEY_SCHED(mb_mgr, pKey, pKeySched) == -1) {
+ printf("CPU check failed\n");
+ goto snow3g_f8_1_buffer_bit_exit;
+ }
+
+ /*Validate Encrypt*/
+ IMB_SNOW3G_F8_1_BUFFER_BIT(mb_mgr, pKeySched, pIV, srcBufAftPad,
+ midBufAftPad, bit_len, head_offset);
+
+ /* check the ciphertext in the vector against the
+ * encrypted plaintext */
+ if (membitcmp(midBufAftPad, dstBufAftPad,
+ bit_len, head_offset) != 0) {
+ printf("Test1: snow3g_f8_1_bitbuffer(Enc) buffer:%d "
+ "size:%d offset:%d\n", i, bit_len, head_offset);
+ snow3g_hexdump("Actual:", midBufAftPad,
+ byte_len_with_offset);
+ snow3g_hexdump("Expected:", dstBufAftPad,
+ byte_len_with_offset);
+ goto snow3g_f8_1_buffer_bit_exit;
+ }
+
+ /* Check that data not to be ciphered was not overwritten */
+ if (membitcmp(midBufBefPad, padding,
+ (PAD_LEN * 8) + head_offset, 0)) {
+ printf("overwrite head\n");
+ snow3g_hexdump("Head", midBufBefPad, PAD_LEN + 1);
+ goto snow3g_f8_1_buffer_bit_exit;
+ }
+
+ if (membitcmp(midBufAftPad + final_byte_offset, padding,
+ (PAD_LEN * 8) - tail_offset, tail_offset)) {
+ printf("overwrite tail\n");
+ snow3g_hexdump("Tail", midBufAftPad + final_byte_offset,
+ PAD_LEN + 1);
+ goto snow3g_f8_1_buffer_bit_exit;
+ }
+ printf(".");
+
+ /* reset working buffer */
+ memset(midBufBefPad, -1, (byte_len + PAD_LEN * 2));
+
+ /*Validate Decrypt*/
+ IMB_SNOW3G_F8_1_BUFFER_BIT(mb_mgr, pKeySched, pIV, dstBufAftPad,
+ midBufAftPad, bit_len, head_offset);
+
+ if (membitcmp(midBufAftPad, srcBufAftPad, bit_len,
+ head_offset) != 0) {
+ printf("Test2: snow3g_f8_1_bitbuffer(Dec) buffer:%d "
+ "size:%d offset:%d\n", i, bit_len, head_offset);
+ snow3g_hexdump("Actual:", midBufAftPad,
+ byte_len_with_offset);
+ snow3g_hexdump("Expected:", srcBufAftPad,
+ byte_len_with_offset);
+ goto snow3g_f8_1_buffer_bit_exit;
+ }
+
+ /* Check that data not to be ciphered was not overwritten */
+ if (membitcmp(midBufBefPad, padding,
+ (PAD_LEN * 8) + head_offset, 0)) {
+ printf("overwrite head\n");
+ snow3g_hexdump("Head", midBufBefPad, PAD_LEN + 1);
+ goto snow3g_f8_1_buffer_bit_exit;
+ }
+ if (membitcmp(midBufAftPad + final_byte_offset, padding,
+ (PAD_LEN * 8) - tail_offset, tail_offset)) {
+ printf("overwrite tail\n");
+ snow3g_hexdump("Tail", midBufAftPad + final_byte_offset,
+ PAD_LEN + 1);
+ goto snow3g_f8_1_buffer_bit_exit;
+ }
+ printf(".");
+
+ /* Another test with Standard 3GPP table */
+ length = testStandardVectors[i].dataLenInBytes;
+ memcpy(srcBuff, testStandardVectors[i].plaintext, length);
+
+ memcpy(dstBuff, testStandardVectors[i].ciphertext, length);
+
+ /*Validate Encrypt*/
+ IMB_SNOW3G_F8_1_BUFFER_BIT(
+ mb_mgr, pKeySched, pIV, srcBuff, midBuff,
+ testStandardVectors[i].dataLenInBytes * 8, 0);
+
+ /* check the ciphertext in the vector against the
+ * encrypted plaintext */
+ if (membitcmp(midBuff, dstBuff,
+ testStandardVectors[i].dataLenInBytes * 8,
+ 0) != 0) {
+ printf("Test3: snow3g_f8_1_bitbuffer(Enc) buffer:%d "
+ "size:%d offset:0\n",
+ i, testStandardVectors[i].dataLenInBytes * 8);
+ snow3g_hexdump("Actual:", &midBuff[0],
+ testStandardVectors[i].dataLenInBytes);
+ snow3g_hexdump("Expected:", &dstBuff[0],
+ testStandardVectors[i].dataLenInBytes);
+ goto snow3g_f8_1_buffer_bit_exit;
+ }
+ printf(".");
+
+ /*Validate Decrypt*/
+ IMB_SNOW3G_F8_1_BUFFER_BIT(
+ mb_mgr, pKeySched, pIV, midBuff, dstBuff,
+ testStandardVectors[i].dataLenInBytes * 8, 0);
+
+ if (membitcmp(dstBuff, srcBuff,
+ testStandardVectors[i].dataLenInBytes * 8,
+ 0) != 0) {
+ printf("Test4: snow3g_f8_1_bitbuffer(Dec) buffer:%d "
+ "size:%d offset:0\n",
+ i, testStandardVectors[i].dataLenInBytes * 8);
+ snow3g_hexdump("Actual:", &dstBuff[0],
+ testStandardVectors[i].dataLenInBytes);
+ snow3g_hexdump("Expected:", &srcBuff[0],
+ testStandardVectors[i].dataLenInBytes);
+ goto snow3g_f8_1_buffer_bit_exit;
+ }
+ printf(".");
+
+ memcpy(srcBuff, testStandardVectors[i].plaintext, length);
+
+ memcpy(dstBuff, testStandardVectors[i].ciphertext, length);
+
+ buffer_shift_right(srcBuff,
+ testStandardVectors[i].dataLenInBytes, 4);
+ buffer_shift_right(dstBuff,
+ testStandardVectors[i].dataLenInBytes, 4);
+
+ /*Validate Encrypt*/
+ IMB_SNOW3G_F8_1_BUFFER_BIT(
+ mb_mgr, pKeySched, pIV, srcBuff, midBuff,
+ testStandardVectors[i].dataLenInBytes * 8, 4);
+
+ /* check the ciphertext in the vector against the
+ * encrypted plaintext */
+ if (membitcmp(midBuff, dstBuff,
+ testStandardVectors[i].dataLenInBytes * 8,
+ 4) != 0) {
+ printf("Test5:snow3g_f8_1_bitbuffer(Enc) buffer:%d "
+ "size:%d offset:4\n",
+ i, testStandardVectors[i].dataLenInBytes * 8);
+ snow3g_hexdump("Actual:", &midBuff[0],
+ (testStandardVectors[i].dataLenInBytes *
+ 8 + 4 + 7) / 8);
+ snow3g_hexdump("Expected:", &dstBuff[0],
+ (testStandardVectors[i].dataLenInBytes *
+ 8 + 4 + 7) / 8);
+ goto snow3g_f8_1_buffer_bit_exit;
+ }
+ printf(".");
+
+ /*Validate Decrypt*/
+ IMB_SNOW3G_F8_1_BUFFER_BIT(
+ mb_mgr, pKeySched, pIV, /*midBuff*/ dstBuff,
+ /*dstBuff*/ midBuff,
+ testStandardVectors[i].dataLenInBytes * 8, 4);
+
+ if (membitcmp(midBuff /*dstBuff*/, srcBuff,
+ testStandardVectors[i].dataLenInBytes * 8,
+ 4) != 0) {
+ printf("Test6: snow3g_f8_1_bitbuffer(Dec) buffer:%d "
+ "size:%d offset:4\n",
+ i, testStandardVectors[i].dataLenInBytes * 8);
+ snow3g_hexdump("Actual:", &dstBuff[0],
+ (testStandardVectors[i].dataLenInBytes *
+ 8 + 4 + 7) / 8);
+ snow3g_hexdump("Expected:", &srcBuff[0],
+ (testStandardVectors[i].dataLenInBytes *
+ 8 + 4 + 7) / 8);
+ goto snow3g_f8_1_buffer_bit_exit;
+ }
+ printf(".");
+ } /* for numVectors */
+
+ /* no errors detected */
+ ret = 0;
+
+snow3g_f8_1_buffer_bit_exit:
+ free(pIV);
+ free(pKey);
+ free(pKeySched);
+
+ printf("\n");
+
+ return ret;
+}
+
+static int validate_snow3g_f8_2_blocks(struct MB_MGR *mb_mgr)
+{
+ int length, numVectors, i = 0, j = 0, numPackets = 2;
+ size_t size = 0;
+ cipher_test_vector_t *testVectors = snow3g_cipher_test_vectors[1];
+ /* snow3g f8 test vectors are located at index 1 */
+ numVectors = numSnow3gCipherTestVectors[1];
+
+ snow3g_key_schedule_t *pKeySched = NULL;
+ uint8_t *pKey = NULL;
+ int keyLen = MAX_KEY_LEN;
+ uint8_t *srcBuff[MAX_DATA_LEN];
+ uint8_t *dstBuff[MAX_DATA_LEN];
+ uint8_t *IV[SNOW3G_IV_LEN_IN_BYTES];
+ uint32_t packetLen[MAX_DATA_LEN];
+ int ret = 1;
+
+ printf("Testing IMB_SNOW3G_F8_2_BUFFER:\n");
+
+ memset(srcBuff, 0, sizeof(srcBuff));
+ memset(dstBuff, 0, sizeof(dstBuff));
+ memset(IV, 0, sizeof(IV));
+
+ if (!numVectors) {
+ printf("No Snow3G test vectors found !\n");
+ return ret;
+ }
+
+ pKey = malloc(keyLen);
+ if (!pKey) {
+ printf("malloc(key):failed !\n");
+ return ret;
+ }
+
+ size = IMB_SNOW3G_KEY_SCHED_SIZE(mb_mgr);
+ if (!size) {
+ free(pKey);
+ return ret;
+ }
+
+ pKeySched = malloc(size);
+ if (!pKeySched) {
+ printf("malloc(IMB_SNOW3G_KEY_SCHED_SIZE(mb_mgr)): failed ! "
+ "\n");
+ free(pKey);
+ return ret;
+ }
+
+ /* Test with all vectors */
+ for (j = 0; j < numVectors; j++) {
+ int k;
+
+ length = testVectors[j].dataLenInBytes;
+
+ /* Create test data for numPackets packets */
+ for (i = 0; i < numPackets; i++) {
+
+ packetLen[i] = length;
+ srcBuff[i] = malloc(length);
+ if (!srcBuff[i]) {
+ printf("malloc(srcBuff[%d]):failed !\n", i);
+ goto snow3g_f8_2_buffer_exit;
+ }
+ dstBuff[i] = malloc(length);
+ if (!dstBuff[i]) {
+ printf("malloc(dstBuff[%d]):failed !\n", i);
+ goto snow3g_f8_2_buffer_exit;
+ }
+ IV[i] = malloc(SNOW3G_IV_LEN_IN_BYTES);
+ if (!IV[i]) {
+ printf("malloc(IV[%d]):failed !\n", i);
+ goto snow3g_f8_2_buffer_exit;
+ }
+
+ memcpy(pKey, testVectors[j].key,
+ testVectors[j].keyLenInBytes);
+
+ memcpy(srcBuff[i], testVectors[j].plaintext, length);
+
+ memset(dstBuff[i], 0, length);
+
+ memcpy(IV[i], testVectors[j].iv,
+ testVectors[j].ivLenInBytes);
+ }
+
+ /*only 1 key is needed for snow3g 2 blocks*/
+ if (IMB_SNOW3G_INIT_KEY_SCHED(mb_mgr, pKey, pKeySched)) {
+ printf("IMB_SNOW3G_INIT_KEY_SCHED(mb_mgr) error\n");
+ goto snow3g_f8_2_buffer_exit;
+ }
+
+ /* TEST IN-PLACE ENCRYPTION/DECRYPTION */
+ /*Test the encrypt*/
+ IMB_SNOW3G_F8_2_BUFFER(mb_mgr, pKeySched, IV[0], IV[1],
+ srcBuff[0], srcBuff[0], packetLen[0],
+ srcBuff[1], srcBuff[1], packetLen[1]);
+
+ /* compare the ciphertext with the encrypted plaintext */
+ for (i = 0; i < numPackets; i++) {
+ if (memcmp(srcBuff[i], testVectors[j].ciphertext,
+ packetLen[i]) != 0) {
+ printf("IMB_SNOW3G_F8_2_BUFFER(Enc) vector:%d "
+ "buffer:%d\n", j, i);
+ snow3g_hexdump("Actual:", srcBuff[i],
+ packetLen[0]);
+ snow3g_hexdump("Expected:",
+ testVectors[j].ciphertext,
+ packetLen[0]);
+ goto snow3g_f8_2_buffer_exit;
+ }
+ printf(".");
+ }
+
+ /* Set the source buffer with ciphertext */
+ for (i = 0; i < numPackets; i++)
+ memcpy(srcBuff[i], testVectors[j].ciphertext, length);
+
+ /*Test the decrypt*/
+ IMB_SNOW3G_F8_2_BUFFER(mb_mgr, pKeySched, IV[0], IV[1],
+ srcBuff[0], srcBuff[0], packetLen[0],
+ srcBuff[1], srcBuff[1], packetLen[1]);
+
+ /*Compare the plaintext with the decrypted ciphertext*/
+ for (i = 0; i < numPackets; i++) {
+ if (memcmp(srcBuff[i], testVectors[j].plaintext,
+ packetLen[i]) != 0) {
+ printf("IMB_SNOW3G_F8_2_BUFFER(Dec) vector:%d "
+ "buffer:%d\n", j, i);
+ snow3g_hexdump("Actual:", srcBuff[i],
+ packetLen[0]);
+ snow3g_hexdump("Expected:",
+ testVectors[j].plaintext,
+ packetLen[i]);
+ goto snow3g_f8_2_buffer_exit;
+ }
+ printf(".");
+ }
+
+ /* TEST OUT-OF-PLACE ENCRYPTION/DECRYPTION */
+ /*Test the encrypt*/
+ IMB_SNOW3G_F8_2_BUFFER(mb_mgr, pKeySched, IV[0], IV[1],
+ srcBuff[0], dstBuff[0], packetLen[0],
+ srcBuff[1], dstBuff[1], packetLen[1]);
+
+ /* compare the ciphertext with the encrypted plaintext */
+ for (i = 0; i < numPackets; i++) {
+ if (memcmp(dstBuff[i], testVectors[j].ciphertext,
+ packetLen[i]) != 0) {
+ printf("IMB_SNOW3G_F8_2_BUFFER(Enc) vector:%d "
+ "buffer:%d\n",
+ j, i);
+ snow3g_hexdump("Actual:", dstBuff[i],
+ packetLen[0]);
+ snow3g_hexdump("Expected:",
+ testVectors[j].ciphertext,
+ packetLen[0]);
+ goto snow3g_f8_2_buffer_exit;
+ }
+ printf(".");
+ }
+ /* Set the source buffer with ciphertext, and clear destination
+ * buffer */
+ for (i = 0; i < numPackets; i++) {
+ memcpy(srcBuff[i], testVectors[j].ciphertext, length);
+ memset(dstBuff[i], 0, length);
+ }
+
+ /*Test the decrypt*/
+ IMB_SNOW3G_F8_2_BUFFER(mb_mgr, pKeySched, IV[0], IV[1],
+ srcBuff[0], dstBuff[0], packetLen[0],
+ srcBuff[1], dstBuff[1], packetLen[1]);
+
+ /*Compare the plaintext with the decrypted ciphertext*/
+ for (i = 0; i < numPackets; i++) {
+ if (memcmp(dstBuff[i], testVectors[j].plaintext,
+ packetLen[i]) != 0) {
+ printf("IMB_SNOW3G_F8_2_BUFFER(Dec) vector:%d "
+ "buffer:%d\n", j, i);
+ snow3g_hexdump("Actual:", dstBuff[i],
+ packetLen[0]);
+ snow3g_hexdump("Expected:",
+ testVectors[j].plaintext,
+ packetLen[i]);
+ goto snow3g_f8_2_buffer_exit;
+ }
+ printf(".");
+ }
+ /* free buffers before next iteration */
+ for (k = 0; k < numPackets; k++) {
+ if (srcBuff[k] != NULL) {
+ free(srcBuff[k]);
+ srcBuff[k] = NULL;
+ }
+ if (dstBuff[k] != NULL) {
+ free(dstBuff[k]);
+ dstBuff[k] = NULL;
+ }
+ if (IV[k] != NULL) {
+ free(IV[k]);
+ IV[k] = NULL;
+ }
+ }
+ }
+
+ /* no errors detected */
+ ret = 0;
+
+snow3g_f8_2_buffer_exit:
+ if (pKey != NULL)
+ free(pKey);
+ if (pKeySched != NULL)
+ free(pKeySched);
+
+ for (i = 0; i < numPackets; i++) {
+ if (srcBuff[i] != NULL)
+ free(srcBuff[i]);
+ if (dstBuff[i] != NULL)
+ free(dstBuff[i]);
+ if (IV[i] != NULL)
+ free(IV[i]);
+ }
+ printf("\n");
+
+ return ret;
+}
+
+int validate_snow3g_f8_4_blocks(struct MB_MGR *mb_mgr)
+{
+ int length, numVectors, i = 0, j = 0, numPackets = 4;
+ size_t size = 0;
+ cipher_test_vector_t *testVectors = snow3g_cipher_test_vectors[1];
+ /* snow3g f8 test vectors are located at index 1 */
+ numVectors = numSnow3gCipherTestVectors[1];
+
+ snow3g_key_schedule_t *pKeySched = NULL;
+ uint8_t *pKey = NULL;
+ int keyLen = MAX_KEY_LEN;
+ uint8_t *srcBuff[MAX_DATA_LEN];
+ uint8_t *dstBuff[MAX_DATA_LEN];
+ uint8_t *IV[SNOW3G_IV_LEN_IN_BYTES];
+ uint32_t packetLen[MAX_DATA_LEN];
+ int ret = 1;
+
+ printf("Testing IMB_SNOW3G_F8_4_BUFFER:\n");
+
+ memset(srcBuff, 0, sizeof(srcBuff));
+ memset(dstBuff, 0, sizeof(dstBuff));
+ memset(IV, 0, sizeof(IV));
+
+ if (!numVectors) {
+ printf("No Snow3G test vectors found !\n");
+ return ret;
+ }
+
+ pKey = malloc(keyLen);
+ if (!pKey) {
+ printf("malloc(key):failed !\n");
+ return ret;
+ }
+
+ size = IMB_SNOW3G_KEY_SCHED_SIZE(mb_mgr);
+ if (!size) {
+ free(pKey);
+ return ret;
+ }
+
+ pKeySched = malloc(size);
+ if (!pKeySched) {
+ printf("malloc(IMB_SNOW3G_KEY_SCHED_SIZE(mb_mgr)): failed ! "
+ "\n");
+ free(pKey);
+ return ret;
+ }
+
+ /* Test with all vectors */
+ for (j = 0; j < numVectors; j++) {
+ /* vector lengths are given in bits, rounded up to bytes */
+ length = testVectors[j].dataLenInBytes;
+
+ /* Create test data for numPackets packets */
+ for (i = 0; i < numPackets; i++) {
+
+ packetLen[i] = length;
+ srcBuff[i] = malloc(length);
+ if (!srcBuff[i]) {
+ printf("malloc(srcBuff[%d]):failed !\n", i);
+ goto snow3g_f8_4_buffer_exit;
+ }
+ dstBuff[i] = malloc(length);
+ if (!dstBuff[i]) {
+ printf("malloc(dstBuff[%d]):failed !\n", i);
+ goto snow3g_f8_4_buffer_exit;
+ }
+ IV[i] = malloc(SNOW3G_IV_LEN_IN_BYTES);
+ if (!IV[i]) {
+ printf("malloc(IV[%d]):failed !\n", i);
+ goto snow3g_f8_4_buffer_exit;
+ }
+
+ memcpy(pKey, testVectors[j].key,
+ testVectors[j].keyLenInBytes);
+
+ memcpy(srcBuff[i], testVectors[j].plaintext, length);
+
+ memset(dstBuff[i], 0, length);
+
+ memcpy(IV[i], testVectors[j].iv,
+ testVectors[j].ivLenInBytes);
+ }
+
+ /*only 1 key is needed for snow3g 4 blocks*/
+ if (IMB_SNOW3G_INIT_KEY_SCHED(mb_mgr, pKey, pKeySched)) {
+ printf("IMB_SNOW3G_INIT_KEY_SCHED(mb_mgr) error\n");
+ goto snow3g_f8_4_buffer_exit;
+ }
+
+ /* TEST IN-PLACE ENCRYPTION/DECRYPTION */
+ /*Test the encrypt*/
+ IMB_SNOW3G_F8_4_BUFFER(
+ mb_mgr, pKeySched, IV[0], IV[1], IV[2], IV[3],
+ srcBuff[0], srcBuff[0], packetLen[0], srcBuff[1],
+ srcBuff[1], packetLen[1], srcBuff[2], srcBuff[2],
+ packetLen[2], srcBuff[3], srcBuff[3], packetLen[3]);
+
+ /* compare the ciphertext with the encrypted plaintext */
+ for (i = 0; i < numPackets; i++) {
+ if (memcmp(srcBuff[i], testVectors[j].ciphertext,
+ packetLen[i]) != 0) {
+ printf("IMB_SNOW3G_F8_4_BUFFER(Enc) vector:%d "
+ "buffer:%d\n", j, i);
+ snow3g_hexdump("Actual:", srcBuff[i],
+ packetLen[i]);
+ snow3g_hexdump("Expected:",
+ testVectors[j].ciphertext,
+ packetLen[i]);
+ goto snow3g_f8_4_buffer_exit;
+ }
+ printf(".");
+ }
+
+ /* Set the source buffer with ciphertext */
+ for (i = 0; i < numPackets; i++)
+ memcpy(srcBuff[i], testVectors[j].ciphertext, length);
+
+ /*Test the decrypt*/
+ IMB_SNOW3G_F8_4_BUFFER(
+ mb_mgr, pKeySched, IV[0], IV[1], IV[2], IV[3],
+ srcBuff[0], srcBuff[0], packetLen[0], srcBuff[1],
+ srcBuff[1], packetLen[1], srcBuff[2], srcBuff[2],
+ packetLen[2], srcBuff[3], srcBuff[3], packetLen[3]);
+
+ /*Compare the plaintext with the decrypted ciphertext*/
+ for (i = 0; i < numPackets; i++) {
+ if (memcmp(srcBuff[i], testVectors[j].plaintext,
+ packetLen[i]) != 0) {
+ printf("IMB_SNOW3G_F8_4_BUFFER(Dec) vector:%d "
+ "buffer:%d\n", j, i);
+ snow3g_hexdump("Actual:", srcBuff[i],
+ packetLen[i]);
+ snow3g_hexdump("Expected:",
+ testVectors[j].plaintext,
+ packetLen[i]);
+ goto snow3g_f8_4_buffer_exit;
+ }
+ printf(".");
+ }
+ /* TEST OUT-OF-PLACE ENCRYPTION/DECRYPTION */
+ /*Test the encrypt*/
+ IMB_SNOW3G_F8_4_BUFFER(
+ mb_mgr, pKeySched, IV[0], IV[1], IV[2], IV[3],
+ srcBuff[0], dstBuff[0], packetLen[0], srcBuff[1],
+ dstBuff[1], packetLen[1], srcBuff[2], dstBuff[2],
+ packetLen[2], srcBuff[3], dstBuff[3], packetLen[3]);
+
+ /* compare the ciphertext with the encrypted plaintext */
+ for (i = 0; i < numPackets; i++) {
+ if (memcmp(dstBuff[i], testVectors[j].ciphertext,
+ packetLen[i]) != 0) {
+ printf("IMB_SNOW3G_F8_4_BUFFER(Enc) vector:%d "
+ "buffer:%d\n", j, i);
+ snow3g_hexdump("Actual:", dstBuff[i],
+ packetLen[i]);
+ snow3g_hexdump("Expected:",
+ testVectors[j].ciphertext,
+ packetLen[i]);
+ goto snow3g_f8_4_buffer_exit;
+ }
+ printf(".");
+ }
+
+ /* Set the source buffer with ciphertext, and clear destination
+ * buffer */
+ for (i = 0; i < numPackets; i++) {
+ memcpy(srcBuff[i], testVectors[j].ciphertext, length);
+ memset(dstBuff[i], 0, length);
+ }
+ /*Test the decrypt*/
+ IMB_SNOW3G_F8_4_BUFFER(
+ mb_mgr, pKeySched, IV[0], IV[1], IV[2], IV[3],
+ srcBuff[0], dstBuff[0], packetLen[0], srcBuff[1],
+ dstBuff[1], packetLen[1], srcBuff[2], dstBuff[2],
+ packetLen[2], srcBuff[3], dstBuff[3], packetLen[3]);
+
+ /*Compare the plaintext with the decrypted ciphertext*/
+ for (i = 0; i < numPackets; i++) {
+ if (memcmp(dstBuff[i], testVectors[j].plaintext,
+ packetLen[i]) != 0) {
+ printf("IMB_SNOW3G_F8_4_BUFFER(Dec) vector:%d "
+ "buffer:%d\n", j, i);
+ snow3g_hexdump("Actual:", dstBuff[i],
+ packetLen[i]);
+ snow3g_hexdump("Expected:",
+ testVectors[j].plaintext,
+ packetLen[i]);
+ goto snow3g_f8_4_buffer_exit;
+ }
+ printf(".");
+ }
+ /* free buffers before next iteration */
+ for (i = 0; i < numPackets; i++) {
+ if (srcBuff[i] != NULL) {
+ free(srcBuff[i]);
+ srcBuff[i] = NULL;
+ }
+ if (dstBuff[i] != NULL) {
+ free(dstBuff[i]);
+ dstBuff[i] = NULL;
+ }
+ if (IV[i] != NULL) {
+ free(IV[i]);
+ IV[i] = NULL;
+ }
+ }
+ }
+
+ /* vector lengths are given in bits, rounded up to bytes */
+ length = testVectors[1].dataLenInBytes;
+
+ /* Create test data for numPackets packets */
+ for (i = 0; i < numPackets; i++) {
+ /* Test for packets of different length. */
+ packetLen[i] = length - (i * 12);
+ srcBuff[i] = malloc(packetLen[i]);
+ if (!srcBuff[i]) {
+ printf("malloc(srcBuff[%d]):failed !\n", i);
+ goto snow3g_f8_4_buffer_exit;
+ }
+ dstBuff[i] = malloc(packetLen[i]);
+ if (!dstBuff[i]) {
+ printf("malloc(dstBuff[%d]):failed !\n", i);
+ goto snow3g_f8_4_buffer_exit;
+ }
+ IV[i] = malloc(SNOW3G_IV_LEN_IN_BYTES);
+ if (!IV[i]) {
+ printf("malloc(IV[%d]):failed !\n", i);
+ goto snow3g_f8_4_buffer_exit;
+ }
+ memcpy(pKey, testVectors[1].key, testVectors[1].keyLenInBytes);
+
+ memcpy(srcBuff[i], testVectors[1].plaintext, packetLen[i]);
+
+ memset(dstBuff[i], 0, packetLen[i]);
+
+ memcpy(IV[i], testVectors[1].iv, testVectors[1].ivLenInBytes);
+ }
+
+ /*only 1 key is needed for snow3g 4 blocks*/
+ if (IMB_SNOW3G_INIT_KEY_SCHED(mb_mgr, pKey, pKeySched)) {
+ printf("IMB_SNOW3G_INIT_KEY_SCHED(mb_mgr) error\n");
+ goto snow3g_f8_4_buffer_exit;
+ }
+
+ /* Test the encrypt */
+ IMB_SNOW3G_F8_4_BUFFER(mb_mgr, pKeySched, IV[0], IV[1], IV[2], IV[3],
+ srcBuff[0], dstBuff[0], packetLen[0], srcBuff[1],
+ dstBuff[1], packetLen[1], srcBuff[2], dstBuff[2],
+ packetLen[2], srcBuff[3], dstBuff[3],
+ packetLen[3]);
+
+ /* compare the ciphertext with the encrypted plaintext */
+ for (i = 0; i < numPackets; i++) {
+ if (memcmp(dstBuff[i], testVectors[1].ciphertext,
+ packetLen[i]) != 0) {
+ printf("IMB_SNOW3G_F8_4_BUFFER(Enc, diff size) "
+ "vector:%d buffer:%d\n", 1, i);
+ snow3g_hexdump("Actual:", dstBuff[i], packetLen[i]);
+ snow3g_hexdump("Expected:", testVectors[1].ciphertext,
+ packetLen[i]);
+ goto snow3g_f8_4_buffer_exit;
+ }
+ printf(".");
+ }
+
+ /* no errors detected */
+ ret = 0;
+
+snow3g_f8_4_buffer_exit:
+ if (pKey != NULL)
+ free(pKey);
+ if (pKeySched != NULL)
+ free(pKeySched);
+
+ for (i = 0; i < numPackets; i++) {
+ if (srcBuff[i] != NULL)
+ free(srcBuff[i]);
+ if (dstBuff[i] != NULL)
+ free(dstBuff[i]);
+ if (IV[i] != NULL)
+ free(IV[i]);
+ }
+ printf("\n");
+
+ return ret;
+}
+
+static int validate_snow3g_f8_8_blocks(struct MB_MGR *mb_mgr)
+{
+ int length, numVectors, i, j, numPackets = 8;
+ size_t size = 0;
+ cipher_test_vector_t *testVectors = snow3g_cipher_test_vectors[1];
+ /* snow3g f8 test vectors are located at index 1 */
+ numVectors = numSnow3gCipherTestVectors[1];
+
+ snow3g_key_schedule_t *pKeySched = NULL;
+ uint8_t *pKey = NULL;
+ int keyLen = MAX_KEY_LEN;
+ uint8_t *srcBuff[MAX_DATA_LEN];
+ uint8_t *dstBuff[MAX_DATA_LEN];
+ uint8_t *IV[SNOW3G_IV_LEN_IN_BYTES];
+ uint32_t packetLen[MAX_DATA_LEN];
+ int ret = 1;
+
+ printf("Testing IMB_SNOW3G_F8_8_BUFFER:\n");
+
+ memset(srcBuff, 0, sizeof(srcBuff));
+ memset(dstBuff, 0, sizeof(dstBuff));
+ memset(IV, 0, sizeof(IV));
+
+ if (!numVectors) {
+ printf("No Snow3G test vectors found !\n");
+ return ret;
+ }
+
+ pKey = malloc(keyLen);
+ if (!pKey) {
+ printf("malloc(key):failed !\n");
+ return ret;
+ }
+
+ size = IMB_SNOW3G_KEY_SCHED_SIZE(mb_mgr);
+ if (!size) {
+ free(pKey);
+ return ret;
+ }
+
+ pKeySched = malloc(size);
+ if (!pKeySched) {
+ printf("malloc(IMB_SNOW3G_KEY_SCHED_SIZE(mb_mgr)): failed ! "
+ "\n");
+ free(pKey);
+ return ret;
+ }
+
+ /* Test with all vectors */
+ for (j = 0; j < numVectors; j++) {
+ int k;
+ /* vector lengths are given in bits, rounded up to bytes */
+ length = testVectors[j].dataLenInBytes;
+
+ /* Create test data for numPackets packets */
+ for (i = 0; i < numPackets; i++) {
+
+ packetLen[i] = length;
+ srcBuff[i] = malloc(length);
+ if (!srcBuff[i]) {
+ printf("malloc(srcBuff[%d]):failed !\n", i);
+ goto snow3g_f8_8_buffer_exit;
+ }
+
+ dstBuff[i] = malloc(length);
+ if (!dstBuff[i]) {
+ printf("malloc(dstBuff[%d]):failed !\n", i);
+ goto snow3g_f8_8_buffer_exit;
+ }
+
+ IV[i] = malloc(SNOW3G_IV_LEN_IN_BYTES);
+ if (!IV[i]) {
+ printf("malloc(IV[%d]):failed !\n", i);
+ goto snow3g_f8_8_buffer_exit;
+ }
+
+ memcpy(pKey, testVectors[j].key,
+ testVectors[j].keyLenInBytes);
+
+ memcpy(srcBuff[i], testVectors[j].plaintext, length);
+
+ memcpy(dstBuff[i], testVectors[j].ciphertext, length);
+
+ memcpy(IV[i], testVectors[j].iv,
+ testVectors[j].ivLenInBytes);
+ }
+
+ /*only 1 key is needed for snow3g 8 blocks*/
+ if (IMB_SNOW3G_INIT_KEY_SCHED(mb_mgr, pKey, pKeySched)) {
+ printf("IMB_SNOW3G_INIT_KEY_SCHED(mb_mgr) error\n");
+ goto snow3g_f8_8_buffer_exit;
+ }
+
+ /*Test the encrypt*/
+ IMB_SNOW3G_F8_8_BUFFER(
+ mb_mgr, pKeySched, IV[0], IV[1], IV[2], IV[3], IV[4],
+ IV[5], IV[6], IV[7], srcBuff[0], dstBuff[0],
+ packetLen[0], srcBuff[1], dstBuff[1], packetLen[1],
+ srcBuff[2], dstBuff[2], packetLen[2], srcBuff[3],
+ dstBuff[3], packetLen[3], srcBuff[4], dstBuff[4],
+ packetLen[4], srcBuff[5], dstBuff[5], packetLen[5],
+ srcBuff[6], dstBuff[6], packetLen[6], srcBuff[7],
+ dstBuff[7], packetLen[7]);
+
+ /*compare the ciphertext with the encrypted plaintext*/
+ for (i = 0; i < numPackets; i++) {
+ if (memcmp(dstBuff[i], testVectors[j].ciphertext,
+ packetLen[i]) != 0) {
+ printf("IMB_SNOW3G_F8_8_BUFFER(Enc) vector:%d "
+ "buffer:%d\n", j, i);
+ snow3g_hexdump("Actual:", dstBuff[i],
+ packetLen[i]);
+ snow3g_hexdump("Expected:",
+ testVectors[j].ciphertext,
+ packetLen[i]);
+ goto snow3g_f8_8_buffer_exit;
+ }
+ printf(".");
+ }
+
+ /*Test the decrypt*/
+ IMB_SNOW3G_F8_8_BUFFER(
+ mb_mgr, pKeySched, IV[0], IV[1], IV[2], IV[3], IV[4],
+ IV[5], IV[6], IV[7], dstBuff[0], srcBuff[0],
+ packetLen[0], dstBuff[1], srcBuff[1], packetLen[1],
+ dstBuff[2], srcBuff[2], packetLen[2], dstBuff[3],
+ srcBuff[3], packetLen[3], dstBuff[4], srcBuff[4],
+ packetLen[4], dstBuff[5], srcBuff[5], packetLen[5],
+ dstBuff[6], srcBuff[6], packetLen[6], dstBuff[7],
+ srcBuff[7], packetLen[7]);
+
+ /*Compare the plaintext with the decrypted ciphertext*/
+ for (i = 0; i < numPackets; i++) {
+ if (memcmp(srcBuff[i], testVectors[j].plaintext,
+ packetLen[i]) != 0) {
+ printf("IMB_SNOW3G_F8_8_BUFFER(Dec) vector:%d "
+ "buffer:%d\n", j, i);
+ snow3g_hexdump("Actual:", srcBuff[i],
+ packetLen[i]);
+ snow3g_hexdump("Expected:",
+ testVectors[j].plaintext,
+ packetLen[i]);
+ goto snow3g_f8_8_buffer_exit;
+ }
+ printf(".");
+ }
+ /* free buffers before next iteration */
+ for (k = 0; k < numPackets; k++) {
+ if (srcBuff[k] != NULL) {
+ free(srcBuff[k]);
+ srcBuff[k] = NULL;
+ }
+ if (dstBuff[k] != NULL) {
+ free(dstBuff[k]);
+ dstBuff[k] = NULL;
+ }
+ if (IV[k] != NULL) {
+ free(IV[k]);
+ IV[k] = NULL;
+ }
+ }
+ }
+
+ /* vector lengths are given in bits; rounded up to bytes when used */
+ length = testVectors[1].dataLenInBytes;
+
+ /*Create test Data for num Packets*/
+ for (i = 0; i < numPackets; i++) {
+ /* Test for packets of different length. */
+ packetLen[i] = length - (i * 12);
+ srcBuff[i] = malloc(packetLen[i]);
+ if (!srcBuff[i]) {
+ printf("malloc(srcBuff[%d]):failed !\n", i);
+ goto snow3g_f8_8_buffer_exit;
+ }
+ dstBuff[i] = malloc(packetLen[i]);
+ if (!dstBuff[i]) {
+ printf("malloc(dstBuff[%d]):failed !\n", i);
+ goto snow3g_f8_8_buffer_exit;
+ }
+ IV[i] = malloc(SNOW3G_IV_LEN_IN_BYTES);
+ if (!IV[i]) {
+ printf("malloc(IV[%d]):failed !\n", i);
+ goto snow3g_f8_8_buffer_exit;
+ }
+ memcpy(pKey, testVectors[1].key, testVectors[1].keyLenInBytes);
+
+ memcpy(srcBuff[i], testVectors[1].plaintext, packetLen[i]);
+
+ memset(dstBuff[i], 0, packetLen[i]);
+
+ memcpy(IV[i], testVectors[1].iv, testVectors[1].ivLenInBytes);
+ }
+
+ /*only 1 key is needed for snow3g 8 blocks*/
+ if (IMB_SNOW3G_INIT_KEY_SCHED(mb_mgr, pKey, pKeySched)) {
+ printf("IMB_SNOW3G_INIT_KEY_SCHED(mb_mgr) error\n");
+ goto snow3g_f8_8_buffer_exit;
+ }
+
+ /* Test the encrypt */
+ IMB_SNOW3G_F8_8_BUFFER(
+ mb_mgr, pKeySched, IV[0], IV[1], IV[2], IV[3], IV[4], IV[5],
+ IV[6], IV[7], srcBuff[0], dstBuff[0], packetLen[0], srcBuff[1],
+ dstBuff[1], packetLen[1], srcBuff[2], dstBuff[2], packetLen[2],
+ srcBuff[3], dstBuff[3], packetLen[3], srcBuff[4], dstBuff[4],
+ packetLen[4], srcBuff[5], dstBuff[5], packetLen[5], srcBuff[6],
+ dstBuff[6], packetLen[6], srcBuff[7], dstBuff[7], packetLen[7]);
+
+ /*compare the ciphertext with the encrypted plaintext*/
+ for (i = 0; i < numPackets; i++) {
+ if (memcmp(dstBuff[i], testVectors[1].ciphertext,
+ packetLen[i]) != 0) {
+ printf("IMB_SNOW3G_F8_8_BUFFER(Enc, diff size) "
+ "vector:%d buffer:%d\n",
+ 1, i);
+ snow3g_hexdump("Actual:", dstBuff[i], packetLen[i]);
+ snow3g_hexdump("Expected:", testVectors[1].ciphertext,
+ packetLen[i]);
+ goto snow3g_f8_8_buffer_exit;
+ }
+ printf(".");
+ }
+ /* no errors detected */
+ ret = 0;
+
+snow3g_f8_8_buffer_exit:
+ if (pKey != NULL)
+ free(pKey);
+ if (pKeySched != NULL)
+ free(pKeySched);
+
+ for (i = 0; i < numPackets; i++) {
+ if (srcBuff[i] != NULL)
+ free(srcBuff[i]);
+ if (dstBuff[i] != NULL)
+ free(dstBuff[i]);
+ if (IV[i] != NULL)
+ free(IV[i]);
+ }
+ printf("\n");
+
+ return ret;
+}
+
+static int validate_snow3g_f8_8_blocks_multi_key(struct MB_MGR *mb_mgr)
+{
+ int length, numVectors, i, j, numPackets = 8;
+ size_t size = 0;
+
+ if (numPackets > NUM_SUPPORTED_BUFFERS) {
+ printf("numPackets %d too large !\n", numPackets);
+ printf("Setting to NUM_SUPPORTED_BUFFERS %d\n",
+ NUM_SUPPORTED_BUFFERS);
+ numPackets = NUM_SUPPORTED_BUFFERS;
+ }
+
+ cipher_test_vector_t *testVectors = snow3g_cipher_test_vectors[1];
+ /* snow3g f8 test vectors are located at index 1 */
+ numVectors = numSnow3gCipherTestVectors[1];
+
+ snow3g_key_schedule_t *pKeySched[MAX_DATA_LEN];
+ uint8_t *pKey[MAX_DATA_LEN];
+ uint8_t *srcBuff[MAX_DATA_LEN];
+ uint8_t *dstBuff[MAX_DATA_LEN];
+ uint8_t *IV[MAX_DATA_LEN];
+ uint32_t packetLen[MAX_DATA_LEN];
+ int ret = 1;
+
+ printf("Testing IMB_SNOW3G_F8_8_BUFFER_MULTIKEY:\n");
+
+ memset(srcBuff, 0, sizeof(srcBuff));
+ memset(dstBuff, 0, sizeof(dstBuff));
+ memset(IV, 0, sizeof(IV));
+ memset(pKey, 0, sizeof(pKey));
+ memset(packetLen, 0, sizeof(packetLen));
+ memset(pKeySched, 0, sizeof(pKeySched));
+
+ if (!numVectors) {
+ printf("No Snow3G test vectors found !\n");
+ return ret;
+ }
+
+ size = IMB_SNOW3G_KEY_SCHED_SIZE(mb_mgr);
+ if (!size) {
+ printf("snow3g_key_sched_multi_size() failure !\n");
+ return ret;
+ }
+
+ for (i = 0; i < numPackets; i++) {
+ j = i % numVectors;
+
+ length = testVectors[j].dataLenInBytes;
+ packetLen[i] = length;
+ pKeySched[i] = malloc(size);
+ if (!pKeySched[i]) {
+ printf("malloc(pKeySched[%d]):failed !\n", i);
+ goto snow3g_f8_8_buffer_multikey_exit;
+ }
+ srcBuff[i] = malloc(length);
+ if (!srcBuff[i]) {
+ printf("malloc(srcBuff[%d]):failed !\n", i);
+ goto snow3g_f8_8_buffer_multikey_exit;
+ }
+ dstBuff[i] = malloc(length);
+ if (!dstBuff[i]) {
+ printf("malloc(dstBuff[%d]):failed !\n", i);
+ goto snow3g_f8_8_buffer_multikey_exit;
+ }
+ pKey[i] = malloc(testVectors[j].keyLenInBytes);
+ if (!pKey[i]) {
+ printf("malloc(pKey[%d]):failed !\n", i);
+ goto snow3g_f8_8_buffer_multikey_exit;
+ }
+ IV[i] = malloc(SNOW3G_IV_LEN_IN_BYTES);
+ if (!IV[i]) {
+ printf("malloc(IV[%d]):failed !\n", i);
+ goto snow3g_f8_8_buffer_multikey_exit;
+ }
+
+ memcpy(pKey[i], testVectors[j].key,
+ testVectors[j].keyLenInBytes);
+
+ memcpy(srcBuff[i], testVectors[j].plaintext, length);
+
+ memcpy(IV[i], testVectors[j].iv, testVectors[j].ivLenInBytes);
+
+ if (IMB_SNOW3G_INIT_KEY_SCHED(mb_mgr, pKey[i], pKeySched[i])) {
+ printf("IMB_SNOW3G_INIT_KEY_SCHED(mb_mgr) error\n");
+ goto snow3g_f8_8_buffer_multikey_exit;
+ }
+ }
+
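+ /* Unlike the single-key variant, each of the 8 buffers below uses its
+ * own key schedule and IV prepared in the loop above. */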
+ /*Test the encrypt*/
+ IMB_SNOW3G_F8_8_BUFFER_MULTIKEY(mb_mgr,
+ (const snow3g_key_schedule_t * const *)
+ pKeySched,
+ (const void * const *)IV,
+ (const void * const *)srcBuff,
+ (void **)dstBuff,
+ packetLen);
+
+ /*compare the ciphertext with the encrypted plaintext*/
+ for (i = 0; i < numPackets; i++) {
+ j = i % numVectors;
+ if (memcmp(dstBuff[i], testVectors[j].ciphertext,
+ packetLen[i]) != 0) {
+ printf("snow3g_f8_8_multi_buffer(Enc) vector:%d "
+ "buffer:%d\n",
+ j, i);
+ snow3g_hexdump("Actual:", dstBuff[i], packetLen[i]);
+ snow3g_hexdump("Expected:", testVectors[j].ciphertext,
+ packetLen[i]);
+ goto snow3g_f8_8_buffer_multikey_exit;
+ }
+ printf(".");
+ }
+
+ /*Test the decrypt*/
+ IMB_SNOW3G_F8_8_BUFFER_MULTIKEY(
+ mb_mgr, (const snow3g_key_schedule_t * const *) pKeySched,
+ (const void * const *)IV, (const void * const *)dstBuff,
+ (void **)srcBuff, packetLen);
+
+ /*Compare the plaintext with the decrypted ciphertext*/
+ for (i = 0; i < numPackets; i++) {
+ j = i % numVectors;
+ if (memcmp(srcBuff[i], testVectors[j].plaintext,
+ packetLen[i]) != 0) {
+ printf("snow3g_f8_8_multi_buffer(Dec) vector:%d "
+ "buffer:%d\n", j, i);
+ snow3g_hexdump("Actual:", srcBuff[i], packetLen[i]);
+ snow3g_hexdump("Expected:", testVectors[j].plaintext,
+ packetLen[i]);
+ goto snow3g_f8_8_buffer_multikey_exit;
+ }
+ printf(".");
+ }
+ /* no errors detected */
+ ret = 0;
+
+snow3g_f8_8_buffer_multikey_exit:
+ for (i = 0; i < numPackets; i++) {
+ if (srcBuff[i] != NULL)
+ free(srcBuff[i]);
+ if (dstBuff[i] != NULL)
+ free(dstBuff[i]);
+ if (IV[i] != NULL)
+ free(IV[i]);
+ if (pKey[i] != NULL)
+ free(pKey[i]);
+ if (pKeySched[i] != NULL)
+ free(pKeySched[i]);
+
+ }
+ printf("\n");
+
+ return ret;
+}
+
+int validate_snow3g_f8_n_blocks(struct MB_MGR *mb_mgr)
+{
+ int length, numVectors, i, numPackets = 16;
+ size_t size = 0;
+ cipher_test_vector_t *testVectors = snow3g_cipher_test_vectors[1];
+ /* snow3g f8 test vectors are located at index 1 */
+ numVectors = numSnow3gCipherTestVectors[1];
+
+ snow3g_key_schedule_t *pKeySched = NULL;
+ uint8_t *pKey = NULL;
+ int keyLen = MAX_KEY_LEN;
+ uint8_t *srcBuff[NUM_SUPPORTED_BUFFERS];
+ uint8_t *dstBuff[NUM_SUPPORTED_BUFFERS];
+ uint8_t *IV[NUM_SUPPORTED_BUFFERS];
+ uint32_t packetLen[MAX_DATA_LEN];
+ int ret = 1;
+
+ printf("Testing IMB_SNOW3G_F8_N_BUFFER:\n");
+
+ memset(srcBuff, 0, sizeof(srcBuff));
+ memset(dstBuff, 0, sizeof(dstBuff));
+ memset(IV, 0, sizeof(IV));
+
+ if (!numVectors) {
+ printf("No Snow3G test vectors found !\n");
+ return ret;
+ }
+
+ pKey = malloc(keyLen);
+ if (!pKey) {
+ printf("malloc(key):failed !\n");
+ return ret;
+ }
+
+ size = IMB_SNOW3G_KEY_SCHED_SIZE(mb_mgr);
+ if (!size) {
+ free(pKey);
+ return ret;
+ }
+
+ pKeySched = malloc(size);
+ if (!pKeySched) {
+ printf("malloc(pKeySched): failed !\n");
+ free(pKey);
+ return ret;
+ }
+
+ /* vector lengths are given in bits; rounded up to bytes when used */
+ length = testVectors[0].dataLenInBytes;
+
+ /* Create test Data for num Packets*/
+ for (i = 0; i < numPackets; i++) {
+
+ packetLen[i] = length;
+ srcBuff[i] = malloc(length);
+ if (!srcBuff[i]) {
+ printf("malloc(srcBuff[%d]):failed !\n", i);
+ goto snow3g_f8_n_buffer_exit;
+ }
+ dstBuff[i] = malloc(length);
+ if (!dstBuff[i]) {
+ printf("malloc(dstBuff[%d]):failed !\n", i);
+ goto snow3g_f8_n_buffer_exit;
+ }
+ IV[i] = malloc(SNOW3G_IV_LEN_IN_BYTES);
+ if (!IV[i]) {
+ printf("malloc(IV[%d]):failed !\n", i);
+ goto snow3g_f8_n_buffer_exit;
+ }
+
+ memcpy(pKey, testVectors[0].key, testVectors[0].keyLenInBytes);
+ memcpy(srcBuff[i], testVectors[0].plaintext, length);
+ memcpy(IV[i], testVectors[0].iv, testVectors[0].ivLenInBytes);
+ }
+
+ if (IMB_SNOW3G_INIT_KEY_SCHED(mb_mgr, pKey, pKeySched)) {
+ printf("IMB_SNOW3G_INIT_KEY_SCHED() error\n");
+ goto snow3g_f8_n_buffer_exit;
+ }
+
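+ /* Exercise the N-buffer API with every supported buffer count,
+ * from 1 up to NUM_SUPPORTED_BUFFERS. */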
+ for (i = 0; i < NUM_SUPPORTED_BUFFERS; i++) {
+ /*Test the encrypt*/
+ IMB_SNOW3G_F8_N_BUFFER(mb_mgr, pKeySched,
+ (const void * const *)IV,
+ (const void * const *)srcBuff,
+ (void **)dstBuff,
+ packetLen, i + 1);
+ if (dstBuff[0] == NULL) {
+ printf("N buffer failure\n");
+ goto snow3g_f8_n_buffer_exit;
+ }
+
+ /*Compare the data in the dstBuff with the cipher pattern*/
+ if (memcmp(testVectors[0].ciphertext, dstBuff[i],
+ packetLen[i]) != 0) {
+ printf("IMB_SNOW3G_F8_N_BUFFER(Enc) , vector:%d\n", i);
+ snow3g_hexdump("Actual:", dstBuff[i], packetLen[0]);
+ snow3g_hexdump("Expected:", testVectors[0].ciphertext,
+ packetLen[0]);
+ goto snow3g_f8_n_buffer_exit;
+ }
+ printf(".");
+
+ /*Test the Decrypt*/
+ IMB_SNOW3G_F8_N_BUFFER(mb_mgr, pKeySched,
+ (const void * const *)IV,
+ (const void * const *)dstBuff,
+ (void **)srcBuff,
+ packetLen, i + 1);
+ if (srcBuff[0] == NULL) {
+ printf("N buffer failure\n");
+ goto snow3g_f8_n_buffer_exit;
+ }
+
+ /*Compare the data in the srcBuff with the dstBuff*/
+ if (memcmp(srcBuff[i], testVectors[0].plaintext,
+ packetLen[i]) != 0) {
+ printf("snow3g_f8_n_buffer equal sizes, vector:%d\n",
+ i);
+ snow3g_hexdump("Actual:", srcBuff[i], packetLen[i]);
+ snow3g_hexdump("Expected:", testVectors[0].plaintext,
+ packetLen[0]);
+ goto snow3g_f8_n_buffer_exit;
+ }
+ printf(".");
+ }
+ /* no errors detected */
+ ret = 0;
+
+snow3g_f8_n_buffer_exit:
+ if (pKey != NULL)
+ free(pKey);
+ if (pKeySched != NULL)
+ free(pKeySched);
+
+ for (i = 0; i < numPackets; i++) {
+ if (srcBuff[i] != NULL)
+ free(srcBuff[i]);
+ if (dstBuff[i] != NULL)
+ free(dstBuff[i]);
+ if (IV[i] != NULL)
+ free(IV[i]);
+ }
+ printf("\n");
+
+ return ret;
+}
+
+static int validate_snow3g_f8_n_blocks_multi(struct MB_MGR *mb_mgr)
+{
+ int length, numVectors, i, numPackets = NUM_SUPPORTED_BUFFERS;
+ size_t size = 0;
+ cipher_test_vector_t *testVectors = snow3g_cipher_test_vectors[1];
+ /* snow3g f8 test vectors are located at index 1 */
+ numVectors = numSnow3gCipherTestVectors[1];
+
+ snow3g_key_schedule_t *pKeySched[MAX_DATA_LEN];
+ uint8_t *pKey[MAX_DATA_LEN];
+ uint8_t *srcBuff[MAX_DATA_LEN];
+ uint8_t *dstBuff[MAX_DATA_LEN];
+ uint8_t *IV[MAX_DATA_LEN];
+ uint32_t packetLen[MAX_DATA_LEN];
+ int ret = 1;
+
+ printf("Testing IMB_SNOW3G_F8_N_BUFFER_MULTIKEY:\n");
+
+ memset(srcBuff, 0, sizeof(srcBuff));
+ memset(dstBuff, 0, sizeof(dstBuff));
+ memset(IV, 0, sizeof(IV));
+ memset(pKeySched, 0, sizeof(pKeySched));
+ memset(pKey, 0, sizeof(pKey));
+
+ if (!numVectors) {
+ printf("No Snow3G test vectors found !\n");
+ return ret;
+ }
+
+ size = IMB_SNOW3G_KEY_SCHED_SIZE(mb_mgr);
+ if (!size) {
+ printf("snow3g_key_sched_multi_size() failure !\n");
+ return ret;
+ }
+
+ for (i = 0; i < numPackets; i++) {
+ length = testVectors[0].dataLenInBytes;
+ packetLen[i] = length;
+ pKeySched[i] = malloc(size);
+ if (!pKeySched[i]) {
+ printf("malloc(pKeySched[%d]):failed !\n", i);
+ goto snow3g_f8_n_buffer_multikey_exit;
+ }
+ srcBuff[i] = malloc(length);
+ if (!srcBuff[i]) {
+ printf("malloc(srcBuff[%d]):failed !\n", i);
+ goto snow3g_f8_n_buffer_multikey_exit;
+ }
+ dstBuff[i] = malloc(length);
+ if (!dstBuff[i]) {
+ printf("malloc(dstBuff[%d]):failed !\n", i);
+ goto snow3g_f8_n_buffer_multikey_exit;
+ }
+ pKey[i] = malloc(testVectors[0].keyLenInBytes);
+ if (!pKey[i]) {
+ printf("malloc(pKey[%d]):failed !\n", i);
+ goto snow3g_f8_n_buffer_multikey_exit;
+ }
+ IV[i] = malloc(SNOW3G_IV_LEN_IN_BYTES);
+ if (!IV[i]) {
+ printf("malloc(IV[%d]):failed !\n", i);
+ goto snow3g_f8_n_buffer_multikey_exit;
+ }
+
+ memcpy(pKey[i], testVectors[0].key,
+ testVectors[0].keyLenInBytes);
+
+ memcpy(srcBuff[i], testVectors[0].plaintext, length);
+
+ memcpy(IV[i], testVectors[0].iv, testVectors[0].ivLenInBytes);
+
+ if (IMB_SNOW3G_INIT_KEY_SCHED(mb_mgr, pKey[i], pKeySched[i])) {
+ printf("IMB_SNOW3G_INIT_KEY_SCHED() error\n");
+ goto snow3g_f8_n_buffer_multikey_exit;
+ }
+ }
+
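+ /* Exercise the multi-key N-buffer API with every buffer count from 1 up
+ * to numPackets; each buffer has its own key schedule and IV. */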
+ for (i = 0; i < numPackets; i++) {
+ /*Test the encrypt*/
+ IMB_SNOW3G_F8_N_BUFFER_MULTIKEY(
+ mb_mgr,
+ (const snow3g_key_schedule_t * const *)pKeySched,
+ (const void * const *)IV,
+ (const void * const *)srcBuff,
+ (void **)dstBuff, packetLen, i + 1);
+
+ if (dstBuff[0] == NULL) {
+ printf("N buffer failure\n");
+ goto snow3g_f8_n_buffer_multikey_exit;
+ }
+
+ /*Compare the data in the dstBuff with the cipher pattern*/
+ if (memcmp(testVectors[0].ciphertext, dstBuff[i],
+ packetLen[i]) != 0) {
+ printf("IMB_SNOW3G_F8_N_BUFFER(Enc) , vector:%d "
+ "buffer: %d\n", 0, i);
+ snow3g_hexdump("Actual:", dstBuff[i], packetLen[i]);
+ snow3g_hexdump("Expected:", testVectors[0].ciphertext,
+ packetLen[i]);
+ goto snow3g_f8_n_buffer_multikey_exit;
+ }
+ printf(".");
+
+ /*Test the Decrypt*/
+ IMB_SNOW3G_F8_N_BUFFER_MULTIKEY(
+ mb_mgr,
+ (const snow3g_key_schedule_t * const *) pKeySched,
+ (const void * const *)IV,
+ (const void * const *)dstBuff,
+ (void **)srcBuff, packetLen, i + 1);
+
+ if (srcBuff[0] == NULL) {
+ printf("N buffer failure\n");
+ goto snow3g_f8_n_buffer_multikey_exit;
+ }
+
+ /*Compare the data in the srcBuff with the dstBuff*/
+ if (memcmp(srcBuff[i], testVectors[0].plaintext,
+ packetLen[i]) != 0) {
+ printf("snow3g_f8_n_buffer equal sizes, vector:%d "
+ "buffer: %d\n", 0, i);
+ snow3g_hexdump("Actual:", srcBuff[i], packetLen[i]);
+ snow3g_hexdump("Expected:", testVectors[0].plaintext,
+ packetLen[i]);
+ goto snow3g_f8_n_buffer_multikey_exit;
+ }
+ printf(".");
+ }
+ /* no errors detected */
+ ret = 0;
+
+snow3g_f8_n_buffer_multikey_exit:
+ for (i = 0; i < numPackets; i++) {
+ if (srcBuff[i] != NULL)
+ free(srcBuff[i]);
+ if (dstBuff[i] != NULL)
+ free(dstBuff[i]);
+ if (IV[i] != NULL)
+ free(IV[i]);
+ if (pKey[i] != NULL)
+ free(pKey[i]);
+ if (pKeySched[i] != NULL)
+ free(pKeySched[i]);
+
+ }
+ printf("\n");
+
+ return ret;
+}
+
+int validate_snow3g_f9(struct MB_MGR *mb_mgr)
+{
+ int numVectors, i, inputLen;
+ size_t size = 0;
+ hash_test_vector_t *testVectors = snow3g_hash_test_vectors[2];
+ /* snow3g f9 test vectors are located at index 2 */
+ numVectors = numSnow3gHashTestVectors[2];
+
+ snow3g_key_schedule_t *pKeySched = NULL;
+ uint8_t *pKey = NULL;
+ int keyLen = MAX_KEY_LEN;
+ uint8_t srcBuff[MAX_DATA_LEN];
+ uint8_t digest[DIGEST_LEN];
+ uint8_t *pIV;
+ int ret = 1;
+
+ printf("Testing IMB_SNOW3G_F9_1_BUFFER:\n");
+
+ if (!numVectors) {
+ printf("No Snow3G test vectors found !\n");
+ return ret;
+ }
+
+ pIV = malloc(SNOW3G_IV_LEN_IN_BYTES);
+ if (!pIV) {
+ printf("malloc(pIV):failed !\n");
+ return ret;
+ }
+
+ pKey = malloc(keyLen);
+ if (!pKey) {
+ printf("malloc(pKey):failed !\n");
+ free(pIV);
+ return ret;
+ }
+ size = IMB_SNOW3G_KEY_SCHED_SIZE(mb_mgr);
+ if (!size) {
+ free(pIV);
+ free(pKey);
+ return ret;
+ }
+
+ pKeySched = malloc(size);
+ if (!pKeySched) {
+ printf("malloc(IMB_SNOW3G_KEY_SCHED_SIZE(mb_mgr)): "
+ "failed !\n");
+ free(pIV);
+ free(pKey);
+ return ret;
+ }
+
+ /* Get test data for the Snow3g 1-packet version */
+ for (i = 0; i < numVectors; i++) {
+ inputLen = (testVectors[i].lengthInBits + 7) / 8;
+
+ memcpy(pKey, testVectors[i].key, testVectors[i].keyLenInBytes);
+ memcpy(srcBuff, testVectors[i].input, inputLen);
+ memcpy(pIV, testVectors[i].iv, testVectors[i].ivLenInBytes);
+
+ /*Only 1 key sched is used*/
+ if (IMB_SNOW3G_INIT_KEY_SCHED(mb_mgr, pKey, pKeySched)) {
+ printf("kasumi_init_f9_key_sched()error\n");
+ goto snow3g_f9_1_buffer_exit;
+ }
+
+ /* Test the F9 integrity operation with IV */
+ IMB_SNOW3G_F9_1_BUFFER(mb_mgr, pKeySched, pIV, srcBuff,
+ testVectors[i].lengthInBits, digest);
+
+ /*Compare the digest with the expected in the vectors*/
+ if (memcmp(digest, testVectors[i].exp_out, DIGEST_LEN) != 0) {
+ printf("IMB_SNOW3G_F9_1_BUFFER() vector num:%d\n", i);
+ snow3g_hexdump("Actual:", digest, DIGEST_LEN);
+ snow3g_hexdump("Expected:", testVectors[i].exp_out,
+ DIGEST_LEN);
+ goto snow3g_f9_1_buffer_exit;
+ }
+ printf(".");
+
+ } /* for numVectors */
+ /* no errors detected */
+ ret = 0;
+
+snow3g_f9_1_buffer_exit:
+ free(pIV);
+ free(pKey);
+ free(pKeySched);
+ printf("\n");
+
+ return ret;
+}
+
+static int validate_f8_iv_gen(void)
+{
+ uint32_t i;
+ uint8_t IV[16];
+ const uint32_t numVectors = MAX_BIT_BUFFERS;
+
+ printf("Testing snow3g_f8_iv_gen:\n");
+
+ /* skip first vector as it's not part of test data */
+ for (i = 1; i < numVectors; i++) {
+ cipher_iv_gen_params_t *iv_params =
+ &snow3g_f8_linear_bitvectors.iv_params[i];
+
+ memset(IV, 0, sizeof(IV));
+
+ /* generate IV */
+ if (snow3g_f8_iv_gen(iv_params->count, iv_params->bearer,
+ iv_params->dir, &IV) < 0)
+ return 1;
+
+ /* validate result */
+ if (memcmp(IV, snow3g_f8_linear_bitvectors.iv[i], 16) != 0) {
+ printf("snow3g_f8_iv_gen vector num: %d\n", i);
+ snow3g_hexdump("Actual", IV, 16);
+ snow3g_hexdump("Expected",
+ snow3g_f8_linear_bitvectors.iv[i], 16);
+ return 1;
+ } else
+ printf(".");
+ }
+
+ printf("\n");
+ return 0;
+}
+
+static int validate_f9_iv_gen(void)
+{
+ uint32_t i;
+ uint8_t IV[16];
+ /* snow3g f9 test vectors are located at index 2 */
+ const uint32_t numVectors = numSnow3gHashTestVectors[2];
+
+ printf("Testing snow3g_f9_iv_gen:\n");
+
+ /* 6 test sets */
+ for (i = 0; i < numVectors; i++) {
+ hash_iv_gen_params_t *iv_params =
+ &snow_f9_vectors[i].iv_params;
+
+ memset(IV, 0, sizeof(IV));
+
+ /* generate IV */
+ if (snow3g_f9_iv_gen(iv_params->count, iv_params->fresh,
+ iv_params->dir, &IV) < 0)
+ return 1;
+
+ /* validate result */
+ if (memcmp(IV, snow_f9_vectors[i].iv, 16) != 0) {
+ printf("snow3g_f9_iv_gen vector num: %d\n", i);
+ snow3g_hexdump("Actual", IV, 16);
+ snow3g_hexdump("Expected", snow_f9_vectors[i].iv, 16);
+ return 1;
+ } else
+ printf(".");
+ }
+
+ printf("\n");
+ return 0;
+}
+
+int snow3g_test(const enum arch_type arch, struct MB_MGR *mb_mgr)
+{
+ int status = 0;
+ (void)(arch);
+
+ if (validate_f8_iv_gen()) {
+ printf("validate_snow3g_f8_iv_gen:: FAIL\n");
+ status = 1;
+ }
+ if (validate_f9_iv_gen()) {
+ printf("validate_snow3g_f9_iv_gen:: FAIL\n");
+ status = 1;
+ }
+
+ if (validate_snow3g_f8_1_block(mb_mgr)) {
+ printf("validate_snow3g_f8_1_block: FAIL\n");
+ status = 1;
+ }
+
+ if (validate_snow3g_f8_1_bitblock(mb_mgr)) {
+ printf("validate_snow3g_f8_1_bitblock: FAIL\n");
+ status = 1;
+ }
+
+ if (validate_snow3g_f8_2_blocks(mb_mgr)) {
+ printf("validate_snow3g_f8_2_blocks: FAIL\n");
+ status = 1;
+ }
+
+ if (validate_snow3g_f8_4_blocks(mb_mgr)) {
+ printf("validate_snow3g_f8_4_blocks: FAIL\n");
+ status = 1;
+ }
+
+ if (validate_snow3g_f8_8_blocks(mb_mgr)) {
+ printf("validate_snow3g_f8_8_blocks: FAIL\n");
+ status = 1;
+ }
+
+ if (validate_snow3g_f8_8_blocks_multi_key(mb_mgr)) {
+ printf("validate_snow3g_f8_8_blocks_multi_key: FAIL\n");
+ status = 1;
+ }
+
+ if (validate_snow3g_f8_n_blocks(mb_mgr)) {
+ printf("validate_snow3g_f8_n_blocks: FAIL\n");
+ status = 1;
+ }
+ if (validate_snow3g_f8_n_blocks_multi(mb_mgr)) {
+ printf("validate_snow3g_f8_n_blocks: FAIL\n");
+ status = 1;
+ }
+
+ if (validate_snow3g_f9(mb_mgr)) {
+ printf("validate_snow3g_f9: FAIL\n");
+ status = 1;
+ }
+
+ if (!status)
+ printf("ALL TESTS PASSED.\n");
+ else
+ printf("WE HAVE TEST FAILURES !\n");
+
+ return status;
+}
+
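+/*
+ * Bit-level buffer compare: checks bitlength bits of input against output,
+ * both starting at bit offset bitoffset. Partial leading and trailing bytes
+ * are masked so that only the requested bits take part in the comparison.
+ * Returns 0 when no difference is found and a non-zero value otherwise.
+ */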
+int membitcmp(const uint8_t *input, const uint8_t *output,
+ const uint32_t bitlength, const uint32_t bitoffset)
+{
+ uint32_t bitresoffset;
+ uint8_t bitresMask = ~((uint8_t)-1 << (8 - (bitoffset % 8)));
+ uint32_t res = 0;
+ uint32_t bytelengthfl = bitlength / 8;
+ const uint8_t *srcfl = input + bitoffset / 8;
+ const uint8_t *dstfl = output + bitoffset / 8;
+ int index = 1;
+
+ if (bitoffset % 8) {
+ if ((*srcfl ^ *dstfl) & bitresMask) {
+ return 1;
+ } else {
+ srcfl++;
+ dstfl++;
+ }
+ }
+ bitresoffset = (bitlength + bitoffset) % 8;
+ while (bytelengthfl--) {
+ res = *srcfl++ ^ *dstfl++;
+ if (res)
+ break;
+ index++;
+ }
+ if ((bitresoffset) && (0 == bytelengthfl)) {
+ res &= (uint8_t)-1 << (8 - bitresoffset);
+ if (res)
+ return index;
+ }
+ return res;
+}
diff --git a/src/spdk/intel-ipsec-mb/LibTestApp/snow3g_test_vectors.h b/src/spdk/intel-ipsec-mb/LibTestApp/snow3g_test_vectors.h
new file mode 100644
index 000000000..d4f6fc6e0
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/LibTestApp/snow3g_test_vectors.h
@@ -0,0 +1,802 @@
+/*****************************************************************************
+ Copyright (c) 2009-2019, Intel Corporation
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*****************************************************************************/
+
+#include <stdint.h>
+#include <sys/types.h>
+
+#ifndef __SSO_TEST_VECTORS_H__
+#define __SSO_TEST_VECTORS_H__
+
+#define MAX_DATA_LEN (3048)
+#define MAX_KEY_LEN (32)
+#define MAX_IV_LEN (16)
+#define BLOCK_SIZE (8)
+#define DIGEST_LEN (4)
+#define NUM_SUPPORTED_BUFFERS (16)
+#define MAX_SIZE_IN_BYTES_1024 (1024)
+#define MAX_BIT_BUFFERS (6)
+#define SNOW3G_KEY_LEN_IN_BYTES (16) /* 128b */
+#define SNOW3G_IV_LEN_IN_BYTES (16) /* 128b */
+
+typedef struct cipher_iv_gen_params_s {
+ uint32_t count;
+ uint8_t bearer;
+ uint8_t dir;
+} cipher_iv_gen_params_t;
+typedef struct cipher_test_vector_s {
+ uint32_t dataLenInBytes;
+ uint32_t keyLenInBytes;
+ uint32_t ivLenInBytes;
+ uint8_t plaintext[MAX_DATA_LEN];
+ uint8_t ciphertext[MAX_DATA_LEN];
+ uint8_t key[MAX_KEY_LEN];
+ uint8_t iv[MAX_IV_LEN];
+} cipher_test_vector_t;
+typedef struct cipherbit_test_vector_s {
+ uint32_t dataLenInBits;
+ uint32_t keyLenInBytes;
+ uint32_t ivLenInBytes;
+ uint8_t plaintext[MAX_DATA_LEN];
+ uint8_t ciphertext[MAX_DATA_LEN];
+ uint8_t key[MAX_KEY_LEN];
+ uint8_t iv[MAX_IV_LEN];
+} cipherbit_test_vector_t;
+typedef struct cipherbit_test_linear_vector_s {
+ uint32_t dataLenInBits[MAX_BIT_BUFFERS];
+ uint32_t keyLenInBytes;
+ uint32_t ivLenInBytes;
+ uint8_t plaintext[MAX_BIT_BUFFERS][MAX_DATA_LEN];
+ uint8_t ciphertext[MAX_BIT_BUFFERS][MAX_DATA_LEN];
+ uint8_t key[MAX_BIT_BUFFERS][MAX_KEY_LEN];
+ uint8_t iv[MAX_BIT_BUFFERS][MAX_IV_LEN];
+ cipher_iv_gen_params_t iv_params[MAX_BIT_BUFFERS];
+} cipherbit_test_linear_vector_t;
+
+
+typedef struct hash_iv_gen_params_s {
+ uint32_t count;
+ uint32_t fresh;
+ uint8_t dir;
+} hash_iv_gen_params_t;
+typedef struct hash_test_vector_s {
+ uint8_t input[MAX_DATA_LEN];
+ uint32_t lengthInBits;
+ uint8_t key[MAX_DATA_LEN];
+ uint32_t keyLenInBytes;
+ uint8_t exp_out[BLOCK_SIZE];
+ uint8_t iv[MAX_DATA_LEN];
+ uint32_t ivLenInBytes;
+ uint32_t direction;
+ hash_iv_gen_params_t iv_params;
+} hash_test_vector_t;
+
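+/*
+ * SNOW3G F8 bit-length test vectors: MAX_BIT_BUFFERS plaintext/ciphertext
+ * pairs with their keys, IVs and IV generation parameters. The reference
+ * data comes from the 3GPP implementors tests; the first entry's IV
+ * generation parameters are placeholders and are skipped by the IV tests.
+ */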
+static cipherbit_test_linear_vector_t snow3g_f8_linear_bitvectors = {
+ {256, 798, 120, 510, 253, 837},
+ 16,
+ 16,
+ {/*plaintext linear bit buffer*/
+ /* reference data from 3GPP tests 1-5 */
+ {
+ 0x98, 0x1b, 0xa6, 0x82, 0x4c, 0x1b, 0xfb, 0x1a,
+ 0xb4, 0x85, 0x47, 0x20, 0x29, 0xb7, 0x1d, 0x80,
+ 0x8c, 0xe3, 0x3e, 0x2c, 0xc3, 0xc0, 0xb5, 0xfc,
+ 0x1f, 0x3d, 0xe8, 0xa6, 0xdc, 0x66, 0xb1, 0xf0
+ },
+ {
+ 0x7e, 0xc6, 0x12, 0x72, 0x74, 0x3b, 0xf1, 0x61,
+ 0x47, 0x26, 0x44, 0x6a, 0x6c, 0x38, 0xce, 0xd1,
+ 0x66, 0xf6, 0xca, 0x76, 0xeb, 0x54, 0x30, 0x04,
+ 0x42, 0x86, 0x34, 0x6c, 0xef, 0x13, 0xf, 0x92,
+ 0x92, 0x2b, 0x3, 0x45, 0xd, 0x3a, 0x99, 0x75,
+ 0xe5, 0xbd, 0x2e, 0xa0, 0xeb, 0x55, 0xad, 0x8e,
+ 0x1b, 0x19, 0x9e, 0x3e, 0xc4, 0x31, 0x60, 0x20,
+ 0xe9, 0xa1, 0xb2, 0x85, 0xe7, 0x62, 0x79, 0x53,
+ 0x59, 0xb7, 0xbd, 0xfd, 0x39, 0xbe, 0xf4, 0xb2,
+ 0x48, 0x45, 0x83, 0xd5, 0xaf, 0xe0, 0x82, 0xae,
+ 0xe6, 0x38, 0xbf, 0x5f, 0xd5, 0xa6, 0x6, 0x19,
+ 0x39, 0x1, 0xa0, 0x8f, 0x4a, 0xb4, 0x1a, 0xab,
+ 0x9b, 0x13, 0x48, 0x80
+ },
+ {
+ 0xAD, 0x9C, 0x44, 0x1F, 0x89, 0x0B, 0x38, 0xC4,
+ 0x57, 0xA4, 0x9D, 0x42, 0x14, 0x07, 0xE8
+ },
+ {
+ 0x10, 0x11, 0x12, 0x31, 0xE0, 0x60, 0x25, 0x3A,
+ 0x43, 0xFD, 0x3F, 0x57, 0xE3, 0x76, 0x07, 0xAB,
+ 0x28, 0x27, 0xB5, 0x99, 0xB6, 0xB1, 0xBB, 0xDA,
+ 0x37, 0xA8, 0xAB, 0xCC, 0x5A, 0x8C, 0x55, 0x0D,
+ 0x1B, 0xFB, 0x2F, 0x49, 0x46, 0x24, 0xFB, 0x50,
+ 0x36, 0x7F, 0xA3, 0x6C, 0xE3, 0xBC, 0x68, 0xF1,
+ 0x1C, 0xF9, 0x3B, 0x15, 0x10, 0x37, 0x6B, 0x02,
+ 0x13, 0x0F, 0x81, 0x2A, 0x9F, 0xA1, 0x69, 0xD8
+ },
+ {
+ 0x98, 0x1B, 0xA6, 0x82, 0x4C, 0x1B, 0xFB, 0x1A,
+ 0xB4, 0x85, 0x47, 0x20, 0x29, 0xB7, 0x1D, 0x80,
+ 0x8C, 0xE3, 0x3E, 0x2C, 0xC3, 0xC0, 0xB5, 0xFC,
+ 0x1F, 0x3D, 0xE8, 0xA6, 0xDC, 0x66, 0xB1, 0xF0
+ },
+ {
+ 0x40, 0x98, 0x1B, 0xA6, 0x82, 0x4C, 0x1B, 0xFB,
+ 0x42, 0x86, 0xB2, 0x99, 0x78, 0x3D, 0xAF, 0x44,
+ 0x2C, 0x09, 0x9F, 0x7A, 0xB0, 0xF5, 0x8D, 0x5C,
+ 0x8E, 0x46, 0xB1, 0x04, 0xF0, 0x8F, 0x01, 0xB4,
+ 0x1A, 0xB4, 0x85, 0x47, 0x20, 0x29, 0xB7, 0x1D,
+ 0x36, 0xBD, 0x1A, 0x3D, 0x90, 0xDC, 0x3A, 0x41,
+ 0xB4, 0x6D, 0x51, 0x67, 0x2A, 0xC4, 0xC9, 0x66,
+ 0x3A, 0x2B, 0xE0, 0x63, 0xDA, 0x4B, 0xC8, 0xD2,
+ 0x80, 0x8C, 0xE3, 0x3E, 0x2C, 0xCC, 0xBF, 0xC6,
+ 0x34, 0xE1, 0xB2, 0x59, 0x06, 0x08, 0x76, 0xA0,
+ 0xFB, 0xB5, 0xA4, 0x37, 0xEB, 0xCC, 0x8D, 0x31,
+ 0xC1, 0x9E, 0x44, 0x54, 0x31, 0x87, 0x45, 0xE3,
+ 0x98, 0x76, 0x45, 0x98, 0x7A, 0x98, 0x6F, 0x2C,
+ 0xB0
+ },
+ },
+ {/*ciphertext linear bit buffer*/
+ /* reference data from 3GPP tests 1-5 */
+ {
+ 0x5d, 0x5b, 0xfe, 0x75, 0xeb, 0x04, 0xf6, 0x8c,
+ 0xe0, 0xa1, 0x23, 0x77, 0xea, 0x00, 0xb3, 0x7d,
+ 0x47, 0xc6, 0xa0, 0xba, 0x06, 0x30, 0x91, 0x55,
+ 0x08, 0x6a, 0x85, 0x9c, 0x43, 0x41, 0xb3, 0x7C
+ },
+ {
+ 0x8C, 0xEB, 0xA6, 0x29, 0x43, 0xDC, 0xED, 0x3A,
+ 0x09, 0x90, 0xB0, 0x6E, 0xA1, 0xB0, 0xA2, 0xC4,
+ 0xFB, 0x3C, 0xED, 0xC7, 0x1B, 0x36, 0x9F, 0x42,
+ 0xBA, 0x64, 0xC1, 0xEB, 0x66, 0x65, 0xE7, 0x2A,
+ 0xA1, 0xC9, 0xBB, 0x0D, 0xEA, 0xA2, 0x0F, 0xE8,
+ 0x60, 0x58, 0xB8, 0xBA, 0xEE, 0x2C, 0x2E, 0x7F,
+ 0x0B, 0xEC, 0xCE, 0x48, 0xB5, 0x29, 0x32, 0xA5,
+ 0x3C, 0x9D, 0x5F, 0x93, 0x1A, 0x3A, 0x7C, 0x53,
+ 0x22, 0x59, 0xAF, 0x43, 0x25, 0xE2, 0xA6, 0x5E,
+ 0x30, 0x84, 0xAD, 0x5F, 0x6A, 0x51, 0x3B, 0x7B,
+ 0xDD, 0xC1, 0xB6, 0x5F, 0x0A, 0xA0, 0xD9, 0x7A,
+ 0x05, 0x3D, 0xB5, 0x5A, 0x88, 0xC4, 0xC4, 0xF9,
+ 0x60, 0x5E, 0x41, 0x40
+ },
+ {
+ 0xBA, 0x0F, 0x31, 0x30, 0x03, 0x34, 0xC5, 0x6B,
+ 0x52, 0xA7, 0x49, 0x7C, 0xBA, 0xC0, 0x46
+ },
+ {
+ 0xE0, 0xDA, 0x15, 0xCA, 0x8E, 0x25, 0x54, 0xF5,
+ 0xE5, 0x6C, 0x94, 0x68, 0xDC, 0x6C, 0x7C, 0x12,
+ 0x9C, 0x56, 0x8A, 0xA5, 0x03, 0x23, 0x17, 0xE0,
+ 0x4E, 0x07, 0x29, 0x64, 0x6C, 0xAB, 0xEF, 0xA6,
+ 0x89, 0x86, 0x4C, 0x41, 0x0F, 0x24, 0xF9, 0x19,
+ 0xE6, 0x1E, 0x3D, 0xFD, 0xFA, 0xD7, 0x7E, 0x56,
+ 0x0D, 0xB0, 0xA9, 0xCD, 0x36, 0xC3, 0x4A, 0xE4,
+ 0x18, 0x14, 0x90, 0xB2, 0x9F, 0x5F, 0xA2, 0xFC
+ },
+ {
+ 0x98, 0x9B, 0x71, 0x9C, 0xDC, 0x33, 0xCE, 0xB7,
+ 0xCF, 0x27, 0x6A, 0x52, 0x82, 0x7C, 0xEF, 0x94,
+ 0xA5, 0x6C, 0x40, 0xC0, 0xAB, 0x9D, 0x81, 0xF7,
+ 0xA2, 0xA9, 0xBA, 0xC6, 0x0E, 0x11, 0xC4, 0xB0
+ },
+ {
+ 0x58, 0x92, 0xBB, 0xA8, 0x8B, 0xBB, 0xCA, 0xAE,
+ 0xAE, 0x76, 0x9A, 0xA0, 0x6B, 0x68, 0x3D, 0x3A,
+ 0x17, 0xCC, 0x04, 0xA3, 0x69, 0x88, 0x16, 0x97,
+ 0x43, 0x5E, 0x44, 0xFE, 0xD5, 0xFF, 0x9A, 0xF5,
+ 0x7B, 0x9E, 0x89, 0x0D, 0x4D, 0x5C, 0x64, 0x70,
+ 0x98, 0x85, 0xD4, 0x8A, 0xE4, 0x06, 0x90, 0xEC,
+ 0x04, 0x3B, 0xAA, 0xE9, 0x70, 0x57, 0x96, 0xE4,
+ 0xA9, 0xFF, 0x5A, 0x4B, 0x8D, 0x8B, 0x36, 0xD7,
+ 0xF3, 0xFE, 0x57, 0xCC, 0x6C, 0xFD, 0x6C, 0xD0,
+ 0x05, 0xCD, 0x38, 0x52, 0xA8, 0x5E, 0x94, 0xCE,
+ 0x6B, 0xCD, 0x90, 0xD0, 0xD0, 0x78, 0x39, 0xCE,
+ 0x09, 0x73, 0x35, 0x44, 0xCA, 0x8E, 0x35, 0x08,
+ 0x43, 0x24, 0x85, 0x50, 0x92, 0x2A, 0xC1, 0x28,
+ 0x18
+ },
+ },
+ {/*key buffers*/
+ {0xd3, 0xc5, 0xd5, 0x92, 0x32, 0x7f, 0xb1, 0x1c, 0x40, 0x35, 0xc6,
+ 0x68, 0x0a, 0xf8, 0xc6, 0xd1},
+ {0x2B, 0xD6, 0x45, 0x9F, 0x82, 0xC5, 0xB3, 0x00, 0x95, 0x2C, 0x49,
+ 0x10, 0x48, 0x81, 0xFF, 0x48},
+ {0x5A, 0xCB, 0x1D, 0x64, 0x4C, 0x0D, 0x51, 0x20, 0x4E, 0xA5, 0xF1,
+ 0x45, 0x10, 0x10, 0xD8, 0x52},
+ {0xEF, 0xA8, 0xB2, 0x22, 0x9E, 0x72, 0x0C, 0x2A, 0x7C, 0x36, 0xEA,
+ 0x55, 0xE9, 0x60, 0x56, 0x95},
+ {0xD3, 0xC5, 0xD5, 0x92, 0x32, 0x7F, 0xB1, 0x1C, 0x40, 0x35, 0xC6,
+ 0x68, 0x0A, 0xF8, 0xC6, 0xD1},
+ {0x60, 0x90, 0xEA, 0xE0, 0x4C, 0x83, 0x70, 0x6E, 0xEC, 0xBF, 0x65,
+ 0x2B, 0xE8, 0xE3, 0x65, 0x66}},
+ {/* IV buffers*/
+ {0x39, 0x8a, 0x59, 0xb4, 0xac, 0x00, 0x00, 0x00, 0x39, 0x8a, 0x59,
+ 0xb4, 0xac, 0x00, 0x00, 0x00},
+ {0x72, 0xA4, 0xF2, 0x0F, 0x64, 0x00, 0x00, 0x00, 0x72, 0xA4, 0xF2,
+ 0x0F, 0x64, 0x00, 0x00, 0x00}, /* test 1 */
+ {0xFA, 0x55, 0x6B, 0x26, 0x1C, 0x00, 0x00, 0x00, 0xFA, 0x55, 0x6B,
+ 0x26, 0x1C, 0x00, 0x00, 0x00}, /* test 3 */
+ {0xE2, 0x8B, 0xCF, 0x7B, 0xC0, 0x00, 0x00, 0x00, 0xE2, 0x8B, 0xCF,
+ 0x7B, 0xC0, 0x00, 0x00, 0x00}, /* test 2 */
+ {0x39, 0x8A, 0x59, 0xB4, 0x2C, 0x00, 0x00, 0x00, 0x39, 0x8A, 0x59,
+ 0xB4, 0x2C, 0x00, 0x00, 0x00}, /* test 4 */
+ {0x72, 0xA4, 0xF2, 0x0F, 0x48, 0x00, 0x00, 0x00, 0x72, 0xA4, 0xF2,
+ 0x0F, 0x48, 0x00, 0x00, 0x00}}, /* test 5 */
+ { /* IV gen params*/
+ {0x0, 0x0, 0x0}, /* N/A - not part of test data */
+ {0x72A4F20F, 0x0C, 0x1},
+ {0xFA556B26, 0x03, 0x1},
+ {0xE28BCF7B, 0x18, 0x0},
+ {0x398A59B4, 0x05, 0x1},
+ {0x72A4F20F, 0x09, 0x0},
+ }
+};
+
+static cipher_test_vector_t snow3g_f8_vectors[] = {
+ {// SCPM test vector
+ /*dataLenInBytes*/
+ 32,
+ /*keyLenInBytes*/
+ 16,
+ /*ivLenInBytes*/
+ 16,
+ /*plaintext*/
+ {0x98, 0x1b, 0xa6, 0x82, 0x4c, 0x1b, 0xfb, 0x1a, 0xb4, 0x85, 0x47,
+ 0x20, 0x29, 0xb7, 0x1d, 0x80, 0x8c, 0xe3, 0x3e, 0x2c, 0xc3, 0xc0,
+ 0xb5, 0xfc, 0x1f, 0x3d, 0xe8, 0xa6, 0xdc, 0x66, 0xb1, 0xf0},
+ /*ciphertext*/
+ /* taken from 3GPP implementors test data, the last 4 bits of ciphertext
+ * modified to get around input buffer size in bytes instead of bits */
+ {
+ 0x5d, 0x5b, 0xfe, 0x75, 0xeb, 0x04, 0xf6, 0x8c,
+ 0xe0, 0xa1, 0x23, 0x77, 0xea, 0x00, 0xb3, 0x7d,
+ 0x47, 0xc6, 0xa0, 0xba, 0x06, 0x30, 0x91, 0x55,
+ 0x08, 0x6a, 0x85, 0x9c, 0x43, 0x41, 0xb3, 0x7C,
+ },
+ /*key*/
+ {0xd3, 0xc5, 0xd5, 0x92, 0x32, 0x7f, 0xb1, 0x1c, 0x40, 0x35, 0xc6,
+ 0x68, 0x0a, 0xf8, 0xc6, 0xd1},
+ /*iv*/
+ {0x39, 0x8a, 0x59, 0xb4, 0xac, 0x00, 0x00, 0x00, 0x39, 0x8a, 0x59,
+ 0xb4, 0xac, 0x00, 0x00, 0x00},
+ },
+ {// 3GPP specs Test Set 1
+ /*dataLenInBytes*/
+ 99,
+ /*keyLenInBytes*/
+ 16,
+ /*ivLenInBytes*/
+ 16,
+ /*plaintext*/
+ {0x7E, 0xC6, 0x12, 0x72, 0x74, 0x3B, 0xF1, 0x61, 0x47, 0x26, 0x44,
+ 0x6A, 0x6C, 0x38, 0xCE, 0xD1, 0x66, 0xF6, 0xCA, 0x76, 0xEB, 0x54,
+ 0x30, 0x04, 0x42, 0x86, 0x34, 0x6C, 0xEF, 0x13, 0x0F, 0x92, 0x92,
+ 0x2B, 0x03, 0x45, 0x0D, 0x3A, 0x99, 0x75, 0xE5, 0xBD, 0x2E, 0xA0,
+ 0xEB, 0x55, 0xAD, 0x8E, 0x1B, 0x19, 0x9E, 0x3E, 0xC4, 0x31, 0x60,
+ 0x20, 0xE9, 0xA1, 0xB2, 0x85, 0xE7, 0x62, 0x79, 0x53, 0x59, 0xB7,
+ 0xBD, 0xFD, 0x39, 0xBE, 0xF4, 0xB2, 0x48, 0x45, 0x83, 0xD5, 0xAF,
+ 0xE0, 0x82, 0xAE, 0xE6, 0x38, 0xBF, 0x5F, 0xD5, 0xA6, 0x06, 0x19,
+ 0x39, 0x01, 0xA0, 0x8F, 0x4A, 0xB4, 0x1A, 0xAB, 0x9B, 0x13, 0x48},
+ /*ciphertext*/
+ /* taken from 3GPP implementors test data, the last 4 bits of ciphertext
+ * modified to get around input buffer size in bytes instead of bits */
+ {0x8C, 0xEB, 0xA6, 0x29, 0x43, 0xDC, 0xED, 0x3A, 0x09, 0x90, 0xB0,
+ 0x6E, 0xA1, 0xB0, 0xA2, 0xC4, 0xFB, 0x3C, 0xED, 0xC7, 0x1B, 0x36,
+ 0x9F, 0x42, 0xBA, 0x64, 0xC1, 0xEB, 0x66, 0x65, 0xE7, 0x2A, 0xA1,
+ 0xC9, 0xBB, 0x0D, 0xEA, 0xA2, 0x0F, 0xE8, 0x60, 0x58, 0xB8, 0xBA,
+ 0xEE, 0x2C, 0x2E, 0x7F, 0x0B, 0xEC, 0xCE, 0x48, 0xB5, 0x29, 0x32,
+ 0xA5, 0x3C, 0x9D, 0x5F, 0x93, 0x1A, 0x3A, 0x7C, 0x53, 0x22, 0x59,
+ 0xAF, 0x43, 0x25, 0xE2, 0xA6, 0x5E, 0x30, 0x84, 0xAD, 0x5F, 0x6A,
+ 0x51, 0x3B, 0x7B, 0xDD, 0xC1, 0xB6, 0x5F, 0x0A, 0xA0, 0xD9, 0x7A,
+ 0x05, 0x3D, 0xB5, 0x5A, 0x88, 0xC4, 0xC4, 0xF9, 0x60, 0x5E, 0x41},
+ /*key*/
+ {0x2B, 0xD6, 0x45, 0x9F, 0x82, 0xC5, 0xB3, 0x00, 0x95, 0x2C, 0x49,
+ 0x10, 0x48, 0x81, 0xFF, 0x48},
+ /*iv*/
+ {0x72, 0xA4, 0xF2, 0x0F, 0x64, 0x00, 0x00, 0x00, 0x72, 0xA4, 0xF2,
+ 0x0F, 0x64, 0x00, 0x00, 0x00}},
+ {// 3GPP specs Test Set 3
+ /*dataLenInBytes*/
+ 15,
+ /*keyLenInBytes*/
+ 16,
+ /*ivLenInBytes*/
+ 16,
+ /*plaintext*/
+ {0xAD, 0x9C, 0x44, 0x1F, 0x89, 0x0B, 0x38, 0xC4, 0x57, 0xA4, 0x9D,
+ 0x42, 0x14, 0x07, 0xE8},
+ /*ciphertext*/
+ /* taken from 3GPP implementors test data, the last 4 bits of ciphertext
+ * modified to get around input buffer size in bytes instead of bits */
+ {0xBA, 0x0F, 0x31, 0x30, 0x03, 0x34, 0xC5, 0x6B, 0x52, 0xA7, 0x49,
+ 0x7C, 0xBA, 0xC0, 0x46},
+ /*key*/
+ {0x5A, 0xCB, 0x1D, 0x64, 0x4C, 0x0D, 0x51, 0x20, 0x4E, 0xA5, 0xF1,
+ 0x45, 0x10, 0x10, 0xD8, 0x52},
+ /*iv*/
+ {0xFA, 0x55, 0x6B, 0x26, 0x1C, 0x00, 0x00, 0x00, 0xFA, 0x55, 0x6B,
+ 0x26, 0x1C, 0x00, 0x00, 0x00}},
+ {// 3GPP specs Test Set 2
+ /*dataLenInBytes*/
+ 63,
+ /*keyLenInBytes*/
+ 16,
+ /*ivLenInBytes*/
+ 16,
+ /*plaintext*/
+ {0x10, 0x11, 0x12, 0x31, 0xE0, 0x60, 0x25, 0x3A, 0x43, 0xFD, 0x3F,
+ 0x57, 0xE3, 0x76, 0x07, 0xAB, 0x28, 0x27, 0xB5, 0x99, 0xB6, 0xB1,
+ 0xBB, 0xDA, 0x37, 0xA8, 0xAB, 0xCC, 0x5A, 0x8C, 0x55, 0x0D, 0x1B,
+ 0xFB, 0x2F, 0x49, 0x46, 0x24, 0xFB, 0x50, 0x36, 0x7F, 0xA3, 0x6C,
+ 0xE3, 0xBC, 0x68, 0xF1, 0x1C, 0xF9, 0x3B, 0x15, 0x10, 0x37, 0x6B,
+ 0x02, 0x13, 0x0F, 0x81, 0x2A, 0x9F, 0xA1, 0x69},
+ /*ciphertext*/
+ /* taken from 3GPP implementors test data, the last 4 bits of ciphertext
+ * modified to get around input buffer size in bytes instead of bits */
+ {0xE0, 0xDA, 0x15, 0xCA, 0x8E, 0x25, 0x54, 0xF5, 0xE5, 0x6C, 0x94,
+ 0x68, 0xDC, 0x6C, 0x7C, 0x12, 0x9C, 0x56, 0x8A, 0xA5, 0x03, 0x23,
+ 0x17, 0xE0, 0x4E, 0x07, 0x29, 0x64, 0x6C, 0xAB, 0xEF, 0xA6, 0x89,
+ 0x86, 0x4C, 0x41, 0x0F, 0x24, 0xF9, 0x19, 0xE6, 0x1E, 0x3D, 0xFD,
+ 0xFA, 0xD7, 0x7E, 0x56, 0x0D, 0xB0, 0xA9, 0xCD, 0x36, 0xC3, 0x4A,
+ 0xE4, 0x18, 0x14, 0x90, 0xB2, 0x9F, 0x5F, 0xA2},
+ /*key*/
+ {0xEF, 0xA8, 0xB2, 0x22, 0x9E, 0x72, 0x0C, 0x2A, 0x7C, 0x36, 0xEA,
+ 0x55, 0xE9, 0x60, 0x56, 0x95},
+ /*iv*/
+ {0xE2, 0x8B, 0xCF, 0x7B, 0xC0, 0x00, 0x00, 0x00, 0xE2, 0x8B, 0xCF,
+ 0x7B, 0xC0, 0x00, 0x00, 0x00}},
+ {// 3GPP specs Test Set 4
+ /*dataLenInBytes*/
+ 31,
+ /*keyLenInBytes*/
+ 16,
+ /*ivLenInBytes*/
+ 16,
+ /*plaintext*/
+ {0x98, 0x1B, 0xA6, 0x82, 0x4C, 0x1B, 0xFB, 0x1A, 0xB4, 0x85, 0x47,
+ 0x20, 0x29, 0xB7, 0x1D, 0x80, 0x8C, 0xE3, 0x3E, 0x2C, 0xC3, 0xC0,
+ 0xB5, 0xFC, 0x1F, 0x3D, 0xE8, 0xA6, 0xDC, 0x66, 0xB1},
+ /*ciphertext*/
+ /* taken from 3GPP implementors test data, the last 4 bits of ciphertext
+ * modified to get around input buffer size in bytes instead of bits */
+ {0x98, 0x9B, 0x71, 0x9C, 0xDC, 0x33, 0xCE, 0xB7, 0xCF, 0x27, 0x6A,
+ 0x52, 0x82, 0x7C, 0xEF, 0x94, 0xA5, 0x6C, 0x40, 0xC0, 0xAB, 0x9D,
+ 0x81, 0xF7, 0xA2, 0xA9, 0xBA, 0xC6, 0x0E, 0x11, 0xC4, 0xB6},
+ /*key*/
+ {0xD3, 0xC5, 0xD5, 0x92, 0x32, 0x7F, 0xB1, 0x1C, 0x40, 0x35, 0xC6,
+ 0x68, 0x0A, 0xF8, 0xC6, 0xD1},
+ /*iv*/
+ {0x39, 0x8A, 0x59, 0xB4, 0x2C, 0x00, 0x00, 0x00, 0x39, 0x8A, 0x59,
+ 0xB4, 0x2C, 0x00, 0x00, 0x0}},
+ {// 3GPP specs Test Set 5
+ /*dataLenInBytes*/
+ 104,
+ /*keyLenInBytes*/
+ 16,
+ /*ivLenInBytes*/
+ 16,
+ /*plaintext*/
+ {0x40, 0x98, 0x1B, 0xA6, 0x82, 0x4C, 0x1B, 0xFB, 0x42, 0x86, 0xB2,
+ 0x99, 0x78, 0x3D, 0xAF, 0x44, 0x2C, 0x09, 0x9F, 0x7A, 0xB0, 0xF5,
+ 0x8D, 0x5C, 0x8E, 0x46, 0xB1, 0x04, 0xF0, 0x8F, 0x01, 0xB4, 0x1A,
+ 0xB4, 0x85, 0x47, 0x20, 0x29, 0xB7, 0x1D, 0x36, 0xBD, 0x1A, 0x3D,
+ 0x90, 0xDC, 0x3A, 0x41, 0xB4, 0x6D, 0x51, 0x67, 0x2A, 0xC4, 0xC9,
+ 0x66, 0x3A, 0x2B, 0xE0, 0x63, 0xDA, 0x4B, 0xC8, 0xD2, 0x80, 0x8C,
+ 0xE3, 0x3E, 0x2C, 0xCC, 0xBF, 0xC6, 0x34, 0xE1, 0xB2, 0x59, 0x06,
+ 0x08, 0x76, 0xA0, 0xFB, 0xB5, 0xA4, 0x37, 0xEB, 0xCC, 0x8D, 0x31,
+ 0xC1, 0x9E, 0x44, 0x54, 0x31, 0x87, 0x45, 0xE3, 0x98, 0x76, 0x45,
+ 0x98, 0x7A, 0x98, 0x6F, 0x2C},
+ /*ciphertext*/
+ /* taken from 3GPP implementors test data, the last 4 bits of ciphertext
+ * modified to get around input buffer size in bytes instead of bits */
+ {0x58, 0x92, 0xBB, 0xA8, 0x8B, 0xBB, 0xCA, 0xAE, 0xAE, 0x76, 0x9A,
+ 0xA0, 0x6B, 0x68, 0x3D, 0x3A, 0x17, 0xCC, 0x04, 0xA3, 0x69, 0x88,
+ 0x16, 0x97, 0x43, 0x5E, 0x44, 0xFE, 0xD5, 0xFF, 0x9A, 0xF5, 0x7B,
+ 0x9E, 0x89, 0x0D, 0x4D, 0x5C, 0x64, 0x70, 0x98, 0x85, 0xD4, 0x8A,
+ 0xE4, 0x06, 0x90, 0xEC, 0x04, 0x3B, 0xAA, 0xE9, 0x70, 0x57, 0x96,
+ 0xE4, 0xA9, 0xFF, 0x5A, 0x4B, 0x8D, 0x8B, 0x36, 0xD7, 0xF3, 0xFE,
+ 0x57, 0xCC, 0x6C, 0xFD, 0x6C, 0xD0, 0x05, 0xCD, 0x38, 0x52, 0xA8,
+ 0x5E, 0x94, 0xCE, 0x6B, 0xCD, 0x90, 0xD0, 0xD0, 0x78, 0x39, 0xCE,
+ 0x09, 0x73, 0x35, 0x44, 0xCA, 0x8E, 0x35, 0x08, 0x43, 0x24, 0x85,
+ 0x50, 0x92, 0x2A, 0xC1, 0x28},
+ /*key*/
+ {0x60, 0x90, 0xEA, 0xE0, 0x4C, 0x83, 0x70, 0x6E, 0xEC, 0xBF, 0x65,
+ 0x2B, 0xE8, 0xE3, 0x65, 0x66},
+ /*iv*/
+ {0x72, 0xA4, 0xF2, 0x0F, 0x48, 0x00, 0x00, 0x00, 0x72, 0xA4, 0xF2,
+ 0x0F, 0x48, 0x00, 0x00, 0x00}}};
+
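+/* SNOW3G F9 integrity test vectors from the 3GPP test sets 1-6, including
+ * the IV generation parameters used by the F9 IV generation test. */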
+static hash_test_vector_t snow_f9_vectors[] = {
+ {
+ // 3GPP specs Test Set 1
+ /*input*/
+ {0x6B, 0x22, 0x77, 0x37, 0x29, 0x6F, 0x39, 0x3C,
+ 0x80, 0x79, 0x35, 0x3E, 0xDC, 0x87, 0xE2, 0xE8,
+ 0x05, 0xD2, 0xEC, 0x49, 0xA4, 0xF2, 0xD8, 0xE0},
+ /*lengthinbits*/
+ 189,
+ /*key*/
+ {0x2B, 0xD6, 0x45, 0x9F, 0x82, 0xC5, 0xB3, 0x00, 0x95, 0x2C,
+ 0x49, 0x10, 0x48, 0x81, 0xFF, 0x48},
+ /*KeyLeninBytes*/
+ 16,
+ /*exp out*/
+ {0x2B, 0xCE, 0x18, 0x20},
+ /*iv*/
+ {0x38, 0xA6, 0xF0, 0x56, 0x05, 0xD2, 0xEC, 0x49, 0x38, 0xA6,
+ 0xF0, 0x56, 0x05, 0xD2, 0xEC, 0x49},
+ /*ivLeninbytes*/
+ 16,
+ /*direction*/
+ 0,
+ /* IV params */
+ {0x38A6F056, 0x05D2EC49, 0x0},
+ },
+ {
+ // 3GPP specs Test Set 2
+ /*input*/
+ {0xB5, 0x92, 0x43, 0x84, 0x32, 0x8A, 0x4A, 0xE0,
+ 0x0B, 0x73, 0x71, 0x09, 0xF8, 0xB6, 0xC8, 0xDD,
+ 0x2B, 0x4D, 0xB6, 0x3D, 0xD5, 0x33, 0x98, 0x1C,
+ 0xEB, 0x19, 0xAA, 0xD5, 0x2A, 0x5B, 0x2B, 0xC0},
+ /*lengthinbits*/
+ 254,
+ /*key*/
+ {0xD4, 0x2F, 0x68, 0x24, 0x28, 0x20, 0x1C, 0xAF, 0xCD, 0x9F,
+ 0x97, 0x94, 0x5E, 0x6D, 0xE7, 0xB7},
+ /*KeyLeninBytes*/
+ 16,
+ /*exp out*/
+ {0xFC, 0x7B, 0x18, 0xBD},
+ /*iv*/
+ {0x3E, 0xDC, 0x87, 0xE2, 0xA4, 0xF2, 0xD8, 0xE2, 0xBE, 0xDC,
+ 0x87, 0xE2, 0xA4, 0xF2, 0x58, 0xE2},
+ /*ivLeninbytes*/
+ 16,
+ /*direction*/
+ 1,
+ /* IV params */
+ {0x3EDC87E2, 0xA4F2D8E2, 0x1},
+ },
+ {
+ // 3GPP specs Test Set 3
+ /*input*/
+ {0x59, 0x32, 0xBC, 0x0A, 0xCE, 0x2B, 0x0A, 0xBA, 0x33, 0xD8,
+ 0xAC, 0x18, 0x8A, 0xC5, 0x4F, 0x34, 0x6F, 0xAD, 0x10, 0xBF,
+ 0x9D, 0xEE, 0x29, 0x20, 0xB4, 0x3B, 0xD0, 0xC5, 0x3A, 0x91,
+ 0x5C, 0xB7, 0xDF, 0x6C, 0xAA, 0x72, 0x05, 0x3A, 0xBF, 0xF2},
+ /*lengthinbits*/
+ 319,
+ /*key*/
+ {0xFD, 0xB9, 0xCF, 0xDF, 0x28, 0x93, 0x6C, 0xC4, 0x83, 0xA3,
+ 0x18, 0x69, 0xD8, 0x1B, 0x8F, 0xAB},
+ /*KeyLeninBytes*/
+ 16,
+ /*exp out*/
+ {0x02, 0xF1, 0xFA, 0xAF},
+ /*iv*/
+ {0x36, 0xAF, 0x61, 0x44, 0x98, 0x38, 0xF0, 0x3A, 0xB6, 0xAF,
+ 0x61, 0x44, 0x98, 0x38, 0x70, 0x3A},
+ /*ivLeninbytes*/
+ 16,
+ /*direction*/
+ 1,
+ /* IV params */
+ {0x36AF6144, 0x9838F03A, 0x1},
+ },
+ {
+ // 3GPP specs Test Set 4
+ /*input*/
+ {0xD0, 0xA7, 0xD4, 0x63, 0xDF, 0x9F, 0xB2, 0xB2, 0x78, 0x83,
+ 0x3F, 0xA0, 0x2E, 0x23, 0x5A, 0xA1, 0x72, 0xBD, 0x97, 0x0C,
+ 0x14, 0x73, 0xE1, 0x29, 0x07, 0xFB, 0x64, 0x8B, 0x65, 0x99,
+ 0xAA, 0xA0, 0xB2, 0x4A, 0x03, 0x86, 0x65, 0x42, 0x2B, 0x20,
+ 0xA4, 0x99, 0x27, 0x6A, 0x50, 0x42, 0x70, 0x09},
+ /*lengthinbits*/
+ 384,
+ /*key*/
+ {0xC7, 0x36, 0xC6, 0xAA, 0xB2, 0x2B, 0xFF, 0xF9, 0x1E, 0x26,
+ 0x98, 0xD2, 0xE2, 0x2A, 0xD5, 0x7E},
+ /*KeyLeninBytes*/
+ 16,
+ /*exp out*/
+ {0x38, 0xB5, 0x54, 0xC0},
+ /*iv*/
+ {0x14, 0x79, 0x3E, 0x41, 0x03, 0x97, 0xE8, 0xFD, 0x94, 0x79,
+ 0x3E, 0x41, 0x03, 0x97, 0x68, 0xFD},
+ /*ivLeninbytes*/
+ 16,
+ /*direction*/
+ 1,
+ /* IV params */
+ {0x14793E41, 0x0397E8FD, 0x1},
+ },
+ {
+ // 3GPP specs Test Set 5
+ /*input*/
+ {0x10, 0xBF, 0xFF, 0x83, 0x9E, 0x0C, 0x71, 0x65, 0x8D, 0xBB,
+ 0x2D, 0x17, 0x07, 0xE1, 0x45, 0x72, 0x4F, 0x41, 0xC1, 0x6F,
+ 0x48, 0xBF, 0x40, 0x3C, 0x3B, 0x18, 0xE3, 0x8F, 0xD5, 0xD1,
+ 0x66, 0x3B, 0x6F, 0x6D, 0x90, 0x01, 0x93, 0xE3, 0xCE, 0xA8,
+ 0xBB, 0x4F, 0x1B, 0x4F, 0x5B, 0xE8, 0x22, 0x03, 0x22, 0x32,
+ 0xA7, 0x8D, 0x7D, 0x75, 0x23, 0x8D, 0x5E, 0x6D, 0xAE, 0xCD,
+ 0x3B, 0x43, 0x22, 0xCF, 0x59, 0xBC, 0x7E, 0xA8, 0x4A, 0xB1,
+ 0x88, 0x11, 0xB5, 0xBF, 0xB7, 0xBC, 0x55, 0x3F, 0x4F, 0xE4,
+ 0x44, 0x78, 0xCE, 0x28, 0x7A, 0x14, 0x87, 0x99, 0x90, 0xD1,
+ 0x8D, 0x12, 0xCA, 0x79, 0xD2, 0xC8, 0x55, 0x14, 0x90, 0x21,
+ 0xCD, 0x5C, 0xE8, 0xCA, 0x03, 0x71, 0xCA, 0x04, 0xFC, 0xCE,
+ 0x14, 0x3E, 0x3D, 0x7C, 0xFE, 0xE9, 0x45, 0x85, 0xB5, 0x88,
+ 0x5C, 0xAC, 0x46, 0x06, 0x8B},
+ /*lengthinbits*/
+ 1000,
+ /*key*/
+ {0xF4, 0xEB, 0xEC, 0x69, 0xE7, 0x3E, 0xAF, 0x2E, 0xB2, 0xCF,
+ 0x6A, 0xF4, 0xB3, 0x12, 0x0F, 0xFD},
+ /*KeyLeninBytes*/
+ 16,
+ /*exp out*/
+ {0x06, 0x17, 0x45, 0xAE},
+ /*iv*/
+ {0x29, 0x6F, 0x39, 0x3C, 0x6B, 0x22, 0x77, 0x37, 0xA9, 0x6F,
+ 0x39, 0x3C, 0x6B, 0x22, 0xF7, 0x37},
+ /*ivLeninbytes*/
+ 16,
+ /*direction*/
+ 1,
+ /* IV params */
+ {0x296F393C, 0x6B227737, 0x1},
+ },
+ {// 3GPP specs Test Set 6
+ /*input*/
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0xE0, 0x95, 0x80, 0x45, 0xF3, 0xA0,
+ 0xBB, 0xA4, 0xE3, 0x96, 0x83, 0x46, 0xF0, 0xA3, 0xB8, 0xA7, 0xC0,
+ 0x2A, 0x01, 0x8A, 0xE6, 0x40, 0x76, 0x52, 0x26, 0xB9, 0x87, 0xC9,
+ 0x13, 0xE6, 0xCB, 0xF0, 0x83, 0x57, 0x00, 0x16, 0xCF, 0x83, 0xEF,
+ 0xBC, 0x61, 0xC0, 0x82, 0x51, 0x3E, 0x21, 0x56, 0x1A, 0x42, 0x7C,
+ 0x00, 0x9D, 0x28, 0xC2, 0x98, 0xEF, 0xAC, 0xE7, 0x8E, 0xD6, 0xD5,
+ 0x6C, 0x2D, 0x45, 0x05, 0xAD, 0x03, 0x2E, 0x9C, 0x04, 0xDC, 0x60,
+ 0xE7, 0x3A, 0x81, 0x69, 0x6D, 0xA6, 0x65, 0xC6, 0xC4, 0x86, 0x03,
+ 0xA5, 0x7B, 0x45, 0xAB, 0x33, 0x22, 0x15, 0x85, 0xE6, 0x8E, 0xE3,
+ 0x16, 0x91, 0x87, 0xFB, 0x02, 0x39, 0x52, 0x86, 0x32, 0xDD, 0x65,
+ 0x6C, 0x80, 0x7E, 0xA3, 0x24, 0x8B, 0x7B, 0x46, 0xD0, 0x02, 0xB2,
+ 0xB5, 0xC7, 0x45, 0x8E, 0xB8, 0x5B, 0x9C, 0xE9, 0x58, 0x79, 0xE0,
+ 0x34, 0x08, 0x59, 0x05, 0x5E, 0x3B, 0x0A, 0xBB, 0xC3, 0xEA, 0xCE,
+ 0x87, 0x19, 0xCA, 0xA8, 0x02, 0x65, 0xC9, 0x72, 0x05, 0xD5, 0xDC,
+ 0x4B, 0xCC, 0x90, 0x2F, 0xE1, 0x83, 0x96, 0x29, 0xED, 0x71, 0x32,
+ 0x8A, 0x0F, 0x04, 0x49, 0xF5, 0x88, 0x55, 0x7E, 0x68, 0x98, 0x86,
+ 0x0E, 0x04, 0x2A, 0xEC, 0xD8, 0x4B, 0x24, 0x04, 0xC2, 0x12, 0xC9,
+ 0x22, 0x2D, 0xA5, 0xBF, 0x8A, 0x89, 0xEF, 0x67, 0x97, 0x87, 0x0C,
+ 0xF5, 0x07, 0x71, 0xA6, 0x0F, 0x66, 0xA2, 0xEE, 0x62, 0x85, 0x36,
+ 0x57, 0xAD, 0xDF, 0x04, 0xCD, 0xDE, 0x07, 0xFA, 0x41, 0x4E, 0x11,
+ 0xF1, 0x2B, 0x4D, 0x81, 0xB9, 0xB4, 0xE8, 0xAC, 0x53, 0x8E, 0xA3,
+ 0x06, 0x66, 0x68, 0x8D, 0x88, 0x1F, 0x6C, 0x34, 0x84, 0x21, 0x99,
+ 0x2F, 0x31, 0xB9, 0x4F, 0x88, 0x06, 0xED, 0x8F, 0xCC, 0xFF, 0x4C,
+ 0x91, 0x23, 0xB8, 0x96, 0x42, 0x52, 0x7A, 0xD6, 0x13, 0xB1, 0x09,
+ 0xBF, 0x75, 0x16, 0x74, 0x85, 0xF1, 0x26, 0x8B, 0xF8, 0x84, 0xB4,
+ 0xCD, 0x23, 0xD2, 0x9A, 0x09, 0x34, 0x92, 0x57, 0x03, 0xD6, 0x34,
+ 0x09, 0x8F, 0x77, 0x67, 0xF1, 0xBE, 0x74, 0x91, 0xE7, 0x08, 0xA8,
+ 0xBB, 0x94, 0x9A, 0x38, 0x73, 0x70, 0x8A, 0xEF, 0x4A, 0x36, 0x23,
+ 0x9E, 0x50, 0xCC, 0x08, 0x23, 0x5C, 0xD5, 0xED, 0x6B, 0xBE, 0x57,
+ 0x86, 0x68, 0xA1, 0x7B, 0x58, 0xC1, 0x17, 0x1D, 0x0B, 0x90, 0xE8,
+ 0x13, 0xA9, 0xE4, 0xF5, 0x8A, 0x89, 0xD7, 0x19, 0xB1, 0x10, 0x42,
+ 0xD6, 0x36, 0x0B, 0x1B, 0x0F, 0x52, 0xDE, 0xB7, 0x30, 0xA5, 0x8D,
+ 0x58, 0xFA, 0xF4, 0x63, 0x15, 0x95, 0x4B, 0x0A, 0x87, 0x26, 0x91,
+ 0x47, 0x59, 0x77, 0xDC, 0x88, 0xC0, 0xD7, 0x33, 0xFE, 0xFF, 0x54,
+ 0x60, 0x0A, 0x0C, 0xC1, 0xD0, 0x30, 0x0A, 0xAA, 0xEB, 0x94, 0x57,
+ 0x2C, 0x6E, 0x95, 0xB0, 0x1A, 0xE9, 0x0D, 0xE0, 0x4F, 0x1D, 0xCE,
+ 0x47, 0xF8, 0x7E, 0x8F, 0xA7, 0xBE, 0xBF, 0x77, 0xE1, 0xDB, 0xC2,
+ 0x0D, 0x6B, 0xA8, 0x5C, 0xB9, 0x14, 0x3D, 0x51, 0x8B, 0x28, 0x5D,
+ 0xFA, 0x04, 0xB6, 0x98, 0xBF, 0x0C, 0xF7, 0x81, 0x9F, 0x20, 0xFA,
+ 0x7A, 0x28, 0x8E, 0xB0, 0x70, 0x3D, 0x99, 0x5C, 0x59, 0x94, 0x0C,
+ 0x7C, 0x66, 0xDE, 0x57, 0xA9, 0xB7, 0x0F, 0x82, 0x37, 0x9B, 0x70,
+ 0xE2, 0x03, 0x1E, 0x45, 0x0F, 0xCF, 0xD2, 0x18, 0x13, 0x26, 0xFC,
+ 0xD2, 0x8D, 0x88, 0x23, 0xBA, 0xAA, 0x80, 0xDF, 0x6E, 0x0F, 0x44,
+ 0x35, 0x59, 0x64, 0x75, 0x39, 0xFD, 0x89, 0x07, 0xC0, 0xFF, 0xD9,
+ 0xD7, 0x9C, 0x13, 0x0E, 0xD8, 0x1C, 0x9A, 0xFD, 0x9B, 0x7E, 0x84,
+ 0x8C, 0x9F, 0xED, 0x38, 0x44, 0x3D, 0x5D, 0x38, 0x0E, 0x53, 0xFB,
+ 0xDB, 0x8A, 0xC8, 0xC3, 0xD3, 0xF0, 0x68, 0x76, 0x05, 0x4F, 0x12,
+ 0x24, 0x61, 0x10, 0x7D, 0xE9, 0x2F, 0xEA, 0x09, 0xC6, 0xF6, 0x92,
+ 0x3A, 0x18, 0x8D, 0x53, 0xAF, 0xE5, 0x4A, 0x10, 0xF6, 0x0E, 0x6E,
+ 0x9D, 0x5A, 0x03, 0xD9, 0x96, 0xB5, 0xFB, 0xC8, 0x20, 0xF8, 0xA6,
+ 0x37, 0x11, 0x6A, 0x27, 0xAD, 0x04, 0xB4, 0x44, 0xA0, 0x93, 0x2D,
+ 0xD6, 0x0F, 0xBD, 0x12, 0x67, 0x1C, 0x11, 0xE1, 0xC0, 0xEC, 0x73,
+ 0xE7, 0x89, 0x87, 0x9F, 0xAA, 0x3D, 0x42, 0xC6, 0x4D, 0x20, 0xCD,
+ 0x12, 0x52, 0x74, 0x2A, 0x37, 0x68, 0xC2, 0x5A, 0x90, 0x15, 0x85,
+ 0x88, 0x8E, 0xCE, 0xE1, 0xE6, 0x12, 0xD9, 0x93, 0x6B, 0x40, 0x3B,
+ 0x07, 0x75, 0x94, 0x9A, 0x66, 0xCD, 0xFD, 0x99, 0xA2, 0x9B, 0x13,
+ 0x45, 0xBA, 0xA8, 0xD9, 0xD5, 0x40, 0x0C, 0x91, 0x02, 0x4B, 0x0A,
+ 0x60, 0x73, 0x63, 0xB0, 0x13, 0xCE, 0x5D, 0xE9, 0xAE, 0x86, 0x9D,
+ 0x3B, 0x8D, 0x95, 0xB0, 0x57, 0x0B, 0x3C, 0x2D, 0x39, 0x14, 0x22,
+ 0xD3, 0x24, 0x50, 0xCB, 0xCF, 0xAE, 0x96, 0x65, 0x22, 0x86, 0xE9,
+ 0x6D, 0xEC, 0x12, 0x14, 0xA9, 0x34, 0x65, 0x27, 0x98, 0x0A, 0x81,
+ 0x92, 0xEA, 0xC1, 0xC3, 0x9A, 0x3A, 0xAF, 0x6F, 0x15, 0x35, 0x1D,
+ 0xA6, 0xBE, 0x76, 0x4D, 0xF8, 0x97, 0x72, 0xEC, 0x04, 0x07, 0xD0,
+ 0x6E, 0x44, 0x15, 0xBE, 0xFA, 0xE7, 0xC9, 0x25, 0x80, 0xDF, 0x9B,
+ 0xF5, 0x07, 0x49, 0x7C, 0x8F, 0x29, 0x95, 0x16, 0x0D, 0x4E, 0x21,
+ 0x8D, 0xAA, 0xCB, 0x02, 0x94, 0x4A, 0xBF, 0x83, 0x34, 0x0C, 0xE8,
+ 0xBE, 0x16, 0x86, 0xA9, 0x60, 0xFA, 0xF9, 0x0E, 0x2D, 0x90, 0xC5,
+ 0x5C, 0xC6, 0x47, 0x5B, 0xAB, 0xC3, 0x17, 0x1A, 0x80, 0xA3, 0x63,
+ 0x17, 0x49, 0x54, 0x95, 0x5D, 0x71, 0x01, 0xDA, 0xB1, 0x6A, 0xE8,
+ 0x17, 0x91, 0x67, 0xE2, 0x14, 0x44, 0xB4, 0x43, 0xA9, 0xEA, 0xAA,
+ 0x7C, 0x91, 0xDE, 0x36, 0xD1, 0x18, 0xC3, 0x9D, 0x38, 0x9F, 0x8D,
+ 0xD4, 0x46, 0x9A, 0x84, 0x6C, 0x9A, 0x26, 0x2B, 0xF7, 0xFA, 0x18,
+ 0x48, 0x7A, 0x79, 0xE8, 0xDE, 0x11, 0x69, 0x9E, 0x0B, 0x8F, 0xDF,
+ 0x55, 0x7C, 0xB4, 0x87, 0x19, 0xD4, 0x53, 0xBA, 0x71, 0x30, 0x56,
+ 0x10, 0x9B, 0x93, 0xA2, 0x18, 0xC8, 0x96, 0x75, 0xAC, 0x19, 0x5F,
+ 0xB4, 0xFB, 0x06, 0x63, 0x9B, 0x37, 0x97, 0x14, 0x49, 0x55, 0xB3,
+ 0xC9, 0x32, 0x7D, 0x1A, 0xEC, 0x00, 0x3D, 0x42, 0xEC, 0xD0, 0xEA,
+ 0x98, 0xAB, 0xF1, 0x9F, 0xFB, 0x4A, 0xF3, 0x56, 0x1A, 0x67, 0xE7,
+ 0x7C, 0x35, 0xBF, 0x15, 0xC5, 0x9C, 0x24, 0x12, 0xDA, 0x88, 0x1D,
+ 0xB0, 0x2B, 0x1B, 0xFB, 0xCE, 0xBF, 0xAC, 0x51, 0x52, 0xBC, 0x99,
+ 0xBC, 0x3F, 0x1D, 0x15, 0xF7, 0x71, 0x00, 0x1B, 0x70, 0x29, 0xFE,
+ 0xDB, 0x02, 0x8F, 0x8B, 0x85, 0x2B, 0xC4, 0x40, 0x7E, 0xB8, 0x3F,
+ 0x89, 0x1C, 0x9C, 0xA7, 0x33, 0x25, 0x4F, 0xDD, 0x1E, 0x9E, 0xDB,
+ 0x56, 0x91, 0x9C, 0xE9, 0xFE, 0xA2, 0x1C, 0x17, 0x40, 0x72, 0x52,
+ 0x1C, 0x18, 0x31, 0x9A, 0x54, 0xB5, 0xD4, 0xEF, 0xBE, 0xBD, 0xDF,
+ 0x1D, 0x8B, 0x69, 0xB1, 0xCB, 0xF2, 0x5F, 0x48, 0x9F, 0xCC, 0x98,
+ 0x13, 0x72, 0x54, 0x7C, 0xF4, 0x1D, 0x00, 0x8E, 0xF0, 0xBC, 0xA1,
+ 0x92, 0x6F, 0x93, 0x4B, 0x73, 0x5E, 0x09, 0x0B, 0x3B, 0x25, 0x1E,
+ 0xB3, 0x3A, 0x36, 0xF8, 0x2E, 0xD9, 0xB2, 0x9C, 0xF4, 0xCB, 0x94,
+ 0x41, 0x88, 0xFA, 0x0E, 0x1E, 0x38, 0xDD, 0x77, 0x8F, 0x7D, 0x1C,
+ 0x9D, 0x98, 0x7B, 0x28, 0xD1, 0x32, 0xDF, 0xB9, 0x73, 0x1F, 0xA4,
+ 0xF4, 0xB4, 0x16, 0x93, 0x5B, 0xE4, 0x9D, 0xE3, 0x05, 0x16, 0xAF,
+ 0x35, 0x78, 0x58, 0x1F, 0x2F, 0x13, 0xF5, 0x61, 0xC0, 0x66, 0x33,
+ 0x61, 0x94, 0x1E, 0xAB, 0x24, 0x9A, 0x4B, 0xC1, 0x23, 0xF8, 0xD1,
+ 0x5C, 0xD7, 0x11, 0xA9, 0x56, 0xA1, 0xBF, 0x20, 0xFE, 0x6E, 0xB7,
+ 0x8A, 0xEA, 0x23, 0x73, 0x36, 0x1D, 0xA0, 0x42, 0x6C, 0x79, 0xA5,
+ 0x30, 0xC3, 0xBB, 0x1D, 0xE0, 0xC9, 0x97, 0x22, 0xEF, 0x1F, 0xDE,
+ 0x39, 0xAC, 0x2B, 0x00, 0xA0, 0xA8, 0xEE, 0x7C, 0x80, 0x0A, 0x08,
+ 0xBC, 0x22, 0x64, 0xF8, 0x9F, 0x4E, 0xFF, 0xE6, 0x27, 0xAC, 0x2F,
+ 0x05, 0x31, 0xFB, 0x55, 0x4F, 0x6D, 0x21, 0xD7, 0x4C, 0x59, 0x0A,
+ 0x70, 0xAD, 0xFA, 0xA3, 0x90, 0xBD, 0xFB, 0xB3, 0xD6, 0x8E, 0x46,
+ 0x21, 0x5C, 0xAB, 0x18, 0x7D, 0x23, 0x68, 0xD5, 0xA7, 0x1F, 0x5E,
+ 0xBE, 0xC0, 0x81, 0xCD, 0x3B, 0x20, 0xC0, 0x82, 0xDB, 0xE4, 0xCD,
+ 0x2F, 0xAC, 0xA2, 0x87, 0x73, 0x79, 0x5D, 0x6B, 0x0C, 0x10, 0x20,
+ 0x4B, 0x65, 0x9A, 0x93, 0x9E, 0xF2, 0x9B, 0xBE, 0x10, 0x88, 0x24,
+ 0x36, 0x24, 0x42, 0x99, 0x27, 0xA7, 0xEB, 0x57, 0x6D, 0xD3, 0xA0,
+ 0x0E, 0xA5, 0xE0, 0x1A, 0xF5, 0xD4, 0x75, 0x83, 0xB2, 0x27, 0x2C,
+ 0x0C, 0x16, 0x1A, 0x80, 0x65, 0x21, 0xA1, 0x6F, 0xF9, 0xB0, 0xA7,
+ 0x22, 0xC0, 0xCF, 0x26, 0xB0, 0x25, 0xD5, 0x83, 0x6E, 0x22, 0x58,
+ 0xA4, 0xF7, 0xD4, 0x77, 0x3A, 0xC8, 0x01, 0xE4, 0x26, 0x3B, 0xC2,
+ 0x94, 0xF4, 0x3D, 0xEF, 0x7F, 0xA8, 0x70, 0x3F, 0x3A, 0x41, 0x97,
+ 0x46, 0x35, 0x25, 0x88, 0x76, 0x52, 0xB0, 0xB2, 0xA4, 0xA2, 0xA7,
+ 0xCF, 0x87, 0xF0, 0x09, 0x14, 0x87, 0x1E, 0x25, 0x03, 0x91, 0x13,
+ 0xC7, 0xE1, 0x61, 0x8D, 0xA3, 0x40, 0x64, 0xB5, 0x7A, 0x43, 0xC4,
+ 0x63, 0x24, 0x9F, 0xB8, 0xD0, 0x5E, 0x0F, 0x26, 0xF4, 0xA6, 0xD8,
+ 0x49, 0x72, 0xE7, 0xA9, 0x05, 0x48, 0x24, 0x14, 0x5F, 0x91, 0x29,
+ 0x5C, 0xDB, 0xE3, 0x9A, 0x6F, 0x92, 0x0F, 0xAC, 0xC6, 0x59, 0x71,
+ 0x2B, 0x46, 0xA5, 0x4B, 0xA2, 0x95, 0xBB, 0xE6, 0xA9, 0x01, 0x54,
+ 0xE9, 0x1B, 0x33, 0x98, 0x5A, 0x2B, 0xCD, 0x42, 0x0A, 0xD5, 0xC6,
+ 0x7E, 0xC9, 0xAD, 0x8E, 0xB7, 0xAC, 0x68, 0x64, 0xDB, 0x27, 0x2A,
+ 0x51, 0x6B, 0xC9, 0x4C, 0x28, 0x39, 0xB0, 0xA8, 0x16, 0x9A, 0x6B,
+ 0xF5, 0x8E, 0x1A, 0x0C, 0x2A, 0xDA, 0x8C, 0x88, 0x3B, 0x7B, 0xF4,
+ 0x97, 0xA4, 0x91, 0x71, 0x26, 0x8E, 0xD1, 0x5D, 0xDD, 0x29, 0x69,
+ 0x38, 0x4E, 0x7F, 0xF4, 0xBF, 0x4A, 0xAB, 0x2E, 0xC9, 0xEC, 0xC6,
+ 0x52, 0x9C, 0xF6, 0x29, 0xE2, 0xDF, 0x0F, 0x08, 0xA7, 0x7A, 0x65,
+ 0xAF, 0xA1, 0x2A, 0xA9, 0xB5, 0x05, 0xDF, 0x8B, 0x28, 0x7E, 0xF6,
+ 0xCC, 0x91, 0x49, 0x3D, 0x1C, 0xAA, 0x39, 0x07, 0x6E, 0x28, 0xEF,
+ 0x1E, 0xA0, 0x28, 0xF5, 0x11, 0x8D, 0xE6, 0x1A, 0xE0, 0x2B, 0xB6,
+ 0xAE, 0xFC, 0x33, 0x43, 0xA0, 0x50, 0x29, 0x2F, 0x19, 0x9F, 0x40,
+ 0x18, 0x57, 0xB2, 0xBE, 0xAD, 0x5E, 0x6E, 0xE2, 0xA1, 0xF1, 0x91,
+ 0x02, 0x2F, 0x92, 0x78, 0x01, 0x6F, 0x04, 0x77, 0x91, 0xA9, 0xD1,
+ 0x8D, 0xA7, 0xD2, 0xA6, 0xD2, 0x7F, 0x2E, 0x0E, 0x51, 0xC2, 0xF6,
+ 0xEA, 0x30, 0xE8, 0xAC, 0x49, 0xA0, 0x60, 0x4F, 0x4C, 0x13, 0x54,
+ 0x2E, 0x85, 0xB6, 0x83, 0x81, 0xB9, 0xFD, 0xCF, 0xA0, 0xCE, 0x4B,
+ 0x2D, 0x34, 0x13, 0x54, 0x85, 0x2D, 0x36, 0x02, 0x45, 0xC5, 0x36,
+ 0xB6, 0x12, 0xAF, 0x71, 0xF3, 0xE7, 0x7C, 0x90, 0x95, 0xAE, 0x2D,
+ 0xBD, 0xE5, 0x04, 0xB2, 0x65, 0x73, 0x3D, 0xAB, 0xFE, 0x10, 0xA2,
+ 0x0F, 0xC7, 0xD6, 0xD3, 0x2C, 0x21, 0xCC, 0xC7, 0x2B, 0x8B, 0x34,
+ 0x44, 0xAE, 0x66, 0x3D, 0x65, 0x92, 0x2D, 0x17, 0xF8, 0x2C, 0xAA,
+ 0x2B, 0x86, 0x5C, 0xD8, 0x89, 0x13, 0xD2, 0x91, 0xA6, 0x58, 0x99,
+ 0x02, 0x6E, 0xA1, 0x32, 0x84, 0x39, 0x72, 0x3C, 0x19, 0x8C, 0x36,
+ 0xB0, 0xC3, 0xC8, 0xD0, 0x85, 0xBF, 0xAF, 0x8A, 0x32, 0x0F, 0xDE,
+ 0x33, 0x4B, 0x4A, 0x49, 0x19, 0xB4, 0x4C, 0x2B, 0x95, 0xF6, 0xE8,
+ 0xEC, 0xF7, 0x33, 0x93, 0xF7, 0xF0, 0xD2, 0xA4, 0x0E, 0x60, 0xB1,
+ 0xD4, 0x06, 0x52, 0x6B, 0x02, 0x2D, 0xDC, 0x33, 0x18, 0x10, 0xB1,
+ 0xA5, 0xF7, 0xC3, 0x47, 0xBD, 0x53, 0xED, 0x1F, 0x10, 0x5D, 0x6A,
+ 0x0D, 0x30, 0xAB, 0xA4, 0x77, 0xE1, 0x78, 0x88, 0x9A, 0xB2, 0xEC,
+ 0x55, 0xD5, 0x58, 0xDE, 0xAB, 0x26, 0x30, 0x20, 0x43, 0x36, 0x96,
+ 0x2B, 0x4D, 0xB5, 0xB6, 0x63, 0xB6, 0x90, 0x2B, 0x89, 0xE8, 0x5B,
+ 0x31, 0xBC, 0x6A, 0xF5, 0x0F, 0xC5, 0x0A, 0xCC, 0xB3, 0xFB, 0x9B,
+ 0x57, 0xB6, 0x63, 0x29, 0x70, 0x31, 0x37, 0x8D, 0xB4, 0x78, 0x96,
+ 0xD7, 0xFB, 0xAF, 0x6C, 0x60, 0x0A, 0xDD, 0x2C, 0x67, 0xF9, 0x36,
+ 0xDB, 0x03, 0x79, 0x86, 0xDB, 0x85, 0x6E, 0xB4, 0x9C, 0xF2, 0xDB,
+ 0x3F, 0x7D, 0xA6, 0xD2, 0x36, 0x50, 0xE4, 0x38, 0xF1, 0x88, 0x40,
+ 0x41, 0xB0, 0x13, 0x11, 0x9E, 0x4C, 0x2A, 0xE5, 0xAF, 0x37, 0xCC,
+ 0xCD, 0xFB, 0x68, 0x66, 0x07, 0x38, 0xB5, 0x8B, 0x3C, 0x59, 0xD1,
+ 0xC0, 0x24, 0x84, 0x37, 0x47, 0x2A, 0xBA, 0x1F, 0x35, 0xCA, 0x1F,
+ 0xB9, 0x0C, 0xD7, 0x14, 0xAA, 0x9F, 0x63, 0x55, 0x34, 0xF4, 0x9E,
+ 0x7C, 0x5B, 0xBA, 0x81, 0xC2, 0xB6, 0xB3, 0x6F, 0xDE, 0xE2, 0x1C,
+ 0xA2, 0x7E, 0x34, 0x7F, 0x79, 0x3D, 0x2C, 0xE9, 0x44, 0xED, 0xB2,
+ 0x3C, 0x8C, 0x9B, 0x91, 0x4B, 0xE1, 0x03, 0x35, 0xE3, 0x50, 0xFE,
+ 0xB5, 0x07, 0x03, 0x94, 0xB7, 0xA4, 0xA1, 0x5C, 0x0C, 0xA1, 0x20,
+ 0x28, 0x35, 0x68, 0xB7, 0xBF, 0xC2, 0x54, 0xFE, 0x83, 0x8B, 0x13,
+ 0x7A, 0x21, 0x47, 0xCE, 0x7C, 0x11, 0x3A, 0x3A, 0x4D, 0x65, 0x49,
+ 0x9D, 0x9E, 0x86, 0xB8, 0x7D, 0xBC, 0xC7, 0xF0, 0x3B, 0xBD, 0x3A,
+ 0x3A, 0xB1, 0xAA, 0x24, 0x3E, 0xCE, 0x5B, 0xA9, 0xBC, 0xF2, 0x5F,
+ 0x82, 0x83, 0x6C, 0xFE, 0x47, 0x3B, 0x2D, 0x83, 0xE7, 0xA7, 0x20,
+ 0x1C, 0xD0, 0xB9, 0x6A, 0x72, 0x45, 0x1E, 0x86, 0x3F, 0x6C, 0x3B,
+ 0xA6, 0x64, 0xA6, 0xD0, 0x73, 0xD1, 0xF7, 0xB5, 0xED, 0x99, 0x08,
+ 0x65, 0xD9, 0x78, 0xBD, 0x38, 0x15, 0xD0, 0x60, 0x94, 0xFC, 0x9A,
+ 0x2A, 0xBA, 0x52, 0x21, 0xC2, 0x2D, 0x5A, 0xB9, 0x96, 0x38, 0x9E,
+ 0x37, 0x21, 0xE3, 0xAF, 0x5F, 0x05, 0xBE, 0xDD, 0xC2, 0x87, 0x5E,
+ 0x0D, 0xFA, 0xEB, 0x39, 0x02, 0x1E, 0xE2, 0x7A, 0x41, 0x18, 0x7C,
+ 0xBB, 0x45, 0xEF, 0x40, 0xC3, 0xE7, 0x3B, 0xC0, 0x39, 0x89, 0xF9,
+ 0xA3, 0x0D, 0x12, 0xC5, 0x4B, 0xA7, 0xD2, 0x14, 0x1D, 0xA8, 0xA8,
+ 0x75, 0x49, 0x3E, 0x65, 0x77, 0x6E, 0xF3, 0x5F, 0x97, 0xDE, 0xBC,
+ 0x22, 0x86, 0xCC, 0x4A, 0xF9, 0xB4, 0x62, 0x3E, 0xEE, 0x90, 0x2F,
+ 0x84, 0x0C, 0x52, 0xF1, 0xB8, 0xAD, 0x65, 0x89, 0x39, 0xAE, 0xF7,
+ 0x1F, 0x3F, 0x72, 0xB9, 0xEC, 0x1D, 0xE2, 0x15, 0x88, 0xBD, 0x35,
+ 0x48, 0x4E, 0xA4, 0x44, 0x36, 0x34, 0x3F, 0xF9, 0x5E, 0xAD, 0x6A,
+ 0xB1, 0xD8, 0xAF, 0xB1, 0xB2, 0xA3, 0x03, 0xDF, 0x1B, 0x71, 0xE5,
+ 0x3C, 0x4A, 0xEA, 0x6B, 0x2E, 0x3E, 0x93, 0x72, 0xBE, 0x0D, 0x1B,
+ 0xC9, 0x97, 0x98, 0xB0, 0xCE, 0x3C, 0xC1, 0x0D, 0x2A, 0x59, 0x6D,
+ 0x56, 0x5D, 0xBA, 0x82, 0xF8, 0x8C, 0xE4, 0xCF, 0xF3, 0xB3, 0x3D,
+ 0x5D, 0x24, 0xE9, 0xC0, 0x83, 0x11, 0x24, 0xBF, 0x1A, 0xD5, 0x4B,
+ 0x79, 0x25, 0x32, 0x98, 0x3D, 0xD6, 0xC3, 0xA8, 0xB7, 0xD0},
+ /*lengthinbits*/
+ 16448,
+ /*key*/
+ {0xB3, 0x12, 0x0F, 0xFD, 0xB2, 0xCF, 0x6A, 0xF4, 0xE7, 0x3E, 0xAF,
+ 0x2E, 0xF4, 0xEB, 0xEC, 0x69},
+ /*KeyLeninBytes*/
+ 16,
+ /*exp out*/
+ {0x17, 0x9F, 0x2F, 0xA6},
+ /*iv*/
+ {0x29, 0x6F, 0x39, 0x3C, 0x6B, 0x22, 0x77, 0x37, 0xA9, 0x6F, 0x39,
+ 0x3C, 0x6B, 0x22, 0xF7, 0x37},
+ /*ivLeninbytes*/
+ 16,
+ /*direction*/
+ 1,
+ /* IV params */
+ {0x296F393C, 0x6B227737, 0x1},
+ }}; // snow3g f9 3GPP test vectors
+
+cipher_test_vector_t *snow3g_cipher_test_vectors[] = {
+ snow3g_f8_vectors, snow3g_f8_vectors};
+
+uint32_t numSnow3gCipherTestVectors[] = {
+ sizeof(snow3g_f8_vectors) / sizeof(cipher_test_vector_t),
+ sizeof(snow3g_f8_vectors) / sizeof(cipher_test_vector_t),
+ sizeof(snow3g_f8_linear_bitvectors) / sizeof(cipherbit_test_vector_t),
+ sizeof(snow3g_f8_linear_bitvectors) / sizeof(cipherbit_test_vector_t)};
+
+hash_test_vector_t *snow3g_hash_test_vectors[] = {snow_f9_vectors, snow_f9_vectors,
+ snow_f9_vectors};
+uint32_t numSnow3gHashTestVectors[] = {
+ sizeof(snow_f9_vectors) / sizeof(hash_test_vector_t),
+ sizeof(snow_f9_vectors) / sizeof(hash_test_vector_t),
+ sizeof(snow_f9_vectors) / sizeof(hash_test_vector_t)};
+
+#endif /*__SSO_TEST_VECTORS_H__*/
diff --git a/src/spdk/intel-ipsec-mb/LibTestApp/utils.c b/src/spdk/intel-ipsec-mb/LibTestApp/utils.c
new file mode 100644
index 000000000..3f9dfa2c7
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/LibTestApp/utils.c
@@ -0,0 +1,70 @@
+/*****************************************************************************
+ Copyright (c) 2018, Intel Corporation
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*****************************************************************************/
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+
+#include "utils.h"
+
+#ifdef _WIN32
+#define snprintf _snprintf
+#endif
+
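+/* print 'len' bytes at 'p' as an offset/hex/ASCII dump, preceded by the 'msg' header */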
+void
+hexdump(FILE *fp,
+ const char *msg,
+ const void *p,
+ size_t len)
+{
+ unsigned int i, out, ofs;
+ const unsigned char *data = p;
+
+ fprintf(fp, "%s\n", msg);
+
+ ofs = 0;
+ while (ofs < len) {
+ char line[120];
+
+ out = snprintf(line, sizeof(line), "%08x:", ofs);
+ for (i = 0; ((ofs + i) < len) && (i < 16); i++)
+ out += snprintf(line + out, sizeof(line) - out,
+ " %02x", (data[ofs + i] & 0xff));
+ for (; i <= 16; i++)
+ out += snprintf(line + out, sizeof(line) - out, " | ");
+ for (i = 0; (ofs < len) && (i < 16); i++, ofs++) {
+ unsigned char c = data[ofs];
+
+ if ((c < ' ') || (c > '~'))
+ c = '.';
+ out += snprintf(line + out,
+ sizeof(line) - out, "%c", c);
+ }
+ fprintf(fp, "%s\n", line);
+ }
+}
diff --git a/src/spdk/intel-ipsec-mb/LibTestApp/utils.h b/src/spdk/intel-ipsec-mb/LibTestApp/utils.h
new file mode 100644
index 000000000..1312ea2b7
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/LibTestApp/utils.h
@@ -0,0 +1,35 @@
+/*****************************************************************************
+ Copyright (c) 2018, Intel Corporation
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*****************************************************************************/
+
+#ifndef TESTAPP_UTILS_H
+#define TESTAPP_UTILS_H
+
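+/* DIM() returns the number of elements in a statically sized array */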
+#define DIM(_x) (sizeof(_x)/sizeof(_x[0]))
+
+void hexdump(FILE *fp, const char *msg, const void *p, size_t len);
+
+#endif /* TESTAPP_UTILS_H */
diff --git a/src/spdk/intel-ipsec-mb/LibTestApp/win_x64.mak b/src/spdk/intel-ipsec-mb/LibTestApp/win_x64.mak
new file mode 100644
index 000000000..1069130f7
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/LibTestApp/win_x64.mak
@@ -0,0 +1,151 @@
+#
+# Copyright (c) 2017-2019, Intel Corporation
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+# * Neither the name of Intel Corporation nor the names of its contributors
+# may be used to endorse or promote products derived from this software
+# without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+TEST_APP = ipsec_MB_testapp
+XVALID_APP = ipsec_xvalid_test
+INSTNAME = intel-ipsec-mb
+
+!if !defined(PREFIX)
+PREFIX = C:\Program Files
+!endif
+
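+# use an installed copy of the library if present, otherwise the one built in the parent directory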
+!if exist("$(PREFIX)\$(INSTNAME)\libIPSec_MB.lib")
+IPSECLIB = "$(PREFIX)\$(INSTNAME)\libIPSec_MB.lib"
+INCDIR = -I"$(PREFIX)\$(INSTNAME)"
+!else
+IPSECLIB = ..\libIPSec_MB.lib
+INCDIR = -I..\ -I..\include
+!endif
+
+!ifdef DEBUG
+DCFLAGS = /Od /DDEBUG /Z7
+DLFLAGS = /debug
+!else
+DCFLAGS = /O2 /Oi
+DLFLAGS =
+!endif
+
+!if "$(GCM_BIG_DATA)" == "y"
+GCM_CFLAGS = /DGCM_BIG_DATA
+!else
+GCM_CFLAGS =
+!endif
+
+CC = cl
+# _CRT_SECURE_NO_WARNINGS disables warning C4996 about insecure snprintf() being used
+CFLAGS = /nologo /D_CRT_SECURE_NO_WARNINGS $(DCFLAGS) /Y- /W3 /WX- /Gm- /fp:precise /EHsc $(EXTRA_CFLAGS) $(GCM_CFLAGS) $(INCDIR)
+
+LNK = link
+TEST_LFLAGS = /out:$(TEST_APP).exe $(DLFLAGS)
+XVALID_LFLAGS = /out:$(XVALID_APP).exe $(DLFLAGS)
+
+AS = nasm
+AFLAGS = -fwin64 -Xvc -DWIN_ABI
+
+TEST_OBJS = main.obj gcm_test.obj ctr_test.obj customop_test.obj des_test.obj ccm_test.obj cmac_test.obj hmac_sha1_test.obj hmac_sha256_sha512_test.obj utils.obj hmac_md5_test.obj aes_test.obj sha_test.obj chained_test.obj api_test.obj pon_test.obj ecb_test.obj zuc_test.obj kasumi_test.obj snow3g_test.obj direct_api_test.obj
+
+XVALID_OBJS = ipsec_xvalid.obj misc.obj
+
+all: $(TEST_APP).exe $(XVALID_APP).exe
+
+$(TEST_APP).exe: $(TEST_OBJS) $(IPSECLIB)
+ $(LNK) $(TEST_LFLAGS) $(TEST_OBJS) $(IPSECLIB)
+
+$(XVALID_APP).exe: $(XVALID_OBJS) $(IPSECLIB)
+ $(LNK) $(XVALID_LFLAGS) $(XVALID_OBJS) $(IPSECLIB)
+
+misc.obj: misc.asm
+ $(AS) -o $@ $(AFLAGS) misc.asm
+
+main.obj: main.c do_test.h
+ $(CC) /c $(CFLAGS) main.c
+
+gcm_test.obj: gcm_test.c gcm_ctr_vectors_test.h
+ $(CC) /c $(CFLAGS) gcm_test.c
+
+ctr_test.obj: ctr_test.c gcm_ctr_vectors_test.h
+ $(CC) /c $(CFLAGS) ctr_test.c
+
+pon_test.obj: pon_test.c gcm_ctr_vectors_test.h
+ $(CC) /c $(CFLAGS) pon_test.c
+
+customop_test.obj: customop_test.c customop_test.h
+ $(CC) /c $(CFLAGS) customop_test.c
+
+des_test.obj: des_test.c gcm_ctr_vectors_test.h
+ $(CC) /c $(CFLAGS) des_test.c
+
+ccm_test.obj: ccm_test.c gcm_ctr_vectors_test.h utils.h
+ $(CC) /c $(CFLAGS) ccm_test.c
+
+cmac_test.obj: cmac_test.c utils.h
+ $(CC) /c $(CFLAGS) cmac_test.c
+
+hmac_sha1_test.obj: hmac_sha1_test.c utils.h
+ $(CC) /c $(CFLAGS) hmac_sha1_test.c
+
+hmac_sha256_sha512_test.obj: hmac_sha256_sha512_test.c utils.h
+ $(CC) /c $(CFLAGS) hmac_sha256_sha512_test.c
+
+hmac_md5_test.obj: hmac_md5_test.c utils.h
+ $(CC) /c $(CFLAGS) hmac_md5_test.c
+
+aes_test.obj: aes_test.c utils.h
+ $(CC) /c $(CFLAGS) aes_test.c
+
+ecb_test.obj: ecb_test.c utils.h
+ $(CC) /c $(CFLAGS) ecb_test.c
+
+utils.obj: utils.c
+ $(CC) /c $(CFLAGS) utils.c
+
+sha_test.obj: sha_test.c utils.h
+ $(CC) /c $(CFLAGS) sha_test.c
+
+chained_test.obj: chained_test.c utils.h
+ $(CC) /c $(CFLAGS) chained_test.c
+
+api_test.obj: api_test.c gcm_ctr_vectors_test.h
+ $(CC) /c $(CFLAGS) api_test.c
+
+zuc_test.obj: zuc_test.c zuc_test_vectors.h
+ $(CC) /c $(CFLAGS) zuc_test.c
+
+kasumi_test.obj: kasumi_test.c kasumi_test_vectors.h
+ $(CC) /c $(CFLAGS) kasumi_test.c
+
+snow3g_test.obj: snow3g_test.c snow3g_test_vectors.h
+ $(CC) /c $(CFLAGS) snow3g_test.c
+
+direct_api_test.obj: direct_api_test.c
+ $(CC) /c $(CFLAGS) direct_api_test.c
+
+ipsec_xvalid.obj: ipsec_xvalid.c misc.h
+ $(CC) /c $(CFLAGS) ipsec_xvalid.c
+
+clean:
+ del /q $(TEST_OBJS) $(TEST_APP).* $(XVALID_OBJS) $(XVALID_APP).*
diff --git a/src/spdk/intel-ipsec-mb/LibTestApp/zuc_test.c b/src/spdk/intel-ipsec-mb/LibTestApp/zuc_test.c
new file mode 100644
index 000000000..3dede780f
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/LibTestApp/zuc_test.c
@@ -0,0 +1,660 @@
+/*****************************************************************************
+ Copyright (c) 2009-2019, Intel Corporation
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*****************************************************************************/
+
+
+/*-----------------------------------------------------------------------
+* ZUC functional test
+*-----------------------------------------------------------------------
+*
+* A simple functional test for ZUC
+*
+*-----------------------------------------------------------------------*/
+
+#include <assert.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <intel-ipsec-mb.h>
+
+#include "zuc_test_vectors.h"
+#include "gcm_ctr_vectors_test.h"
+
+#define MAXBUFS 9
+#define PASS_STATUS 0
+#define FAIL_STATUS -1
+
+int zuc_test(const enum arch_type arch, struct MB_MGR *mb_mgr);
+
+int validate_zuc_algorithm(struct MB_MGR *mb_mgr, uint8_t *pSrcData,
+ uint8_t *pDstData, uint8_t *pKeys, uint8_t *pIV);
+int validate_zuc_EEA_1_block(struct MB_MGR *mb_mgr, uint8_t *pSrcData,
+ uint8_t *pDstData, uint8_t *pKeys, uint8_t *pIV);
+int validate_zuc_EEA_4_block(struct MB_MGR *mb_mgr, uint8_t **pSrcData,
+ uint8_t **pDstData, uint8_t **pKeys,
+ uint8_t **pIV);
+int validate_zuc_EEA_n_block(struct MB_MGR *mb_mgr, uint8_t **pSrcData,
+ uint8_t **pDstData, uint8_t **pKeys, uint8_t **pIV,
+ uint32_t numBuffs);
+int validate_zuc_EIA_1_block(struct MB_MGR *mb_mgr, uint8_t *pSrcData,
+ uint8_t *pDstData, uint8_t *pKeys, uint8_t *pIV);
+static void byte_hexdump(const char *message, const uint8_t *ptr, int len);
+
+/******************************************************************************
+ * @ingroup zuc_functionalTest_app
+ *
+ * @description
+ * This function allocates memory for the source data buffers
+ *
+ * pSrcData = pointers to the new source buffers
+ * numOfBuffs = number of buffers
+ * ************************************************/
+static uint32_t createData(uint8_t *pSrcData[MAXBUFS],
+ uint32_t numOfBuffs)
+{
+ uint32_t i = 0, j = 0;
+
+ for (i = 0; i < numOfBuffs; i++) {
+ pSrcData[i] = (uint8_t *)malloc(MAX_BUFFER_LENGTH_IN_BYTES);
+
+ if (!pSrcData[i]) {
+ printf("malloc(pSrcData[i]): failed!\n");
+
+ for (j = 0; j < i; j++) {
+ free(pSrcData[j]);
+ pSrcData[j] = NULL;
+ }
+
+ return FAIL_STATUS;
+ }
+ }
+ return PASS_STATUS;
+}
+
+/******************************************************************************
+ * @ingroup zuc_functionalTest_app
+ *
+ * @description
+ * This function allocates the key and IV (vector) buffers.
+ *
+ * keyLen = key length
+ * pKeys = array of pointers to the new key buffers
+ * ivLen = vector length
+ * pIV = array of pointers to the new vector buffers
+ * numOfBuffs = number of buffers
+************************************************/
+static uint32_t createKeyVecData(uint32_t keyLen, uint8_t *pKeys[MAXBUFS],
+ uint32_t ivLen, uint8_t *pIV[MAXBUFS],
+ uint32_t numOfBuffs)
+{
+ uint32_t i = 0, j = 0;
+
+ for (i = 0; i < numOfBuffs; i++) {
+ pIV[i] = (uint8_t *)malloc(ivLen);
+
+ if (!pIV[i]) {
+ printf("malloc(pIV[i]): failed!\n");
+
+ for (j = 0; j < i; j++) {
+ free(pIV[j]);
+ free(pKeys[j]);
+ }
+
+ return FAIL_STATUS;
+ }
+
+ pKeys[i] = malloc(keyLen);
+
+ if (!pKeys[i]) {
+ printf("malloc(pKeys[i]): failed!\n");
+
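+ /* free every IV allocated so far (indices 0..i) and every key allocated before the failed one */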
+ for (j = 0; j <= i; j++) {
+ free(pIV[j]);
+
+ if (j < i)
+ free(pKeys[j]);
+ }
+ return FAIL_STATUS;
+ }
+ }
+
+ return PASS_STATUS;
+}
+
+/******************************************************************************
+ * @ingroup zuc_functionalTest_app
+ *
+ * @description
+ * This function frees the memory pointed to by each element of an array of pointers
+ *
+ * arr = array of memory pointers
+ * length = length of pointer array (or number of pointers whose buffers
+ * should be freed)
+ * ************************************************/
+static void freePtrArray(uint8_t *pArr[MAXBUFS], uint32_t arrayLength)
+{
+ uint32_t i = 0;
+
+ for (i = 0; i < arrayLength; i++)
+ free(pArr[i]);
+}
+
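+/* reverse the byte order of a 32-bit word */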
+static uint32_t bswap4(const uint32_t val)
+{
+ return ((val >> 24) | /**< A*/
+ ((val & 0xff0000) >> 8) | /**< B*/
+ ((val & 0xff00) << 8) | /**< C*/
+ (val << 24)); /**< D*/
+}
+
+int zuc_test(const enum arch_type arch, struct MB_MGR *mb_mgr)
+{
+
+ uint32_t numBuffs, a;
+ uint32_t status = PASS_STATUS;
+ uint8_t *pKeys[MAXBUFS];
+ uint8_t *pIV[MAXBUFS];
+ uint8_t *pSrcData[MAXBUFS];
+ uint8_t *pDstData[MAXBUFS];
+
+ /* Do not run the tests for aesni emulation */
+ if (arch == ARCH_NO_AESNI)
+ return 0;
+
+ printf("Running Functional Tests\n");
+ fflush(stdout);
+
+ /* Allocate test data buffers */
+ if (createData(pSrcData, MAXBUFS)) {
+ printf("createData() error\n");
+ return FAIL_STATUS;
+ }
+ if (createData(pDstData, MAXBUFS)) {
+ printf("createData() error\n");
+ return FAIL_STATUS;
+ }
+
+ /*Create random keys and vectors*/
+ if (createKeyVecData(ZUC_KEY_LEN_IN_BYTES, pKeys, ZUC_IV_LEN_IN_BYTES,
+ pIV, MAXBUFS)) {
+ printf("createKeyVecData() error\n");
+ freePtrArray(pSrcData, MAXBUFS);
+ freePtrArray(pDstData, MAXBUFS);
+ return FAIL_STATUS;
+ }
+
+ if (validate_zuc_algorithm(mb_mgr, pSrcData[0], pSrcData[0], pKeys[0],
+ pIV[0]))
+ status = 1;
+ else
+ printf("validate ZUC algorithm: PASS\n");
+
+ if (validate_zuc_EEA_1_block(mb_mgr, pSrcData[0], pSrcData[0], pKeys[0],
+ pIV[0]))
+ status = 1;
+ else
+ printf("validate ZUC 1 block: PASS\n");
+
+ if (validate_zuc_EEA_4_block(mb_mgr, pSrcData, pSrcData, pKeys, pIV))
+ status = 1;
+ else
+ printf("validate ZUC 4 block: PASS\n");
+
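+ /* exercise the N-buffer API with 4, 8 and 9 (MAXBUFS) buffers */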
+ for (a = 0; a < 3; a++) {
+ switch (a) {
+ case 0:
+ numBuffs = 4;
+ break;
+ case 1:
+ numBuffs = 8;
+ break;
+ default:
+ numBuffs = 9;
+ break;
+ }
+ if (validate_zuc_EEA_n_block(mb_mgr, pSrcData, pDstData, pKeys,
+ pIV, numBuffs))
+ status = 1;
+ else
+ printf("validate ZUC n block buffers %d: PASS\n", a);
+ }
+
+ if (validate_zuc_EIA_1_block(mb_mgr, pSrcData[0], pDstData[0], pKeys[0],
+ pIV[0]))
+ status = 1;
+ else
+ printf("validate ZUC Integrity 1 block: PASS\n");
+
+ freePtrArray(pKeys, MAXBUFS); /*Free the key buffers*/
+ freePtrArray(pIV, MAXBUFS); /*Free the vector buffers*/
+ freePtrArray(pSrcData, MAXBUFS); /*Free the source buffers*/
+ freePtrArray(pDstData, MAXBUFS); /*Free the destination buffers*/
+ if (status)
+ return status;
+
+ printf("The Functional Test application completed\n");
+ return 0;
+}
+
+int validate_zuc_EEA_1_block(struct MB_MGR *mb_mgr, uint8_t *pSrcData,
+ uint8_t *pDstData, uint8_t *pKeys, uint8_t *pIV)
+{
+ uint32_t i, byteResidue;
+ int retTmp, ret = 0;
+ uint32_t byteLength;
+ uint32_t bitResidue;
+
+ for (i = 0; i < NUM_ZUC_EEA3_TESTS; i++) {
+ memcpy(pKeys, testEEA3_vectors[i].CK, ZUC_KEY_LEN_IN_BYTES);
+ zuc_eea3_iv_gen(testEEA3_vectors[i].count,
+ testEEA3_vectors[i].Bearer,
+ testEEA3_vectors[i].Direction,
+ pIV);
+ byteLength = (testEEA3_vectors[i].length_in_bits + 7) / 8;
+ memcpy(pSrcData, testEEA3_vectors[i].plaintext, byteLength);
+ IMB_ZUC_EEA3_1_BUFFER(mb_mgr, pKeys, pIV, pSrcData, pDstData,
+ byteLength);
+ retTmp = memcmp(pDstData, testEEA3_vectors[i].ciphertext,
+ byteLength - 1);
+ if (retTmp) {
+ printf("Validate ZUC 1 block test %d (Enc): FAIL\n",
+ i + 1);
+ byte_hexdump("Expected", testEEA3_vectors[i].ciphertext,
+ byteLength);
+ byte_hexdump("Found", pDstData, byteLength);
+ ret = retTmp;
+ } else {
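+ /* bit-length messages: check only the valid bits of the trailing partial byte */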
+ bitResidue =
+ (0xFF00 >>
+ (testEEA3_vectors[i].length_in_bits % 8)) &
+ 0x00FF;
+ byteResidue =
+ (testEEA3_vectors[i].ciphertext
+ [testEEA3_vectors[i].length_in_bits / 8] ^
+ pDstData[testEEA3_vectors[i].length_in_bits / 8]) &
+ bitResidue;
+ if (byteResidue) {
+ printf("Validate ZUC 1 block test %d (Enc): "
+ "FAIL\n",
+ i + 1);
+ printf("Expected: 0x%02X (last byte)\n",
+ 0xFF &
+ testEEA3_vectors[i]
+ .ciphertext[testEEA3_vectors[i]
+ .length_in_bits /
+ 8]);
+ printf("Found: 0x%02X (last byte)\n",
+ 0xFF & pDstData[testEEA3_vectors[i]
+ .length_in_bits /
+ 8]);
+ } else
+ printf("Validate ZUC 1 block test %d (Enc): "
+ "PASS\n",
+ i + 1);
+ }
+ fflush(stdout);
+ }
+ return ret;
+}
+int validate_zuc_EEA_4_block(struct MB_MGR *mb_mgr, uint8_t **pSrcData,
+ uint8_t **pDstData, uint8_t **pKeys, uint8_t **pIV)
+{
+ uint32_t i, j, packetLen[4], bitResidue, byteResidue;
+ int retTmp, ret = 0;
+
+ for (i = 0; i < NUM_ZUC_EEA3_TESTS; i++) {
+ for (j = 0; j < 4; j++) {
+ packetLen[j] =
+ (testEEA3_vectors[i].length_in_bits + 7) / 8;
+ memcpy(pKeys[j], testEEA3_vectors[i].CK,
+ ZUC_KEY_LEN_IN_BYTES);
+ zuc_eea3_iv_gen(testEEA3_vectors[i].count,
+ testEEA3_vectors[i].Bearer,
+ testEEA3_vectors[i].Direction,
+ pIV[j]);
+ memcpy(pSrcData[j], testEEA3_vectors[i].plaintext,
+ packetLen[j]);
+ }
+ IMB_ZUC_EEA3_4_BUFFER(mb_mgr, (const void * const *)pKeys,
+ (const void * const *)pIV,
+ (const void * const *)pSrcData,
+ (void **)pDstData, packetLen);
+ uint8_t *pDst8 = (uint8_t *)pDstData[0];
+
+ retTmp = memcmp(pDst8, testEEA3_vectors[i].ciphertext,
+ (testEEA3_vectors[i].length_in_bits) / 8);
+ if (retTmp) {
+ printf("Validate ZUC 4 block (Enc) test %d: FAIL\n",
+ i + 1);
+ byte_hexdump("Expected", testEEA3_vectors[i].ciphertext,
+ (testEEA3_vectors[i].length_in_bits + 7) /
+ 8);
+ byte_hexdump("Found", pDst8,
+ (testEEA3_vectors[i].length_in_bits + 7) /
+ 8);
+ ret = retTmp;
+ } else {
+ bitResidue =
+ (0xFF00 >>
+ (testEEA3_vectors[i].length_in_bits % 8)) &
+ 0x00FF;
+ byteResidue =
+ (testEEA3_vectors[i].ciphertext
+ [testEEA3_vectors[i].length_in_bits / 8] ^
+ pDst8[testEEA3_vectors[i].length_in_bits / 8]) &
+ bitResidue;
+ if (byteResidue) {
+ ret = 1;
+ printf("Validate ZUC 4 block test %d (Enc): "
+ "FAIL\n",
+ i + 1);
+ printf("Expected: 0x%02X (last byte)\n",
+ 0xFF &
+ testEEA3_vectors[i]
+ .ciphertext[testEEA3_vectors[i]
+ .length_in_bits /
+ 8]);
+ printf("Found: 0x%02X (last byte)\n",
+ 0xFF & pDst8[testEEA3_vectors[i]
+ .length_in_bits /
+ 8]);
+ } else
+ printf("Validate ZUC 4 block test %d (Enc): "
+ "PASS\n",
+ i + 1);
+ }
+ fflush(stdout);
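+ /* decrypt pass: feed the ciphertext back in and expect the original plaintext */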
+ for (j = 0; j < 4; j++) {
+ memcpy(pSrcData[j], testEEA3_vectors[i].ciphertext,
+ (testEEA3_vectors[i].length_in_bits + 7) / 8);
+ }
+ IMB_ZUC_EEA3_4_BUFFER(mb_mgr, (const void * const *)pKeys,
+ (const void * const *)pIV,
+ (const void * const *)pSrcData,
+ (void **)pDstData, packetLen);
+ pDst8 = (uint8_t *)pDstData[0];
+ retTmp = memcmp(pDst8, testEEA3_vectors[i].plaintext,
+ (testEEA3_vectors[i].length_in_bits) / 8);
+ if (retTmp) {
+ printf("Validate ZUC 4 block (Dec) test %d: FAIL\n",
+ i + 1);
+ byte_hexdump("Expected", testEEA3_vectors[i].plaintext,
+ (testEEA3_vectors[i].length_in_bits + 7) /
+ 8);
+ byte_hexdump("Found", pDst8,
+ (testEEA3_vectors[i].length_in_bits + 7) /
+ 8);
+ ret = retTmp;
+ } else {
+ bitResidue =
+ (0xFF00 >>
+ (testEEA3_vectors[i].length_in_bits % 8)) &
+ 0x00FF;
+ byteResidue =
+ (testEEA3_vectors[i]
+ .plaintext[testEEA3_vectors[i].length_in_bits /
+ 8] ^
+ pDst8[testEEA3_vectors[i].length_in_bits / 8]) &
+ bitResidue;
+ if (byteResidue) {
+ ret = 1;
+ printf("Validate ZUC 4 block test %d (Dec): "
+ "FAIL\n",
+ i + 1);
+ printf("Expected: 0x%02X (last byte)\n",
+ 0xFF &
+ testEEA3_vectors[i]
+ .plaintext[testEEA3_vectors[i]
+ .length_in_bits /
+ 8]);
+ printf("Found: 0x%02X (last byte)\n",
+ 0xFF & pDst8[testEEA3_vectors[i]
+ .length_in_bits /
+ 8]);
+ } else
+ printf("Validate ZUC 4 block test %d (Dec): "
+ "PASS\n",
+ i + 1);
+ }
+ fflush(stdout);
+ }
+ return ret;
+}
+
+int validate_zuc_EEA_n_block(struct MB_MGR *mb_mgr, uint8_t **pSrcData,
+ uint8_t **pDstData, uint8_t **pKeys, uint8_t **pIV,
+ uint32_t numBuffs)
+{
+ uint32_t i, j, bitResidue, byteResidue;
+ int retTmp, ret = 0;
+ uint32_t packetLen[MAXBUFS];
+
+ assert(numBuffs > 0);
+ for (i = 0; i < NUM_ZUC_EEA3_TESTS; i++) {
+ for (j = 0; j <= (numBuffs - 1); j++) {
+ memcpy(pKeys[j], testEEA3_vectors[i].CK,
+ ZUC_KEY_LEN_IN_BYTES);
+ zuc_eea3_iv_gen(testEEA3_vectors[i].count,
+ testEEA3_vectors[i].Bearer,
+ testEEA3_vectors[i].Direction,
+ pIV[j]);
+ memcpy(pSrcData[j], testEEA3_vectors[i].plaintext,
+ (testEEA3_vectors[i].length_in_bits + 7) / 8);
+ packetLen[j] =
+ (testEEA3_vectors[i].length_in_bits + 7) / 8;
+ }
+ IMB_ZUC_EEA3_N_BUFFER(mb_mgr, (const void * const *)pKeys,
+ (const void * const *)pIV,
+ (const void * const *)pSrcData,
+ (void **)pDstData, packetLen, numBuffs);
+ uint8_t *pDst8 = (uint8_t *)pDstData[0];
+
+ retTmp = memcmp(pDstData[0], testEEA3_vectors[i].ciphertext,
+ (testEEA3_vectors[i].length_in_bits) / 8);
+ if (retTmp) {
+ printf("Validate ZUC n block (Enc) test %d, buffers: "
+ "%d: FAIL\n",
+ i + 1, numBuffs);
+ byte_hexdump("Expected", testEEA3_vectors[i].ciphertext,
+ (testEEA3_vectors[i].length_in_bits + 7) /
+ 8);
+ byte_hexdump("Found", pDst8,
+ (testEEA3_vectors[i].length_in_bits + 7) /
+ 8);
+ ret = retTmp;
+ } else {
+ bitResidue =
+ (0xFF00 >>
+ (testEEA3_vectors[i].length_in_bits % 8)) &
+ 0x00FF;
+ byteResidue =
+ (testEEA3_vectors[i].ciphertext
+ [testEEA3_vectors[i].length_in_bits / 8] ^
+ pDst8[testEEA3_vectors[i].length_in_bits / 8]) &
+ bitResidue;
+ if (byteResidue) {
+ ret = 1;
+ printf("Validate ZUC n block (Enc) test %d, "
+ "buffers %d: FAIL\n",
+ i + 1, numBuffs);
+ printf("Expected: 0x%02X (last byte)\n",
+ 0xFF &
+ testEEA3_vectors[i]
+ .ciphertext[testEEA3_vectors[i]
+ .length_in_bits /
+ 8]);
+ printf("Found: 0x%02X (last byte)\n",
+ 0xFF & pDst8[testEEA3_vectors[i]
+ .length_in_bits /
+ 8]);
+ } else
+ printf("Validate ZUC n block (Enc) test %d, "
+ "buffers %d: PASS\n",
+ i + 1, numBuffs);
+ }
+ fflush(stdout);
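+ /* decrypt pass: cipher the ciphertext again and expect the plaintext back */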
+ for (j = 0; j <= (numBuffs - 1); j++) {
+ memcpy(pSrcData[j], testEEA3_vectors[i].ciphertext,
+ (testEEA3_vectors[i].length_in_bits + 7) / 8);
+ }
+ IMB_ZUC_EEA3_N_BUFFER(mb_mgr, (const void * const *)pKeys,
+ (const void * const *)pIV,
+ (const void * const *)pSrcData,
+ (void **)pDstData, packetLen, numBuffs);
+ retTmp = memcmp(pDstData[0], testEEA3_vectors[i].plaintext,
+ (testEEA3_vectors[i].length_in_bits) / 8);
+ if (retTmp) {
+ printf("Validate ZUC n block (Dec) test %d, buffers "
+ "%d: FAIL\n",
+ i + 1, numBuffs);
+ byte_hexdump("Expected", testEEA3_vectors[i].plaintext,
+ (testEEA3_vectors[i].length_in_bits + 7) /
+ 8);
+ byte_hexdump("Found", pDstData[0],
+ (testEEA3_vectors[i].length_in_bits + 7) /
+ 8);
+ ret = retTmp;
+ } else {
+ bitResidue =
+ (0xFF00 >>
+ (testEEA3_vectors[i].length_in_bits % 8)) &
+ 0x00FF;
+ byteResidue =
+ (testEEA3_vectors[i]
+ .plaintext[testEEA3_vectors[i].length_in_bits /
+ 8] ^
+ pDst8[testEEA3_vectors[i].length_in_bits / 8]) &
+ bitResidue;
+ if (byteResidue) {
+ ret = 1;
+ printf("Validate ZUC n block (Dec) test %d, "
+ "buffers %d : FAIL\n",
+ i + 1, numBuffs);
+ printf("Expected: 0x%02X (last byte)\n",
+ 0xFF &
+ testEEA3_vectors[i]
+ .plaintext[testEEA3_vectors[i]
+ .length_in_bits /
+ 8]);
+ printf("Found: 0x%02X (last byte)\n",
+ 0xFF & pDst8[testEEA3_vectors[i]
+ .length_in_bits /
+ 8]);
+ } else
+ printf("Validate ZUC n block (Dec) test %d, "
+ "buffers %d: PASS\n",
+ i + 1, numBuffs);
+ }
+ fflush(stdout);
+ }
+ return ret;
+}
+
+int validate_zuc_EIA_1_block(struct MB_MGR *mb_mgr, uint8_t *pSrcData,
+ uint8_t *pDstData, uint8_t *pKeys, uint8_t *pIV)
+{
+ uint32_t i;
+ int retTmp, ret = 0;
+ uint32_t byteLength;
+
+ for (i = 0; i < NUM_ZUC_EIA3_TESTS; i++) {
+ memcpy(pKeys, testEIA3_vectors[i].CK, ZUC_KEY_LEN_IN_BYTES);
+
+ zuc_eia3_iv_gen(testEIA3_vectors[i].count,
+ testEIA3_vectors[i].Bearer,
+ testEIA3_vectors[i].Direction,
+ pIV);
+ byteLength = (testEIA3_vectors[i].length_in_bits + 7) / 8;
+ memcpy(pSrcData, testEIA3_vectors[i].message, byteLength);
+ IMB_ZUC_EIA3_1_BUFFER(mb_mgr, pKeys, pIV, pSrcData,
+ testEIA3_vectors[i].length_in_bits,
+ (uint32_t *)pDstData);
+ retTmp =
+ memcmp(pDstData, &testEIA3_vectors[i].mac,
+ sizeof(((struct test128EIA3_vectors_t *)0)->mac));
+ if (retTmp) {
+ printf("Validate ZUC 1 block test %d (Int): FAIL\n",
+ i + 1);
+ byte_hexdump("Expected",
+ (const uint8_t *)&testEIA3_vectors[i].mac,
+ ZUC_DIGEST_LEN);
+ byte_hexdump("Found", pDstData, ZUC_DIGEST_LEN);
+ ret = retTmp;
+ } else
+ printf("Validate ZUC 1 block test %d (Int): PASS\n",
+ i + 1);
+ fflush(stdout);
+ }
+ return ret;
+}
+
+int validate_zuc_algorithm(struct MB_MGR *mb_mgr, uint8_t *pSrcData,
+ uint8_t *pDstData, uint8_t *pKeys, uint8_t *pIV)
+{
+ uint32_t i;
+ int ret = 0;
+ union SwapBytes {
+ uint8_t sbb[8];
+ uint32_t sbw[2];
+ } swapBytes;
+
+ for (i = 0; i < NUM_ZUC_ALG_TESTS; i++) {
+ memcpy(pKeys, testZUC_vectors[i].CK, ZUC_KEY_LEN_IN_BYTES);
+ memcpy(pIV, testZUC_vectors[i].IV, ZUC_IV_LEN_IN_BYTES);
+ memset(pSrcData, 0, 8);
+ IMB_ZUC_EEA3_1_BUFFER(mb_mgr, pKeys, pIV, pSrcData, pDstData,
+ 8);
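+ /* the all-zero input makes the output equal the raw keystream;
+ * byte-swap the big-endian reference words Z[] before comparing */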
+ swapBytes.sbw[0] = bswap4(testZUC_vectors[i].Z[0]);
+ swapBytes.sbw[1] = bswap4(testZUC_vectors[i].Z[1]);
+ ret = memcmp(pDstData, swapBytes.sbb, 8);
+ if (ret)
+ printf("ZUC 1 algorithm test %d: FAIL\n", i);
+ else
+ printf("ZUC 1 algorithm test %d: PASS\n", i);
+ }
+ return ret;
+}
+/*****************************************************************************
+ ** @description - utility function to dump test buffers
+ **
+ ** @param message [IN] - debug message to print
+ ** @param ptr [IN] - pointer to beginning of buffer.
+ ** @param len [IN] - length of buffer.
+ *****************************************************************************/
+static void byte_hexdump(const char *message, const uint8_t *ptr, int len)
+{
+ int ctr;
+
+ printf("%s:\n", message);
+ for (ctr = 0; ctr < len; ctr++) {
+ printf("0x%02X ", ptr[ctr] & 0xff);
+ if (!((ctr + 1) % 16))
+ printf("\n");
+ }
+ printf("\n");
+ printf("\n");
+}
diff --git a/src/spdk/intel-ipsec-mb/LibTestApp/zuc_test_vectors.h b/src/spdk/intel-ipsec-mb/LibTestApp/zuc_test_vectors.h
new file mode 100644
index 000000000..49dd96255
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/LibTestApp/zuc_test_vectors.h
@@ -0,0 +1,587 @@
+/*
+ * Copyright (c) 2009-2019, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#ifndef __ZUC_TEST_VECTORS_H__
+#define __ZUC_TEST_VECTORS_H__
+
+#define MAX_BUFFER_LENGTH_IN_BITS 5670 /* biggest test is EIA test 5 */
+#define MAX_BUFFER_LENGTH_IN_BYTES (((MAX_BUFFER_LENGTH_IN_BITS) + 7)/8)
+#define NUM_ZUC_ALG_TESTS 3
+#define NUM_ZUC_EEA3_TESTS 5
+#define NUM_ZUC_EIA3_TESTS 10
+#define ZUC_KEY_LEN_IN_BYTES 16
+#define ZUC_IV_LEN_IN_BYTES 16
+#define ZUC_DIGEST_LEN 4
+
+typedef struct testZUC_vectors_t {
+ uint8_t CK[16];
+ uint8_t IV[16];
+ uint32_t Z[2];
+
+} testZUC_vectors_t;
+typedef struct test128EEA3_vectors_t {
+ uint8_t CK[16];
+ uint32_t count;
+ uint8_t Bearer;
+ uint8_t Direction;
+ uint32_t length_in_bits;
+ uint8_t plaintext[MAX_BUFFER_LENGTH_IN_BYTES];
+ uint8_t ciphertext[MAX_BUFFER_LENGTH_IN_BYTES];
+} test128EEA_vectors_t;
+
+typedef struct test128EIA3_vectors_t {
+ uint8_t CK[16];
+ uint32_t count;
+ uint8_t Bearer;
+ uint8_t Direction;
+ uint32_t length_in_bits;
+ uint8_t message[MAX_BUFFER_LENGTH_IN_BYTES];
+ uint8_t mac[4];
+} test128EIA_vectors_t;
+
+/*
+ *
+ * ZUC algorithm tests from 3GPP Document 3: Implementor's Test Data, Version 1.1 (4th Jan. 2011).
+ *
+ */
+const struct testZUC_vectors_t testZUC_vectors[] = {
+ {
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ {0x27BEDE74, 0x018082DA}
+ },
+ {
+ {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF},
+ {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF},
+ {0x0657CFA0, 0x7096398B}
+ },
+ {
+ {0x3D, 0x4C, 0x4B, 0xE9, 0x6A, 0x82, 0xFD, 0xAE, 0xB5, 0x8F, 0x64, 0x1D, 0xB1, 0x7B, 0x45, 0x5B},
+ {0x84, 0x31, 0x9A, 0xA8, 0xDE, 0x69, 0x15, 0xCA, 0x1F, 0x6B, 0xDA, 0x6B, 0xFB, 0xD8, 0xC7, 0x66},
+ {0x14F1C272, 0x3279C419}
+ },
+ {
+ {0x4D, 0x32, 0x0B, 0xFA, 0xD4, 0xC2, 0x85, 0xBF, 0xD6, 0xB8, 0xBD, 0x00, 0xF3, 0x9D, 0x8B, 0x41},
+ {0x52, 0x95, 0x9D, 0xAB, 0xA0, 0xBF, 0x17, 0x6E, 0xCE, 0x2D, 0xC3, 0x15, 0x04, 0x9E, 0xB5, 0x74},
+ {0xED4400E7, 0x0633E5C5}
+ },
+};
+const struct test128EEA3_vectors_t testEEA3_vectors[] = {
+ /* TestSet1*/
+ {
+ {0x17, 0x3D, 0x14, 0xBA, 0x50, 0x03, 0x73, 0x1D, 0x7A, 0x60, 0x04, 0x94, 0x70, 0xF0, 0x0A, 0x29},
+ 0x66035492,
+ 0x0F,
+ 0x0,
+ 193,
+ /* plaintext*/
+ {0x6C, 0xF6, 0x53, 0x40, 0x73, 0x55, 0x52, 0xAB,
+ 0x0C, 0x97, 0x52, 0xFA, 0x6F, 0x90, 0x25, 0xFE,
+ 0x0B, 0xD6, 0x75, 0xD9, 0x00, 0x58, 0x75, 0xB2,
+ 0x00, 0x00, 0x00, 0x00},
+ /*ciphertext*/
+ {0xA6, 0xC8, 0x5F, 0xC6, 0x6A, 0xFB, 0x85, 0x33,
+ 0xAA, 0xFC, 0x25, 0x18, 0xDF, 0xE7, 0x84, 0x94,
+ 0x0E, 0xE1, 0xE4, 0xB0, 0x30, 0x23, 0x8C, 0xC8,
+ 0x00, 0x00, 0x00, 0x00}
+ },
+ /*TestSet2*/
+ {
+ {0xE5, 0xBD, 0x3E, 0xA0, 0xEB, 0x55, 0xAD, 0xE8, 0x66, 0xC6, 0xAC, 0x58, 0xBD, 0x54, 0x30, 0x2A},
+ 0x56823,
+ 0x18,
+ 0x1,
+ 800,
+ /*plaintext*/
+ {0x14, 0xA8, 0xEF, 0x69, 0x3D, 0x67, 0x85, 0x07,
+ 0xBB, 0xE7, 0x27, 0x0A, 0x7F, 0x67, 0xFF, 0x50,
+ 0x06, 0xC3, 0x52, 0x5B, 0x98, 0x07, 0xE4, 0x67,
+ 0xC4, 0xE5, 0x60, 0x00, 0xBA, 0x33, 0x8F, 0x5D,
+ 0x42, 0x95, 0x59, 0x03, 0x67, 0x51, 0x82, 0x22,
+ 0x46, 0xC8, 0x0D, 0x3B, 0x38, 0xF0, 0x7F, 0x4B,
+ 0xE2, 0xD8, 0xFF, 0x58, 0x05, 0xF5, 0x13, 0x22,
+ 0x29, 0xBD, 0xE9, 0x3B, 0xBB, 0xDC, 0xAF, 0x38,
+ 0x2B, 0xF1, 0xEE, 0x97, 0x2F, 0xBF, 0x99, 0x77,
+ 0xBA, 0xDA, 0x89, 0x45, 0x84, 0x7A, 0x2A, 0x6C,
+ 0x9A, 0xD3, 0x4A, 0x66, 0x75, 0x54, 0xE0, 0x4D,
+ 0x1F, 0x7F, 0xA2, 0xC3, 0x32, 0x41, 0xBD, 0x8F,
+ 0x01, 0xBA, 0x22, 0x0D},
+ /*ciphertext*/
+ {0x13, 0x1D, 0x43, 0xE0, 0xDE, 0xA1, 0xBE, 0x5C,
+ 0x5A, 0x1B, 0xFD, 0x97, 0x1D, 0x85, 0x2C, 0xBF,
+ 0x71, 0x2D, 0x7B, 0x4F, 0x57, 0x96, 0x1F, 0xEA,
+ 0x32, 0x08, 0xAF, 0xA8, 0xBC, 0xA4, 0x33, 0xF4,
+ 0x56, 0xAD, 0x09, 0xC7, 0x41, 0x7E, 0x58, 0xBC,
+ 0x69, 0xCF, 0x88, 0x66, 0xD1, 0x35, 0x3F, 0x74,
+ 0x86, 0x5E, 0x80, 0x78, 0x1D, 0x20, 0x2D, 0xFB,
+ 0x3E, 0xCF, 0xF7, 0xFC, 0xBC, 0x3B, 0x19, 0x0F,
+ 0xE8, 0x2A, 0x20, 0x4E, 0xD0, 0xE3, 0x50, 0xFC,
+ 0x0F, 0x6F, 0x26, 0x13, 0xB2, 0xF2, 0xBC, 0xA6,
+ 0xDF, 0x5A, 0x47, 0x3A, 0x57, 0xA4, 0xA0, 0x0D,
+ 0x98, 0x5E, 0xBA, 0xD8, 0x80, 0xD6, 0xF2, 0x38,
+ 0x64, 0xA0, 0x7B, 0x01}
+ },
+ /*TestSet3*/
+ {
+ {0xD4, 0x55, 0x2A, 0x8F, 0xD6, 0xE6, 0x1C, 0xC8, 0x1A, 0x20, 0x09, 0x14, 0x1A, 0x29, 0xC1, 0x0B},
+ 0x76452EC1,
+ 0x2,
+ 0x1,
+ 1570,
+ /* plaintext*/
+ {0x38, 0xF0, 0x7F, 0x4B, 0xE2, 0xD8, 0xFF, 0x58,
+ 0x05, 0xF5, 0x13, 0x22, 0x29, 0xBD, 0xE9, 0x3B,
+ 0xBB, 0xDC, 0xAF, 0x38, 0x2B, 0xF1, 0xEE, 0x97,
+ 0x2F, 0xBF, 0x99, 0x77, 0xBA, 0xDA, 0x89, 0x45,
+ 0x84, 0x7A, 0x2A, 0x6C, 0x9A, 0xD3, 0x4A, 0x66,
+ 0x75, 0x54, 0xE0, 0x4D, 0x1F, 0x7F, 0xA2, 0xC3,
+ 0x32, 0x41, 0xBD, 0x8F, 0x01, 0xBA, 0x22, 0x0D,
+ 0x3C, 0xA4, 0xEC, 0x41, 0xE0, 0x74, 0x59, 0x5F,
+ 0x54, 0xAE, 0x2B, 0x45, 0x4F, 0xD9, 0x71, 0x43,
+ 0x20, 0x43, 0x60, 0x19, 0x65, 0xCC, 0xA8, 0x5C,
+ 0x24, 0x17, 0xED, 0x6C, 0xBE, 0xC3, 0xBA, 0xDA,
+ 0x84, 0xFC, 0x8A, 0x57, 0x9A, 0xEA, 0x78, 0x37,
+ 0xB0, 0x27, 0x11, 0x77, 0x24, 0x2A, 0x64, 0xDC,
+ 0x0A, 0x9D, 0xE7, 0x1A, 0x8E, 0xDE, 0xE8, 0x6C,
+ 0xA3, 0xD4, 0x7D, 0x03, 0x3D, 0x6B, 0xF5, 0x39,
+ 0x80, 0x4E, 0xCA, 0x86, 0xC5, 0x84, 0xA9, 0x05,
+ 0x2D, 0xE4, 0x6A, 0xD3, 0xFC, 0xED, 0x65, 0x54,
+ 0x3B, 0xD9, 0x02, 0x07, 0x37, 0x2B, 0x27, 0xAF,
+ 0xB7, 0x92, 0x34, 0xF5, 0xFF, 0x43, 0xEA, 0x87,
+ 0x08, 0x20, 0xE2, 0xC2, 0xB7, 0x8A, 0x8A, 0xAE,
+ 0x61, 0xCC, 0xE5, 0x2A, 0x05, 0x15, 0xE3, 0x48,
+ 0xD1, 0x96, 0x66, 0x4A, 0x34, 0x56, 0xB1, 0x82,
+ 0xA0, 0x7C, 0x40, 0x6E, 0x4A, 0x20, 0x79, 0x12,
+ 0x71, 0xCF, 0xED, 0xA1, 0x65, 0xD5, 0x35, 0xEC,
+ 0x5E, 0xA2, 0xD4, 0xDF, 0x40, 0x00, 0x00, 0x00},
+ /*ciphertext*/
+ {0x83, 0x83, 0xB0, 0x22, 0x9F, 0xCC, 0x0B, 0x9D,
+ 0x22, 0x95, 0xEC, 0x41, 0xC9, 0x77, 0xE9, 0xC2,
+ 0xBB, 0x72, 0xE2, 0x20, 0x37, 0x81, 0x41, 0xF9,
+ 0xC8, 0x31, 0x8F, 0x3A, 0x27, 0x0D, 0xFB, 0xCD,
+ 0xEE, 0x64, 0x11, 0xC2, 0xB3, 0x04, 0x4F, 0x17,
+ 0x6D, 0xC6, 0xE0, 0x0F, 0x89, 0x60, 0xF9, 0x7A,
+ 0xFA, 0xCD, 0x13, 0x1A, 0xD6, 0xA3, 0xB4, 0x9B,
+ 0x16, 0xB7, 0xBA, 0xBC, 0xF2, 0xA5, 0x09, 0xEB,
+ 0xB1, 0x6A, 0x75, 0xDC, 0xAB, 0x14, 0xFF, 0x27,
+ 0x5D, 0xBE, 0xEE, 0xA1, 0xA2, 0xB1, 0x55, 0xF9,
+ 0xD5, 0x2C, 0x26, 0x45, 0x2D, 0x01, 0x87, 0xC3,
+ 0x10, 0xA4, 0xEE, 0x55, 0xBE, 0xAA, 0x78, 0xAB,
+ 0x40, 0x24, 0x61, 0x5B, 0xA9, 0xF5, 0xD5, 0xAD,
+ 0xC7, 0x72, 0x8F, 0x73, 0x56, 0x06, 0x71, 0xF0,
+ 0x13, 0xE5, 0xE5, 0x50, 0x08, 0x5D, 0x32, 0x91,
+ 0xDF, 0x7D, 0x5F, 0xEC, 0xED, 0xDE, 0xD5, 0x59,
+ 0x64, 0x1B, 0x6C, 0x2F, 0x58, 0x52, 0x33, 0xBC,
+ 0x71, 0xE9, 0x60, 0x2B, 0xD2, 0x30, 0x58, 0x55,
+ 0xBB, 0xD2, 0x5F, 0xFA, 0x7F, 0x17, 0xEC, 0xBC,
+ 0x04, 0x2D, 0xAA, 0xE3, 0x8C, 0x1F, 0x57, 0xAD,
+ 0x8E, 0x8E, 0xBD, 0x37, 0x34, 0x6F, 0x71, 0xBE,
+ 0xFD, 0xBB, 0x74, 0x32, 0xE0, 0xE0, 0xBB, 0x2C,
+ 0xFC, 0x09, 0xBC, 0xD9, 0x65, 0x70, 0xCB, 0x0C,
+ 0x0C, 0x39, 0xDF, 0x5E, 0x29, 0x29, 0x4E, 0x82,
+ 0x70, 0x3A, 0x63, 0x7F, 0x80, 0x00, 0x00, 0x00}
+ },
+ /*TestSet4*/
+ {
+ {0xDB, 0x84, 0xB4, 0xFB, 0xCC, 0xDA, 0x56, 0x3B, 0x66, 0x22, 0x7B, 0xFE, 0x45, 0x6F, 0x0F, 0x77},
+ 0xE4850FE1,
+ 0x10,
+ 0x1,
+ 2798,
+ /*plaintext*/
+ {0xE5, 0x39, 0xF3, 0xB8, 0x97, 0x32, 0x40, 0xDA,
+ 0x03, 0xF2, 0xB8, 0xAA, 0x05, 0xEE, 0x0A, 0x00,
+ 0xDB, 0xAF, 0xC0, 0xE1, 0x82, 0x05, 0x5D, 0xFE,
+ 0x3D, 0x73, 0x83, 0xD9, 0x2C, 0xEF, 0x40, 0xE9,
+ 0x29, 0x28, 0x60, 0x5D, 0x52, 0xD0, 0x5F, 0x4F,
+ 0x90, 0x18, 0xA1, 0xF1, 0x89, 0xAE, 0x39, 0x97,
+ 0xCE, 0x19, 0x15, 0x5F, 0xB1, 0x22, 0x1D, 0xB8,
+ 0xBB, 0x09, 0x51, 0xA8, 0x53, 0xAD, 0x85, 0x2C,
+ 0xE1, 0x6C, 0xFF, 0x07, 0x38, 0x2C, 0x93, 0xA1,
+ 0x57, 0xDE, 0x00, 0xDD, 0xB1, 0x25, 0xC7, 0x53,
+ 0x9F, 0xD8, 0x50, 0x45, 0xE4, 0xEE, 0x07, 0xE0,
+ 0xC4, 0x3F, 0x9E, 0x9D, 0x6F, 0x41, 0x4F, 0xC4,
+ 0xD1, 0xC6, 0x29, 0x17, 0x81, 0x3F, 0x74, 0xC0,
+ 0x0F, 0xC8, 0x3F, 0x3E, 0x2E, 0xD7, 0xC4, 0x5B,
+ 0xA5, 0x83, 0x52, 0x64, 0xB4, 0x3E, 0x0B, 0x20,
+ 0xAF, 0xDA, 0x6B, 0x30, 0x53, 0xBF, 0xB6, 0x42,
+ 0x3B, 0x7F, 0xCE, 0x25, 0x47, 0x9F, 0xF5, 0xF1,
+ 0x39, 0xDD, 0x9B, 0x5B, 0x99, 0x55, 0x58, 0xE2,
+ 0xA5, 0x6B, 0xE1, 0x8D, 0xD5, 0x81, 0xCD, 0x01,
+ 0x7C, 0x73, 0x5E, 0x6F, 0x0D, 0x0D, 0x97, 0xC4,
+ 0xDD, 0xC1, 0xD1, 0xDA, 0x70, 0xC6, 0xDB, 0x4A,
+ 0x12, 0xCC, 0x92, 0x77, 0x8E, 0x2F, 0xBB, 0xD6,
+ 0xF3, 0xBA, 0x52, 0xAF, 0x91, 0xC9, 0xC6, 0xB6,
+ 0x4E, 0x8D, 0xA4, 0xF7, 0xA2, 0xC2, 0x66, 0xD0,
+ 0x2D, 0x00, 0x17, 0x53, 0xDF, 0x08, 0x96, 0x03,
+ 0x93, 0xC5, 0xD5, 0x68, 0x88, 0xBF, 0x49, 0xEB,
+ 0x5C, 0x16, 0xD9, 0xA8, 0x04, 0x27, 0xA4, 0x16,
+ 0xBC, 0xB5, 0x97, 0xDF, 0x5B, 0xFE, 0x6F, 0x13,
+ 0x89, 0x0A, 0x07, 0xEE, 0x13, 0x40, 0xE6, 0x47,
+ 0x6B, 0x0D, 0x9A, 0xA8, 0xF8, 0x22, 0xAB, 0x0F,
+ 0xD1, 0xAB, 0x0D, 0x20, 0x4F, 0x40, 0xB7, 0xCE,
+ 0x6F, 0x2E, 0x13, 0x6E, 0xB6, 0x74, 0x85, 0xE5,
+ 0x07, 0x80, 0x4D, 0x50, 0x45, 0x88, 0xAD, 0x37,
+ 0xFF, 0xD8, 0x16, 0x56, 0x8B, 0x2D, 0xC4, 0x03,
+ 0x11, 0xDF, 0xB6, 0x54, 0xCD, 0xEA, 0xD4, 0x7E,
+ 0x23, 0x85, 0xC3, 0x43, 0x62, 0x03, 0xDD, 0x83,
+ 0x6F, 0x9C, 0x64, 0xD9, 0x74, 0x62, 0xAD, 0x5D,
+ 0xFA, 0x63, 0xB5, 0xCF, 0xE0, 0x8A, 0xCB, 0x95,
+ 0x32, 0x86, 0x6F, 0x5C, 0xA7, 0x87, 0x56, 0x6F,
+ 0xCA, 0x93, 0xE6, 0xB1, 0x69, 0x3E, 0xE1, 0x5C,
+ 0xF6, 0xF7, 0xA2, 0xD6, 0x89, 0xD9, 0x74, 0x17,
+ 0x98, 0xDC, 0x1C, 0x23, 0x8E, 0x1B, 0xE6, 0x50,
+ 0x73, 0x3B, 0x18, 0xFB, 0x34, 0xFF, 0x88, 0x0E,
+ 0x16, 0xBB, 0xD2, 0x1B, 0x47, 0xAC, 0x00, 0x00},
+ /*ciphertext*/
+ {0x4B, 0xBF, 0xA9, 0x1B, 0xA2, 0x5D, 0x47, 0xDB,
+ 0x9A, 0x9F, 0x19, 0x0D, 0x96, 0x2A, 0x19, 0xAB,
+ 0x32, 0x39, 0x26, 0xB3, 0x51, 0xFB, 0xD3, 0x9E,
+ 0x35, 0x1E, 0x05, 0xDA, 0x8B, 0x89, 0x25, 0xE3,
+ 0x0B, 0x1C, 0xCE, 0x0D, 0x12, 0x21, 0x10, 0x10,
+ 0x95, 0x81, 0x5C, 0xC7, 0xCB, 0x63, 0x19, 0x50,
+ 0x9E, 0xC0, 0xD6, 0x79, 0x40, 0x49, 0x19, 0x87,
+ 0xE1, 0x3F, 0x0A, 0xFF, 0xAC, 0x33, 0x2A, 0xA6,
+ 0xAA, 0x64, 0x62, 0x6D, 0x3E, 0x9A, 0x19, 0x17,
+ 0x51, 0x9E, 0x0B, 0x97, 0xB6, 0x55, 0xC6, 0xA1,
+ 0x65, 0xE4, 0x4C, 0xA9, 0xFE, 0xAC, 0x07, 0x90,
+ 0xD2, 0xA3, 0x21, 0xAD, 0x3D, 0x86, 0xB7, 0x9C,
+ 0x51, 0x38, 0x73, 0x9F, 0xA3, 0x8D, 0x88, 0x7E,
+ 0xC7, 0xDE, 0xF4, 0x49, 0xCE, 0x8A, 0xBD, 0xD3,
+ 0xE7, 0xF8, 0xDC, 0x4C, 0xA9, 0xE7, 0xB7, 0x33,
+ 0x14, 0xAD, 0x31, 0x0F, 0x90, 0x25, 0xE6, 0x19,
+ 0x46, 0xB3, 0xA5, 0x6D, 0xC6, 0x49, 0xEC, 0x0D,
+ 0xA0, 0xD6, 0x39, 0x43, 0xDF, 0xF5, 0x92, 0xCF,
+ 0x96, 0x2A, 0x7E, 0xFB, 0x2C, 0x85, 0x24, 0xE3,
+ 0x5A, 0x2A, 0x6E, 0x78, 0x79, 0xD6, 0x26, 0x04,
+ 0xEF, 0x26, 0x86, 0x95, 0xFA, 0x40, 0x03, 0x02,
+ 0x7E, 0x22, 0xE6, 0x08, 0x30, 0x77, 0x52, 0x20,
+ 0x64, 0xBD, 0x4A, 0x5B, 0x90, 0x6B, 0x5F, 0x53,
+ 0x12, 0x74, 0xF2, 0x35, 0xED, 0x50, 0x6C, 0xFF,
+ 0x01, 0x54, 0xC7, 0x54, 0x92, 0x8A, 0x0C, 0xE5,
+ 0x47, 0x6F, 0x2C, 0xB1, 0x02, 0x0A, 0x12, 0x22,
+ 0xD3, 0x2C, 0x14, 0x55, 0xEC, 0xAE, 0xF1, 0xE3,
+ 0x68, 0xFB, 0x34, 0x4D, 0x17, 0x35, 0xBF, 0xBE,
+ 0xDE, 0xB7, 0x1D, 0x0A, 0x33, 0xA2, 0xA5, 0x4B,
+ 0x1D, 0xA5, 0xA2, 0x94, 0xE6, 0x79, 0x14, 0x4D,
+ 0xDF, 0x11, 0xEB, 0x1A, 0x3D, 0xE8, 0xCF, 0x0C,
+ 0xC0, 0x61, 0x91, 0x79, 0x74, 0xF3, 0x5C, 0x1D,
+ 0x9C, 0xA0, 0xAC, 0x81, 0x80, 0x7F, 0x8F, 0xCC,
+ 0xE6, 0x19, 0x9A, 0x6C, 0x77, 0x12, 0xDA, 0x86,
+ 0x50, 0x21, 0xB0, 0x4C, 0xE0, 0x43, 0x95, 0x16,
+ 0xF1, 0xA5, 0x26, 0xCC, 0xDA, 0x9F, 0xD9, 0xAB,
+ 0xBD, 0x53, 0xC3, 0xA6, 0x84, 0xF9, 0xAE, 0x1E,
+ 0x7E, 0xE6, 0xB1, 0x1D, 0xA1, 0x38, 0xEA, 0x82,
+ 0x6C, 0x55, 0x16, 0xB5, 0xAA, 0xDF, 0x1A, 0xBB,
+ 0xE3, 0x6F, 0xA7, 0xFF, 0xF9, 0x2E, 0x3A, 0x11,
+ 0x76, 0x06, 0x4E, 0x8D, 0x95, 0xF2, 0xE4, 0x88,
+ 0x2B, 0x55, 0x00, 0xB9, 0x32, 0x28, 0xB2, 0x19,
+ 0x4A, 0x47, 0x5C, 0x1A, 0x27, 0xF6, 0x3F, 0x9F,
+ 0xFD, 0x26, 0x49, 0x89, 0xA1, 0xBC, 0x00, 0x00
+ }
+ },
+ /*TestSet5*/
+ {
+ {0xE1, 0x3F, 0xED, 0x21, 0xB4, 0x6E, 0x4E, 0x7E, 0xC3, 0x12, 0x53, 0xB2, 0xBB, 0x17, 0xB3, 0xE0},
+ 0x2738CDAA,
+ 0x1A,
+ 0x0,
+ 4019,
+ /*plaintext*/
+ {0x8D, 0x74, 0xE2, 0x0D, 0x54, 0x89, 0x4E, 0x06,
+ 0xD3, 0xCB, 0x13, 0xCB, 0x39, 0x33, 0x06, 0x5E,
+ 0x86, 0x74, 0xBE, 0x62, 0xAD, 0xB1, 0xC7, 0x2B,
+ 0x3A, 0x64, 0x69, 0x65, 0xAB, 0x63, 0xCB, 0x7B,
+ 0x78, 0x54, 0xDF, 0xDC, 0x27, 0xE8, 0x49, 0x29,
+ 0xF4, 0x9C, 0x64, 0xB8, 0x72, 0xA4, 0x90, 0xB1,
+ 0x3F, 0x95, 0x7B, 0x64, 0x82, 0x7E, 0x71, 0xF4,
+ 0x1F, 0xBD, 0x42, 0x69, 0xA4, 0x2C, 0x97, 0xF8,
+ 0x24, 0x53, 0x70, 0x27, 0xF8, 0x6E, 0x9F, 0x4A,
+ 0xD8, 0x2D, 0x1D, 0xF4, 0x51, 0x69, 0x0F, 0xDD,
+ 0x98, 0xB6, 0xD0, 0x3F, 0x3A, 0x0E, 0xBE, 0x3A,
+ 0x31, 0x2D, 0x6B, 0x84, 0x0B, 0xA5, 0xA1, 0x82,
+ 0x0B, 0x2A, 0x2C, 0x97, 0x09, 0xC0, 0x90, 0xD2,
+ 0x45, 0xED, 0x26, 0x7C, 0xF8, 0x45, 0xAE, 0x41,
+ 0xFA, 0x97, 0x5D, 0x33, 0x33, 0xAC, 0x30, 0x09,
+ 0xFD, 0x40, 0xEB, 0xA9, 0xEB, 0x5B, 0x88, 0x57,
+ 0x14, 0xB7, 0x68, 0xB6, 0x97, 0x13, 0x8B, 0xAF,
+ 0x21, 0x38, 0x0E, 0xCA, 0x49, 0xF6, 0x44, 0xD4,
+ 0x86, 0x89, 0xE4, 0x21, 0x57, 0x60, 0xB9, 0x06,
+ 0x73, 0x9F, 0x0D, 0x2B, 0x3F, 0x09, 0x11, 0x33,
+ 0xCA, 0x15, 0xD9, 0x81, 0xCB, 0xE4, 0x01, 0xBA,
+ 0xF7, 0x2D, 0x05, 0xAC, 0xE0, 0x5C, 0xCC, 0xB2,
+ 0xD2, 0x97, 0xF4, 0xEF, 0x6A, 0x5F, 0x58, 0xD9,
+ 0x12, 0x46, 0xCF, 0xA7, 0x72, 0x15, 0xB8, 0x92,
+ 0xAB, 0x44, 0x1D, 0x52, 0x78, 0x45, 0x27, 0x95,
+ 0xCC, 0xB7, 0xF5, 0xD7, 0x90, 0x57, 0xA1, 0xC4,
+ 0xF7, 0x7F, 0x80, 0xD4, 0x6D, 0xB2, 0x03, 0x3C,
+ 0xB7, 0x9B, 0xED, 0xF8, 0xE6, 0x05, 0x51, 0xCE,
+ 0x10, 0xC6, 0x67, 0xF6, 0x2A, 0x97, 0xAB, 0xAF,
+ 0xAB, 0xBC, 0xD6, 0x77, 0x20, 0x18, 0xDF, 0x96,
+ 0xA2, 0x82, 0xEA, 0x73, 0x7C, 0xE2, 0xCB, 0x33,
+ 0x12, 0x11, 0xF6, 0x0D, 0x53, 0x54, 0xCE, 0x78,
+ 0xF9, 0x91, 0x8D, 0x9C, 0x20, 0x6C, 0xA0, 0x42,
+ 0xC9, 0xB6, 0x23, 0x87, 0xDD, 0x70, 0x96, 0x04,
+ 0xA5, 0x0A, 0xF1, 0x6D, 0x8D, 0x35, 0xA8, 0x90,
+ 0x6B, 0xE4, 0x84, 0xCF, 0x2E, 0x74, 0xA9, 0x28,
+ 0x99, 0x40, 0x36, 0x43, 0x53, 0x24, 0x9B, 0x27,
+ 0xB4, 0xC9, 0xAE, 0x29, 0xED, 0xDF, 0xC7, 0xDA,
+ 0x64, 0x18, 0x79, 0x1A, 0x4E, 0x7B, 0xAA, 0x06,
+ 0x60, 0xFA, 0x64, 0x51, 0x1F, 0x2D, 0x68, 0x5C,
+ 0xC3, 0xA5, 0xFF, 0x70, 0xE0, 0xD2, 0xB7, 0x42,
+ 0x92, 0xE3, 0xB8, 0xA0, 0xCD, 0x6B, 0x04, 0xB1,
+ 0xC7, 0x90, 0xB8, 0xEA, 0xD2, 0x70, 0x37, 0x08,
+ 0x54, 0x0D, 0xEA, 0x2F, 0xC0, 0x9C, 0x3D, 0xA7,
+ 0x70, 0xF6, 0x54, 0x49, 0xE8, 0x4D, 0x81, 0x7A,
+ 0x4F, 0x55, 0x10, 0x55, 0xE1, 0x9A, 0xB8, 0x50,
+ 0x18, 0xA0, 0x02, 0x8B, 0x71, 0xA1, 0x44, 0xD9,
+ 0x67, 0x91, 0xE9, 0xA3, 0x57, 0x79, 0x33, 0x50,
+ 0x4E, 0xEE, 0x00, 0x60, 0x34, 0x0C, 0x69, 0xD2,
+ 0x74, 0xE1, 0xBF, 0x9D, 0x80, 0x5D, 0xCB, 0xCC,
+ 0x1A, 0x6F, 0xAA, 0x97, 0x68, 0x00, 0xB6, 0xFF,
+ 0x2B, 0x67, 0x1D, 0xC4, 0x63, 0x65, 0x2F, 0xA8,
+ 0xA3, 0x3E, 0xE5, 0x09, 0x74, 0xC1, 0xC2, 0x1B,
+ 0xE0, 0x1E, 0xAB, 0xB2, 0x16, 0x74, 0x30, 0x26,
+ 0x9D, 0x72, 0xEE, 0x51, 0x1C, 0x9D, 0xDE, 0x30,
+ 0x79, 0x7C, 0x9A, 0x25, 0xD8, 0x6C, 0xE7, 0x4F,
+ 0x5B, 0x96, 0x1B, 0xE5, 0xFD, 0xFB, 0x68, 0x07,
+ 0x81, 0x40, 0x39, 0xE7, 0x13, 0x76, 0x36, 0xBD,
+ 0x1D, 0x7F, 0xA9, 0xE0, 0x9E, 0xFD, 0x20, 0x07,
+ 0x50, 0x59, 0x06, 0xA5, 0xAC, 0x45, 0xDF, 0xDE,
+ 0xED, 0x77, 0x57, 0xBB, 0xEE, 0x74, 0x57, 0x49,
+ 0xC2, 0x96, 0x33, 0x35, 0x0B, 0xEE, 0x0E, 0xA6,
+ 0xF4, 0x09, 0xDF, 0x45, 0x80, 0x16, 0x00, 0x00},
+ /*ciphertext*/
+ {0x94, 0xEA, 0xA4, 0xAA, 0x30, 0xA5, 0x71, 0x37,
+ 0xDD, 0xF0, 0x9B, 0x97, 0xB2, 0x56, 0x18, 0xA2,
+ 0x0A, 0x13, 0xE2, 0xF1, 0x0F, 0xA5, 0xBF, 0x81,
+ 0x61, 0xA8, 0x79, 0xCC, 0x2A, 0xE7, 0x97, 0xA6,
+ 0xB4, 0xCF, 0x2D, 0x9D, 0xF3, 0x1D, 0xEB, 0xB9,
+ 0x90, 0x5C, 0xCF, 0xEC, 0x97, 0xDE, 0x60, 0x5D,
+ 0x21, 0xC6, 0x1A, 0xB8, 0x53, 0x1B, 0x7F, 0x3C,
+ 0x9D, 0xA5, 0xF0, 0x39, 0x31, 0xF8, 0xA0, 0x64,
+ 0x2D, 0xE4, 0x82, 0x11, 0xF5, 0xF5, 0x2F, 0xFE,
+ 0xA1, 0x0F, 0x39, 0x2A, 0x04, 0x76, 0x69, 0x98,
+ 0x5D, 0xA4, 0x54, 0xA2, 0x8F, 0x08, 0x09, 0x61,
+ 0xA6, 0xC2, 0xB6, 0x2D, 0xAA, 0x17, 0xF3, 0x3C,
+ 0xD6, 0x0A, 0x49, 0x71, 0xF4, 0x8D, 0x2D, 0x90,
+ 0x93, 0x94, 0xA5, 0x5F, 0x48, 0x11, 0x7A, 0xCE,
+ 0x43, 0xD7, 0x08, 0xE6, 0xB7, 0x7D, 0x3D, 0xC4,
+ 0x6D, 0x8B, 0xC0, 0x17, 0xD4, 0xD1, 0xAB, 0xB7,
+ 0x7B, 0x74, 0x28, 0xC0, 0x42, 0xB0, 0x6F, 0x2F,
+ 0x99, 0xD8, 0xD0, 0x7C, 0x98, 0x79, 0xD9, 0x96,
+ 0x00, 0x12, 0x7A, 0x31, 0x98, 0x5F, 0x10, 0x99,
+ 0xBB, 0xD7, 0xD6, 0xC1, 0x51, 0x9E, 0xDE, 0x8F,
+ 0x5E, 0xEB, 0x4A, 0x61, 0x0B, 0x34, 0x9A, 0xC0,
+ 0x1E, 0xA2, 0x35, 0x06, 0x91, 0x75, 0x6B, 0xD1,
+ 0x05, 0xC9, 0x74, 0xA5, 0x3E, 0xDD, 0xB3, 0x5D,
+ 0x1D, 0x41, 0x00, 0xB0, 0x12, 0xE5, 0x22, 0xAB,
+ 0x41, 0xF4, 0xC5, 0xF2, 0xFD, 0xE7, 0x6B, 0x59,
+ 0xCB, 0x8B, 0x96, 0xD8, 0x85, 0xCF, 0xE4, 0x08,
+ 0x0D, 0x13, 0x28, 0xA0, 0xD6, 0x36, 0xCC, 0x0E,
+ 0xDC, 0x05, 0x80, 0x0B, 0x76, 0xAC, 0xCA, 0x8F,
+ 0xEF, 0x67, 0x20, 0x84, 0xD1, 0xF5, 0x2A, 0x8B,
+ 0xBD, 0x8E, 0x09, 0x93, 0x32, 0x09, 0x92, 0xC7,
+ 0xFF, 0xBA, 0xE1, 0x7C, 0x40, 0x84, 0x41, 0xE0,
+ 0xEE, 0x88, 0x3F, 0xC8, 0xA8, 0xB0, 0x5E, 0x22,
+ 0xF5, 0xFF, 0x7F, 0x8D, 0x1B, 0x48, 0xC7, 0x4C,
+ 0x46, 0x8C, 0x46, 0x7A, 0x02, 0x8F, 0x09, 0xFD,
+ 0x7C, 0xE9, 0x11, 0x09, 0xA5, 0x70, 0xA2, 0xD5,
+ 0xC4, 0xD5, 0xF4, 0xFA, 0x18, 0xC5, 0xDD, 0x3E,
+ 0x45, 0x62, 0xAF, 0xE2, 0x4E, 0xF7, 0x71, 0x90,
+ 0x1F, 0x59, 0xAF, 0x64, 0x58, 0x98, 0xAC, 0xEF,
+ 0x08, 0x8A, 0xBA, 0xE0, 0x7E, 0x92, 0xD5, 0x2E,
+ 0xB2, 0xDE, 0x55, 0x04, 0x5B, 0xB1, 0xB7, 0xC4,
+ 0x16, 0x4E, 0xF2, 0xD7, 0xA6, 0xCA, 0xC1, 0x5E,
+ 0xEB, 0x92, 0x6D, 0x7E, 0xA2, 0xF0, 0x8B, 0x66,
+ 0xE1, 0xF7, 0x59, 0xF3, 0xAE, 0xE4, 0x46, 0x14,
+ 0x72, 0x5A, 0xA3, 0xC7, 0x48, 0x2B, 0x30, 0x84,
+ 0x4C, 0x14, 0x3F, 0xF8, 0x5B, 0x53, 0xF1, 0xE5,
+ 0x83, 0xC5, 0x01, 0x25, 0x7D, 0xDD, 0xD0, 0x96,
+ 0xB8, 0x12, 0x68, 0xDA, 0xA3, 0x03, 0xF1, 0x72,
+ 0x34, 0xC2, 0x33, 0x35, 0x41, 0xF0, 0xBB, 0x8E,
+ 0x19, 0x06, 0x48, 0xC5, 0x80, 0x7C, 0x86, 0x6D,
+ 0x71, 0x93, 0x22, 0x86, 0x09, 0xAD, 0xB9, 0x48,
+ 0x68, 0x6F, 0x7D, 0xE2, 0x94, 0xA8, 0x02, 0xCC,
+ 0x38, 0xF7, 0xFE, 0x52, 0x08, 0xF5, 0xEA, 0x31,
+ 0x96, 0xD0, 0x16, 0x7B, 0x9B, 0xDD, 0x02, 0xF0,
+ 0xD2, 0xA5, 0x22, 0x1C, 0xA5, 0x08, 0xF8, 0x93,
+ 0xAF, 0x5C, 0x4B, 0x4B, 0xB9, 0xF4, 0xF5, 0x20,
+ 0xFD, 0x84, 0x28, 0x9B, 0x3D, 0xBE, 0x7E, 0x61,
+ 0x49, 0x7A, 0x7E, 0x2A, 0x58, 0x40, 0x37, 0xEA,
+ 0x63, 0x7B, 0x69, 0x81, 0x12, 0x71, 0x74, 0xAF,
+ 0x57, 0xB4, 0x71, 0xDF, 0x4B, 0x27, 0x68, 0xFD,
+ 0x79, 0xC1, 0x54, 0x0F, 0xB3, 0xED, 0xF2, 0xEA,
+ 0x22, 0xCB, 0x69, 0xBE, 0xC0, 0xCF, 0x8D, 0x93,
+ 0x3D, 0x9C, 0x6F, 0xDD, 0x64, 0x5E, 0x85, 0x05,
+ 0x91, 0xCC, 0xA3, 0xD6, 0x2C, 0x0C, 0xC0, 0x00}
+ }
+};
+const struct test128EIA3_vectors_t testEIA3_vectors[] = {
+ {
+ /*Test 1*/
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ 0x00000000,
+ 0x0,
+ 0x0,
+ 1,
+ {0x00, 0x00, 0x00, 0x00},
+ {0xC8, 0xA9, 0x59, 0x5E}
+ },
+ {
+ /*Test 2*/
+ {0x47, 0x05, 0x41, 0x25, 0x56, 0x1e, 0xb2, 0xdd, 0xa9, 0x40, 0x59, 0xda, 0x05, 0x09, 0x78, 0x50},
+ 0x561EB2DD,
+ 0x14,
+ 0,
+ 90,
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ {0x67, 0x19, 0xA0, 0x88}
+ },
+ /*Test 3*/
+ {
+ {0xC9, 0xE6, 0xCE, 0xC4, 0x60, 0x7C, 0x72, 0xDB, 0x00, 0x0A, 0xEF, 0xA8, 0x83, 0x85, 0xAB, 0x0A},
+ 0xA94059DA,
+ 0xA,
+ 0x1,
+ 577,
+ {0x98, 0x3B, 0x41, 0xD4, 0x7D, 0x78, 0x0C, 0x9E, 0x1A, 0xD1, 0x1D, 0x7E, 0xB7, 0x03, 0x91, 0xB1,
+ 0xDE, 0x0B, 0x35, 0xDA, 0x2D, 0xC6, 0x2F, 0x83, 0xE7, 0xB7, 0x8D, 0x63, 0x06, 0xCA, 0x0E, 0xA0,
+ 0x7E, 0x94, 0x1B, 0x7B, 0xE9, 0x13, 0x48, 0xF9, 0xFC, 0xB1, 0x70, 0xE2, 0x21, 0x7F, 0xEC, 0xD9,
+ 0x7F, 0x9F, 0x68, 0xAD, 0xB1, 0x6E, 0x5D, 0x7D, 0x21, 0xE5, 0x69, 0xD2, 0x80, 0xED, 0x77, 0x5C,
+ 0xEB, 0xDE, 0x3F, 0x40, 0x93, 0xC5, 0x38, 0x81, 0x00, 0x00, 0x00, 0x00},
+ {0xFA, 0xE8, 0xFF, 0x0B}
+ },
+ /*Test 4*/
+ {
+ {0xc8, 0xa4, 0x82, 0x62, 0xd0, 0xc2, 0xe2, 0xba, 0xc4, 0xb9, 0x6e, 0xf7, 0x7e, 0x80, 0xca, 0x59},
+ 0x5097850,
+ 0x10,
+ 0x1,
+ 2079,
+ {0xb5, 0x46, 0x43, 0x0b, 0xf8, 0x7b, 0x4f, 0x1e, 0xe8, 0x34, 0x70, 0x4c, 0xd6, 0x95, 0x1c, 0x36, 0xe2, 0x6f, 0x10, 0x8c, 0xf7, 0x31, 0x78, 0x8f, 0x48, 0xdc, 0x34, 0xf1, 0x67, 0x8c, 0x05, 0x22,
+ 0x1c, 0x8f, 0xa7, 0xff, 0x2f, 0x39, 0xf4, 0x77, 0xe7, 0xe4, 0x9e, 0xf6, 0x0a, 0x4e, 0xc2, 0xc3, 0xde, 0x24, 0x31, 0x2a, 0x96, 0xaa, 0x26, 0xe1, 0xcf, 0xba, 0x57, 0x56, 0x38, 0x38, 0xb2, 0x97,
+ 0xf4, 0x7e, 0x85, 0x10, 0xc7, 0x79, 0xfd, 0x66, 0x54, 0xb1, 0x43, 0x38, 0x6f, 0xa6, 0x39, 0xd3, 0x1e, 0xdb, 0xd6, 0xc0, 0x6e, 0x47, 0xd1, 0x59, 0xd9, 0x43, 0x62, 0xf2, 0x6a, 0xee, 0xed, 0xee,
+ 0x0e, 0x4f, 0x49, 0xd9, 0xbf, 0x84, 0x12, 0x99, 0x54, 0x15, 0xbf, 0xad, 0x56, 0xee, 0x82, 0xd1, 0xca, 0x74, 0x63, 0xab, 0xf0, 0x85, 0xb0, 0x82, 0xb0, 0x99, 0x04, 0xd6, 0xd9, 0x90, 0xd4, 0x3c,
+ 0xf2, 0xe0, 0x62, 0xf4, 0x08, 0x39, 0xd9, 0x32, 0x48, 0xb1, 0xeb, 0x92, 0xcd, 0xfe, 0xd5, 0x30, 0x0b, 0xc1, 0x48, 0x28, 0x04, 0x30, 0xb6, 0xd0, 0xca, 0xa0, 0x94, 0xb6, 0xec, 0x89, 0x11, 0xab,
+ 0x7d, 0xc3, 0x68, 0x24, 0xb8, 0x24, 0xdc, 0x0a, 0xf6, 0x68, 0x2b, 0x09, 0x35, 0xfd, 0xe7, 0xb4, 0x92, 0xa1, 0x4d, 0xc2, 0xf4, 0x36, 0x48, 0x03, 0x8d, 0xa2, 0xcf, 0x79, 0x17, 0x0d, 0x2d, 0x50,
+ 0x13, 0x3f, 0xd4, 0x94, 0x16, 0xcb, 0x6e, 0x33, 0xbe, 0xa9, 0x0b, 0x8b, 0xf4, 0x55, 0x9b, 0x03, 0x73, 0x2a, 0x01, 0xea, 0x29, 0x0e, 0x6d, 0x07, 0x4f, 0x79, 0xbb, 0x83, 0xc1, 0x0e, 0x58, 0x00,
+ 0x15, 0xcc, 0x1a, 0x85, 0xb3, 0x6b, 0x55, 0x01, 0x04, 0x6e, 0x9c, 0x4b, 0xdc, 0xae, 0x51, 0x35, 0x69, 0x0b, 0x86, 0x66, 0xbd, 0x54, 0xb7, 0xa7, 0x03, 0xea, 0x7b, 0x6f, 0x22, 0x0a, 0x54, 0x69,
+ 0xa5, 0x68, 0x02, 0x7e},
+ {0x00, 0x4A, 0xC4, 0xD6}
+ },
+ /*Test 5*/
+ {
+ {0x6B, 0x8B, 0x08, 0xEE, 0x79, 0xE0, 0xB5, 0x98, 0x2D, 0x6D, 0x12, 0x8E, 0xA9, 0xF2, 0x20, 0xCB},
+ 0x561EB2DD,
+ 0x1C,
+ 0x0,
+ 5670,
+ {0x5B, 0xAD, 0x72, 0x47, 0x10, 0xBA, 0x1C, 0x56, 0xD5, 0xA3, 0x15, 0xF8, 0xD4, 0x0F, 0x6E, 0x09, 0x37, 0x80, 0xBE, 0x8E, 0x8D, 0xE0, 0x7B, 0x69, 0x92, 0x43, 0x20, 0x18, 0xE0, 0x8E, 0xD9, 0x6A,
+ 0x57, 0x34, 0xAF, 0x8B, 0xAD, 0x8A, 0x57, 0x5D, 0x3A, 0x1F, 0x16, 0x2F, 0x85, 0x04, 0x5C, 0xC7, 0x70, 0x92, 0x55, 0x71, 0xD9, 0xF5, 0xB9, 0x4E, 0x45, 0x4A, 0x77, 0xC1, 0x6E, 0x72, 0x93, 0x6B,
+ 0xF0, 0x16, 0xAE, 0x15, 0x74, 0x99, 0xF0, 0x54, 0x3B, 0x5D, 0x52, 0xCA, 0xA6, 0xDB, 0xEA, 0xB6, 0x97, 0xD2, 0xBB, 0x73, 0xE4, 0x1B, 0x80, 0x75, 0xDC, 0xE7, 0x9B, 0x4B, 0x86, 0x04, 0x4F, 0x66,
+ 0x1D, 0x44, 0x85, 0xA5, 0x43, 0xDD, 0x78, 0x60, 0x6E, 0x04, 0x19, 0xE8, 0x05, 0x98, 0x59, 0xD3, 0xCB, 0x2B, 0x67, 0xCE, 0x09, 0x77, 0x60, 0x3F, 0x81, 0xFF, 0x83, 0x9E, 0x33, 0x18, 0x59, 0x54,
+ 0x4C, 0xFB, 0xC8, 0xD0, 0x0F, 0xEF, 0x1A, 0x4C, 0x85, 0x10, 0xFB, 0x54, 0x7D, 0x6B, 0x06, 0xC6, 0x11, 0xEF, 0x44, 0xF1, 0xBC, 0xE1, 0x07, 0xCF, 0xA4, 0x5A, 0x06, 0xAA, 0xB3, 0x60, 0x15, 0x2B,
+ 0x28, 0xDC, 0x1E, 0xBE, 0x6F, 0x7F, 0xE0, 0x9B, 0x05, 0x16, 0xF9, 0xA5, 0xB0, 0x2A, 0x1B, 0xD8, 0x4B, 0xB0, 0x18, 0x1E, 0x2E, 0x89, 0xE1, 0x9B, 0xD8, 0x12, 0x59, 0x30, 0xD1, 0x78, 0x68, 0x2F,
+ 0x38, 0x62, 0xDC, 0x51, 0xB6, 0x36, 0xF0, 0x4E, 0x72, 0x0C, 0x47, 0xC3, 0xCE, 0x51, 0xAD, 0x70, 0xD9, 0x4B, 0x9B, 0x22, 0x55, 0xFB, 0xAE, 0x90, 0x65, 0x49, 0xF4, 0x99, 0xF8, 0xC6, 0xD3, 0x99,
+ 0x47, 0xED, 0x5E, 0x5D, 0xF8, 0xE2, 0xDE, 0xF1, 0x13, 0x25, 0x3E, 0x7B, 0x08, 0xD0, 0xA7, 0x6B, 0x6B, 0xFC, 0x68, 0xC8, 0x12, 0xF3, 0x75, 0xC7, 0x9B, 0x8F, 0xE5, 0xFD, 0x85, 0x97, 0x6A, 0xA6,
+ 0xD4, 0x6B, 0x4A, 0x23, 0x39, 0xD8, 0xAE, 0x51, 0x47, 0xF6, 0x80, 0xFB, 0xE7, 0x0F, 0x97, 0x8B, 0x38, 0xEF, 0xFD, 0x7B, 0x2F, 0x78, 0x66, 0xA2, 0x25, 0x54, 0xE1, 0x93, 0xA9, 0x4E, 0x98, 0xA6,
+ 0x8B, 0x74, 0xBD, 0x25, 0xBB, 0x2B, 0x3F, 0x5F, 0xB0, 0xA5, 0xFD, 0x59, 0x88, 0x7F, 0x9A, 0xB6, 0x81, 0x59, 0xB7, 0x17, 0x8D, 0x5B, 0x7B, 0x67, 0x7C, 0xB5, 0x46, 0xBF, 0x41, 0xEA, 0xDC, 0xA2,
+ 0x16, 0xFC, 0x10, 0x85, 0x01, 0x28, 0xF8, 0xBD, 0xEF, 0x5C, 0x8D, 0x89, 0xF9, 0x6A, 0xFA, 0x4F, 0xA8, 0xB5, 0x48, 0x85, 0x56, 0x5E, 0xD8, 0x38, 0xA9, 0x50, 0xFE, 0xE5, 0xF1, 0xC3, 0xB0, 0xA4,
+ 0xF6, 0xFB, 0x71, 0xE5, 0x4D, 0xFD, 0x16, 0x9E, 0x82, 0xCE, 0xCC, 0x72, 0x66, 0xC8, 0x50, 0xE6, 0x7C, 0x5E, 0xF0, 0xBA, 0x96, 0x0F, 0x52, 0x14, 0x06, 0x0E, 0x71, 0xEB, 0x17, 0x2A, 0x75, 0xFC,
+ 0x14, 0x86, 0x83, 0x5C, 0xBE, 0xA6, 0x53, 0x44, 0x65, 0xB0, 0x55, 0xC9, 0x6A, 0x72, 0xE4, 0x10, 0x52, 0x24, 0x18, 0x23, 0x25, 0xD8, 0x30, 0x41, 0x4B, 0x40, 0x21, 0x4D, 0xAA, 0x80, 0x91, 0xD2,
+ 0xE0, 0xFB, 0x01, 0x0A, 0xE1, 0x5C, 0x6D, 0xE9, 0x08, 0x50, 0x97, 0x3B, 0xDF, 0x1E, 0x42, 0x3B, 0xE1, 0x48, 0xA2, 0x37, 0xB8, 0x7A, 0x0C, 0x9F, 0x34, 0xD4, 0xB4, 0x76, 0x05, 0xB8, 0x03, 0xD7,
+ 0x43, 0xA8, 0x6A, 0x90, 0x39, 0x9A, 0x4A, 0xF3, 0x96, 0xD3, 0xA1, 0x20, 0x0A, 0x62, 0xF3, 0xD9, 0x50, 0x79, 0x62, 0xE8, 0xE5, 0xBE, 0xE6, 0xD3, 0xDA, 0x2B, 0xB3, 0xF7, 0x23, 0x76, 0x64, 0xAC,
+ 0x7A, 0x29, 0x28, 0x23, 0x90, 0x0B, 0xC6, 0x35, 0x03, 0xB2, 0x9E, 0x80, 0xD6, 0x3F, 0x60, 0x67, 0xBF, 0x8E, 0x17, 0x16, 0xAC, 0x25, 0xBE, 0xBA, 0x35, 0x0D, 0xEB, 0x62, 0xA9, 0x9F, 0xE0, 0x31,
+ 0x85, 0xEB, 0x4F, 0x69, 0x93, 0x7E, 0xCD, 0x38, 0x79, 0x41, 0xFD, 0xA5, 0x44, 0xBA, 0x67, 0xDB, 0x09, 0x11, 0x77, 0x49, 0x38, 0xB0, 0x18, 0x27, 0xBC, 0xC6, 0x9C, 0x92, 0xB3, 0xF7, 0x72, 0xA9,
+ 0xD2, 0x85, 0x9E, 0xF0, 0x03, 0x39, 0x8B, 0x1F, 0x6B, 0xBA, 0xD7, 0xB5, 0x74, 0xF7, 0x98, 0x9A, 0x1D, 0x10, 0xB2, 0xDF, 0x79, 0x8E, 0x0D, 0xBF, 0x30, 0xD6, 0x58, 0x74, 0x64, 0xD2, 0x48, 0x78,
+ 0xCD, 0x00, 0xC0, 0xEA, 0xEE, 0x8A, 0x1A, 0x0C, 0xC7, 0x53, 0xA2, 0x79, 0x79, 0xE1, 0x1B, 0x41, 0xDB, 0x1D, 0xE3, 0xD5, 0x03, 0x8A, 0xFA, 0xF4, 0x9F, 0x5C, 0x68, 0x2C, 0x37, 0x48, 0xD8, 0xA3,
+ 0xA9, 0xEC, 0x54, 0xE6, 0xA3, 0x71, 0x27, 0x5F, 0x16, 0x83, 0x51, 0x0F, 0x8E, 0x4F, 0x90, 0x93, 0x8F, 0x9A, 0xB6, 0xE1, 0x34, 0xC2, 0xCF, 0xDF, 0x48, 0x41, 0xCB, 0xA8, 0x8E, 0x0C, 0xFF, 0x2B,
+ 0x0B, 0xCC, 0x8E, 0x6A, 0xDC, 0xB7, 0x11, 0x09, 0xB5, 0x19, 0x8F, 0xEC, 0xF1, 0xBB, 0x7E, 0x5C, 0x53, 0x1A, 0xCA, 0x50, 0xA5, 0x6A, 0x8A, 0x3B, 0x6D, 0xE5, 0x98, 0x62, 0xD4, 0x1F, 0xA1, 0x13,
+ 0xD9, 0xCD, 0x95, 0x78, 0x08, 0xF0, 0x85, 0x71, 0xD9, 0xA4, 0xBB, 0x79, 0x2A, 0xF2, 0x71, 0xF6, 0xCC, 0x6D, 0xBB, 0x8D, 0xC7, 0xEC, 0x36, 0xE3, 0x6B, 0xE1, 0xED, 0x30, 0x81, 0x64, 0xC3, 0x1C,
+ 0x7C, 0x0A, 0xFC, 0x54, 0x1C},
+ {0x0C, 0xA1, 0x27, 0x92}
+ },
+ /*Custom test 1*/
+ {
+ {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F},
+ 0x01234567,
+ 0xA,
+ 0x0,
+ 63,
+ {0x5B, 0xAD, 0x72, 0x47, 0x10, 0xBA, 0x1C, 0x56},
+ {0x84, 0x9A, 0xCA, 0xDB}
+ },
+ /*Custom test 2*/
+ {
+ {0xC9, 0xE6, 0xCE, 0xC4, 0x60, 0x7C, 0x72, 0xDB, 0x00, 0x0A, 0xEF, 0xA8, 0x83, 0x85, 0xAB, 0x0A},
+ 0xA94059DA,
+ 0xA,
+ 0x1,
+ 62,
+ {0x98, 0x3B, 0x41, 0xD4, 0x7D, 0x78, 0x0C, 0x9E, 0x1A, 0xD1, 0x1D, 0x7E, 0xB7, 0x03, 0x91, 0xB1},
+ {0x81, 0x17, 0x55, 0x81}
+ },
+ /*Custom test 3*/
+ {
+ {0xC9, 0xE6, 0xCE, 0xC4, 0x60, 0x7C, 0x72, 0xDB, 0x00, 0x0A, 0xEF, 0xA8, 0x83, 0x85, 0xAB, 0x0A},
+ 0xA94059DA,
+ 0xA,
+ 0x0,
+ 512,
+ {0x98, 0x3B, 0x41, 0xD4, 0x7D, 0x78, 0x0C, 0x9E, 0x1A, 0xD1, 0x1D, 0x7E, 0xB7, 0x03, 0x91, 0xB1,
+ 0xDE, 0x0B, 0x35, 0xDA, 0x2D, 0xC6, 0x2F, 0x83, 0xE7, 0xB7, 0x8D, 0x63, 0x06, 0xCA, 0x0E, 0xA0,
+ 0x7E, 0x94, 0x1B, 0x7B, 0xE9, 0x13, 0x48, 0xF9, 0xFC, 0xB1, 0x70, 0xE2, 0x21, 0x7F, 0xEC, 0xD9,
+ 0x7F, 0x9F, 0x68, 0xAD, 0xB1, 0x6E, 0x5D, 0x7D, 0x21, 0xE5, 0x69, 0xD2, 0x80, 0xED, 0x77, 0x5C},
+ {0xBB, 0xAF, 0x2F, 0xC3}
+ },
+ /*Custom test 4*/
+ {
+ {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F},
+ 0x01234567,
+ 0xA,
+ 0x0,
+ 64,
+ {0x5B, 0xAD, 0x72, 0x47, 0x10, 0xBA, 0x1C, 0x56},
+ {0x1B, 0x3D, 0x0f, 0x74}
+ },
+ /*Custom test 5*/
+ {
+ {0xC9, 0xE6, 0xCE, 0xC4, 0x60, 0x7C, 0x72, 0xDB, 0x00, 0x0A, 0xEF, 0xA8, 0x83, 0x85, 0xAB, 0x0A},
+ 0xA94059DA,
+ 0xA,
+ 0x1,
+ 480,
+ {0x98, 0x3B, 0x41, 0xD4, 0x7D, 0x78, 0x0C, 0x9E, 0x1A, 0xD1, 0x1D, 0x7E, 0xB7, 0x03, 0x91, 0xB1,
+ 0xDE, 0x0B, 0x35, 0xDA, 0x2D, 0xC6, 0x2F, 0x83, 0xE7, 0xB7, 0x8D, 0x63, 0x06, 0xCA, 0x0E, 0xA0,
+ 0x7E, 0x94, 0x1B, 0x7B, 0xE9, 0x13, 0x48, 0xF9, 0xFC, 0xB1, 0x70, 0xE2, 0x21, 0x7F, 0xEC, 0xD9,
+ 0x7F, 0x9F, 0x68, 0xAD, 0xB1, 0x6E, 0x5D, 0x7D, 0x21, 0xE5, 0x69, 0xD2, 0x80, 0xED, 0x77, 0x5C},
+ {0x39, 0x5C, 0x11, 0x92}
+ },
+};
+#endif
diff --git a/src/spdk/intel-ipsec-mb/Makefile b/src/spdk/intel-ipsec-mb/Makefile
new file mode 100644
index 000000000..2d993bdf4
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/Makefile
@@ -0,0 +1,690 @@
+#
+# Copyright (c) 2012-2019, Intel Corporation
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+# * Neither the name of Intel Corporation nor the names of its contributors
+# may be used to endorse or promote products derived from this software
+# without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+LIB = libIPSec_MB
+SHARED ?= y
+IMB_HDR = intel-ipsec-mb.h
+
+# Detect library version
+IMB_VERSION = $(shell grep -e "IMB_VERSION_STR" $(IMB_HDR) | cut -d'"' -f2)
+ifeq ($(IMB_VERSION),)
+$(error "Failed to detect library version!")
+endif
+
+VERSION = $(shell echo $(IMB_VERSION) | cut -d. -f1-3)
+SO_VERSION = $(shell echo $(VERSION) | cut -d. -f1)
+
+PREFIX ?= /usr
+LIB_INSTALL_DIR ?= $(PREFIX)/lib
+HDR_DIR ?= $(PREFIX)/include
+MAN_DIR ?= $(PREFIX)/man/man7
+MAN1 = libipsec-mb.7
+MAN2 = libipsec-mb-dev.7
+NOLDCONFIG ?= n
+
+USE_YASM ?= n
+YASM ?= yasm
+NASM ?= nasm
+
+OBJ_DIR = obj
+
+INCLUDE_DIRS := include . no-aesni
+INCLUDES := $(foreach i,$(INCLUDE_DIRS),-I $i)
+
+CC ?= gcc
+
+CFLAGS := -DLINUX $(EXTRA_CFLAGS) $(INCLUDES) \
+ -W -Wall -Wextra -Wmissing-declarations -Wpointer-arith \
+ -Wcast-qual -Wundef -Wwrite-strings \
+ -Wformat -Wformat-security \
+ -Wunreachable-code -Wmissing-noreturn -Wsign-compare -Wno-endif-labels \
+ -Wstrict-prototypes -Wmissing-prototypes -Wold-style-definition \
+ -fno-strict-overflow -fno-delete-null-pointer-checks -fwrapv
+
+ASM_INCLUDE_DIRS := .
+
+YASM_INCLUDES := $(foreach i,$(ASM_INCLUDE_DIRS),-I $i)
+YASM_FLAGS := -f x64 -f elf64 -X gnu -g dwarf2 -DLINUX -D__linux__ $(YASM_INCLUDES)
+
+NASM_INCLUDES := $(foreach i,$(ASM_INCLUDE_DIRS),-I$i/)
+NASM_FLAGS := -felf64 -Xgnu -gdwarf -DLINUX -D__linux__ $(NASM_INCLUDES)
+
+ifeq ($(DEBUG),y)
+CFLAGS += -g -DDEBUG
+OPT = -O0
+LDFLAGS += -g
+else
+OPT = -O3
+CFLAGS += -fstack-protector -D_FORTIFY_SOURCE=2
+endif
+
+ifeq ($(SAFE_DATA),y)
+CFLAGS += -DSAFE_DATA
+NASM_FLAGS += -DSAFE_DATA
+YASM_FLAGS += -DSAFE_DATA
+endif
+
+ifeq ($(SAFE_PARAM),y)
+CFLAGS += -DSAFE_PARAM
+NASM_FLAGS += -DSAFE_PARAM
+YASM_FLAGS += -DSAFE_PARAM
+endif
+
+ifneq ($(SAFE_LOOKUP),n)
+CFLAGS += -DSAFE_LOOKUP
+NASM_FLAGS += -DSAFE_LOOKUP
+YASM_FLAGS += -DSAFE_LOOKUP
+endif
+
+# prevent SIMD optimizations for non-aesni modules
+CFLAGS_NO_SIMD = $(CFLAGS) -O1
+CFLAGS += $(OPT)
+
+# Set architectural optimizations for GCC/CC
+ifeq ($(CC),$(filter $(CC),gcc cc))
+GCC_VERSION = $(shell $(CC) -dumpversion | cut -d. -f1)
+GCC_GE_V5 = $(shell [ $(GCC_VERSION) -ge 5 ] && echo true)
+ifeq ($(GCC_GE_V5),true)
+OPT_SSE = -march=nehalem -maes
+OPT_AVX = -march=sandybridge -maes
+OPT_AVX2 = -march=haswell -maes
+OPT_AVX512 = -march=broadwell
+OPT_NOAESNI = -march=nehalem
+endif
+endif
+
+# so or static build
+ifeq ($(SHARED),y)
+CFLAGS += -fPIC
+LIBNAME = $(LIB).so.$(VERSION)
+LIBPERM = 0755
+LDFLAGS += -z noexecstack -z relro -z now
+else
+# SPDK crypto needs -fPIC even for the static build, so to make it clear what
+# we are doing, we just comment out the CFLAGS below and
+# explicitly set them in our relevant makefile.
+#CFLAGS += -fPIE
+LIBNAME = $(LIB).a
+LIBPERM = 0644
+LDFLAGS += -g
+endif
+
+# warning messages
+SAFE_PARAM_MSG1="SAFE_PARAM option not set."
+SAFE_PARAM_MSG2="Input parameters will not be checked."
+SAFE_DATA_MSG1="SAFE_DATA option not set."
+SAFE_DATA_MSG2="Stack and registers containing sensitive information, \
+ such as keys or IVs, will not be cleared \
+ at the end of function calls."
+SAFE_LOOKUP_MSG1="SAFE_LOOKUP option not set."
+SAFE_LOOKUP_MSG2="Lookups which depend on sensitive information \
+ are not guaranteed to be done in constant time."
+
+ifeq ($(GCM_BIG_DATA),y)
+CFLAGS += -DGCM_BIG_DATA
+NASM_FLAGS += -DGCM_BIG_DATA
+YASM_FLAGS += -DGCM_BIG_DATA
+endif
+
+#
+# List of C modules (any origin)
+#
+c_lib_objs := \
+ mb_mgr_avx.o \
+ mb_mgr_avx2.o \
+ mb_mgr_avx512.o \
+ mb_mgr_sse.o \
+ mb_mgr_sse_no_aesni.o \
+ alloc.o \
+ aes_xcbc_expand_key.o \
+ md5_one_block.o \
+ sha_one_block.o \
+ des_key.o \
+ des_basic.o \
+ version.o \
+ cpu_feature.o \
+ aesni_emu.o \
+ kasumi_avx.o \
+ kasumi_iv.o \
+ kasumi_sse.o \
+ zuc_sse_top.o \
+ zuc_avx_top.o \
+ zuc_iv.o \
+ snow3g_sse.o \
+ snow3g_sse_no_aesni.o \
+ snow3g_avx.o \
+ snow3g_avx2.o \
+ snow3g_tables.o \
+ snow3g_iv.o
+
+#
+# List of ASM modules (root directory/common)
+#
+asm_generic_lib_objs := \
+ aes_keyexp_128.o \
+ aes_keyexp_192.o \
+ aes_keyexp_256.o \
+ aes_cmac_subkey_gen.o \
+ save_xmms.o \
+ clear_regs_mem_fns.o \
+ const.o \
+ aes128_ecbenc_x3.o \
+ zuc_common.o \
+ wireless_common.o \
+ constant_lookup.o
+
+#
+# List of ASM modules (no-aesni directory)
+#
+asm_noaesni_lib_objs := \
+ aes128_cbc_dec_by4_sse_no_aesni.o \
+ aes192_cbc_dec_by4_sse_no_aesni.o \
+ aes256_cbc_dec_by4_sse_no_aesni.o \
+ aes_cbc_enc_128_x4_no_aesni.o \
+ aes_cbc_enc_192_x4_no_aesni.o \
+ aes_cbc_enc_256_x4_no_aesni.o \
+ aes128_cntr_by4_sse_no_aesni.o \
+ aes192_cntr_by4_sse_no_aesni.o \
+ aes256_cntr_by4_sse_no_aesni.o \
+ aes_ecb_by4_sse_no_aesni.o \
+ aes128_cntr_ccm_by4_sse_no_aesni.o \
+ pon_sse_no_aesni.o \
+ aes_cfb_128_sse_no_aesni.o \
+ aes128_cbc_mac_x4_no_aesni.o \
+ aes_xcbc_mac_128_x4_no_aesni.o \
+ mb_mgr_aes_flush_sse_no_aesni.o \
+ mb_mgr_aes_submit_sse_no_aesni.o \
+ mb_mgr_aes192_flush_sse_no_aesni.o \
+ mb_mgr_aes192_submit_sse_no_aesni.o \
+ mb_mgr_aes256_flush_sse_no_aesni.o \
+ mb_mgr_aes256_submit_sse_no_aesni.o \
+ mb_mgr_aes_cmac_submit_flush_sse_no_aesni.o \
+ mb_mgr_aes_ccm_auth_submit_flush_sse_no_aesni.o \
+ mb_mgr_aes_xcbc_flush_sse_no_aesni.o \
+ mb_mgr_aes_xcbc_submit_sse_no_aesni.o
+
+#
+# List of ASM modules (sse directory)
+#
+asm_sse_lib_objs := \
+ aes128_cbc_dec_by4_sse.o \
+ aes192_cbc_dec_by4_sse.o \
+ aes256_cbc_dec_by4_sse.o \
+ aes_cbc_enc_128_x4.o \
+ aes_cbc_enc_192_x4.o \
+ aes_cbc_enc_256_x4.o \
+ pon_sse.o \
+ aes128_cntr_by4_sse.o \
+ aes192_cntr_by4_sse.o \
+ aes256_cntr_by4_sse.o \
+ aes_ecb_by4_sse.o \
+ aes128_cntr_ccm_by4_sse.o \
+ aes_cfb_128_sse.o \
+ aes128_cbc_mac_x4.o \
+ aes_xcbc_mac_128_x4.o \
+ md5_x4x2_sse.o \
+ sha1_mult_sse.o \
+ sha1_one_block_sse.o \
+ sha224_one_block_sse.o \
+ sha256_one_block_sse.o \
+ sha384_one_block_sse.o \
+ sha512_one_block_sse.o \
+ sha512_x2_sse.o \
+ sha_256_mult_sse.o \
+ sha1_ni_x2_sse.o \
+ sha256_ni_x2_sse.o \
+ zuc_sse.o \
+ mb_mgr_aes_flush_sse.o \
+ mb_mgr_aes_submit_sse.o \
+ mb_mgr_aes192_flush_sse.o \
+ mb_mgr_aes192_submit_sse.o \
+ mb_mgr_aes256_flush_sse.o \
+ mb_mgr_aes256_submit_sse.o \
+ mb_mgr_aes_cmac_submit_flush_sse.o \
+ mb_mgr_aes_ccm_auth_submit_flush_sse.o \
+ mb_mgr_aes_xcbc_flush_sse.o \
+ mb_mgr_aes_xcbc_submit_sse.o \
+ mb_mgr_hmac_md5_flush_sse.o \
+ mb_mgr_hmac_md5_submit_sse.o \
+ mb_mgr_hmac_flush_sse.o \
+ mb_mgr_hmac_submit_sse.o \
+ mb_mgr_hmac_sha_224_flush_sse.o \
+ mb_mgr_hmac_sha_224_submit_sse.o \
+ mb_mgr_hmac_sha_256_flush_sse.o \
+ mb_mgr_hmac_sha_256_submit_sse.o \
+ mb_mgr_hmac_sha_384_flush_sse.o \
+ mb_mgr_hmac_sha_384_submit_sse.o \
+ mb_mgr_hmac_sha_512_flush_sse.o \
+ mb_mgr_hmac_sha_512_submit_sse.o \
+ mb_mgr_hmac_flush_ni_sse.o \
+ mb_mgr_hmac_submit_ni_sse.o \
+ mb_mgr_hmac_sha_224_flush_ni_sse.o \
+ mb_mgr_hmac_sha_224_submit_ni_sse.o \
+ mb_mgr_hmac_sha_256_flush_ni_sse.o \
+ mb_mgr_hmac_sha_256_submit_ni_sse.o
+
+#
+# List of ASM modules (avx directory)
+#
+asm_avx_lib_objs := \
+ aes_cbc_enc_128_x8.o \
+ aes_cbc_enc_192_x8.o \
+ aes_cbc_enc_256_x8.o \
+ aes128_cbc_dec_by8_avx.o \
+ aes192_cbc_dec_by8_avx.o \
+ aes256_cbc_dec_by8_avx.o \
+ pon_avx.o \
+ aes128_cntr_by8_avx.o \
+ aes192_cntr_by8_avx.o \
+ aes256_cntr_by8_avx.o \
+ aes128_cntr_ccm_by8_avx.o \
+ aes_ecb_by4_avx.o \
+ aes_cfb_128_avx.o \
+ aes128_cbc_mac_x8.o \
+ aes_xcbc_mac_128_x8.o \
+ md5_x4x2_avx.o \
+ sha1_mult_avx.o \
+ sha1_one_block_avx.o \
+ sha224_one_block_avx.o \
+ sha256_one_block_avx.o \
+ sha_256_mult_avx.o \
+ sha384_one_block_avx.o \
+ sha512_one_block_avx.o \
+ sha512_x2_avx.o \
+ zuc_avx.o \
+ mb_mgr_aes_flush_avx.o \
+ mb_mgr_aes_submit_avx.o \
+ mb_mgr_aes192_flush_avx.o \
+ mb_mgr_aes192_submit_avx.o \
+ mb_mgr_aes256_flush_avx.o \
+ mb_mgr_aes256_submit_avx.o \
+ mb_mgr_aes_cmac_submit_flush_avx.o\
+ mb_mgr_aes_ccm_auth_submit_flush_avx.o \
+ mb_mgr_aes_xcbc_flush_avx.o \
+ mb_mgr_aes_xcbc_submit_avx.o \
+ mb_mgr_hmac_md5_flush_avx.o \
+ mb_mgr_hmac_md5_submit_avx.o \
+ mb_mgr_hmac_flush_avx.o \
+ mb_mgr_hmac_submit_avx.o \
+ mb_mgr_hmac_sha_224_flush_avx.o \
+ mb_mgr_hmac_sha_224_submit_avx.o \
+ mb_mgr_hmac_sha_256_flush_avx.o \
+ mb_mgr_hmac_sha_256_submit_avx.o \
+ mb_mgr_hmac_sha_384_flush_avx.o \
+ mb_mgr_hmac_sha_384_submit_avx.o \
+ mb_mgr_hmac_sha_512_flush_avx.o \
+ mb_mgr_hmac_sha_512_submit_avx.o
+
+#
+# List of ASM modules (avx2 directory)
+#
+asm_avx2_lib_objs := \
+ md5_x8x2_avx2.o \
+ sha1_x8_avx2.o \
+ sha256_oct_avx2.o \
+ sha512_x4_avx2.o \
+ mb_mgr_hmac_md5_flush_avx2.o \
+ mb_mgr_hmac_md5_submit_avx2.o \
+ mb_mgr_hmac_flush_avx2.o \
+ mb_mgr_hmac_submit_avx2.o \
+ mb_mgr_hmac_sha_224_flush_avx2.o \
+ mb_mgr_hmac_sha_224_submit_avx2.o \
+ mb_mgr_hmac_sha_256_flush_avx2.o \
+ mb_mgr_hmac_sha_256_submit_avx2.o \
+ mb_mgr_hmac_sha_384_flush_avx2.o \
+ mb_mgr_hmac_sha_384_submit_avx2.o \
+ mb_mgr_hmac_sha_512_flush_avx2.o \
+ mb_mgr_hmac_sha_512_submit_avx2.o
+
+#
+# List of ASM modules (avx512 directory)
+#
+asm_avx512_lib_objs := \
+ sha1_x16_avx512.o \
+ sha256_x16_avx512.o \
+ sha512_x8_avx512.o \
+ des_x16_avx512.o \
+ cntr_vaes_avx512.o \
+ aes_cbc_dec_vaes_avx512.o \
+ aes_cbc_enc_vaes_avx512.o \
+ mb_mgr_aes_submit_avx512.o \
+ mb_mgr_aes_flush_avx512.o \
+ mb_mgr_aes192_submit_avx512.o \
+ mb_mgr_aes192_flush_avx512.o \
+ mb_mgr_aes256_submit_avx512.o \
+ mb_mgr_aes256_flush_avx512.o \
+ mb_mgr_hmac_flush_avx512.o \
+ mb_mgr_hmac_submit_avx512.o \
+ mb_mgr_hmac_sha_224_flush_avx512.o \
+ mb_mgr_hmac_sha_224_submit_avx512.o \
+ mb_mgr_hmac_sha_256_flush_avx512.o \
+ mb_mgr_hmac_sha_256_submit_avx512.o \
+ mb_mgr_hmac_sha_384_flush_avx512.o \
+ mb_mgr_hmac_sha_384_submit_avx512.o \
+ mb_mgr_hmac_sha_512_flush_avx512.o \
+ mb_mgr_hmac_sha_512_submit_avx512.o \
+ mb_mgr_des_avx512.o
+
+#
+# GCM object file lists
+#
+
+c_gcm_objs := gcm.o
+
+asm_noaesni_gcm_objs := \
+ gcm128_sse_no_aesni.o gcm192_sse_no_aesni.o gcm256_sse_no_aesni.o
+
+asm_sse_gcm_objs := \
+ gcm128_sse.o gcm192_sse.o gcm256_sse.o
+
+asm_avx_gcm_objs := \
+ gcm128_avx_gen2.o gcm192_avx_gen2.o gcm256_avx_gen2.o
+
+asm_avx2_gcm_objs := \
+ gcm128_avx_gen4.o gcm192_avx_gen4.o gcm256_avx_gen4.o
+
+asm_avx512_gcm_objs := \
+ gcm128_vaes_avx512.o gcm192_vaes_avx512.o gcm256_vaes_avx512.o \
+ gcm128_avx512.o gcm192_avx512.o gcm256_avx512.o
+
+#
+# build object files lists for GCM and NO-GCM variants
+#
+ifeq ($(NO_GCM), y)
+CFLAGS += -DNO_GCM
+asm_obj_files := $(asm_generic_lib_objs) $(asm_noaesni_lib_objs) \
+ $(asm_sse_lib_objs) $(asm_avx_lib_objs) \
+ $(asm_avx2_lib_objs) $(asm_avx512_lib_objs)
+c_obj_files := $(c_lib_objs)
+else
+asm_obj_files := $(asm_generic_lib_objs) \
+ $(asm_noaesni_lib_objs) $(asm_noaesni_gcm_objs) \
+ $(asm_sse_lib_objs) $(asm_sse_gcm_objs) \
+ $(asm_avx_lib_objs) $(asm_avx_gcm_objs) \
+ $(asm_avx2_lib_objs) $(asm_avx2_gcm_objs) \
+ $(asm_avx512_lib_objs) $(asm_avx512_gcm_objs)
+c_obj_files := $(c_lib_objs) $(c_gcm_objs)
+endif
+
+#
+# aggregate all objects files together and prefix with OBJDIR
+#
+lib_obj_files := $(asm_obj_files) $(c_obj_files)
+target_obj_files := $(lib_obj_files:%=$(OBJ_DIR)/%)
+
+#
+# create a list of dependency files for assembly modules
+# create a list of dependency files for c modules then
+# prefix these with OBJDIR
+#
+asm_dep_files := $(asm_obj_files:%.o=%.d)
+
+c_dep_files := $(c_obj_files:%.o=%.d)
+c_dep_target_files := $(c_dep_files:%=$(OBJ_DIR)/%)
+
+#
+# aggregate all dependency files together and prefix with OBJDIR
+#
+dep_files := $(asm_dep_files) $(c_dep_files)
+dep_target_files := $(dep_files:%=$(OBJ_DIR)/%)
+
+all: $(LIBNAME)
+
+$(LIBNAME): $(target_obj_files)
+ifeq ($(SHARED),y)
+ $(CC) -shared -Wl,-soname,$(LIB).so.$(SO_VERSION) -o $(LIBNAME) $^ -lc
+ ln -f -s $(LIBNAME) $(LIB).so.$(SO_VERSION)
+ ln -f -s $(LIB).so.$(SO_VERSION) $(LIB).so
+else
+ $(AR) -qcs $@ $^
+endif
+ifneq ($(SAFE_PARAM), y)
+ @echo "NOTE:" $(SAFE_PARAM_MSG1) $(SAFE_PARAM_MSG2)
+endif
+ifneq ($(SAFE_DATA), y)
+ @echo "NOTE:" $(SAFE_DATA_MSG1) $(SAFE_DATA_MSG2)
+endif
+ifeq ($(SAFE_LOOKUP), n)
+ @echo "NOTE:" $(SAFE_LOOKUP_MSG1) $(SAFE_LOOKUP_MSG2)
+endif
+
+.PHONY: install
+install: $(LIBNAME)
+ install -d $(HDR_DIR)
+ install -m 0644 $(IMB_HDR) $(HDR_DIR)
+ install -d $(LIB_INSTALL_DIR)
+ install -s -m $(LIBPERM) $(LIBNAME) $(LIB_INSTALL_DIR)
+ install -d $(MAN_DIR)
+ install -m 0444 $(MAN1) $(MAN_DIR)
+ install -m 0444 $(MAN2) $(MAN_DIR)
+ifeq ($(SHARED),y)
+ cd $(LIB_INSTALL_DIR); \
+ ln -f -s $(LIB).so.$(VERSION) $(LIB).so.$(SO_VERSION); \
+ ln -f -s $(LIB).so.$(SO_VERSION) $(LIB).so
+ifneq ($(NOLDCONFIG),y)
+ ldconfig
+endif
+endif
+
+.PHONY: uninstall
+uninstall: $(LIBNAME)
+ -rm -f $(HDR_DIR)/$(IMB_HDR)
+ -rm -f $(LIB_INSTALL_DIR)/$(LIBNAME)
+ -rm -f $(MAN_DIR)/$(MAN1)
+ -rm -f $(MAN_DIR)/$(MAN2)
+ifeq ($(SHARED),y)
+ -rm -f $(LIB_INSTALL_DIR)/$(LIB).so.$(SO_VERSION)
+ -rm -f $(LIB_INSTALL_DIR)/$(LIB).so
+endif
+
+.PHONY: build_c_dep_target_files
+build_c_dep_target_files: $(c_dep_target_files)
+
+$(target_obj_files): | $(OBJ_DIR) build_c_dep_target_files
+$(dep_target_files): | $(OBJ_DIR)
+
+#
+# dependency file build recipes
+#
+
+$(OBJ_DIR)/%.d:%.c
+ $(CC) -MM -MP -MF $@ $(CFLAGS) $<
+
+$(OBJ_DIR)/%.d:sse/%.c
+ $(CC) -MM -MP -MF $@ $(CFLAGS) $<
+
+$(OBJ_DIR)/%.d:avx/%.c
+ $(CC) -MM -MP -MF $@ $(CFLAGS) $<
+
+$(OBJ_DIR)/%.d:avx2/%.c
+ $(CC) -MM -MP -MF $@ $(CFLAGS) $<
+
+$(OBJ_DIR)/%.d:avx512/%.c
+ $(CC) -MM -MP -MF $@ $(CFLAGS) $<
+
+$(OBJ_DIR)/%.d:no-aesni/%.c
+ $(CC) -MM -MP -MF $@ $(CFLAGS) $<
+
+#
+# object file build recipes
+#
+
+$(OBJ_DIR)/%.o:%.c
+ $(CC) -c $(CFLAGS) $< -o $@
+
+$(OBJ_DIR)/%.o:%.asm
+ifeq ($(USE_YASM),y)
+ $(YASM) $(YASM_FLAGS) $< -o $@
+else
+ $(NASM) -MD $(@:.o=.d) -MT $@ -o $@ $(NASM_FLAGS) $<
+endif
+
+$(OBJ_DIR)/%.o:sse/%.c
+ $(CC) $(OPT_SSE) -c $(CFLAGS) $< -o $@
+
+$(OBJ_DIR)/%.o:sse/%.asm
+ifeq ($(USE_YASM),y)
+ $(YASM) $(YASM_FLAGS) $< -o $@
+else
+ $(NASM) -MD $(@:.o=.d) -MT $@ -o $@ $(NASM_FLAGS) $<
+endif
+
+$(OBJ_DIR)/%.o:avx/%.c
+ $(CC) $(OPT_AVX) -c $(CFLAGS) $< -o $@
+
+$(OBJ_DIR)/%.o:avx/%.asm
+ifeq ($(USE_YASM),y)
+ $(YASM) $(YASM_FLAGS) $< -o $@
+else
+ $(NASM) -MD $(@:.o=.d) -MT $@ -o $@ $(NASM_FLAGS) $<
+endif
+
+$(OBJ_DIR)/%.o:avx2/%.c
+ $(CC) $(OPT_AVX2) -c $(CFLAGS) $< -o $@
+
+$(OBJ_DIR)/%.o:avx2/%.asm
+ifeq ($(USE_YASM),y)
+ $(YASM) $(YASM_FLAGS) $< -o $@
+else
+ $(NASM) -MD $(@:.o=.d) -MT $@ -o $@ $(NASM_FLAGS) $<
+endif
+
+$(OBJ_DIR)/%.o:avx512/%.c
+ $(CC) $(OPT_AVX512) -c $(CFLAGS) $< -o $@
+
+$(OBJ_DIR)/%.o:avx512/%.asm
+ifeq ($(USE_YASM),y)
+ $(YASM) $(YASM_FLAGS) $< -o $@
+else
+ $(NASM) -MD $(@:.o=.d) -MT $@ -o $@ $(NASM_FLAGS) $<
+endif
+
+$(OBJ_DIR)/%.o:include/%.asm
+ifeq ($(USE_YASM),y)
+ $(YASM) $(YASM_FLAGS) $< -o $@
+else
+ $(NASM) -MD $(@:.o=.d) -MT $@ -o $@ $(NASM_FLAGS) $<
+endif
+
+$(OBJ_DIR)/%.o:no-aesni/%.c
+ $(CC) $(OPT_NOAESNI) -c $(CFLAGS_NO_SIMD) $< -o $@
+
+$(OBJ_DIR)/%.o:no-aesni/%.asm
+ifeq ($(USE_YASM),y)
+ $(YASM) $(YASM_FLAGS) $< -o $@
+else
+ $(NASM) -MD $(@:.o=.d) -MT $@ -o $@ $(NASM_FLAGS) $<
+endif
+
+$(OBJ_DIR):
+ mkdir $(OBJ_DIR)
+
+.PHONY: TAGS
+TAGS:
+ find ./ -name '*.[ch]' | etags -
+ find ./ -name '*.asm' | etags -a -
+ find ./ -name '*.inc' | etags -a -
+
+.PHONY: clean
+clean:
+ rm -Rf $(target_obj_files)
+ rm -Rf $(dep_target_files)
+ rm -f $(LIB).a $(LIB).so*
+
+.PHONY: help
+help:
+ @echo "Available build options:"
+ @echo "DEBUG=n (default)"
+ @echo " - this option will produce library not fit for debugging"
+ @echo "SHARED=y (default)"
+ @echo " - this option will produce shared library"
+ @echo "DEBUG=y - this option will produce library fit for debugging"
+ @echo "SHARED=n - this option will produce static library"
+ @echo "SAFE_DATA=n (default)"
+ @echo " - Sensitive data not cleared from registers and memory"
+ @echo " at operation end"
+ @echo "SAFE_DATA=y"
+ @echo " - Sensitive data cleared from registers and memory"
+ @echo " at operation end"
+ @echo "SAFE_PARAM=n (default)"
+ @echo " - API input parameters not checked"
+ @echo "SAFE_PARAM=y"
+ @echo " - API input parameters checked"
+ @echo "SAFE_LOOKUP=n"
+ @echo " - Lookups depending on sensitive data might not be constant time"
+ @echo "SAFE_LOOKUP=y (default)"
+ @echo " - Lookups depending on sensitive data are constant time"
+ @echo "GCM_BIG_DATA=n (default)"
+ @echo " - Smaller GCM key structure with good performance level (VAES)"
+ @echo " for packet processing applications (buffers size < 2K)"
+ @echo " - 8 ghash keys used on SSE, AVX, AVX2 and AVX512"
+ @echo " - 48 ghash keys used on AVX512 with VAES and VPCLMULQDQ"
+ @echo "GCM_BIG_DATA=y"
+ @echo " - Better performing VAES GCM on big buffers using more ghash keys."
+ @echo " This option results in a much bigger gcm_key structure (>2K)."
+ @echo " It only takes effect on platforms with VAES and VPCLMULQDQ."
+
+
+CHECKPATCH ?= checkpatch.pl
+# checkpatch ignore settings:
+# SPACING - produces false positives with typedefs and *
+# CONSTANT_COMPARISON - forbids defensive programming technique
+# USE_FUNC - produces false positives for Windows target
+# INITIALISED_STATIC, LEADING_SPACE, SPLIT_STRING, CODE_INDENT,
+# PREFER_ALIGNED, UNSPECIFIED_INT, ARRAY_SIZE, GLOBAL_INITIALISERS,
+# NEW_TYPEDEFS, AVOID_EXTERNS, COMPLEX_MACRO, BLOCK_COMMENT_STYLE
+# - found obsolete in this project
+#
+# NOTE: these flags cannot be broken into multiple lines due to
+# spaces injected by make
+CHECKPATCH_FLAGS = --no-tree --no-signoff --emacs --no-color --ignore CODE_INDENT,INITIALISED_STATIC,LEADING_SPACE,SPLIT_STRING,UNSPECIFIED_INT,ARRAY_SIZE,BLOCK_COMMENT_STYLE,GLOBAL_INITIALISERS,NEW_TYPEDEFS,AVOID_EXTERNS,COMPLEX_MACRO,PREFER_ALIGNED,USE_FUNC,CONSTANT_COMPARISON,SPACING
+
+%.c_style_check : %.c
+ $(CHECKPATCH) $(CHECKPATCH_FLAGS) -f $<
+
+%.h_style_check : %.h
+ $(CHECKPATCH) $(CHECKPATCH_FLAGS) -f $<
+
+%.asm_style_check : %.asm
+ $(CHECKPATCH) $(CHECKPATCH_FLAGS) -f $<
+
+%.inc_style_check : %.inc
+ $(CHECKPATCH) $(CHECKPATCH_FLAGS) -f $<
+
+SOURCES_DIRS := . sse avx avx2 avx512 include no-aesni
+SOURCES := $(foreach dir,$(SOURCES_DIRS),$(wildcard $(dir)/*.[ch]) $(wildcard $(dir)/*.asm) $(wildcard $(dir)/*.inc))
+SOURCES_STYLE := $(foreach infile,$(SOURCES),$(infile)_style_check)
+
+.PHONY: style
+style: $(SOURCES_STYLE)
+
+# if target not clean or rinse then make dependencies
+ifneq ($(MAKECMDGOALS),clean)
+ifneq ($(MAKECMDGOALS),style)
+-include $(dep_target_files)
+endif
+endif
diff --git a/src/spdk/intel-ipsec-mb/README b/src/spdk/intel-ipsec-mb/README
new file mode 100644
index 000000000..0529771f7
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/README
@@ -0,0 +1,478 @@
+========================================================================
+README for Intel(R) Multi-Buffer Crypto for IPsec Library
+
+October 2019
+========================================================================
+
+
+Contents
+========
+
+1. Overview
+2. Processor Extensions
+3. Recommendations
+4. Package Content
+5. Compilation
+6. Security Considerations & Options for Increased Security
+7. Installation
+8. Disclaimer (ZUC, KASUMI, SNOW3G)
+9. Legal Disclaimer
+
+
+1. Overview
+===========
+Intel Multi-Buffer Crypto for IPsec Library provides highly-optimized
+software implementations of the core cryptographic processing for IPsec,
+delivering industry-leading performance on a range of Intel(R) Processors.
+
+For information on how to build and use this library, see the
+Intel White Paper:
+"Fast Multi-buffer IPsec Implementations on Intel Architecture Processors".
+Jim Guilford, Sean Gulley, et. al.
+
+The easiest way to find it is to search the Internet for the title and
+Intel White Paper.
+
+Table 1. List of supported cipher algorithms and their implementations.
++---------------------------------------------------------------------+
+| | Implementation |
+| Encryption +-----------------------------------------------------|
+| | x86_64 | SSE | AVX | AVX2 | AVX512 | VAES(5)|
+|---------------+--------+--------+--------+--------+--------+--------|
+| AES128-GCM | N | Y by8 | Y by8 | Y by8 | Y by8 | Y by48 |
+| AES192-GCM | N | Y by8 | Y by8 | Y by8 | Y by8 | Y by48 |
+| AES256-GCM | N | Y by8 | Y by8 | Y by8 | Y by8 | Y by48 |
+| AES128-CCM | N | Y by4 | Y by8 | N | N | N |
+| AES128-CBC | N | Y(1) | Y(3) | N | N | Y(6) |
+| AES192-CBC | N | Y(1) | Y(3) | N | N | Y(6) |
+| AES256-CBC | N | Y(1) | Y(3) | N | N | Y(6) |
+| AES128-CTR | N | Y by4 | Y by8 | N | N | Y by16 |
+| AES192-CTR | N | Y by4 | Y by8 | N | N | Y by16 |
+| AES256-CTR | N | Y by4 | Y by8 | N | N | Y by16 |
+| AES128-ECB | N | Y by4 | Y by4 | N | N | N |
+| AES192-ECB | N | Y by4 | Y by4 | N | N | N |
+| AES256-ECB | N | Y by4 | Y by4 | N | N | N |
+| NULL | Y | N | N | N | N | N |
+| AES128-DOCSIS | N | Y(2) | Y(4) | N | N | N |
+| DES-DOCSIS | Y | N | N | N | Y x16 | N |
+| 3DES | Y | N | N | N | Y x16 | N |
+| DES | Y | N | N | N | Y x16 | N |
+| KASUMI-F8 | Y | N | N | N | N | N |
+| ZUC-EEA3 | N | Y | Y | N | N | N |
+| SNOW3G-UEA2 | N | Y | Y | Y | N | N |
++---------------------------------------------------------------------+
+
+Notes:
+(1,2) - decryption is by4 and encryption is x4
+(3,4) - decryption is by8 and encryption is x8
+(5) - AVX512 plus VAES and VPCLMULQDQ extensions
+(6) - decryption is by16 and encryption is x16
+
+Legend:
+ byY - single buffer Y blocks at a time
+ xY - Y buffers at a time
+
+As an example of how to read tables 1 and 2: if one uses the AVX512
+interface to perform AES128-CBC encryption, there is no native AVX512
+implementation for this cipher. In that case, the library uses the best
+available implementation, which is AVX for AES128-CBC.
+
+
+Table 2. List of supported integrity algorithms and their implementations.
++-------------------------------------------------------------------------+
+| | Implementation |
+| Integrity +-----------------------------------------------------|
+| | x86_64 | SSE | AVX | AVX2 | AVX512 | VAES(3)|
+|-------------------+--------+--------+--------+--------+--------+--------|
+| AES-XCBC-96 | N | Y x4 | Y x8 | N | N | N |
+| HMAC-MD5-96 | Y(1) | Y x4x2 | Y x4x2 | Y x8x2 | N | N |
+| HMAC-SHA1-96 | N | Y(2)x4 | Y x4 | Y x8 | Y x16 | N |
+| HMAC-SHA2-224_112 | N | Y(2)x4 | Y x4 | Y x8 | Y x16 | N |
+| HMAC-SHA2-256_128 | N | Y(2)x4 | Y x4 | Y x8 | Y x16 | N |
+| HMAC-SHA2-384_192 | N | Y x2 | Y x2 | Y x4 | Y x8 | N |
+| HMAC-SHA2-512_256 | N | Y x2 | Y x2 | Y x4 | Y x8 | N |
+| AES128-GMAC | N | Y by8 | Y by8 | Y by8 | Y by8 | Y by48 |
+| AES192-GMAC | N | Y by8 | Y by8 | Y by8 | Y by8 | Y by48 |
+| AES256-GMAC | N | Y by8 | Y by8 | Y by8 | Y by8 | Y by48 |
+| NULL | N | N | N | N | N | N |
+| AES128-CCM | N | Y x4 | Y x8 | N | N | N |
+| AES128-CMAC-96 | Y | Y x4 | Y x8 | N | N | N |
+| KASUMI-F9 | Y | N | N | N | N | N |
+| ZUC-EIA3 | N | Y | Y | N | N | N |
+| SNOW3G-UIA2 | N | Y | Y | Y | N | N |
++-------------------------------------------------------------------------+
+
+Notes:
+(1) - MD5 over one block implemented in C
+(2) - Implementation using SHANI extensions is x2
+(3) - AVX512 plus VAES and VPCLMULQDQ extensions
+
+Legend:
+ byY - single buffer Y blocks at a time
+ xY - Y buffers at a time
+
+Table 3. Encryption and integrity algorithm combinations
++---------------------------------------------------------------------+
+| Encryption | Allowed Integrity Algorithms |
+|---------------+-----------------------------------------------------|
+| AES128-GCM | AES128-GMAC |
+|---------------+-----------------------------------------------------|
+| AES192-GCM | AES192-GMAC |
+|---------------+-----------------------------------------------------|
+| AES256-GCM | AES256-GMAC |
+|---------------+-----------------------------------------------------|
+| AES128-CCM | AES128-CCM |
+|---------------+-----------------------------------------------------|
+| AES128-CBC, | AES-XCBC-96, |
+| AES192-CBC, | HMAC-SHA1-96, HMAC-SHA2-224_112, HMAC-SHA2-256_128, |
+| AES256-CBC, | HMAC-SHA2-384_192, HMAC-SHA2-512_256, |
+| AES128-CTR, | AES128-CMAC-96, |
+| AES192-CTR, | NULL |
+| AES256-CTR, | |
+| AES128-ECB, | |
+| AES192-ECB, | |
+| AES256-ECB, | |
+| NULL, | |
+| AES128-DOCSIS,| |
+| DES-DOCSIS, | |
+| 3DES, | |
+| DES, | |
+|---------------+-----------------------------------------------------|
+| KASUMI-F8 | KASUMI-F9 |
+|---------------+-----------------------------------------------------|
+| ZUC-EEA3 | ZUC-EIA3 |
+|---------------+-----------------------------------------------------|
+| SNOW3G-UEA2 | SNOW3G-UIA2 |
++---------------+-----------------------------------------------------+
+
+
+2. Processor Extensions
+=======================
+
+Table 4. Processor extensions used in the library
++-------------------------------------------------------------------------+
+| Algorithm | Interface | Extensions |
+|-------------------+-----------+-----------------------------------------|
+| HMAC-SHA1-96, | AVX512 | AVX512F, AVX512BW, AVX512VL |
+| HMAC-SHA2-224_112,| | |
+| HMAC-SHA2-256_128,| | |
+| HMAC-SHA2-384_192,| | |
+| HMAC-SHA2-512_256 | | |
+|-------------------+-----------+-----------------------------------------|
+| DES, 3DES, | AVX512 | AVX512F, AVX512BW |
+| DOCSIS-DES | | |
+|-------------------+-----------+-----------------------------------------|
+| HMAC-SHA1-96, | SSE | SHANI |
+| HMAC-SHA2-224_112,| | - presence is autodetected and library |
+| HMAC-SHA2-256_128,| | falls back to SSE implementation |
+| HMAC-SHA2-384_192,| | if not present |
+| HMAC-SHA2-512_256 | | |
+|-------------------+-----------+-----------------------------------------|
+
+
+3. Recommendations
+==================
+
+The algorithms listed in the table below are legacy or should be avoided; they
+are implemented in the library only to support legacy applications. Please use
+the corresponding alternative algorithms instead.
+
++-------------------------------------------------------------+
+| # | Algorithm | Recommendation | Alternative |
+|---+--------------------+----------------+-------------------|
+| 1 | DES encryption | Avoid | AES encryption |
+|---+--------------------+----------------+-------------------|
+| 2 | 3DES encryption | Avoid | AES encryption |
+|---+--------------------+----------------+-------------------|
+| 3 | HMAC-MD5 integrity | Legacy | HMAC-SHA1 |
+|---+--------------------+----------------+-------------------|
+| 4 | AES-ECB encryption | Avoid | AES-CBC, AES-CNTR |
++-------------------------------------------------------------+
+
+Intel(R) Multi-Buffer Crypto for IPsec Library depends on the C library, and
+it is recommended to use its latest version.
+
+Applications using the Intel(R) Multi-Buffer Crypto for IPsec Library rely on
+the Operating System to provide process isolation.
+As a result, it is recommended to use the latest Operating System patches and
+security updates.
+
+4. Package Content
+==================
+
+LibTestApp - Library test applications
+LibPerfApp - Library performance application
+sse - Intel(R) SSE optimized routines
+avx - Intel(R) AVX optimized routines
+avx2 - Intel(R) AVX2 optimized routines
+avx512 - Intel(R) AVX512 optimized routines
+no-aesni - Non-AESNI accelerated routines
+
+Note:
+There is just one branch used in the project. All development is done on the
+master branch. Code taken from the tip of the master branch should not be
+considered fit for production.
+Refer to the releases tab for stable code versions:
+https://github.com/intel/intel-ipsec-mb/releases
+
+
+5. Compilation
+==============
+
+Linux (64-bit only)
+-------------------
+
+Required tools:
+- GNU make
+- NASM version 2.13.03 (or newer)
+- gcc (GCC) 4.8.3 (or newer)
+
+Shared library:
+> make
+
+Static library:
+> make SHARED=n
+
+Clean the build:
+> make clean
+or
+> make clean SHARED=n
+
+Build with debugging information:
+> make DEBUG=y
+
+Note: Building with debugging information is not advised for production use.
+
+For more build options and their explanation run:
+> make help
+
+Windows (x64 only)
+------------------
+
+Required tools:
+- Microsoft (R) Visual Studio 2015:
+ - NMAKE: Microsoft (R) Program Maintenance Utility Version 14.00.24210.0
+ - CL: Microsoft (R) C/C++ Optimizing Compiler Version 19.00.24215.1 for x64
+ - LIB: Microsoft (R) Library Manager Version 14.00.24215.1
+ - LINK: Microsoft (R) Incremental Linker Version 14.00.24215.1
+ - Note: Building on later versions should work but is not verified
+- NASM version 2.13.03 (or newer)
+
+Shared library (DLL):
+> nmake /f win_x64.mak
+
+Static library:
+> nmake /f win_x64.mak SHARED=n
+
+Clean the build:
+> nmake /f win_x64.mak clean
+or
+> nmake /f win_x64.mak clean SHARED=n
+
+Build with additional safety features:
+ - SAFE_DATA clears sensitive information stored in stack/registers
+ - SAFE_PARAM adds extra checks on input parameters
+ - SAFE_LOOKUP uses constant-time lookups (enabled by default)
+> nmake /f win_x64.mak SAFE_DATA=y SAFE_PARAM=y
+
+Build with debugging information:
+> nmake /f win_x64.mak DEBUG=y
+
+Note: Building with debugging information is not advised for production use.
+
+For more build options and their explanation run:
+> nmake /f win_x64.mak help
+
+FreeBSD (64-bit only)
+---------------------
+
+Required tools:
+- GNU make
+- NASM version 2.13.03 (or newer)
+- gcc (GCC) 4.8.3 (or newer) / clang 5.0 (or newer)
+
+Shared library:
+> gmake
+
+Static library:
+> gmake SHARED=n
+
+Clean the build:
+> gmake clean
+or
+> gmake clean SHARED=n
+
+Build with debugging information:
+> gmake DEBUG=y
+
+Note: Building with debugging information is not advised for production use.
+
+For more build options and their explanation run:
+> gmake help
+
+6. Security Considerations & Options for Increased Security
+===========================================================
+
+Security Considerations
+-----------------------
+The security of a system that uses cryptography depends on the strength of
+the cryptographic algorithms as well as the strength of the keys.
+Cryptographic key strength is dependent on several factors, with some of the
+most important factors including the length of the key, the entropy of the key
+bits, and maintaining the secrecy of the key.
+
+The selection of an appropriate algorithm and mode of operation critically
+affects the security of a system. Appropriate selection criteria are beyond the
+scope of this document and should be determined based upon usage, appropriate
+standards and consultation with a cryptographic expert. This library includes
+some algorithms that are considered cryptographically weak and are included only
+for legacy and interoperability reasons. See the "Recommendations" section for
+more details.
+
+Secure creation of key material is not a part of this library. This library
+assumes that cryptographic keys have been created using approved methods with
+an appropriate and secure entropy source. Users of this library are
+referred to NIST SP800-133 Revision 1, Recommendation for Cryptographic Key
+Generation, found at https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-133r1.pdf
+
+Even with the use of strong cryptographic algorithms and robustly generated
+keys, software implementations of cryptographic algorithms may be attacked
+at the implementation through cache-timing attacks, buffer-over-reads, and
+other software vulnerabilities. Counter-measures against these types of
+attacks are possible but require additional processing cycles. Whether a
+particular system should provide such counter-measures depends on the threats
+to that system, and cannot be determined by a general library such as this
+one. In order to provide the most flexible implementation, this library allows
+certain counter-measures to be enabled or disabled at compile time. These
+options are listed below as the "Options for Increased Security" and are
+enabled through various build flags.
+
+Options for Increased Security
+------------------------------
+There are three build options that can be enabled to increase safety in the
+code and help protect external functions from incorrect input data.
+The SAFE_DATA and SAFE_PARAM options are disabled by default, due to
+the potential performance impact associated with the extra code added.
+The SAFE_LOOKUP option is enabled by default, and can be disabled
+by setting the parameter equal to "n" (e.g. make SAFE_LOOKUP=n).
+
+These options (explained below) can be enabled when building the library,
+by setting the parameter equal to "y" (e.g. make SAFE_DATA=y).
+No specific code has been added, and no specific validation or security
+tests have been performed to help protect against or check for side-channel
+attacks.
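+
+For example, on Linux both optional protections can be enabled in a single
+build (SAFE_LOOKUP is already on by default):
+> make SAFE_DATA=y SAFE_PARAM=y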
+
+SAFE_DATA
+---------
+
+Stack and registers containing sensitive information, such as keys or IVs,
+are cleared upon completion of a function call.
+
+SAFE_PARAM
+----------
+
+Input parameters are checked, looking generally for NULL pointers
+or an incorrect input length.
+
+SAFE_LOOKUP
+-----------
+
+Lookups which depend on sensitive information are implemented with
+constant time functions.
+Algorithms where these constant time functions are used are the following:
+
+- AESNI emulation
+- DES: SSE, AVX and AVX2 implementations
+- KASUMI: all architectures
+- SNOW3G: all architectures
+- ZUC: all architectures
+
+If SAFE_LOOKUP is not enabled in the build (e.g. make SAFE_LOOKUP=n) then the
+algorithms listed above may be susceptible to timing attacks which could expose
+the cryptographic key.
+
+7. Installation
+===============
+
+Linux (64-bit only)
+-------------------
+
+First compile the library and then install:
+> make
+> sudo make install
+
+To uninstall the library run:
+> sudo make uninstall
+
+If you want to change the install location then define PREFIX:
+> sudo make install PREFIX=<path>
+
+If there is no need to run ldconfig at the install stage then use the NOLDCONFIG=y option:
+> sudo make install NOLDCONFIG=y
+
+If the library was compiled as an archive (not the default option) then install it
+using the SHARED=n option:
+> sudo make install SHARED=n
+
+Windows (x64 only)
+------------------
+
+First compile the library and then install from a command prompt in
+administrator mode:
+> nmake /f win_x64.mak
+> nmake /f win_x64.mak install
+
+To uninstall the library run:
+> nmake /f win_x64.mak uninstall
+
+If you want to change the install location then define PREFIX (default C:\Program Files):
+> nmake /f win_x64.mak install PREFIX=<path>
+
+If the library was compiled as a static library (not the default option) then install it
+using the SHARED=n option:
+> nmake /f win_x64.mak install SHARED=n
+
+FreeBSD (64-bit only)
+---------------------
+
+First compile the library and then install:
+> gmake
+> sudo gmake install
+
+To uninstall the library run:
+> sudo gmake uninstall
+
+If you want to change the install location then define PREFIX:
+> sudo gmake install PREFIX=<path>
+
+If there is no need to run ldconfig at the install stage then use the NOLDCONFIG=y option:
+> sudo gmake install NOLDCONFIG=y
+
+If the library was compiled as an archive (not the default option) then install it
+using the SHARED=n option:
+> sudo gmake install SHARED=n
+
+8. Disclaimer (ZUC, KASUMI, SNOW3G)
+===================================
+
+Please note that cryptographic material, such as ciphering algorithms, may be
+subject to national regulations. What is more, use of some algorithms in
+real networks and production equipment can be subject to agreement or
+licensing by the GSMA and/or the ETSI.
+
+For more details please see:
+- GSMA https://www.gsma.com/security/security-algorithms/
+- ETSI https://www.etsi.org/security-algorithms-and-codes/cellular-algorithm-licences
+
+
+9. Legal Disclaimer
+===================
+
+THIS SOFTWARE IS PROVIDED BY INTEL "AS IS". NO LICENSE, EXPRESS OR
+IMPLIED, BY ESTOPPEL OR OTHERWISE, TO ANY INTELLECTUAL PROPERTY RIGHTS
+ARE GRANTED THROUGH USE. EXCEPT AS PROVIDED IN INTEL'S TERMS AND
+CONDITIONS OF SALE, INTEL ASSUMES NO LIABILITY WHATSOEVER AND INTEL
+DISCLAIMS ANY EXPRESS OR IMPLIED WARRANTY, RELATING TO SALE AND/OR
+USE OF INTEL PRODUCTS INCLUDING LIABILITY OR WARRANTIES RELATING TO
+FITNESS FOR A PARTICULAR PURPOSE, MERCHANTABILITY, OR INFRINGEMENT
+OF ANY PATENT, COPYRIGHT OR OTHER INTELLECTUAL PROPERTY RIGHT.
diff --git a/src/spdk/intel-ipsec-mb/ReleaseNotes.txt b/src/spdk/intel-ipsec-mb/ReleaseNotes.txt
new file mode 100644
index 000000000..950dfac21
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/ReleaseNotes.txt
@@ -0,0 +1,575 @@
+========================================================================
+Release Notes for Intel(R) Multi-Buffer Crypto for IPsec Library
+
+v0.53 October 2019
+========================================================================
+
+Library
+- AES-CCM performance optimizations done
+ - full assembly implementation
+ - authentication decoupled from cipher
+ - CCM chain order expected to be HASH_CIPHER for encryption and
+ CIPHER_HASH for decryption
+- AES-CTR implementation for VAES added
+- AES-CBC implementation for VAES added
+- Single buffer AES-GCM performance improvements added for VPCLMULQDQ + VAES
+- Multi-buffer AES-GCM implementation added for VPCLMULQDQ + VAES
+- Data transposition optimizations and unification across the library
+ implemented
+- Generation of make dependency files for Linux added
+- AES-ECB implementation added
+- PON specific stitched algorithm implementation added
+ - stitched AES-CTR-128 (optional) with CRC32 and BIP (running 32-bit XOR)
+- AES-CMAC-128 implementation for bit length messages added
+- ZUC-EEA3 and ZUC-EIA3 implementation added
+- FreeBSD experimental support added
+- KASUMI-F8 and KASUMI-F9 implementation added
+- SNOW3G-UEA2 and SNOW3G-UIA2 implementation added
+- AES-CTR implementation for bit length (128-NEA2/192-NEA2/256-NEA2) messages added
+- SAFE_PARAM, SAFE_DATA and SAFE_LOOKUP compile time options added.
+ Find more about these options in the README file or on-line at
+ https://github.com/intel/intel-ipsec-mb/blob/master/README.
+
+LibTestApp
+- New API tests added
+- CMAC test vectors extended
+- New chained operation tests added
+- Out-of-place chained operation tests added
+- AES-ECB tests added
+- PON algorithm tests added
+- Extra AES-CTR test vectors added
+- Extra AES-CBC test vectors added
+- AES-CMAC-128 bit length message tests added
+- CPU capability detection used to disable tests if instruction not present
+- ZUC-EEA3 and ZUC-EIA3 tests added
+- New cross architecture test application (ipsec_xvalid) added,
+ which mixes different implementations (based on different architectures),
+ to double check their correctness
+- SNOW3G-UEA2 and SNOW3G-UIA2 tests added
+- AES-CTR-128 bit length message tests added
+- Negative tests extended to cover all API's
+
+LibPerfApp
+- Job size and number of iterations options added
+- Single architecture test option added
+- AAD size option added
+- Allow zero length source buffer option added
+- Custom performance test combination added:
+ cipher-algo, hash-algo and aead-algo arguments.
+- Cipher direction option added
+- The maximum buffer size extended from 2K to 16K
+- Support for user defined range of job sizes added
+
+Fixes
+- Uninitialized memory reported by Valgrind fixed
+- Flush decryption job fixed (issue #33)
+- NULL_CIPHER order check removed (issue #30)
+- Save XMM registers when emulating AES fixed (issue #28)
+- SSE & AVX AES-CMAC fixed (issue #27)
+- Missing GCM pointers fixed for AES-NI emulation (issue #29)
+
+v0.52 December 2018
+========================================================================
+
+03 Dec, 2018
+
+General
+- Added AESNI emulation implementation
+- Added AES-GCM multi-buffer implementation for AVX512
+- Added flexible job chain order support
+- GCM submit and flush functions moved into architecture MB manager modules
+- AVX512/AVX2/AVX/SSE AAD GHASH computation performance improvement
+- GCM API's added to MB_MGR structure
+- Added plain SHA support in JOB API
+- Added architectural compiler optimizations for GCC/CC
+
+LibTestApp
+- Added option not to run GCM tests
+- Added AESNI emulation tests
+- Added plain SHA tests
+- Updated to take advantage of new GCM macros
+
+LibPerfApp
+- Buffer alignment update
+- Updated to take advantage of new GCM macros
+
+v0.51 September 2018
+========================================================================
+
+13 Sep, 2018
+
+General
+- AES-CMAC performance optimizations
+- Implemented store to load optimizations in
+ - AES-CMAC submit and flush jobs for SSE and AVX
+ - HMAC-MD5, HMAC-SHA submit jobs for AVX
+ - HMAC-MD5 submit job for AVX2
+- Added zero-sized message support in GCM
+- Stack execution flag disabled in new asm modules
+
+LibTestApp
+- Added AES vectors
+- Added DOCSIS AES vectors
+- Added CFB validation
+
+LibPerfApp
+- Smoke test option added
+
+v0.50 June 2018
+========================================================================
+
+13 Jun, 2018
+
+General
+- Added support for compile time and runtime library version checking
+- Added support for full MD5 digest size
+- Replaced defines for API with symbols for binary compatibility
+- Added HMAC-SHA & HMAC-MD5 vectors to LibTestApp
+- Added support for zero cipher length in AES-CCM
+- Added new API's to compute SHA1, SHA224, SHA256, SHA384 and SHA512 hashes
+ to support key reduction cases where key is longer than a block size
+- Extended support for HMAC full digest sizes for HMAC-SHA1, HMAC-SHA224,
+ HMAC-SHA256, HMAC-SHA384 and HMAC-SHA512. Previously only truncated sizes
+ were supported.
+- Added AES-CMAC support for output digest size between 4 and 16 bytes
+- Added GHASH support for output digest size up to 16 bytes
+- Optimized submit job API's with store to load optimization in SSE, AVX,
+ AVX2 (excluding MD5)
+- Improved performance application accuracy by increasing the number of
+ test iterations
+- Extended multi-thread features of LibPerfApp Windows version to match
+ Linux version of the application
+
+v0.49 March 2018
+========================================================================
+
+21 Mar, 2018
+
+General
+- AES-CMAC support added (AES-CMAC-128 and AES-CMAC-96)
+- 3DES support added
+- Library compiles to SO/DLL by default
+- Install/uninstall targets added to makefiles
+- Multiple API header files consolidated into one (intel-ipsec-mb.h)
+- Unhalted cycles support added to LibPerfApp (Linux at the moment)
+- ELF stack execute protection added for assembly files
+- VZEROUPPER instruction issued after AVX2/AVX512 code to avoid
+ expensive SSE<->AVX transitions
+- MAN page added
+- README documentation extensions and updates
+- AVX512 DES performance smoothed out
+- Multi-buffer manager instance allocate and free API's added
+- Core affinity support added in LibPerfApp
+
+v0.48 December 2017
+========================================================================
+
+12 Dec, 2017
+
+General
+- Linux SO compilation option added
+- Windows DLL compilation option added
+- AES CCM 128 support added
+- Multithread command line option added to LibPerfApp
+- Coding style fixes
+- Coding style target added to Makefile
+
+v0.47 October 2017
+========================================================================
+
+Oct 5, 2017
+
+Intel(R) AVX-512 Instructions
+- DES CBC AVX512 implementation
+- DOCSIS DES AVX512 implementation
+General
+- DES CBC cipher added (generic x86 implementation)
+- DOCSIS DES cipher added (generic x86 implementation)
+- DES and DOCSIS DES tests added
+- RPM SPEC file created
+
+v0.46 June 2017
+========================================================================
+
+Jun 27, 2017
+
+General
+- AES GCM optimizations for AVX2
+- Change of AES GCM API: renamed and expanded keys separated from the context
+- New AES GCM API via job structure and API's
+ - use of the interface may simplify application design at the expense of
+ slightly lower performance vs direct AES GCM API's
+- AES GCM IV automatically padded with block counter (no need for application to do it)
+- IV in AES CTR mode can be 12 bytes (no block counter); 16 byte format still allowed
+- Macros added to ease access to job API for specific architecture
+ - use of these macros can simplify application design but it may produce worse
+ performance than calling architecture job API's directly
+- Submit_job_nocheck() API added to gain some cycles by not validating job structure
+- Result stability improvements in LibPerfApp
+
+v0.45 March 2017
+========================================================================
+
+Mar 29, 2017
+
+Intel(R) AVX-512 Instructions
+- Added optimized HMAC-SHA224 and HMAC-SHA256
+- Added optimized HMAC-SHA384 and HMAC-SHA512
+General
+- Windows x64 compilation target
+- New DOCSIS SEC BPI V3.1 cipher
+- GCM128 and GCM256 updates (with new API that is scatter gather list friendly)
+- GCM192 added
+- Added library API benchmark tool 'ipsec_perf' and
+ script to compare results 'ipsec_diff_tool.py'
+Bug Fixes (vs v0.44)
+- AES CTR mode fix to allow message size not to be multiple of AES block size
+- RSI and RDI registers clobbered when running HMAC-SHA224 or HMAC-SHA256
+ on Windows using SHA extensions
+
+v0.44 November 2016
+========================================================================
+
+Nov 21, 2016
+
+Intel(R) AVX-512 Instructions
+- AVX512 multi buffer manager added (uses AVX2 implementations by default)
+- Optimized SHA1 implementation added
+Intel(R) SHA Extensions
+- SHA1, SHA224 and SHA256 implementations added for Intel(R) SSE
+General
+- NULL cipher added
+- NULL hash added
+- NASM tool chain compilation added (default)
+
+=======================================
+Feb 11, 2015
+
+Fixed so that the job auth_tag_output_len_in_bytes takes a different
+value for different MAC types. In particular, the valid values are (in bytes):
+SHA1 - 12
+SHA224 - 14
+SHA256 - 16
+SHA384 - 24
+SHA512 - 32
+XCBC - 12
+MD5 - 12
+
+=======================================
+Oct 24, 2011
+
+SHA_256 added to multibuffer
+------------------------
+12 Aug 2011
+
+API
+
+ The GCM API is distinct from the Multi-buffer API. This is because
+ the GCM code is an optimized single-buffer implementation. By
+ packaging them separately, the application has the option of where,
+ when, and how to call the GCM code, independent of how it is calling
+ the multi-buffer code.
+
+ For example, the application might be enqueing multi-buffer requests
+ for a separate thread to process. In this scenario, if a particular
+ packet used GCM, then the application could choose whether to call
+ the GCM routines directly, or whether to enqueue those requests and
+ have the compute thread call the GCM routines.
+
+GCM API
+
+ The GCM functions are defined as described in the header
+ files. They are simple computational routines, with no state
+ associated with them.
+
+Multi-Buffer API: Two Sets of Functions
+
+ There are two parallel interfaces, one suffixed with "_sse" and one
+ suffixed with "_avx". These are functionally equivalent. The "_sse"
+ functions work on WSM and later processors. The "_avx" functions
+ offer better performance, but they only run on processors after WSM.
+
+ The same interface object structures are used for both sets of
+ interfaces, although one cannot mix the two interfaces on the same
+ initialized object (e.g. it would be wrong to initialize with
+ init_mb_mgr_sse() and then to pass that to submit_job_avx() ). After
+ the MB_MGR structure has been initialized with one of the two
+ initialization functions (init_mb_mgr_sse() or init_mb_mgr_avx()),
+ only the corresponding functions should be used on it.
+
+ There are several ways in which an application could use these
+ interfaces.
+
+ 1) Direct
+ If an application is only going to be run on a post-WSM machine,
+ it can just call the "_avx" functions directly. Conversely, if it
+ is just going to be run on WSM machines, it can call the "_sse"
+ functions directly.
+
+ 2) Via Branches
+ If an application can run on both WSM and SNB and wants the
+ improved performance on SNB, then it can use some method to
+ determine if it is on SNB, and then use a conditional branch to
+ determine which function to call. E.g. this could be wrapped in a
+ macro along the lines of:
+ #define submit_job(mb_mgr) \
+ if (_use_avx) submit_job_avx(mb_mgr); \
+ else submit_job_sse(mb_mgr)
+
+ 3) Via a Function Table
+ One can embed the function addresses into a structure, call them
+ through this structure, and change the structure based on which
+ set of functions one wishes to use, e.g.
+
+ struct funcs_t {
+ init_mb_mgr_t init_mb_mgr;
+ get_next_job_t get_next_job;
+ submit_job_t submit_job;
+ get_completed_job_t get_completed_job;
+ flush_job_t flush_job;
+ };
+
+ funcs_t funcs_sse = {
+ init_mb_mgr_sse,
+ get_next_job_sse,
+ submit_job_sse,
+ get_completed_job_sse,
+ flush_job_sse
+ };
+ funcs_t funcs_avx = {
+ init_mb_mgr_avx,
+ get_next_job_avx,
+ submit_job_avx,
+ get_completed_job_avx,
+ flush_job_avx
+ };
+ funcs_t *funcs = &funcs_sse;
+ ...
+ if (do_avx)
+ funcs = &funcs_avx;
+ ...
+ funcs->init_mb_mgr(&mb_mgr);
+
+ For simplicity in the rest of this document, the functions will be
+ referred to without a suffix.
+
+API: Overview
+
+ The basic unit of work is a "job". It is represented by a
+ JOB_AES_HMAC structure. It contains all of the information needed to
+ perform encryption/decryption and SHA1/HMAC authentication on one
+ buffer for IPSec processing.
+
+ The basic paradigm is that the application needs to be able to
+ provide new jobs before old jobs have completed processing. One
+ might call this an "asynchronous" interface.
+
+ The basic interface is that the application "submits" a job to the
+ multi-buffer manager (MB_MGR), and it may receive a completed job
+ back, or it may receive NULL. The returned job, if there is one,
+ will not be the same as the submitted job, but the jobs will be
+ returned in the same order in which they are submitted.
+
+ Since there can be a semi-arbitrary number of outstanding jobs,
+ management of the job object is handled by the MB_MGR. The
+ application gets a pointer to a new job object by calling
+ get_next_job(). It then fills in the data fields and submits it by
+ calling submit_job(). If a job is returned, then that job has been
+ completed, and the application should do whatever it needs to do in
+ order to further process that buffer.
+
+ The job object is not explicitly returned to the MB_MGR. Rather it
+ is implicitly returned by the next call to get_next_job(). Another
+ way to put this is that the data within the job object is
+ guaranteed to be valid until the next call to get_next_job().
+
+ In order to reduce latency, there is an optional function that may
+ be called, get_completed_job(). This returns the next job if that
+ job has previously been completed. But if that job has not been
+ completed, no processing is done, and the function returns
+ NULL. This may be used to reduce the number of outstanding jobs
+ within the MB_MGR.
+
+ At times, it may be necessary to process the jobs currently within
+ the MB_MGR without providing new jobs as input. This process is
+ called "flushing", and it is invoked by calling flush_job(). If
+ there are any jobs within the MB_MGR, this will complete processing
+ on the earliest job and return it. It will only return NULL if there
+ are no jobs within the MB_MGR.
+
+ Flushing will be described in more detail below.
+
+ The presumption is that the same AES key will apply to a number of
+ buffers. For increased efficiency, it requires that the AES key
+ expansion happens as a distinct step apart from buffer
+ encryption/decryption. The expanded keys are stored in a data
+ structure (array), and this expanded key structure is used by the
+ job object.
+
+ There are two variants provided, MB_MGR and MB_MGR2. They are
+ functionally equivalent. The reason that two are provided is that
+ they differ slightly in their implementation, and so they may have
+ slightly different characteristics in terms of latency and overhead.
+
+API: Usage Skeleton
+ The basic usage is illustrated in the following pseudo-code:
+
+ init_mb_mgr(&mb_mgr);
+ ...
+ aes_keyexp_128(key, enc_exp_keys, dec_exp_keys);
+ ...
+ while (work_to_be_done) {
+ job = get_next_job(&mb_mgr);
+ // TODO: Fill in job fields
+ job = submit_job(&mb_mgr);
+ while (job) {
+ // TODO: Complete processing on job
+ job = get_completed_job(&mb_mgr);
+ }
+ }
+
+API: Job Fields
+ The mode is determined by the fields "cipher_direction" and
+ "chain_order". The first specifies encrypt or decrypt, and the
+ second specifies whether the hash should be done before or
+ after the cipher operation.
+ In the current implementation, only two combinations of these are
+ supported. For encryption, these should be set to "ENCRYPT" and
+ "CIPHER_HASH", and for decryption, these should be set to "DECRYPT"
+ and "HASH_CIPHER".
+
+ The expanded keys are pointed to by "aes_enc_key_expanded" and
+ "aes_dec_key_expanded". These arrays must be aligned on a 16-byte
+ boundary. Only one of these is necessary (as determined by
+ "cipher_direction").
+
+ One selects AES128 vs AES256 by using the "aes_key_len_in_bytes"
+ field. The only valid values are 16 (AES128) and 32 (AES256).
+
+ One selects the AES mode (CBC versus counter-mode) using
+ "cipher_mode".
+
+ One selects the hash algorithm (SHA1-HMAC, AES-XCBC, or MD5-HMAC)
+ using "hash_alg".
+
+ The data to be encrypted/decrypted is defined by
+ "src + cipher_start_src_offset_in_bytes". The length of data is
+ given by "msg_len_to_cipher_in_bytes". It must be a multiple of
+ 16 bytes.
+
+ The destination for the cipher operation is given by "dst" (NOT by
+ "dst + cipher_start_src_offset_in_bytes". In many/most applications,
+ the destination pointer may overlap the source pointer. That is,
+ "dst" may be equal to "src + cipher_start_src_offset_in_bytes".
+
+ The IV for the cipher operation is given by "iv". The
+ "iv_len_in_bytes" should be 16. This pointer does not need to be
+ aligned.
+
+ The data to be hashed is defined by
+ "src + hash_start_src_offset_in_bytes". The length of data is
+ given by "msg_len_to_hash_in_bytes".
+
+ The output of the hash operation is defined by
+ "auth_tag_output". The number of bytes written is given by
+ "auth_tag_output_len_in_bytes". Currently the only valid value for
+ this parameter is 12.
+
+ The ipad and opad are given as the result of hashing the HMAC key
+ xor'ed with the appropriate value. That is, rather than passing in
+ the HMAC key and rehashing the initial block for every buffer, the
+ hashing of the initial block is done separately, and the results of
+ this hash are used as input in the job structure.
+
+ Similar to the expanded AES keys, the premise here is that one HMAC
+ key will apply to many buffers, so we want to do that hashing once
+ and not for each buffer.
+
+ The "status" reflects the status of the returned job. It should be
+ "STS_COMPLETED".
+
+ The "user_data" field is ignored. It can be used to attach
+ application data to the job object.
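+
+ Putting these fields together, a sketch of submitting one AES128-CBC
+ encrypt plus SHA1-HMAC job might look as follows, using placeholder
+ variables in the style of the skeleton above. The enumeration
+ constants for "cipher_mode" and "hash_alg" are not spelled out in
+ this document, so CBC and SHA1 below are assumed names used only for
+ illustration:
+
+ job = get_next_job(&mb_mgr);
+ job->cipher_direction = ENCRYPT;
+ job->chain_order = CIPHER_HASH;
+ job->cipher_mode = CBC; // assumed constant name
+ job->hash_alg = SHA1; // assumed constant name
+ job->aes_enc_key_expanded = enc_exp_keys; // 16-byte aligned
+ job->aes_key_len_in_bytes = 16; // AES128
+ job->src = buffer;
+ job->cipher_start_src_offset_in_bytes = cipher_offset;
+ job->msg_len_to_cipher_in_bytes = cipher_len; // multiple of 16
+ job->dst = buffer + cipher_offset; // may overlap the source
+ job->iv = iv;
+ job->iv_len_in_bytes = 16;
+ job->hash_start_src_offset_in_bytes = hash_offset;
+ job->msg_len_to_hash_in_bytes = hash_len;
+ job->auth_tag_output = tag;
+ job->auth_tag_output_len_in_bytes = 12;
+ // ipad/opad fields are filled from the pre-hashed HMAC key
+ job = submit_job(&mb_mgr);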
+
+Flushing Concerns
+ As long as jobs are coming in at a reasonable rate, jobs should be
+ returned at a reasonable rate. However, if there is a lull in the
+ arrival of new jobs, the last few jobs that were submitted tend to
+ stay in the MB_MGR until new jobs arrive. This might result in there
+ being an unreasonable latency for these jobs.
+
+ In this case, flush_job() should be used to complete processing on
+ these outstanding jobs and prevent them from having excessive
+ latency.
+
+ Exactly when and how to use flush_job() is up to the application,
+ and is a balancing act. The processing of flush_job() is less
+ efficient than that of submit_job(), so calling flush_job() too
+ often will lower the system efficiency. Conversely, calling
+ flush_job() too rarely may result in some jobs seeing excessive
+ latency.
+
+ There are several strategies that the application may employ for
+ flushing. One usage model is that there is a (thread-safe) queue
+ containing work items. One or more threads puts work onto this
+ queue, and one or more processing threads removes items from this
+ queue and processes them through the MB_MGR. In this usage, a simple
+ flushing strategy is that when the processing thread wants to do
+ more work, but the queue is empty, it then proceeds to flush jobs
+ until either the queue contains more work, or the MB_MGR no longer
+ contains jobs (i.e. that flush_job() returns NULL). A variation on
+ this is that when the work queue is empty, the processing thread
+ might pause for a short time to see if any new work appears, before
+ it starts flushing.
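+
+ A sketch of that first strategy is shown below. The queue_get()
+ helper is hypothetical; it is assumed to return the next work item,
+ or NULL when the queue is currently empty:
+
+ while (running) {
+ work = queue_get(&queue); // hypothetical helper
+ if (work != NULL) {
+ job = get_next_job(&mb_mgr);
+ // TODO: Fill in job fields from the work item
+ job = submit_job(&mb_mgr);
+ } else {
+ // Queue empty: flush one job (NULL when MB_MGR is empty)
+ job = flush_job(&mb_mgr);
+ }
+ while (job) {
+ // TODO: Complete processing on job
+ job = get_completed_job(&mb_mgr);
+ }
+ }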
+
+ In other usage models, there may be no such queue. An alternate
+ flushing strategy is to have a separate "flush thread" hanging
+ around. It wakes up periodically and checks to see if any work has
+ been requested since the last time it woke up. If some period of
+ time has gone by with no new work appearing, it would proceed to
+ flush the MB_MGR.
+
+AES Key Usage
+ If the AES mode is CBC, then either aes_enc_key_expanded or
+ aes_dec_key_expanded is used, depending on whether the data is
+ being encrypted or decrypted. However, if the AES mode is CNTR
+ (counter mode), then only aes_enc_key_expanded is used, even for a
+ decrypt operation.
+
+ The application can handle this dichotomy, or it might choose to
+ simply set both fields in all cases.
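+
+ A sketch of the second option, which avoids special-casing the
+ cipher mode, is simply:
+
+ job->aes_enc_key_expanded = enc_exp_keys;
+ job->aes_dec_key_expanded = dec_exp_keys;
+ // CBC picks one of the two based on "cipher_direction";
+ // CNTR always uses the encrypt keys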
+
+Thread Safety
+ The MB_MGR and the associated functions ARE NOT thread safe. If
+ there are multiple threads that may be calling these functions
+ (e.g. a processing thread and a flushing thread), it is the
+ responsibility of the application to put in place sufficient locking
+ so that no two threads will make calls to the same MB_MGR object at
+ the same time.
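+
+ As a minimal sketch, a pthread mutex (or any equivalent lock) could
+ be used to serialize access; mb_mgr_lock below is an assumed
+ application-owned lock:
+
+ pthread_mutex_lock(&mb_mgr_lock);
+ job = get_next_job(&mb_mgr);
+ // TODO: Fill in job fields
+ job = submit_job(&mb_mgr);
+ pthread_mutex_unlock(&mb_mgr_lock);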
+
+XMM Register Usage
+ The current implementation is designed for integration in the Linux
+ Kernel. All of the functions satisfy the Linux ABI with respect to
+ general purpose registers. However, the submit_job() and flush_job()
+ functions use XMM registers without saving/restoring any of them. It
+ is up to the application to manage the saving/restoring of XMM
+ registers itself.
+
+Auxiliary Functions
+ There are several auxiliary functions packaged with MB_MGR. These may
+ be used, or the application may choose to use its own versions. Two
+ of these, aes_keyexp_128() and aes_keyexp_256(), expand AES keys into
+ a form that is acceptable for reference in the job structure.
+
+ In the case of AES128, the expanded key structure should be an array
+ of 11 128-bit words, aligned on a 16-byte boundary. In the case of
+ AES256, it should be an array of 15 128-bit words, aligned on a
+ 16-byte boundary.
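+
+ For example, assuming a GCC/Clang style alignment attribute, the
+ expanded key storage for AES128 could be declared and filled as:
+
+ __attribute__((aligned(16))) uint8_t enc_exp_keys[11 * 16];
+ __attribute__((aligned(16))) uint8_t dec_exp_keys[11 * 16];
+ ...
+ aes_keyexp_128(key, enc_exp_keys, dec_exp_keys);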
+
+ There is also a function, sha1(), which will compute the SHA1 digest
+ of a single 64-byte block. It can be used to compute the ipad and
+ opad digests. There is a similar function, md5(), which can be used
+ when using MD5-HMAC.
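+
+ As an illustration of the ipad/opad preparation described earlier,
+ assuming sha1() takes the 64-byte block first and the digest output
+ second, and that the HMAC key has already been zero-padded to
+ 64 bytes:
+
+ for (i = 0; i < 64; i++)
+ buf[i] = hmac_key[i] ^ 0x36;
+ sha1(buf, ipad_hash); // 20-byte SHA1 digest
+
+ for (i = 0; i < 64; i++)
+ buf[i] = hmac_key[i] ^ 0x5c;
+ sha1(buf, opad_hash);
+
+ The resulting ipad_hash and opad_hash digests are then referenced
+ from the job structure.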
+
+ For further details on the usage of these functions, see the sample
+ test application.
diff --git a/src/spdk/intel-ipsec-mb/aes128_ecbenc_x3.asm b/src/spdk/intel-ipsec-mb/aes128_ecbenc_x3.asm
new file mode 100644
index 000000000..e13b8467d
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/aes128_ecbenc_x3.asm
@@ -0,0 +1,346 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+; Routines to do simple AES ECB Enc on one stream with 3 blocks
+
+;void
+; aes128_ecbenc_x3_sse(void *in, void *keys, void *out1, void *out2, void *out3);
+;void
+; aes128_ecbenc_x3_avx(void *in, void *keys, void *out1, void *out2, void *out3);
+
+%include "include/os.asm"
+%define NO_AESNI_RENAME
+%include "include/aesni_emu.inc"
+%include "include/clear_regs.asm"
+
+%ifdef LINUX
+%define IN rdi ; arg 1
+%define KEYS rsi ; arg 2
+%define OUT0 rdx ; arg 3
+%define OUT1 rcx ; arg 4
+%define OUT2 r8 ; arg 5
+%else
+%define IN rcx ; arg 1
+%define KEYS rdx ; arg 2
+%define OUT0 r8 ; arg 3
+%define OUT1 r9 ; arg 4
+%define OUT2 rax ;
+%endif
+
+
+%define XDATA0 xmm0
+%define XDATA1 xmm1
+%define XDATA2 xmm2
+
+%define XKEYA xmm3
+%define XKEYB xmm4
+
+section .text
+
+MKGLOBAL(aes128_ecbenc_x3_sse,function,internal)
+aes128_ecbenc_x3_sse:
+
+%ifndef LINUX
+ mov OUT2, [rsp + 5*8]
+%endif
+
+%ifdef SAFE_PARAM
+ cmp IN, 0
+ jz aes128_ecbenc_x3_sse_return
+ cmp KEYS, 0
+ jz aes128_ecbenc_x3_sse_return
+ cmp OUT0, 0
+ jz aes128_ecbenc_x3_sse_return
+ cmp OUT1, 0
+ jz aes128_ecbenc_x3_sse_return
+ cmp OUT2, 0
+ jz aes128_ecbenc_x3_sse_return
+%endif
+
+ movdqu XDATA0, [IN + 0*16] ; load first block of plain text
+ movdqu XDATA1, [IN + 1*16] ; load second block of plain text
+ movdqu XDATA2, [IN + 2*16] ; load third block of plain text
+
+ movdqa XKEYA, [KEYS + 16*0]
+
+ movdqa XKEYB, [KEYS + 16*1]
+ pxor XDATA0, XKEYA ; 0. ARK
+ pxor XDATA1, XKEYA ; 0. ARK
+ pxor XDATA2, XKEYA ; 0. ARK
+
+ movdqa XKEYA, [KEYS + 16*2]
+ aesenc XDATA0, XKEYB ; 1. ENC
+ aesenc XDATA1, XKEYB ; 1. ENC
+ aesenc XDATA2, XKEYB ; 1. ENC
+
+ movdqa XKEYB, [KEYS + 16*3]
+ aesenc XDATA0, XKEYA ; 2. ENC
+ aesenc XDATA1, XKEYA ; 2. ENC
+ aesenc XDATA2, XKEYA ; 2. ENC
+
+ movdqa XKEYA, [KEYS + 16*4]
+ aesenc XDATA0, XKEYB ; 3. ENC
+ aesenc XDATA1, XKEYB ; 3. ENC
+ aesenc XDATA2, XKEYB ; 3. ENC
+
+ movdqa XKEYB, [KEYS + 16*5]
+ aesenc XDATA0, XKEYA ; 4. ENC
+ aesenc XDATA1, XKEYA ; 4. ENC
+ aesenc XDATA2, XKEYA ; 4. ENC
+
+ movdqa XKEYA, [KEYS + 16*6]
+ aesenc XDATA0, XKEYB ; 5. ENC
+ aesenc XDATA1, XKEYB ; 5. ENC
+ aesenc XDATA2, XKEYB ; 5. ENC
+
+ movdqa XKEYB, [KEYS + 16*7]
+ aesenc XDATA0, XKEYA ; 6. ENC
+ aesenc XDATA1, XKEYA ; 6. ENC
+ aesenc XDATA2, XKEYA ; 6. ENC
+
+ movdqa XKEYA, [KEYS + 16*8]
+ aesenc XDATA0, XKEYB ; 7. ENC
+ aesenc XDATA1, XKEYB ; 7. ENC
+ aesenc XDATA2, XKEYB ; 7. ENC
+
+ movdqa XKEYB, [KEYS + 16*9]
+ aesenc XDATA0, XKEYA ; 8. ENC
+ aesenc XDATA1, XKEYA ; 8. ENC
+ aesenc XDATA2, XKEYA ; 8. ENC
+
+ movdqa XKEYA, [KEYS + 16*10]
+ aesenc XDATA0, XKEYB ; 9. ENC
+ aesenc XDATA1, XKEYB ; 9. ENC
+ aesenc XDATA2, XKEYB ; 9. ENC
+
+ aesenclast XDATA0, XKEYA ; 10. ENC
+ aesenclast XDATA1, XKEYA ; 10. ENC
+ aesenclast XDATA2, XKEYA ; 10. ENC
+
+ movdqu [OUT0], XDATA0 ; write back ciphertext
+ movdqu [OUT1], XDATA1 ; write back ciphertext
+ movdqu [OUT2], XDATA2 ; write back ciphertext
+
+aes128_ecbenc_x3_sse_return:
+
+%ifdef SAFE_DATA
+ clear_scratch_gps_asm
+ clear_scratch_xmms_sse_asm
+%endif
+ ret
+
+MKGLOBAL(aes128_ecbenc_x3_sse_no_aesni,function,internal)
+aes128_ecbenc_x3_sse_no_aesni:
+
+%ifndef LINUX
+ mov OUT2, [rsp + 5*8]
+%endif
+
+%ifdef SAFE_PARAM
+ cmp IN, 0
+ jz aes128_ecbenc_x3_sse_no_aesni_return
+ cmp KEYS, 0
+ jz aes128_ecbenc_x3_sse_no_aesni_return
+ cmp OUT0, 0
+ jz aes128_ecbenc_x3_sse_no_aesni_return
+ cmp OUT1, 0
+ jz aes128_ecbenc_x3_sse_no_aesni_return
+ cmp OUT2, 0
+ jz aes128_ecbenc_x3_sse_no_aesni_return
+%endif
+
+ movdqu XDATA0, [IN + 0*16] ; load first block of plain text
+ movdqu XDATA1, [IN + 1*16] ; load second block of plain text
+ movdqu XDATA2, [IN + 2*16] ; load third block of plain text
+
+ movdqa XKEYA, [KEYS + 16*0]
+
+ movdqa XKEYB, [KEYS + 16*1]
+ pxor XDATA0, XKEYA ; 0. ARK
+ pxor XDATA1, XKEYA ; 0. ARK
+ pxor XDATA2, XKEYA ; 0. ARK
+
+ movdqa XKEYA, [KEYS + 16*2]
+ EMULATE_AESENC XDATA0, XKEYB ; 1. ENC
+ EMULATE_AESENC XDATA1, XKEYB ; 1. ENC
+ EMULATE_AESENC XDATA2, XKEYB ; 1. ENC
+
+ movdqa XKEYB, [KEYS + 16*3]
+ EMULATE_AESENC XDATA0, XKEYA ; 2. ENC
+ EMULATE_AESENC XDATA1, XKEYA ; 2. ENC
+ EMULATE_AESENC XDATA2, XKEYA ; 2. ENC
+
+ movdqa XKEYA, [KEYS + 16*4]
+ EMULATE_AESENC XDATA0, XKEYB ; 3. ENC
+ EMULATE_AESENC XDATA1, XKEYB ; 3. ENC
+ EMULATE_AESENC XDATA2, XKEYB ; 3. ENC
+
+ movdqa XKEYB, [KEYS + 16*5]
+ EMULATE_AESENC XDATA0, XKEYA ; 4. ENC
+ EMULATE_AESENC XDATA1, XKEYA ; 4. ENC
+ EMULATE_AESENC XDATA2, XKEYA ; 4. ENC
+
+ movdqa XKEYA, [KEYS + 16*6]
+ EMULATE_AESENC XDATA0, XKEYB ; 5. ENC
+ EMULATE_AESENC XDATA1, XKEYB ; 5. ENC
+ EMULATE_AESENC XDATA2, XKEYB ; 5. ENC
+
+ movdqa XKEYB, [KEYS + 16*7]
+ EMULATE_AESENC XDATA0, XKEYA ; 6. ENC
+ EMULATE_AESENC XDATA1, XKEYA ; 6. ENC
+ EMULATE_AESENC XDATA2, XKEYA ; 6. ENC
+
+ movdqa XKEYA, [KEYS + 16*8]
+ EMULATE_AESENC XDATA0, XKEYB ; 7. ENC
+ EMULATE_AESENC XDATA1, XKEYB ; 7. ENC
+ EMULATE_AESENC XDATA2, XKEYB ; 7. ENC
+
+ movdqa XKEYB, [KEYS + 16*9]
+ EMULATE_AESENC XDATA0, XKEYA ; 8. ENC
+ EMULATE_AESENC XDATA1, XKEYA ; 8. ENC
+ EMULATE_AESENC XDATA2, XKEYA ; 8. ENC
+
+ movdqa XKEYA, [KEYS + 16*10]
+ EMULATE_AESENC XDATA0, XKEYB ; 9. ENC
+ EMULATE_AESENC XDATA1, XKEYB ; 9. ENC
+ EMULATE_AESENC XDATA2, XKEYB ; 9. ENC
+
+ EMULATE_AESENCLAST XDATA0, XKEYA ; 10. ENC
+ EMULATE_AESENCLAST XDATA1, XKEYA ; 10. ENC
+ EMULATE_AESENCLAST XDATA2, XKEYA ; 10. ENC
+
+ movdqu [OUT0], XDATA0 ; write back ciphertext
+ movdqu [OUT1], XDATA1 ; write back ciphertext
+ movdqu [OUT2], XDATA2 ; write back ciphertext
+
+aes128_ecbenc_x3_sse_no_aesni_return:
+
+%ifdef SAFE_DATA
+ clear_scratch_gps_asm
+ clear_scratch_xmms_sse_asm
+%endif
+ ret
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+MKGLOBAL(aes128_ecbenc_x3_avx,function,internal)
+aes128_ecbenc_x3_avx:
+
+%ifndef LINUX
+ mov OUT2, [rsp + 5*8]
+%endif
+
+%ifdef SAFE_PARAM
+ cmp IN, 0
+ jz aes128_ecbenc_x3_avx_return
+ cmp KEYS, 0
+ jz aes128_ecbenc_x3_avx_return
+ cmp OUT0, 0
+ jz aes128_ecbenc_x3_avx_return
+ cmp OUT1, 0
+ jz aes128_ecbenc_x3_avx_return
+ cmp OUT2, 0
+ jz aes128_ecbenc_x3_avx_return
+%endif
+
+ vmovdqu XDATA0, [IN + 0*16] ; load first block of plain text
+ vmovdqu XDATA1, [IN + 1*16] ; load second block of plain text
+ vmovdqu XDATA2, [IN + 2*16] ; load third block of plain text
+
+ vmovdqa XKEYA, [KEYS + 16*0]
+
+ vmovdqa XKEYB, [KEYS + 16*1]
+ vpxor XDATA0, XDATA0, XKEYA ; 0. ARK
+ vpxor XDATA1, XDATA1, XKEYA ; 0. ARK
+ vpxor XDATA2, XDATA2, XKEYA ; 0. ARK
+
+ vmovdqa XKEYA, [KEYS + 16*2]
+ vaesenc XDATA0, XKEYB ; 1. ENC
+ vaesenc XDATA1, XKEYB ; 1. ENC
+ vaesenc XDATA2, XKEYB ; 1. ENC
+
+ vmovdqa XKEYB, [KEYS + 16*3]
+ vaesenc XDATA0, XKEYA ; 2. ENC
+ vaesenc XDATA1, XKEYA ; 2. ENC
+ vaesenc XDATA2, XKEYA ; 2. ENC
+
+ vmovdqa XKEYA, [KEYS + 16*4]
+ vaesenc XDATA0, XKEYB ; 3. ENC
+ vaesenc XDATA1, XKEYB ; 3. ENC
+ vaesenc XDATA2, XKEYB ; 3. ENC
+
+ vmovdqa XKEYB, [KEYS + 16*5]
+ vaesenc XDATA0, XKEYA ; 4. ENC
+ vaesenc XDATA1, XKEYA ; 4. ENC
+ vaesenc XDATA2, XKEYA ; 4. ENC
+
+ vmovdqa XKEYA, [KEYS + 16*6]
+ vaesenc XDATA0, XKEYB ; 5. ENC
+ vaesenc XDATA1, XKEYB ; 5. ENC
+ vaesenc XDATA2, XKEYB ; 5. ENC
+
+ vmovdqa XKEYB, [KEYS + 16*7]
+ vaesenc XDATA0, XKEYA ; 6. ENC
+ vaesenc XDATA1, XKEYA ; 6. ENC
+ vaesenc XDATA2, XKEYA ; 6. ENC
+
+ vmovdqa XKEYA, [KEYS + 16*8]
+ vaesenc XDATA0, XKEYB ; 7. ENC
+ vaesenc XDATA1, XKEYB ; 7. ENC
+ vaesenc XDATA2, XKEYB ; 7. ENC
+
+ vmovdqa XKEYB, [KEYS + 16*9]
+ vaesenc XDATA0, XKEYA ; 8. ENC
+ vaesenc XDATA1, XKEYA ; 8. ENC
+ vaesenc XDATA2, XKEYA ; 8. ENC
+
+ vmovdqa XKEYA, [KEYS + 16*10]
+ vaesenc XDATA0, XKEYB ; 9. ENC
+ vaesenc XDATA1, XKEYB ; 9. ENC
+ vaesenc XDATA2, XKEYB ; 9. ENC
+
+ vaesenclast XDATA0, XKEYA ; 10. ENC
+ vaesenclast XDATA1, XKEYA ; 10. ENC
+ vaesenclast XDATA2, XKEYA ; 10. ENC
+
+ vmovdqu [OUT0], XDATA0 ; write back ciphertext
+ vmovdqu [OUT1], XDATA1 ; write back ciphertext
+ vmovdqu [OUT2], XDATA2 ; write back ciphertext
+
+aes128_ecbenc_x3_avx_return:
+
+%ifdef SAFE_DATA
+ clear_scratch_gps_asm
+ clear_scratch_xmms_avx_asm
+%endif
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
+
diff --git a/src/spdk/intel-ipsec-mb/aes_cmac_subkey_gen.asm b/src/spdk/intel-ipsec-mb/aes_cmac_subkey_gen.asm
new file mode 100644
index 000000000..04d057be4
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/aes_cmac_subkey_gen.asm
@@ -0,0 +1,375 @@
+;;
+;; Copyright (c) 2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%include "include/os.asm"
+%define NO_AESNI_RENAME
+%include "include/aesni_emu.inc"
+%include "include/clear_regs.asm"
+
+;;; Routines to generate subkeys for AES-CMAC.
+;;; See RFC 4493 for more details.
+
+;; In System V AMD64 ABI
+;; callee saves: RBX, RBP, R12-R15
+;; Windows x64 ABI
+;; callee saves: RBX, RBP, RDI, RSI, RSP, R12-R15
+;;
+;; Registers: RAX RBX RCX RDX RBP RSI RDI R8 R9 R10 R11 R12 R13 R14 R15
+;; -----------------------------------------------------------
+;; Windows clobbers:
+;; Windows preserves: RAX RBX RCX RDX RBP RSI RDI R8 R9 R10 R11 R12 R13 R14 R15
+;; -----------------------------------------------------------
+;; Linux clobbers:
+;; Linux preserves: RAX RBX RCX RDX RBP RSI RDI R8 R9 R10 R11 R12 R13 R14 R15
+;; -----------------------------------------------------------
+;;
+;; Linux/Windows clobbers: xmm0, xmm1, xmm2
+;;
+
+%ifdef LINUX
+%define arg1 rdi
+%define arg2 rsi
+%define arg3 rdx
+%define arg4 rcx
+%define arg5 r8
+%else
+%define arg1 rcx
+%define arg2 rdx
+%define arg3 r8
+%define arg4 r9
+%define arg5 [rsp + 5*8]
+%endif
+
+%define KEY_EXP arg1
+%define KEY1 arg2
+%define KEY2 arg3
+
+%define XL xmm0
+%define XKEY1 xmm1
+%define XKEY2 xmm2
+
+
+section .data
+default rel
+
+align 16
+xmm_bit127:
+ ;ddq 0x80000000000000000000000000000000
+ dq 0x0000000000000000, 0x8000000000000000
+
+align 16
+xmm_bit63:
+ ;ddq 0x00000000000000008000000000000000
+ dq 0x8000000000000000, 0x0000000000000000
+
+align 16
+xmm_bit64:
+ ;ddq 0x00000000000000010000000000000000
+ dq 0x0000000000000000, 0x0000000000000001
+
+align 16
+const_Rb:
+ ;ddq 0x00000000000000000000000000000087
+ dq 0x0000000000000087, 0x0000000000000000
+
+align 16
+byteswap_const:
+ ;DDQ 0x000102030405060708090A0B0C0D0E0F
+ dq 0x08090A0B0C0D0E0F, 0x0001020304050607
+
+section .text
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;
+;;; void aes_cmac_subkey_gen_sse(const void *key_exp, void *key1, void *key2)
+;;;
+;;; key_exp : IN : address of expanded encryption key structure (AES 128)
+;;; key1 : OUT : address to store subkey 1 (AES128 - 16 bytes)
+;;; key2 : OUT : address to store subkey 2 (AES128 - 16 bytes)
+;;;
+;;; RFC 4493 Figure 2.2 describes the function operation at a high level
+;;;
+;;; +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+;;; + Algorithm Generate_Subkey +
+;;; +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+;;; + +
+;;; + Input : K (128-bit key) +
+;;; + Output : K1 (128-bit first subkey) +
+;;; + K2 (128-bit second subkey) +
+;;; +-------------------------------------------------------------------+
+;;; + +
+;;; + Constants: const_Zero is 0x00000000000000000000000000000000 +
+;;; + const_Rb is 0x00000000000000000000000000000087 +
+;;; + Variables: L for output of AES-128 applied to 0^128 +
+;;; + +
+;;; + Step 1. L := AES-128(K, const_Zero) ; +
+;;; + Step 2. if MSB(L) is equal to 0 +
+;;; + then K1 := L << 1 ; +
+;;; + else K1 := (L << 1) XOR const_Rb ; +
+;;; + Step 3. if MSB(K1) is equal to 0 +
+;;; + then K2 := K1 << 1 ; +
+;;; + else K2 := (K1 << 1) XOR const_Rb ; +
+;;; + Step 4. return K1, K2 ; +
+;;; + +
+;;; +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+MKGLOBAL(aes_cmac_subkey_gen_sse,function,)
+align 32
+aes_cmac_subkey_gen_sse:
+
+%ifdef SAFE_PARAM
+ cmp KEY_EXP, 0
+ jz aes_cmac_subkey_gen_sse_return
+ cmp KEY1, 0
+ jz aes_cmac_subkey_gen_sse_return
+ cmp KEY2, 0
+ jz aes_cmac_subkey_gen_sse_return
+%endif
+
+ ;; Step 1. L := AES-128(K, const_Zero) ;
+ movdqa XL, [KEY_EXP + 16*0] ; 0. ARK xor const_Zero
+ aesenc XL, [KEY_EXP + 16*1] ; 1. ENC
+ aesenc XL, [KEY_EXP + 16*2] ; 2. ENC
+ aesenc XL, [KEY_EXP + 16*3] ; 3. ENC
+ aesenc XL, [KEY_EXP + 16*4] ; 4. ENC
+ aesenc XL, [KEY_EXP + 16*5] ; 5. ENC
+ aesenc XL, [KEY_EXP + 16*6] ; 6. ENC
+ aesenc XL, [KEY_EXP + 16*7] ; 7. ENC
+ aesenc XL, [KEY_EXP + 16*8] ; 8. ENC
+ aesenc XL, [KEY_EXP + 16*9] ; 9. ENC
+ aesenclast XL, [KEY_EXP + 16*10] ; 10. ENC
+
+ ;; Step 2. if MSB(L) is equal to 0
+ ;; then K1 := L << 1 ;
+ ;; else K1 := (L << 1) XOR const_Rb ;
+ pshufb XL, [rel byteswap_const]
+ movdqa XKEY1, XL
+ psllq XKEY1, 1
+ ptest XL, [rel xmm_bit63]
+ jz K1_no_carry_bit_sse
+ ;; set carry bit
+ por XKEY1, [rel xmm_bit64]
+K1_no_carry_bit_sse:
+ ptest XL, [rel xmm_bit127]
+ jz K1_msb_is_zero_sse
+ ;; XOR const_Rb
+ pxor XKEY1, [rel const_Rb]
+K1_msb_is_zero_sse:
+
+ ;; Step 3. if MSB(K1) is equal to 0
+ ;; then K2 := K1 << 1 ;
+ ;; else K2 := (K1 << 1) XOR const_Rb ;
+ movdqa XKEY2, XKEY1
+ psllq XKEY2, 1
+ ptest XKEY1, [rel xmm_bit63]
+ jz K2_no_carry_bit_sse
+ ;; set carry bit
+ por XKEY2, [rel xmm_bit64]
+K2_no_carry_bit_sse:
+ ptest XKEY1, [rel xmm_bit127]
+ jz K2_msb_is_zero_sse
+ ;; XOR const_Rb
+ pxor XKEY2, [rel const_Rb]
+K2_msb_is_zero_sse:
+
+ ;; Step 4. return K1, K2
+ pshufb XKEY1, [rel byteswap_const]
+ pshufb XKEY2, [rel byteswap_const]
+ movdqu [KEY1], XKEY1
+ movdqu [KEY2], XKEY2
+
+aes_cmac_subkey_gen_sse_return:
+
+%ifdef SAFE_DATA
+ clear_scratch_gps_asm
+ clear_scratch_xmms_sse_asm
+%endif
+ ret
+
+MKGLOBAL(aes_cmac_subkey_gen_sse_no_aesni,function,)
+align 32
+aes_cmac_subkey_gen_sse_no_aesni:
+
+%ifdef SAFE_PARAM
+ cmp KEY_EXP, 0
+ jz aes_cmac_subkey_gen_sse_no_aesni_return
+ cmp KEY1, 0
+ jz aes_cmac_subkey_gen_sse_no_aesni_return
+ cmp KEY2, 0
+ jz aes_cmac_subkey_gen_sse_no_aesni_return
+%endif
+
+ ;; Step 1. L := AES-128(K, const_Zero) ;
+ movdqa XL, [KEY_EXP + 16*0] ; 0. ARK xor const_Zero
+ EMULATE_AESENC XL, [KEY_EXP + 16*1] ; 1. ENC
+ EMULATE_AESENC XL, [KEY_EXP + 16*2] ; 2. ENC
+ EMULATE_AESENC XL, [KEY_EXP + 16*3] ; 3. ENC
+ EMULATE_AESENC XL, [KEY_EXP + 16*4] ; 4. ENC
+ EMULATE_AESENC XL, [KEY_EXP + 16*5] ; 5. ENC
+ EMULATE_AESENC XL, [KEY_EXP + 16*6] ; 6. ENC
+ EMULATE_AESENC XL, [KEY_EXP + 16*7] ; 7. ENC
+ EMULATE_AESENC XL, [KEY_EXP + 16*8] ; 8. ENC
+ EMULATE_AESENC XL, [KEY_EXP + 16*9] ; 9. ENC
+ EMULATE_AESENCLAST XL, [KEY_EXP + 16*10]; 10. ENC
+
+ ;; Step 2. if MSB(L) is equal to 0
+ ;; then K1 := L << 1 ;
+ ;; else K1 := (L << 1) XOR const_Rb ;
+ pshufb XL, [rel byteswap_const]
+ movdqa XKEY1, XL
+ psllq XKEY1, 1
+ ptest XL, [rel xmm_bit63]
+ jz K1_no_carry_bit_sse2
+ ;; set carry bit
+ por XKEY1, [rel xmm_bit64]
+K1_no_carry_bit_sse2:
+ ptest XL, [rel xmm_bit127]
+ jz K1_msb_is_zero_sse2
+ ;; XOR const_Rb
+ pxor XKEY1, [rel const_Rb]
+K1_msb_is_zero_sse2:
+
+ ;; Step 3. if MSB(K1) is equal to 0
+ ;; then K2 := K1 << 1 ;
+ ;; else K2 := (K1 << 1) XOR const_Rb ;
+ movdqa XKEY2, XKEY1
+ psllq XKEY2, 1
+ ptest XKEY1, [rel xmm_bit63]
+ jz K2_no_carry_bit_sse2
+ ;; set carry bit
+ por XKEY2, [rel xmm_bit64]
+K2_no_carry_bit_sse2:
+ ptest XKEY1, [rel xmm_bit127]
+ jz K2_msb_is_zero_sse2
+ ;; XOR const_Rb
+ pxor XKEY2, [rel const_Rb]
+K2_msb_is_zero_sse2:
+
+ ;; Step 4. return K1, K2
+ pshufb XKEY1, [rel byteswap_const]
+ pshufb XKEY2, [rel byteswap_const]
+ movdqu [KEY1], XKEY1
+ movdqu [KEY2], XKEY2
+
+aes_cmac_subkey_gen_sse_no_aesni_return:
+
+%ifdef SAFE_DATA
+ clear_scratch_gps_asm
+ clear_scratch_xmms_sse_asm
+%endif
+ ret
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;
+;;; void aes_cmac_subkey_gen_avx(const void *key_exp, void *key1, void *key2)
+;;;
+;;; key_exp : IN : address of expanded encryption key structure (AES 128)
+;;; key1 : OUT : address to store subkey 1 (AES128 - 16 bytes)
+;;; key2 : OUT : address to store subkey 2 (AES128 - 16 bytes)
+;;;
+;;; See aes_cmac_subkey_gen_sse() above for operation details
+
+MKGLOBAL(aes_cmac_subkey_gen_avx,function,)
+MKGLOBAL(aes_cmac_subkey_gen_avx2,function,)
+MKGLOBAL(aes_cmac_subkey_gen_avx512,function,)
+align 32
+aes_cmac_subkey_gen_avx:
+aes_cmac_subkey_gen_avx2:
+aes_cmac_subkey_gen_avx512:
+
+%ifdef SAFE_PARAM
+ cmp KEY_EXP, 0
+ jz aes_cmac_subkey_gen_avx_return
+ cmp KEY1, 0
+ jz aes_cmac_subkey_gen_avx_return
+ cmp KEY2, 0
+ jz aes_cmac_subkey_gen_avx_return
+%endif
+
+ ;; Step 1. L := AES-128(K, const_Zero) ;
+ vmovdqa XL, [KEY_EXP + 16*0] ; 0. ARK xor const_Zero
+ vaesenc XL, [KEY_EXP + 16*1] ; 1. ENC
+ vaesenc XL, [KEY_EXP + 16*2] ; 2. ENC
+ vaesenc XL, [KEY_EXP + 16*3] ; 3. ENC
+ vaesenc XL, [KEY_EXP + 16*4] ; 4. ENC
+ vaesenc XL, [KEY_EXP + 16*5] ; 5. ENC
+ vaesenc XL, [KEY_EXP + 16*6] ; 6. ENC
+ vaesenc XL, [KEY_EXP + 16*7] ; 7. ENC
+ vaesenc XL, [KEY_EXP + 16*8] ; 8. ENC
+ vaesenc XL, [KEY_EXP + 16*9] ; 9. ENC
+ vaesenclast XL, [KEY_EXP + 16*10] ; 10. ENC
+
+ ;; Step 2. if MSB(L) is equal to 0
+ ;; then K1 := L << 1 ;
+ ;; else K1 := (L << 1) XOR const_Rb ;
+ vpshufb XL, [rel byteswap_const]
+ vmovdqa XKEY1, XL
+ vpsllq XKEY1, 1
+ vptest XL, [rel xmm_bit63]
+ jz K1_no_carry_bit_avx
+ ;; set carry bit
+ vpor XKEY1, [rel xmm_bit64]
+K1_no_carry_bit_avx:
+ vptest XL, [rel xmm_bit127]
+ jz K1_msb_is_zero_avx
+ ;; XOR const_Rb
+ vpxor XKEY1, [rel const_Rb]
+K1_msb_is_zero_avx:
+
+ ;; Step 3. if MSB(K1) is equal to 0
+ ;; then K2 := K1 << 1 ;
+ ;; else K2 := (K1 << 1) XOR const_Rb ;
+ vmovdqa XKEY2, XKEY1
+ vpsllq XKEY2, 1
+ vptest XKEY1, [rel xmm_bit63]
+ jz K2_no_carry_bit_avx
+ ;; set carry bit
+ vpor XKEY2, [rel xmm_bit64]
+K2_no_carry_bit_avx:
+ vptest XKEY1, [rel xmm_bit127]
+ jz K2_msb_is_zero_avx
+ ;; XOR const_Rb
+ vpxor XKEY2, [rel const_Rb]
+K2_msb_is_zero_avx:
+
+ ;; Step 4. return K1, K2
+ vpshufb XKEY1, [rel byteswap_const]
+ vpshufb XKEY2, [rel byteswap_const]
+ vmovdqu [KEY1], XKEY1
+ vmovdqu [KEY2], XKEY2
+
+aes_cmac_subkey_gen_avx_return:
+
+%ifdef SAFE_DATA
+ clear_scratch_gps_asm
+ clear_scratch_xmms_avx_asm
+%endif
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/aes_keyexp_128.asm b/src/spdk/intel-ipsec-mb/aes_keyexp_128.asm
new file mode 100644
index 000000000..4ee57b109
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/aes_keyexp_128.asm
@@ -0,0 +1,523 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+; Routine to do AES key expansion
+%include "include/os.asm"
+%define NO_AESNI_RENAME
+%include "include/aesni_emu.inc"
+%include "include/clear_regs.asm"
+
+%macro key_expansion_128_sse 0
+ ;; Assumes the xmm3 includes all zeros at this point.
+ pshufd xmm2, xmm2, 11111111b
+ shufps xmm3, xmm1, 00010000b
+ pxor xmm1, xmm3
+ shufps xmm3, xmm1, 10001100b
+ pxor xmm1, xmm3
+ pxor xmm1, xmm2
+%endmacro
+
+%macro key_expansion_128_avx 0
+ ;; Assumes the xmm3 includes all zeros at this point.
+ vpshufd xmm2, xmm2, 11111111b
+ vshufps xmm3, xmm3, xmm1, 00010000b
+ vpxor xmm1, xmm1, xmm3
+ vshufps xmm3, xmm3, xmm1, 10001100b
+ vpxor xmm1, xmm1, xmm3
+ vpxor xmm1, xmm1, xmm2
+%endmacro
+
+%ifdef LINUX
+%define KEY rdi
+%define EXP_ENC_KEYS rsi
+%define EXP_DEC_KEYS rdx
+%else
+%define KEY rcx
+%define EXP_ENC_KEYS rdx
+%define EXP_DEC_KEYS r8
+%endif
+
+section .text
+
+; void aes_keyexp_128(UINT128 *key,
+; UINT128 *enc_exp_keys,
+; UINT128 *dec_exp_keys);
+;
+; arg 1: rcx: pointer to key
+; arg 2: rdx: pointer to expanded key array for encrypt
+; arg 3: r8: pointer to expanded key array for decrypt
+;
+MKGLOBAL(aes_keyexp_128_sse,function,)
+aes_keyexp_128_sse:
+
+%ifdef SAFE_PARAM
+ cmp KEY, 0
+ jz aes_keyexp_128_sse_return
+ cmp EXP_ENC_KEYS, 0
+ jz aes_keyexp_128_sse_return
+ cmp EXP_DEC_KEYS, 0
+ jz aes_keyexp_128_sse_return
+%endif
+
+ movdqu xmm1, [KEY] ; loading the AES key
+ movdqa [EXP_ENC_KEYS + 16*0], xmm1
+ movdqa [EXP_DEC_KEYS + 16*10], xmm1 ; Storing key in memory
+ pxor xmm3, xmm3
+
+ aeskeygenassist xmm2, xmm1, 0x1 ; Generating round key 1
+ key_expansion_128_sse
+ movdqa [EXP_ENC_KEYS + 16*1], xmm1
+ aesimc xmm4, xmm1
+ movdqa [EXP_DEC_KEYS + 16*9], xmm4
+
+ aeskeygenassist xmm2, xmm1, 0x2 ; Generating round key 2
+ key_expansion_128_sse
+ movdqa [EXP_ENC_KEYS + 16*2], xmm1
+ aesimc xmm5, xmm1
+ movdqa [EXP_DEC_KEYS + 16*8], xmm5
+
+ aeskeygenassist xmm2, xmm1, 0x4 ; Generating round key 3
+ key_expansion_128_sse
+ movdqa [EXP_ENC_KEYS + 16*3], xmm1
+ aesimc xmm4, xmm1
+ movdqa [EXP_DEC_KEYS + 16*7], xmm4
+
+ aeskeygenassist xmm2, xmm1, 0x8 ; Generating round key 4
+ key_expansion_128_sse
+ movdqa [EXP_ENC_KEYS + 16*4], xmm1
+ aesimc xmm5, xmm1
+ movdqa [EXP_DEC_KEYS + 16*6], xmm5
+
+ aeskeygenassist xmm2, xmm1, 0x10 ; Generating round key 5
+ key_expansion_128_sse
+ movdqa [EXP_ENC_KEYS + 16*5], xmm1
+ aesimc xmm4, xmm1
+ movdqa [EXP_DEC_KEYS + 16*5], xmm4
+
+ aeskeygenassist xmm2, xmm1, 0x20 ; Generating round key 6
+ key_expansion_128_sse
+ movdqa [EXP_ENC_KEYS + 16*6], xmm1
+ aesimc xmm5, xmm1
+ movdqa [EXP_DEC_KEYS + 16*4], xmm5
+
+ aeskeygenassist xmm2, xmm1, 0x40 ; Generating round key 7
+ key_expansion_128_sse
+ movdqa [EXP_ENC_KEYS + 16*7], xmm1
+ aesimc xmm4, xmm1
+ movdqa [EXP_DEC_KEYS + 16*3], xmm4
+
+ aeskeygenassist xmm2, xmm1, 0x80 ; Generating round key 8
+ key_expansion_128_sse
+ movdqa [EXP_ENC_KEYS + 16*8], xmm1
+ aesimc xmm5, xmm1
+ movdqa [EXP_DEC_KEYS + 16*2], xmm5
+
+ aeskeygenassist xmm2, xmm1, 0x1b ; Generating round key 9
+ key_expansion_128_sse
+ movdqa [EXP_ENC_KEYS + 16*9], xmm1
+ aesimc xmm4, xmm1
+ movdqa [EXP_DEC_KEYS + 16*1], xmm4
+
+ aeskeygenassist xmm2, xmm1, 0x36 ; Generating round key 10
+ key_expansion_128_sse
+ movdqa [EXP_ENC_KEYS + 16*10], xmm1
+ movdqa [EXP_DEC_KEYS + 16*0], xmm1
+
+aes_keyexp_128_sse_return:
+
+%ifdef SAFE_DATA
+ clear_scratch_gps_asm
+ clear_scratch_xmms_sse_asm
+%endif
+ ret
+
+MKGLOBAL(aes_keyexp_128_sse_no_aesni,function,)
+aes_keyexp_128_sse_no_aesni:
+
+%ifdef SAFE_PARAM
+ cmp KEY, 0
+ jz aes_keyexp_128_sse_no_aesni_return
+ cmp EXP_ENC_KEYS, 0
+ jz aes_keyexp_128_sse_no_aesni_return
+ cmp EXP_DEC_KEYS, 0
+ jz aes_keyexp_128_sse_no_aesni_return
+%endif
+
+ movdqu xmm1, [KEY] ; loading the AES key
+ movdqa [EXP_ENC_KEYS + 16*0], xmm1
+ movdqa [EXP_DEC_KEYS + 16*10], xmm1 ; Storing key in memory
+ pxor xmm3, xmm3
+
+ EMULATE_AESKEYGENASSIST xmm2, xmm1, 0x1 ; Generating round key 1
+ key_expansion_128_sse
+ movdqa [EXP_ENC_KEYS + 16*1], xmm1
+ EMULATE_AESIMC xmm4, xmm1
+ movdqa [EXP_DEC_KEYS + 16*9], xmm4
+
+ EMULATE_AESKEYGENASSIST xmm2, xmm1, 0x2 ; Generating round key 2
+ key_expansion_128_sse
+ movdqa [EXP_ENC_KEYS + 16*2], xmm1
+ EMULATE_AESIMC xmm5, xmm1
+ movdqa [EXP_DEC_KEYS + 16*8], xmm5
+
+ EMULATE_AESKEYGENASSIST xmm2, xmm1, 0x4 ; Generating round key 3
+ key_expansion_128_sse
+ movdqa [EXP_ENC_KEYS + 16*3], xmm1
+ EMULATE_AESIMC xmm4, xmm1
+ movdqa [EXP_DEC_KEYS + 16*7], xmm4
+
+ EMULATE_AESKEYGENASSIST xmm2, xmm1, 0x8 ; Generating round key 4
+ key_expansion_128_sse
+ movdqa [EXP_ENC_KEYS + 16*4], xmm1
+ EMULATE_AESIMC xmm5, xmm1
+ movdqa [EXP_DEC_KEYS + 16*6], xmm5
+
+ EMULATE_AESKEYGENASSIST xmm2, xmm1, 0x10 ; Generating round key 5
+ key_expansion_128_sse
+ movdqa [EXP_ENC_KEYS + 16*5], xmm1
+ EMULATE_AESIMC xmm4, xmm1
+ movdqa [EXP_DEC_KEYS + 16*5], xmm4
+
+ EMULATE_AESKEYGENASSIST xmm2, xmm1, 0x20 ; Generating round key 6
+ key_expansion_128_sse
+ movdqa [EXP_ENC_KEYS + 16*6], xmm1
+ EMULATE_AESIMC xmm5, xmm1
+ movdqa [EXP_DEC_KEYS + 16*4], xmm5
+
+ EMULATE_AESKEYGENASSIST xmm2, xmm1, 0x40 ; Generating round key 7
+ key_expansion_128_sse
+ movdqa [EXP_ENC_KEYS + 16*7], xmm1
+ EMULATE_AESIMC xmm4, xmm1
+ movdqa [EXP_DEC_KEYS + 16*3], xmm4
+
+ EMULATE_AESKEYGENASSIST xmm2, xmm1, 0x80 ; Generating round key 8
+ key_expansion_128_sse
+ movdqa [EXP_ENC_KEYS + 16*8], xmm1
+ EMULATE_AESIMC xmm5, xmm1
+ movdqa [EXP_DEC_KEYS + 16*2], xmm5
+
+ EMULATE_AESKEYGENASSIST xmm2, xmm1, 0x1b ; Generating round key 9
+ key_expansion_128_sse
+ movdqa [EXP_ENC_KEYS + 16*9], xmm1
+ EMULATE_AESIMC xmm4, xmm1
+ movdqa [EXP_DEC_KEYS + 16*1], xmm4
+
+ EMULATE_AESKEYGENASSIST xmm2, xmm1, 0x36 ; Generating round key 10
+ key_expansion_128_sse
+ movdqa [EXP_ENC_KEYS + 16*10], xmm1
+ movdqa [EXP_DEC_KEYS + 16*0], xmm1
+
+aes_keyexp_128_sse_no_aesni_return:
+
+%ifdef SAFE_DATA
+ clear_scratch_gps_asm
+ clear_scratch_xmms_sse_asm
+%endif
+ ret
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+MKGLOBAL(aes_keyexp_128_avx,function,)
+MKGLOBAL(aes_keyexp_128_avx2,function,)
+MKGLOBAL(aes_keyexp_128_avx512,function,)
+aes_keyexp_128_avx:
+aes_keyexp_128_avx2:
+aes_keyexp_128_avx512:
+
+%ifdef SAFE_PARAM
+ cmp KEY, 0
+ jz aes_keyexp_128_avx_return
+ cmp EXP_ENC_KEYS, 0
+ jz aes_keyexp_128_avx_return
+ cmp EXP_DEC_KEYS, 0
+ jz aes_keyexp_128_avx_return
+%endif
+
+ vmovdqu xmm1, [KEY] ; loading the AES key
+ vmovdqa [EXP_ENC_KEYS + 16*0], xmm1
+ vmovdqa [EXP_DEC_KEYS + 16*10], xmm1 ; Storing key in memory
+ vpxor xmm3, xmm3, xmm3
+
+ vaeskeygenassist xmm2, xmm1, 0x1 ; Generating round key 1
+ key_expansion_128_avx
+ vmovdqa [EXP_ENC_KEYS + 16*1], xmm1
+ vaesimc xmm4, xmm1
+ vmovdqa [EXP_DEC_KEYS + 16*9], xmm4
+
+ vaeskeygenassist xmm2, xmm1, 0x2 ; Generating round key 2
+ key_expansion_128_avx
+ vmovdqa [EXP_ENC_KEYS + 16*2], xmm1
+ vaesimc xmm5, xmm1
+ vmovdqa [EXP_DEC_KEYS + 16*8], xmm5
+
+ vaeskeygenassist xmm2, xmm1, 0x4 ; Generating round key 3
+ key_expansion_128_avx
+ vmovdqa [EXP_ENC_KEYS + 16*3], xmm1
+ vaesimc xmm4, xmm1
+ vmovdqa [EXP_DEC_KEYS + 16*7], xmm4
+
+ vaeskeygenassist xmm2, xmm1, 0x8 ; Generating round key 4
+ key_expansion_128_avx
+ vmovdqa [EXP_ENC_KEYS + 16*4], xmm1
+ vaesimc xmm5, xmm1
+ vmovdqa [EXP_DEC_KEYS + 16*6], xmm5
+
+ vaeskeygenassist xmm2, xmm1, 0x10 ; Generating round key 5
+ key_expansion_128_avx
+ vmovdqa [EXP_ENC_KEYS + 16*5], xmm1
+ vaesimc xmm4, xmm1
+ vmovdqa [EXP_DEC_KEYS + 16*5], xmm4
+
+ vaeskeygenassist xmm2, xmm1, 0x20 ; Generating round key 6
+ key_expansion_128_avx
+ vmovdqa [EXP_ENC_KEYS + 16*6], xmm1
+ vaesimc xmm5, xmm1
+ vmovdqa [EXP_DEC_KEYS + 16*4], xmm5
+
+ vaeskeygenassist xmm2, xmm1, 0x40 ; Generating round key 7
+ key_expansion_128_avx
+ vmovdqa [EXP_ENC_KEYS + 16*7], xmm1
+ vaesimc xmm4, xmm1
+ vmovdqa [EXP_DEC_KEYS + 16*3], xmm4
+
+ vaeskeygenassist xmm2, xmm1, 0x80 ; Generating round key 8
+ key_expansion_128_avx
+ vmovdqa [EXP_ENC_KEYS + 16*8], xmm1
+ vaesimc xmm5, xmm1
+ vmovdqa [EXP_DEC_KEYS + 16*2], xmm5
+
+ vaeskeygenassist xmm2, xmm1, 0x1b ; Generating round key 9
+ key_expansion_128_avx
+ vmovdqa [EXP_ENC_KEYS + 16*9], xmm1
+ vaesimc xmm4, xmm1
+ vmovdqa [EXP_DEC_KEYS + 16*1], xmm4
+
+ vaeskeygenassist xmm2, xmm1, 0x36 ; Generating round key 10
+ key_expansion_128_avx
+ vmovdqa [EXP_ENC_KEYS + 16*10], xmm1
+ vmovdqa [EXP_DEC_KEYS + 16*0], xmm1
+
+aes_keyexp_128_avx_return:
+
+%ifdef SAFE_DATA
+ clear_scratch_gps_asm
+ clear_scratch_xmms_avx_asm
+%endif
+ ret
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+; void aes_keyexp_128_enc_sse(UINT128 *key,
+; UINT128 *enc_exp_keys);
+;
+; arg 1: rcx: pointer to key
+; arg 2: rdx: pointer to expanded key array for encrypt
+;
+MKGLOBAL(aes_keyexp_128_enc_sse,function,)
+aes_keyexp_128_enc_sse:
+
+%ifdef SAFE_PARAM
+ cmp KEY, 0
+ jz aes_keyexp_128_enc_sse_return
+ cmp EXP_ENC_KEYS, 0
+ jz aes_keyexp_128_enc_sse_return
+%endif
+
+ movdqu xmm1, [KEY] ; loading the AES key
+ movdqa [EXP_ENC_KEYS + 16*0], xmm1
+ pxor xmm3, xmm3
+
+ aeskeygenassist xmm2, xmm1, 0x1 ; Generating round key 1
+ key_expansion_128_sse
+ movdqa [EXP_ENC_KEYS + 16*1], xmm1
+
+ aeskeygenassist xmm2, xmm1, 0x2 ; Generating round key 2
+ key_expansion_128_sse
+ movdqa [EXP_ENC_KEYS + 16*2], xmm1
+
+ aeskeygenassist xmm2, xmm1, 0x4 ; Generating round key 3
+ key_expansion_128_sse
+ movdqa [EXP_ENC_KEYS + 16*3], xmm1
+
+ aeskeygenassist xmm2, xmm1, 0x8 ; Generating round key 4
+ key_expansion_128_sse
+ movdqa [EXP_ENC_KEYS + 16*4], xmm1
+
+ aeskeygenassist xmm2, xmm1, 0x10 ; Generating round key 5
+ key_expansion_128_sse
+ movdqa [EXP_ENC_KEYS + 16*5], xmm1
+
+ aeskeygenassist xmm2, xmm1, 0x20 ; Generating round key 6
+ key_expansion_128_sse
+ movdqa [EXP_ENC_KEYS + 16*6], xmm1
+
+ aeskeygenassist xmm2, xmm1, 0x40 ; Generating round key 7
+ key_expansion_128_sse
+ movdqa [EXP_ENC_KEYS + 16*7], xmm1
+
+ aeskeygenassist xmm2, xmm1, 0x80 ; Generating round key 8
+ key_expansion_128_sse
+ movdqa [EXP_ENC_KEYS + 16*8], xmm1
+
+ aeskeygenassist xmm2, xmm1, 0x1b ; Generating round key 9
+ key_expansion_128_sse
+ movdqa [EXP_ENC_KEYS + 16*9], xmm1
+
+ aeskeygenassist xmm2, xmm1, 0x36 ; Generating round key 10
+ key_expansion_128_sse
+ movdqa [EXP_ENC_KEYS + 16*10], xmm1
+
+aes_keyexp_128_enc_sse_return:
+ ret
+
+MKGLOBAL(aes_keyexp_128_enc_sse_no_aesni,function,)
+aes_keyexp_128_enc_sse_no_aesni:
+
+%ifdef SAFE_PARAM
+ cmp KEY, 0
+ jz aes_keyexp_128_enc_sse_no_aesni_return
+ cmp EXP_ENC_KEYS, 0
+ jz aes_keyexp_128_enc_sse_no_aesni_return
+%endif
+
+ movdqu xmm1, [KEY] ; loading the AES key
+ movdqa [EXP_ENC_KEYS + 16*0], xmm1
+ pxor xmm3, xmm3
+
+ EMULATE_AESKEYGENASSIST xmm2, xmm1, 0x1 ; Generating round key 1
+ key_expansion_128_sse
+ movdqa [EXP_ENC_KEYS + 16*1], xmm1
+
+ EMULATE_AESKEYGENASSIST xmm2, xmm1, 0x2 ; Generating round key 2
+ key_expansion_128_sse
+ movdqa [EXP_ENC_KEYS + 16*2], xmm1
+
+ EMULATE_AESKEYGENASSIST xmm2, xmm1, 0x4 ; Generating round key 3
+ key_expansion_128_sse
+ movdqa [EXP_ENC_KEYS + 16*3], xmm1
+
+ EMULATE_AESKEYGENASSIST xmm2, xmm1, 0x8 ; Generating round key 4
+ key_expansion_128_sse
+ movdqa [EXP_ENC_KEYS + 16*4], xmm1
+
+ EMULATE_AESKEYGENASSIST xmm2, xmm1, 0x10 ; Generating round key 5
+ key_expansion_128_sse
+ movdqa [EXP_ENC_KEYS + 16*5], xmm1
+
+ EMULATE_AESKEYGENASSIST xmm2, xmm1, 0x20 ; Generating round key 6
+ key_expansion_128_sse
+ movdqa [EXP_ENC_KEYS + 16*6], xmm1
+
+ EMULATE_AESKEYGENASSIST xmm2, xmm1, 0x40 ; Generating round key 7
+ key_expansion_128_sse
+ movdqa [EXP_ENC_KEYS + 16*7], xmm1
+
+ EMULATE_AESKEYGENASSIST xmm2, xmm1, 0x80 ; Generating round key 8
+ key_expansion_128_sse
+ movdqa [EXP_ENC_KEYS + 16*8], xmm1
+
+ EMULATE_AESKEYGENASSIST xmm2, xmm1, 0x1b ; Generating round key 9
+ key_expansion_128_sse
+ movdqa [EXP_ENC_KEYS + 16*9], xmm1
+
+ EMULATE_AESKEYGENASSIST xmm2, xmm1, 0x36 ; Generating round key 10
+ key_expansion_128_sse
+ movdqa [EXP_ENC_KEYS + 16*10], xmm1
+
+aes_keyexp_128_enc_sse_no_aesni_return:
+ ret
+
+MKGLOBAL(aes_keyexp_128_enc_avx,function,)
+MKGLOBAL(aes_keyexp_128_enc_avx2,function,)
+MKGLOBAL(aes_keyexp_128_enc_avx512,function,)
+aes_keyexp_128_enc_avx:
+aes_keyexp_128_enc_avx2:
+aes_keyexp_128_enc_avx512:
+
+%ifdef SAFE_PARAM
+ cmp KEY, 0
+ jz aes_keyexp_128_enc_avx_return
+ cmp EXP_ENC_KEYS, 0
+ jz aes_keyexp_128_enc_avx_return
+%endif
+
+ vmovdqu xmm1, [KEY] ; loading the AES key
+ vmovdqa [EXP_ENC_KEYS + 16*0], xmm1
+ vpxor xmm3, xmm3, xmm3
+
+ vaeskeygenassist xmm2, xmm1, 0x1 ; Generating round key 1
+ key_expansion_128_avx
+ vmovdqa [EXP_ENC_KEYS + 16*1], xmm1
+
+ vaeskeygenassist xmm2, xmm1, 0x2 ; Generating round key 2
+ key_expansion_128_avx
+ vmovdqa [EXP_ENC_KEYS + 16*2], xmm1
+
+ vaeskeygenassist xmm2, xmm1, 0x4 ; Generating round key 3
+ key_expansion_128_avx
+ vmovdqa [EXP_ENC_KEYS + 16*3], xmm1
+
+ vaeskeygenassist xmm2, xmm1, 0x8 ; Generating round key 4
+ key_expansion_128_avx
+ vmovdqa [EXP_ENC_KEYS + 16*4], xmm1
+
+ vaeskeygenassist xmm2, xmm1, 0x10 ; Generating round key 5
+ key_expansion_128_avx
+ vmovdqa [EXP_ENC_KEYS + 16*5], xmm1
+
+ vaeskeygenassist xmm2, xmm1, 0x20 ; Generating round key 6
+ key_expansion_128_avx
+ vmovdqa [EXP_ENC_KEYS + 16*6], xmm1
+
+ vaeskeygenassist xmm2, xmm1, 0x40 ; Generating round key 7
+ key_expansion_128_avx
+ vmovdqa [EXP_ENC_KEYS + 16*7], xmm1
+
+ vaeskeygenassist xmm2, xmm1, 0x80 ; Generating round key 8
+ key_expansion_128_avx
+ vmovdqa [EXP_ENC_KEYS + 16*8], xmm1
+
+ vaeskeygenassist xmm2, xmm1, 0x1b ; Generating round key 9
+ key_expansion_128_avx
+ vmovdqa [EXP_ENC_KEYS + 16*9], xmm1
+
+ vaeskeygenassist xmm2, xmm1, 0x36 ; Generating round key 10
+ key_expansion_128_avx
+ vmovdqa [EXP_ENC_KEYS + 16*10], xmm1
+
+aes_keyexp_128_enc_avx_return:
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/aes_keyexp_192.asm b/src/spdk/intel-ipsec-mb/aes_keyexp_192.asm
new file mode 100644
index 000000000..0f18d50e7
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/aes_keyexp_192.asm
@@ -0,0 +1,622 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%include "include/os.asm"
+%define NO_AESNI_RENAME
+%include "include/aesni_emu.inc"
+%include "include/clear_regs.asm"
+
+%ifdef LINUX
+%define KEY rdi
+%define EXP_ENC_KEYS rsi
+%define EXP_DEC_KEYS rdx
+%else
+%define KEY rcx
+%define EXP_ENC_KEYS rdx
+%define EXP_DEC_KEYS r8
+%endif
+
+
+
+
+%macro key_expansion_1_192_sse 1
+ ;; Assumes the xmm3 includes all zeros at this point.
+ pshufd xmm2, xmm2, 11111111b
+ shufps xmm3, xmm1, 00010000b
+ pxor xmm1, xmm3
+ shufps xmm3, xmm1, 10001100b
+ pxor xmm1, xmm3
+ pxor xmm1, xmm2
+ movdqu [EXP_ENC_KEYS + %1], xmm1
+%endmacro
+
+; Calculate w10 and w11 using calculated w9 and known w4-w5
+%macro key_expansion_2_192_sse 1
+ movdqa xmm5, xmm4
+ pslldq xmm5, 4
+ shufps xmm6, xmm1, 11110000b
+ pxor xmm6, xmm5
+ pxor xmm4, xmm6
+ pshufd xmm7, xmm4, 00001110b
+ movdqu [EXP_ENC_KEYS + %1], xmm7
+%endmacro
+
+%macro key_dec_192_sse 1
+ movdqa xmm0, [EXP_ENC_KEYS + 16 * %1]
+ aesimc xmm1, xmm0
+ movdqa [EXP_DEC_KEYS + 16 * (12 - %1)], xmm1
+%endmacro
+
+%macro key_dec_192_sse_no_aesni 1
+ movdqa xmm0, [EXP_ENC_KEYS + 16 * %1]
+ EMULATE_AESIMC xmm1, xmm0
+ movdqa [EXP_DEC_KEYS + 16 * (12 - %1)], xmm1
+%endmacro
+
+%macro key_expansion_1_192_avx 1
+ ;; Assumes the xmm3 includes all zeros at this point.
+ vpshufd xmm2, xmm2, 11111111b
+ vshufps xmm3, xmm3, xmm1, 00010000b
+ vpxor xmm1, xmm1, xmm3
+ vshufps xmm3, xmm3, xmm1, 10001100b
+ vpxor xmm1, xmm1, xmm3
+ vpxor xmm1, xmm1, xmm2
+ vmovdqu [EXP_ENC_KEYS + %1], xmm1
+%endmacro
+
+; Calculate w10 and w11 using calculated w9 and known w4-w5
+%macro key_expansion_2_192_avx 1
+ vmovdqa xmm5, xmm4
+ vpslldq xmm5, xmm5, 4
+ vshufps xmm6, xmm6, xmm1, 11110000b
+ vpxor xmm6, xmm6, xmm5
+ vpxor xmm4, xmm4, xmm6
+ vpshufd xmm7, xmm4, 00001110b
+ vmovdqu [EXP_ENC_KEYS + %1], xmm7
+%endmacro
+
+%macro key_dec_192_avx 1
+ vmovdqa xmm0, [EXP_ENC_KEYS + 16 * %1]
+ vaesimc xmm1, xmm0
+ vmovdqa [EXP_DEC_KEYS + 16 * (12 - %1)], xmm1
+%endmacro
+
+section .text
+
+; void aes_keyexp_192(UINT128 *key,
+; UINT128 *enc_exp_keys,
+; UINT128 *dec_exp_keys);
+;
+; arg 1: rcx: pointer to key
+; arg 2: rdx: pointer to expanded key array for encrypt
+; arg 3: r8: pointer to expanded key array for decrypt
+;
+MKGLOBAL(aes_keyexp_192_sse,function,)
+aes_keyexp_192_sse:
+
+%ifdef SAFE_PARAM
+ cmp KEY, 0
+ jz aes_keyexp_192_sse_return
+ cmp EXP_ENC_KEYS, 0
+ jz aes_keyexp_192_sse_return
+ cmp EXP_DEC_KEYS, 0
+ jz aes_keyexp_192_sse_return
+%endif
+
+%ifndef LINUX
+ sub rsp, 16*2 + 8
+ movdqa [rsp + 0*16], xmm6
+ movdqa [rsp + 1*16], xmm7
+%endif
+
+ movq xmm7, [KEY + 16] ; loading the AES key, 64 bits
+ movq [EXP_ENC_KEYS + 16], xmm7 ; Store the upper 64 bits of the key in the expanded key area
+ pshufd xmm4, xmm7, 01001111b
+ movdqu xmm1, [KEY] ; loading the AES key, 128 bits
+ movdqu [EXP_ENC_KEYS], xmm1 ; Store the lower 128 bits of the key in the expanded key area
+ movdqa [EXP_DEC_KEYS + 16*0], xmm1
+ movdqa [EXP_DEC_KEYS + 16*12], xmm1
+
+ pxor xmm3, xmm3 ; Set xmm3 to be all zeros. Required for the key_expansion
+ pxor xmm6, xmm6 ; Set xmm6 to be all zeros. Required for the key_expansion
+
+ aeskeygenassist xmm2, xmm4, 0x1 ; Complete round key 1 and generate round key 2
+ key_expansion_1_192_sse 24
+ key_expansion_2_192_sse 40
+
+ aeskeygenassist xmm2, xmm4, 0x2 ; Generate round key 3 and part of round key 4
+ key_expansion_1_192_sse 48
+ key_expansion_2_192_sse 64
+
+ aeskeygenassist xmm2, xmm4, 0x4 ; Complete round key 4 and generate round key 5
+ key_expansion_1_192_sse 72
+ key_expansion_2_192_sse 88
+
+ aeskeygenassist xmm2, xmm4, 0x8 ; Generate round key 6 and part of round key 7
+ key_expansion_1_192_sse 96
+ key_expansion_2_192_sse 112
+
+ aeskeygenassist xmm2, xmm4, 0x10 ; Complete round key 7 and generate round key 8
+ key_expansion_1_192_sse 120
+ key_expansion_2_192_sse 136
+
+ aeskeygenassist xmm2, xmm4, 0x20 ; Generate round key 9 and part of round key 10
+ key_expansion_1_192_sse 144
+ key_expansion_2_192_sse 160
+
+ aeskeygenassist xmm2, xmm4, 0x40 ; Complete round key 10 and generate round key 11
+ key_expansion_1_192_sse 168
+ key_expansion_2_192_sse 184
+
+ aeskeygenassist xmm2, xmm4, 0x80 ; Generate round key 12
+ key_expansion_1_192_sse 192
+
+;;; we have already saved the 12th key, which is pure input on the
+;;; ENC key path
+ movdqa xmm0, [EXP_ENC_KEYS + 16 * 12]
+ movdqa [EXP_DEC_KEYS + 16*0], xmm0
+;;; generate remaining decrypt keys
+ key_dec_192_sse 1
+ key_dec_192_sse 2
+ key_dec_192_sse 3
+ key_dec_192_sse 4
+ key_dec_192_sse 5
+ key_dec_192_sse 6
+ key_dec_192_sse 7
+ key_dec_192_sse 8
+ key_dec_192_sse 9
+ key_dec_192_sse 10
+ key_dec_192_sse 11
+
+%ifdef SAFE_DATA
+ clear_scratch_gps_asm
+ clear_scratch_xmms_sse_asm
+%endif
+
+%ifndef LINUX
+ movdqa xmm6, [rsp + 0*16]
+ movdqa xmm7, [rsp + 1*16]
+ add rsp, 16*2 + 8
+%endif
+
+aes_keyexp_192_sse_return:
+ ret
+
+MKGLOBAL(aes_keyexp_192_sse_no_aesni,function,)
+aes_keyexp_192_sse_no_aesni:
+
+%ifdef SAFE_PARAM
+ cmp KEY, 0
+ jz aes_keyexp_192_sse_no_aesni_return
+ cmp EXP_ENC_KEYS, 0
+ jz aes_keyexp_192_sse_no_aesni_return
+ cmp EXP_DEC_KEYS, 0
+ jz aes_keyexp_192_sse_no_aesni_return
+%endif
+
+%ifndef LINUX
+ sub rsp, 16*2 + 8
+ movdqa [rsp + 0*16], xmm6
+ movdqa [rsp + 1*16], xmm7
+%endif
+
+ movq xmm7, [KEY + 16] ; loading the AES key, 64 bits
+ movq [EXP_ENC_KEYS + 16], xmm7 ; Store the upper 64 bits of the key in the expanded key area
+ pshufd xmm4, xmm7, 01001111b
+ movdqu xmm1, [KEY] ; loading the AES key, 128 bits
+ movdqu [EXP_ENC_KEYS], xmm1 ; Store the lower 128 bits of the key in the expanded key area
+ movdqa [EXP_DEC_KEYS + 16*0], xmm1
+ movdqa [EXP_DEC_KEYS + 16*12], xmm1
+
+ pxor xmm3, xmm3 ; Set xmm3 to be all zeros. Required for the key_expansion
+ pxor xmm6, xmm6 ; Set xmm6 to be all zeros. Required for the key_expansion
+
+ EMULATE_AESKEYGENASSIST xmm2, xmm4, 0x1 ; Complete round key 1 and generate round key 2
+ key_expansion_1_192_sse 24
+ key_expansion_2_192_sse 40
+
+ EMULATE_AESKEYGENASSIST xmm2, xmm4, 0x2 ; Generate round key 3 and part of round key 4
+ key_expansion_1_192_sse 48
+ key_expansion_2_192_sse 64
+
+ EMULATE_AESKEYGENASSIST xmm2, xmm4, 0x4 ; Complete round key 4 and generate round key 5
+ key_expansion_1_192_sse 72
+ key_expansion_2_192_sse 88
+
+ EMULATE_AESKEYGENASSIST xmm2, xmm4, 0x8 ; Generate round key 6 and part of round key 7
+ key_expansion_1_192_sse 96
+ key_expansion_2_192_sse 112
+
+ EMULATE_AESKEYGENASSIST xmm2, xmm4, 0x10 ; Complete round key 7 and generate round key 8
+ key_expansion_1_192_sse 120
+ key_expansion_2_192_sse 136
+
+ EMULATE_AESKEYGENASSIST xmm2, xmm4, 0x20 ; Generate round key 9 and part of round key 10
+ key_expansion_1_192_sse 144
+ key_expansion_2_192_sse 160
+
+ EMULATE_AESKEYGENASSIST xmm2, xmm4, 0x40 ; Complete round key 10 and generate round key 11
+ key_expansion_1_192_sse 168
+ key_expansion_2_192_sse 184
+
+ EMULATE_AESKEYGENASSIST xmm2, xmm4, 0x80 ; Generate round key 12
+ key_expansion_1_192_sse 192
+
+;;; we have already saved the 12th key, which is pure input on the
+;;; ENC key path
+ movdqa xmm0, [EXP_ENC_KEYS + 16 * 12]
+ movdqa [EXP_DEC_KEYS + 16*0], xmm0
+;;; generate remaining decrypt keys
+ key_dec_192_sse_no_aesni 1
+ key_dec_192_sse_no_aesni 2
+ key_dec_192_sse_no_aesni 3
+ key_dec_192_sse_no_aesni 4
+ key_dec_192_sse_no_aesni 5
+ key_dec_192_sse_no_aesni 6
+ key_dec_192_sse_no_aesni 7
+ key_dec_192_sse_no_aesni 8
+ key_dec_192_sse_no_aesni 9
+ key_dec_192_sse_no_aesni 10
+ key_dec_192_sse_no_aesni 11
+
+%ifdef SAFE_DATA
+ clear_scratch_gps_asm
+ clear_scratch_xmms_sse_asm
+%endif
+
+%ifndef LINUX
+ movdqa xmm6, [rsp + 0*16]
+ movdqa xmm7, [rsp + 1*16]
+ add rsp, 16*2 + 8
+%endif
+
+aes_keyexp_192_sse_no_aesni_return:
+ ret
+
+MKGLOBAL(aes_keyexp_192_avx,function,)
+MKGLOBAL(aes_keyexp_192_avx2,function,)
+MKGLOBAL(aes_keyexp_192_avx512,function,)
+aes_keyexp_192_avx:
+aes_keyexp_192_avx2:
+aes_keyexp_192_avx512:
+
+%ifdef SAFE_PARAM
+ cmp KEY, 0
+ jz aes_keyexp_192_avx_return
+ cmp EXP_ENC_KEYS, 0
+ jz aes_keyexp_192_avx_return
+ cmp EXP_DEC_KEYS, 0
+ jz aes_keyexp_192_avx_return
+%endif
+
+%ifndef LINUX
+ sub rsp, 16*2 + 8
+ vmovdqa [rsp + 0*16], xmm6
+ vmovdqa [rsp + 1*16], xmm7
+%endif
+
+ vmovq xmm7, [KEY + 16] ; loading the AES key, 64 bits
+ vmovq [EXP_ENC_KEYS + 16], xmm7 ; Store the upper 64 bits of the key in the expanded key area
+ vpshufd xmm4, xmm7, 01001111b
+ vmovdqu xmm1, [KEY] ; loading the AES key, 128 bits
+ vmovdqu [EXP_ENC_KEYS], xmm1 ; Store the lower 128 bits of the key in the expanded key area
+ vmovdqa [EXP_DEC_KEYS + 16*0], xmm1
+ vmovdqa [EXP_DEC_KEYS + 16*12], xmm1
+
+ vpxor xmm3, xmm3, xmm3
+ vpxor xmm6, xmm6, xmm6
+
+ vaeskeygenassist xmm2, xmm4, 0x1 ; Complete round key 1 and generate round key 2
+ key_expansion_1_192_avx 24
+ key_expansion_2_192_avx 40
+
+ vaeskeygenassist xmm2, xmm4, 0x2 ; Generate round key 3 and part of round key 4
+ key_expansion_1_192_avx 48
+ key_expansion_2_192_avx 64
+
+ vaeskeygenassist xmm2, xmm4, 0x4 ; Complete round key 4 and generate round key 5
+ key_expansion_1_192_avx 72
+ key_expansion_2_192_avx 88
+
+ vaeskeygenassist xmm2, xmm4, 0x8 ; Generate round key 6 and part of round key 7
+ key_expansion_1_192_avx 96
+ key_expansion_2_192_avx 112
+
+ vaeskeygenassist xmm2, xmm4, 0x10 ; Complete round key 7 and generate round key 8
+ key_expansion_1_192_avx 120
+ key_expansion_2_192_avx 136
+
+ vaeskeygenassist xmm2, xmm4, 0x20 ; Generate round key 9 and part of round key 10
+ key_expansion_1_192_avx 144
+ key_expansion_2_192_avx 160
+
+ vaeskeygenassist xmm2, xmm4, 0x40 ; Complete round key 10 and generate round key 11
+ key_expansion_1_192_avx 168
+ key_expansion_2_192_avx 184
+
+ vaeskeygenassist xmm2, xmm4, 0x80 ; Generate round key 12
+ key_expansion_1_192_avx 192
+
+;;; we have already saved the 12th key, which is pure input on the
+;;; ENC key path
+ vmovdqa xmm0, [EXP_ENC_KEYS + 16 * 12]
+ vmovdqa [EXP_DEC_KEYS + 16*0], xmm0
+;;; generate remaining decrypt keys
+ key_dec_192_avx 1
+ key_dec_192_avx 2
+ key_dec_192_avx 3
+ key_dec_192_avx 4
+ key_dec_192_avx 5
+ key_dec_192_avx 6
+ key_dec_192_avx 7
+ key_dec_192_avx 8
+ key_dec_192_avx 9
+ key_dec_192_avx 10
+ key_dec_192_avx 11
+
+%ifdef SAFE_DATA
+ clear_scratch_gps_asm
+ clear_scratch_xmms_avx_asm
+%endif
+
+%ifndef LINUX
+ vmovdqa xmm6, [rsp + 0*16]
+ vmovdqa xmm7, [rsp + 1*16]
+ add rsp, 16*2 + 8
+%endif
+
+aes_keyexp_192_avx_return:
+ ret
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+; void aes_keyexp_192_enc_sse(UINT128 *key,
+; UINT128 *enc_exp_keys);
+;
+; arg 1: rcx: pointer to key
+; arg 2: rdx: pointer to expanded key array for encrypt
+;
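+; Illustrative C call (sketch only; the buffer name is hypothetical and the
+; size assumes the 13 x 16-byte round keys produced for AES-192):
+;
+;     DECLARE_ALIGNED(uint8_t enc_keys[13*16], 16);
+;     aes_keyexp_192_enc_sse(key, enc_keys);  /* key -> 24-byte AES-192 key */
+;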
+MKGLOBAL(aes_keyexp_192_enc_sse,function,)
+aes_keyexp_192_enc_sse:
+
+%ifdef SAFE_PARAM
+ cmp KEY, 0
+ jz aes_keyexp_192_enc_sse_return
+ cmp EXP_ENC_KEYS, 0
+ jz aes_keyexp_192_enc_sse_return
+%endif
+
+%ifndef LINUX
+ sub rsp, 16*2 + 8
+ movdqa [rsp + 0*16], xmm6
+ movdqa [rsp + 1*16], xmm7
+%endif
+
+ movq xmm7, [KEY + 16] ; loading the AES key, 64 bits
+	movq	[EXP_ENC_KEYS + 16], xmm7	; Storing key in memory where all expanded keys are stored
+ pshufd xmm4, xmm7, 01001111b
+ movdqu xmm1, [KEY] ; loading the AES key, 128 bits
+	movdqu	[EXP_ENC_KEYS], xmm1		; Storing key in memory where all expanded keys are stored
+
+ pxor xmm3, xmm3 ; Set xmm3 to be all zeros. Required for the key_expansion.
+	pxor	xmm6, xmm6			; Set xmm6 to be all zeros. Required for the key_expansion.
+
+ aeskeygenassist xmm2, xmm4, 0x1 ; Complete round key 1 and generate round key 2
+ key_expansion_1_192_sse 24
+ key_expansion_2_192_sse 40
+
+ aeskeygenassist xmm2, xmm4, 0x2 ; Generate round key 3 and part of round key 4
+ key_expansion_1_192_sse 48
+ key_expansion_2_192_sse 64
+
+ aeskeygenassist xmm2, xmm4, 0x4 ; Complete round key 4 and generate round key 5
+ key_expansion_1_192_sse 72
+ key_expansion_2_192_sse 88
+
+ aeskeygenassist xmm2, xmm4, 0x8 ; Generate round key 6 and part of round key 7
+ key_expansion_1_192_sse 96
+ key_expansion_2_192_sse 112
+
+ aeskeygenassist xmm2, xmm4, 0x10 ; Complete round key 7 and generate round key 8
+ key_expansion_1_192_sse 120
+ key_expansion_2_192_sse 136
+
+ aeskeygenassist xmm2, xmm4, 0x20 ; Generate round key 9 and part of round key 10
+ key_expansion_1_192_sse 144
+ key_expansion_2_192_sse 160
+
+ aeskeygenassist xmm2, xmm4, 0x40 ; Complete round key 10 and generate round key 11
+ key_expansion_1_192_sse 168
+ key_expansion_2_192_sse 184
+
+ aeskeygenassist xmm2, xmm4, 0x80 ; Generate round key 12
+ key_expansion_1_192_sse 192
+
+%ifdef SAFE_DATA
+ clear_scratch_gps_asm
+ clear_scratch_xmms_sse_asm
+%endif
+
+%ifndef LINUX
+ movdqa xmm6, [rsp + 0*16]
+ movdqa xmm7, [rsp + 1*16]
+ add rsp, 16*2 + 8
+%endif
+
+aes_keyexp_192_enc_sse_return:
+ ret
+
+MKGLOBAL(aes_keyexp_192_enc_sse_no_aesni,function,)
+aes_keyexp_192_enc_sse_no_aesni:
+
+%ifdef SAFE_PARAM
+ cmp KEY, 0
+ jz aes_keyexp_192_enc_sse_no_aesni_return
+ cmp EXP_ENC_KEYS, 0
+ jz aes_keyexp_192_enc_sse_no_aesni_return
+%endif
+
+%ifndef LINUX
+ sub rsp, 16*2 + 8
+ movdqa [rsp + 0*16], xmm6
+ movdqa [rsp + 1*16], xmm7
+%endif
+
+ movq xmm7, [KEY + 16] ; loading the AES key, 64 bits
+	movq	[EXP_ENC_KEYS + 16], xmm7	; Storing key in memory where all expanded keys are stored
+ pshufd xmm4, xmm7, 01001111b
+ movdqu xmm1, [KEY] ; loading the AES key, 128 bits
+	movdqu	[EXP_ENC_KEYS], xmm1		; Storing key in memory where all expanded keys are stored
+
+ pxor xmm3, xmm3 ; Set xmm3 to be all zeros. Required for the key_expansion.
+	pxor	xmm6, xmm6			; Set xmm6 to be all zeros. Required for the key_expansion.
+
+ EMULATE_AESKEYGENASSIST xmm2, xmm4, 0x1 ; Complete round key 1 and generate round key 2
+ key_expansion_1_192_sse 24
+ key_expansion_2_192_sse 40
+
+ EMULATE_AESKEYGENASSIST xmm2, xmm4, 0x2 ; Generate round key 3 and part of round key 4
+ key_expansion_1_192_sse 48
+ key_expansion_2_192_sse 64
+
+ EMULATE_AESKEYGENASSIST xmm2, xmm4, 0x4 ; Complete round key 4 and generate round key 5
+ key_expansion_1_192_sse 72
+ key_expansion_2_192_sse 88
+
+ EMULATE_AESKEYGENASSIST xmm2, xmm4, 0x8 ; Generate round key 6 and part of round key 7
+ key_expansion_1_192_sse 96
+ key_expansion_2_192_sse 112
+
+ EMULATE_AESKEYGENASSIST xmm2, xmm4, 0x10 ; Complete round key 7 and generate round key 8
+ key_expansion_1_192_sse 120
+ key_expansion_2_192_sse 136
+
+ EMULATE_AESKEYGENASSIST xmm2, xmm4, 0x20 ; Generate round key 9 and part of round key 10
+ key_expansion_1_192_sse 144
+ key_expansion_2_192_sse 160
+
+ EMULATE_AESKEYGENASSIST xmm2, xmm4, 0x40 ; Complete round key 10 and generate round key 11
+ key_expansion_1_192_sse 168
+ key_expansion_2_192_sse 184
+
+ EMULATE_AESKEYGENASSIST xmm2, xmm4, 0x80 ; Generate round key 12
+ key_expansion_1_192_sse 192
+
+%ifdef SAFE_DATA
+ clear_scratch_gps_asm
+ clear_scratch_xmms_sse_asm
+%endif
+
+%ifndef LINUX
+ movdqa xmm6, [rsp + 0*16]
+ movdqa xmm7, [rsp + 1*16]
+ add rsp, 16*2 + 8
+%endif
+
+aes_keyexp_192_enc_sse_no_aesni_return:
+ ret
+
+MKGLOBAL(aes_keyexp_192_enc_avx,function,)
+MKGLOBAL(aes_keyexp_192_enc_avx2,function,)
+MKGLOBAL(aes_keyexp_192_enc_avx512,function,)
+aes_keyexp_192_enc_avx:
+aes_keyexp_192_enc_avx2:
+aes_keyexp_192_enc_avx512:
+
+%ifdef SAFE_PARAM
+ cmp KEY, 0
+ jz aes_keyexp_192_enc_avx_return
+ cmp EXP_ENC_KEYS, 0
+ jz aes_keyexp_192_enc_avx_return
+%endif
+
+%ifndef LINUX
+ sub rsp, 16*2 + 8
+ vmovdqa [rsp + 0*16], xmm6
+ vmovdqa [rsp + 1*16], xmm7
+%endif
+
+ vmovq xmm7, [KEY + 16] ; loading the AES key, 64 bits
+	vmovq	[EXP_ENC_KEYS + 16], xmm7	; Storing key in memory where all expanded keys are stored
+ vpshufd xmm4, xmm7, 01001111b
+ vmovdqu xmm1, [KEY] ; loading the AES key, 128 bits
+	vmovdqu	[EXP_ENC_KEYS], xmm1		; Storing key in memory where all expanded keys are stored
+
+ vpxor xmm3, xmm3, xmm3
+ vpxor xmm6, xmm6, xmm6
+
+ vaeskeygenassist xmm2, xmm4, 0x1 ; Complete round key 1 and generate round key 2
+ key_expansion_1_192_avx 24
+ key_expansion_2_192_avx 40
+
+ vaeskeygenassist xmm2, xmm4, 0x2 ; Generate round key 3 and part of round key 4
+ key_expansion_1_192_avx 48
+ key_expansion_2_192_avx 64
+
+ vaeskeygenassist xmm2, xmm4, 0x4 ; Complete round key 4 and generate round key 5
+ key_expansion_1_192_avx 72
+ key_expansion_2_192_avx 88
+
+ vaeskeygenassist xmm2, xmm4, 0x8 ; Generate round key 6 and part of round key 7
+ key_expansion_1_192_avx 96
+ key_expansion_2_192_avx 112
+
+ vaeskeygenassist xmm2, xmm4, 0x10 ; Complete round key 7 and generate round key 8
+ key_expansion_1_192_avx 120
+ key_expansion_2_192_avx 136
+
+ vaeskeygenassist xmm2, xmm4, 0x20 ; Generate round key 9 and part of round key 10
+ key_expansion_1_192_avx 144
+ key_expansion_2_192_avx 160
+
+ vaeskeygenassist xmm2, xmm4, 0x40 ; Complete round key 10 and generate round key 11
+ key_expansion_1_192_avx 168
+ key_expansion_2_192_avx 184
+
+ vaeskeygenassist xmm2, xmm4, 0x80 ; Generate round key 12
+ key_expansion_1_192_avx 192
+
+%ifdef SAFE_DATA
+ clear_scratch_gps_asm
+ clear_scratch_xmms_avx_asm
+%endif
+
+%ifndef LINUX
+ vmovdqa xmm6, [rsp + 0*16]
+ vmovdqa xmm7, [rsp + 1*16]
+ add rsp, 16*2 + 8
+%endif
+
+aes_keyexp_192_enc_avx_return:
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/aes_keyexp_256.asm b/src/spdk/intel-ipsec-mb/aes_keyexp_256.asm
new file mode 100644
index 000000000..1acdac3dd
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/aes_keyexp_256.asm
@@ -0,0 +1,677 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+; Routine to do AES key expansion
+%include "include/os.asm"
+%define NO_AESNI_RENAME
+%include "include/aesni_emu.inc"
+%include "include/clear_regs.asm"
+
+; Uses the f() function of the aeskeygenassist result
+%macro key_expansion_256_sse 0
+	;; Assumes xmm3 contains all zeros at this point.
+ pshufd xmm2, xmm2, 11111111b
+ shufps xmm3, xmm1, 00010000b
+ pxor xmm1, xmm3
+ shufps xmm3, xmm1, 10001100b
+ pxor xmm1, xmm3
+ pxor xmm1, xmm2
+%endmacro
+
+; Uses the SubWord function of the aeskeygenassist result
+%macro key_expansion_256_sse_2 0
+	;; Assumes xmm3 contains all zeros at this point.
+ pshufd xmm2, xmm2, 10101010b
+ shufps xmm3, xmm4, 00010000b
+ pxor xmm4, xmm3
+ shufps xmm3, xmm4, 10001100b
+ pxor xmm4, xmm3
+ pxor xmm4, xmm2
+%endmacro
+
+; Uses the f() function of the aeskeygenassist result
+%macro key_expansion_256_avx 0
+	;; Assumes xmm3 contains all zeros at this point.
+ vpshufd xmm2, xmm2, 11111111b
+ vshufps xmm3, xmm3, xmm1, 00010000b
+ vpxor xmm1, xmm1, xmm3
+ vshufps xmm3, xmm3, xmm1, 10001100b
+ vpxor xmm1, xmm1, xmm3
+ vpxor xmm1, xmm1, xmm2
+%endmacro
+
+; Uses the SubWord function of the aeskeygenassist result
+%macro key_expansion_256_avx_2 0
+	;; Assumes xmm3 contains all zeros at this point.
+ vpshufd xmm2, xmm2, 10101010b
+ vshufps xmm3, xmm3, xmm4, 00010000b
+ vpxor xmm4, xmm4, xmm3
+ vshufps xmm3, xmm3, xmm4, 10001100b
+ vpxor xmm4, xmm4, xmm3
+ vpxor xmm4, xmm4, xmm2
+%endmacro
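+
+; Rough C equivalent of one pair of schedule steps implemented by the macros
+; above (sketch only; w[] is the word-wise expanded key, i is a multiple of 8,
+; rcon is the constant fed to [v]aeskeygenassist, SubWord/RotWord as in
+; FIPS-197):
+;
+;     t = SubWord(RotWord(w[i-1])) ^ rcon;     /* f() path,     pshufd 0xFF */
+;     for (j = 0; j < 4; j++) { t ^= w[i-8+j]; w[i+j] = t; }
+;     t = SubWord(w[i+3]);                     /* SubWord path, pshufd 0xAA */
+;     for (j = 4; j < 8; j++) { t ^= w[i-8+j]; w[i+j] = t; }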
+
+%ifdef LINUX
+%define KEY rdi
+%define EXP_ENC_KEYS rsi
+%define EXP_DEC_KEYS rdx
+%else
+%define KEY rcx
+%define EXP_ENC_KEYS rdx
+%define EXP_DEC_KEYS r8
+%endif
+
+section .text
+
+; void aes_keyexp_256(UINT128 *key,
+; UINT128 *enc_exp_keys,
+; UINT128 *dec_exp_keys);
+;
+; arg 1: rcx: pointer to key
+; arg 2: rdx: pointer to expanded key array for encrypt
+; arg 3: r8: pointer to expanded key array for decrypt
+;
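+; Illustrative C call (sketch only; buffer names are hypothetical, sizes
+; assume the 15 x 16-byte round keys of AES-256):
+;
+;     DECLARE_ALIGNED(uint8_t enc_keys[15*16], 16);
+;     DECLARE_ALIGNED(uint8_t dec_keys[15*16], 16);
+;     aes_keyexp_256_sse(key, enc_keys, dec_keys);  /* key -> 32-byte key */
+;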
+MKGLOBAL(aes_keyexp_256_sse,function,)
+aes_keyexp_256_sse:
+
+%ifdef SAFE_PARAM
+ cmp KEY, 0
+ jz aes_keyexp_256_sse_return
+ cmp EXP_ENC_KEYS, 0
+ jz aes_keyexp_256_sse_return
+ cmp EXP_DEC_KEYS, 0
+ jz aes_keyexp_256_sse_return
+%endif
+
+ movdqu xmm1, [KEY] ; loading the AES key
+ movdqa [EXP_ENC_KEYS + 16*0], xmm1
+ movdqa [EXP_DEC_KEYS + 16*14], xmm1 ; Storing key in memory
+
+ movdqu xmm4, [KEY+16] ; loading the AES key
+ movdqa [EXP_ENC_KEYS + 16*1], xmm4
+ aesimc xmm0, xmm4
+ movdqa [EXP_DEC_KEYS + 16*13], xmm0 ; Storing key in memory
+
+ pxor xmm3, xmm3 ; Required for the key_expansion.
+
+ aeskeygenassist xmm2, xmm4, 0x1 ; Generating round key 2
+ key_expansion_256_sse
+ movdqa [EXP_ENC_KEYS + 16*2], xmm1
+ aesimc xmm5, xmm1
+ movdqa [EXP_DEC_KEYS + 16*12], xmm5
+
+ aeskeygenassist xmm2, xmm1, 0x1 ; Generating round key 3
+ key_expansion_256_sse_2
+ movdqa [EXP_ENC_KEYS + 16*3], xmm4
+ aesimc xmm0, xmm4
+ movdqa [EXP_DEC_KEYS + 16*11], xmm0
+
+ aeskeygenassist xmm2, xmm4, 0x2 ; Generating round key 4
+ key_expansion_256_sse
+ movdqa [EXP_ENC_KEYS + 16*4], xmm1
+ aesimc xmm5, xmm1
+ movdqa [EXP_DEC_KEYS + 16*10], xmm5
+
+ aeskeygenassist xmm2, xmm1, 0x2 ; Generating round key 5
+ key_expansion_256_sse_2
+ movdqa [EXP_ENC_KEYS + 16*5], xmm4
+ aesimc xmm0, xmm4
+ movdqa [EXP_DEC_KEYS + 16*9], xmm0
+
+ aeskeygenassist xmm2, xmm4, 0x4 ; Generating round key 6
+ key_expansion_256_sse
+ movdqa [EXP_ENC_KEYS + 16*6], xmm1
+ aesimc xmm5, xmm1
+ movdqa [EXP_DEC_KEYS + 16*8], xmm5
+
+ aeskeygenassist xmm2, xmm1, 0x4 ; Generating round key 7
+ key_expansion_256_sse_2
+ movdqa [EXP_ENC_KEYS + 16*7], xmm4
+ aesimc xmm0, xmm4
+ movdqa [EXP_DEC_KEYS + 16*7], xmm0
+
+ aeskeygenassist xmm2, xmm4, 0x8 ; Generating round key 8
+ key_expansion_256_sse
+ movdqa [EXP_ENC_KEYS + 16*8], xmm1
+ aesimc xmm5, xmm1
+ movdqa [EXP_DEC_KEYS + 16*6], xmm5
+
+ aeskeygenassist xmm2, xmm1, 0x8 ; Generating round key 9
+ key_expansion_256_sse_2
+ movdqa [EXP_ENC_KEYS + 16*9], xmm4
+ aesimc xmm0, xmm4
+ movdqa [EXP_DEC_KEYS + 16*5], xmm0
+
+ aeskeygenassist xmm2, xmm4, 0x10 ; Generating round key 10
+ key_expansion_256_sse
+ movdqa [EXP_ENC_KEYS + 16*10], xmm1
+ aesimc xmm5, xmm1
+ movdqa [EXP_DEC_KEYS + 16*4], xmm5
+
+ aeskeygenassist xmm2, xmm1, 0x10 ; Generating round key 11
+ key_expansion_256_sse_2
+ movdqa [EXP_ENC_KEYS + 16*11], xmm4
+ aesimc xmm0, xmm4
+ movdqa [EXP_DEC_KEYS + 16*3], xmm0
+
+ aeskeygenassist xmm2, xmm4, 0x20 ; Generating round key 12
+ key_expansion_256_sse
+ movdqa [EXP_ENC_KEYS + 16*12], xmm1
+ aesimc xmm5, xmm1
+ movdqa [EXP_DEC_KEYS + 16*2], xmm5
+
+ aeskeygenassist xmm2, xmm1, 0x20 ; Generating round key 13
+ key_expansion_256_sse_2
+ movdqa [EXP_ENC_KEYS + 16*13], xmm4
+ aesimc xmm0, xmm4
+ movdqa [EXP_DEC_KEYS + 16*1], xmm0
+
+ aeskeygenassist xmm2, xmm4, 0x40 ; Generating round key 14
+ key_expansion_256_sse
+ movdqa [EXP_ENC_KEYS + 16*14], xmm1
+ movdqa [EXP_DEC_KEYS + 16*0], xmm1
+
+%ifdef SAFE_DATA
+ clear_scratch_gps_asm
+ clear_scratch_xmms_sse_asm
+%endif
+
+aes_keyexp_256_sse_return:
+ ret
+
+MKGLOBAL(aes_keyexp_256_sse_no_aesni,function,)
+aes_keyexp_256_sse_no_aesni:
+
+%ifdef SAFE_PARAM
+ cmp KEY, 0
+ jz aes_keyexp_256_sse_no_aesni_return
+ cmp EXP_ENC_KEYS, 0
+ jz aes_keyexp_256_sse_no_aesni_return
+ cmp EXP_DEC_KEYS, 0
+ jz aes_keyexp_256_sse_no_aesni_return
+%endif
+
+ movdqu xmm1, [KEY] ; loading the AES key
+ movdqa [EXP_ENC_KEYS + 16*0], xmm1
+ movdqa [EXP_DEC_KEYS + 16*14], xmm1 ; Storing key in memory
+
+ movdqu xmm4, [KEY+16] ; loading the AES key
+ movdqa [EXP_ENC_KEYS + 16*1], xmm4
+ EMULATE_AESIMC xmm0, xmm4
+ movdqa [EXP_DEC_KEYS + 16*13], xmm0 ; Storing key in memory
+
+ pxor xmm3, xmm3 ; Required for the key_expansion.
+
+ EMULATE_AESKEYGENASSIST xmm2, xmm4, 0x1 ; Generating round key 2
+ key_expansion_256_sse
+ movdqa [EXP_ENC_KEYS + 16*2], xmm1
+ EMULATE_AESIMC xmm5, xmm1
+ movdqa [EXP_DEC_KEYS + 16*12], xmm5
+
+ EMULATE_AESKEYGENASSIST xmm2, xmm1, 0x1 ; Generating round key 3
+ key_expansion_256_sse_2
+ movdqa [EXP_ENC_KEYS + 16*3], xmm4
+ EMULATE_AESIMC xmm0, xmm4
+ movdqa [EXP_DEC_KEYS + 16*11], xmm0
+
+ EMULATE_AESKEYGENASSIST xmm2, xmm4, 0x2 ; Generating round key 4
+ key_expansion_256_sse
+ movdqa [EXP_ENC_KEYS + 16*4], xmm1
+ EMULATE_AESIMC xmm5, xmm1
+ movdqa [EXP_DEC_KEYS + 16*10], xmm5
+
+ EMULATE_AESKEYGENASSIST xmm2, xmm1, 0x2 ; Generating round key 5
+ key_expansion_256_sse_2
+ movdqa [EXP_ENC_KEYS + 16*5], xmm4
+ EMULATE_AESIMC xmm0, xmm4
+ movdqa [EXP_DEC_KEYS + 16*9], xmm0
+
+ EMULATE_AESKEYGENASSIST xmm2, xmm4, 0x4 ; Generating round key 6
+ key_expansion_256_sse
+ movdqa [EXP_ENC_KEYS + 16*6], xmm1
+ EMULATE_AESIMC xmm5, xmm1
+ movdqa [EXP_DEC_KEYS + 16*8], xmm5
+
+ EMULATE_AESKEYGENASSIST xmm2, xmm1, 0x4 ; Generating round key 7
+ key_expansion_256_sse_2
+ movdqa [EXP_ENC_KEYS + 16*7], xmm4
+ EMULATE_AESIMC xmm0, xmm4
+ movdqa [EXP_DEC_KEYS + 16*7], xmm0
+
+ EMULATE_AESKEYGENASSIST xmm2, xmm4, 0x8 ; Generating round key 8
+ key_expansion_256_sse
+ movdqa [EXP_ENC_KEYS + 16*8], xmm1
+ EMULATE_AESIMC xmm5, xmm1
+ movdqa [EXP_DEC_KEYS + 16*6], xmm5
+
+ EMULATE_AESKEYGENASSIST xmm2, xmm1, 0x8 ; Generating round key 9
+ key_expansion_256_sse_2
+ movdqa [EXP_ENC_KEYS + 16*9], xmm4
+ EMULATE_AESIMC xmm0, xmm4
+ movdqa [EXP_DEC_KEYS + 16*5], xmm0
+
+ EMULATE_AESKEYGENASSIST xmm2, xmm4, 0x10 ; Generating round key 10
+ key_expansion_256_sse
+ movdqa [EXP_ENC_KEYS + 16*10], xmm1
+ EMULATE_AESIMC xmm5, xmm1
+ movdqa [EXP_DEC_KEYS + 16*4], xmm5
+
+ EMULATE_AESKEYGENASSIST xmm2, xmm1, 0x10 ; Generating round key 11
+ key_expansion_256_sse_2
+ movdqa [EXP_ENC_KEYS + 16*11], xmm4
+ EMULATE_AESIMC xmm0, xmm4
+ movdqa [EXP_DEC_KEYS + 16*3], xmm0
+
+ EMULATE_AESKEYGENASSIST xmm2, xmm4, 0x20 ; Generating round key 12
+ key_expansion_256_sse
+ movdqa [EXP_ENC_KEYS + 16*12], xmm1
+ EMULATE_AESIMC xmm5, xmm1
+ movdqa [EXP_DEC_KEYS + 16*2], xmm5
+
+ EMULATE_AESKEYGENASSIST xmm2, xmm1, 0x20 ; Generating round key 13
+ key_expansion_256_sse_2
+ movdqa [EXP_ENC_KEYS + 16*13], xmm4
+ EMULATE_AESIMC xmm0, xmm4
+ movdqa [EXP_DEC_KEYS + 16*1], xmm0
+
+ EMULATE_AESKEYGENASSIST xmm2, xmm4, 0x40 ; Generating round key 14
+ key_expansion_256_sse
+ movdqa [EXP_ENC_KEYS + 16*14], xmm1
+ movdqa [EXP_DEC_KEYS + 16*0], xmm1
+
+%ifdef SAFE_DATA
+ clear_scratch_gps_asm
+ clear_scratch_xmms_sse_asm
+%endif
+
+aes_keyexp_256_sse_no_aesni_return:
+ ret
+
+MKGLOBAL(aes_keyexp_256_avx,function,)
+MKGLOBAL(aes_keyexp_256_avx2,function,)
+MKGLOBAL(aes_keyexp_256_avx512,function,)
+aes_keyexp_256_avx:
+aes_keyexp_256_avx2:
+aes_keyexp_256_avx512:
+
+%ifdef SAFE_PARAM
+ cmp KEY, 0
+ jz aes_keyexp_256_avx_return
+ cmp EXP_ENC_KEYS, 0
+ jz aes_keyexp_256_avx_return
+ cmp EXP_DEC_KEYS, 0
+ jz aes_keyexp_256_avx_return
+%endif
+
+ vmovdqu xmm1, [KEY] ; loading the AES key
+ vmovdqa [EXP_ENC_KEYS + 16*0], xmm1
+ vmovdqa [EXP_DEC_KEYS + 16*14], xmm1 ; Storing key in memory
+
+ vmovdqu xmm4, [KEY+16] ; loading the AES key
+ vmovdqa [EXP_ENC_KEYS + 16*1], xmm4
+ vaesimc xmm0, xmm4
+ vmovdqa [EXP_DEC_KEYS + 16*13], xmm0 ; Storing key in memory
+
+ vpxor xmm3, xmm3, xmm3 ; Required for the key_expansion.
+
+ vaeskeygenassist xmm2, xmm4, 0x1 ; Generating round key 2
+ key_expansion_256_avx
+ vmovdqa [EXP_ENC_KEYS + 16*2], xmm1
+ vaesimc xmm5, xmm1
+ vmovdqa [EXP_DEC_KEYS + 16*12], xmm5
+
+ vaeskeygenassist xmm2, xmm1, 0x1 ; Generating round key 3
+ key_expansion_256_avx_2
+ vmovdqa [EXP_ENC_KEYS + 16*3], xmm4
+ vaesimc xmm0, xmm4
+ vmovdqa [EXP_DEC_KEYS + 16*11], xmm0
+
+ vaeskeygenassist xmm2, xmm4, 0x2 ; Generating round key 4
+ key_expansion_256_avx
+ vmovdqa [EXP_ENC_KEYS + 16*4], xmm1
+ vaesimc xmm5, xmm1
+ vmovdqa [EXP_DEC_KEYS + 16*10], xmm5
+
+ vaeskeygenassist xmm2, xmm1, 0x2 ; Generating round key 5
+ key_expansion_256_avx_2
+ vmovdqa [EXP_ENC_KEYS + 16*5], xmm4
+ vaesimc xmm0, xmm4
+ vmovdqa [EXP_DEC_KEYS + 16*9], xmm0
+
+ vaeskeygenassist xmm2, xmm4, 0x4 ; Generating round key 6
+ key_expansion_256_avx
+ vmovdqa [EXP_ENC_KEYS + 16*6], xmm1
+ vaesimc xmm5, xmm1
+ vmovdqa [EXP_DEC_KEYS + 16*8], xmm5
+
+ vaeskeygenassist xmm2, xmm1, 0x4 ; Generating round key 7
+ key_expansion_256_avx_2
+ vmovdqa [EXP_ENC_KEYS + 16*7], xmm4
+ vaesimc xmm0, xmm4
+ vmovdqa [EXP_DEC_KEYS + 16*7], xmm0
+
+ vaeskeygenassist xmm2, xmm4, 0x8 ; Generating round key 8
+ key_expansion_256_avx
+ vmovdqa [EXP_ENC_KEYS + 16*8], xmm1
+ vaesimc xmm5, xmm1
+ vmovdqa [EXP_DEC_KEYS + 16*6], xmm5
+
+ vaeskeygenassist xmm2, xmm1, 0x8 ; Generating round key 9
+ key_expansion_256_avx_2
+ vmovdqa [EXP_ENC_KEYS + 16*9], xmm4
+ vaesimc xmm0, xmm4
+ vmovdqa [EXP_DEC_KEYS + 16*5], xmm0
+
+ vaeskeygenassist xmm2, xmm4, 0x10 ; Generating round key 10
+ key_expansion_256_avx
+ vmovdqa [EXP_ENC_KEYS + 16*10], xmm1
+ vaesimc xmm5, xmm1
+ vmovdqa [EXP_DEC_KEYS + 16*4], xmm5
+
+ vaeskeygenassist xmm2, xmm1, 0x10 ; Generating round key 11
+ key_expansion_256_avx_2
+ vmovdqa [EXP_ENC_KEYS + 16*11], xmm4
+ vaesimc xmm0, xmm4
+ vmovdqa [EXP_DEC_KEYS + 16*3], xmm0
+
+ vaeskeygenassist xmm2, xmm4, 0x20 ; Generating round key 12
+ key_expansion_256_avx
+ vmovdqa [EXP_ENC_KEYS + 16*12], xmm1
+ vaesimc xmm5, xmm1
+ vmovdqa [EXP_DEC_KEYS + 16*2], xmm5
+
+ vaeskeygenassist xmm2, xmm1, 0x20 ; Generating round key 13
+ key_expansion_256_avx_2
+ vmovdqa [EXP_ENC_KEYS + 16*13], xmm4
+ vaesimc xmm0, xmm4
+ vmovdqa [EXP_DEC_KEYS + 16*1], xmm0
+
+ vaeskeygenassist xmm2, xmm4, 0x40 ; Generating round key 14
+ key_expansion_256_avx
+ vmovdqa [EXP_ENC_KEYS + 16*14], xmm1
+ vmovdqa [EXP_DEC_KEYS + 16*0], xmm1
+
+%ifdef SAFE_DATA
+ clear_scratch_gps_asm
+ clear_scratch_xmms_avx_asm
+%endif
+
+aes_keyexp_256_avx_return:
+ ret
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+; void aes_keyexp_256_enc_sse(UINT128 *key,
+; UINT128 *enc_exp_keys);
+;
+; arg 1: rcx: pointer to key
+; arg 2: rdx: pointer to expanded key array for encrypt
+;
+MKGLOBAL(aes_keyexp_256_enc_sse,function,)
+aes_keyexp_256_enc_sse:
+
+%ifdef SAFE_PARAM
+ cmp KEY, 0
+ jz aes_keyexp_256_enc_sse_return
+ cmp EXP_ENC_KEYS, 0
+ jz aes_keyexp_256_enc_sse_return
+%endif
+
+ movdqu xmm1, [KEY] ; loading the AES key
+ movdqa [EXP_ENC_KEYS + 16*0], xmm1
+
+ movdqu xmm4, [KEY+16] ; loading the AES key
+ movdqa [EXP_ENC_KEYS + 16*1], xmm4
+
+ pxor xmm3, xmm3 ; Required for the key_expansion.
+
+ aeskeygenassist xmm2, xmm4, 0x1 ; Generating round key 2
+ key_expansion_256_sse
+ movdqa [EXP_ENC_KEYS + 16*2], xmm1
+
+ aeskeygenassist xmm2, xmm1, 0x1 ; Generating round key 3
+ key_expansion_256_sse_2
+ movdqa [EXP_ENC_KEYS + 16*3], xmm4
+
+ aeskeygenassist xmm2, xmm4, 0x2 ; Generating round key 4
+ key_expansion_256_sse
+ movdqa [EXP_ENC_KEYS + 16*4], xmm1
+
+ aeskeygenassist xmm2, xmm1, 0x2 ; Generating round key 5
+ key_expansion_256_sse_2
+ movdqa [EXP_ENC_KEYS + 16*5], xmm4
+
+ aeskeygenassist xmm2, xmm4, 0x4 ; Generating round key 6
+ key_expansion_256_sse
+ movdqa [EXP_ENC_KEYS + 16*6], xmm1
+
+ aeskeygenassist xmm2, xmm1, 0x4 ; Generating round key 7
+ key_expansion_256_sse_2
+ movdqa [EXP_ENC_KEYS + 16*7], xmm4
+
+ aeskeygenassist xmm2, xmm4, 0x8 ; Generating round key 8
+ key_expansion_256_sse
+ movdqa [EXP_ENC_KEYS + 16*8], xmm1
+
+ aeskeygenassist xmm2, xmm1, 0x8 ; Generating round key 9
+ key_expansion_256_sse_2
+ movdqa [EXP_ENC_KEYS + 16*9], xmm4
+
+ aeskeygenassist xmm2, xmm4, 0x10 ; Generating round key 10
+ key_expansion_256_sse
+ movdqa [EXP_ENC_KEYS + 16*10], xmm1
+
+ aeskeygenassist xmm2, xmm1, 0x10 ; Generating round key 11
+ key_expansion_256_sse_2
+ movdqa [EXP_ENC_KEYS + 16*11], xmm4
+
+ aeskeygenassist xmm2, xmm4, 0x20 ; Generating round key 12
+ key_expansion_256_sse
+ movdqa [EXP_ENC_KEYS + 16*12], xmm1
+
+ aeskeygenassist xmm2, xmm1, 0x20 ; Generating round key 13
+ key_expansion_256_sse_2
+ movdqa [EXP_ENC_KEYS + 16*13], xmm4
+
+ aeskeygenassist xmm2, xmm4, 0x40 ; Generating round key 14
+ key_expansion_256_sse
+ movdqa [EXP_ENC_KEYS + 16*14], xmm1
+
+%ifdef SAFE_DATA
+ clear_scratch_gps_asm
+ clear_scratch_xmms_sse_asm
+%endif
+
+aes_keyexp_256_enc_sse_return:
+ ret
+
+MKGLOBAL(aes_keyexp_256_enc_sse_no_aesni,function,)
+aes_keyexp_256_enc_sse_no_aesni:
+
+%ifdef SAFE_PARAM
+ cmp KEY, 0
+ jz aes_keyexp_256_enc_sse_no_aesni_return
+ cmp EXP_ENC_KEYS, 0
+ jz aes_keyexp_256_enc_sse_no_aesni_return
+%endif
+
+ movdqu xmm1, [KEY] ; loading the AES key
+ movdqa [EXP_ENC_KEYS + 16*0], xmm1
+
+ movdqu xmm4, [KEY+16] ; loading the AES key
+ movdqa [EXP_ENC_KEYS + 16*1], xmm4
+
+ pxor xmm3, xmm3 ; Required for the key_expansion.
+
+ EMULATE_AESKEYGENASSIST xmm2, xmm4, 0x1 ; Generating round key 2
+ key_expansion_256_sse
+ movdqa [EXP_ENC_KEYS + 16*2], xmm1
+
+ EMULATE_AESKEYGENASSIST xmm2, xmm1, 0x1 ; Generating round key 3
+ key_expansion_256_sse_2
+ movdqa [EXP_ENC_KEYS + 16*3], xmm4
+
+ EMULATE_AESKEYGENASSIST xmm2, xmm4, 0x2 ; Generating round key 4
+ key_expansion_256_sse
+ movdqa [EXP_ENC_KEYS + 16*4], xmm1
+
+ EMULATE_AESKEYGENASSIST xmm2, xmm1, 0x2 ; Generating round key 5
+ key_expansion_256_sse_2
+ movdqa [EXP_ENC_KEYS + 16*5], xmm4
+
+ EMULATE_AESKEYGENASSIST xmm2, xmm4, 0x4 ; Generating round key 6
+ key_expansion_256_sse
+ movdqa [EXP_ENC_KEYS + 16*6], xmm1
+
+ EMULATE_AESKEYGENASSIST xmm2, xmm1, 0x4 ; Generating round key 7
+ key_expansion_256_sse_2
+ movdqa [EXP_ENC_KEYS + 16*7], xmm4
+
+ EMULATE_AESKEYGENASSIST xmm2, xmm4, 0x8 ; Generating round key 8
+ key_expansion_256_sse
+ movdqa [EXP_ENC_KEYS + 16*8], xmm1
+
+ EMULATE_AESKEYGENASSIST xmm2, xmm1, 0x8 ; Generating round key 9
+ key_expansion_256_sse_2
+ movdqa [EXP_ENC_KEYS + 16*9], xmm4
+
+ EMULATE_AESKEYGENASSIST xmm2, xmm4, 0x10 ; Generating round key 10
+ key_expansion_256_sse
+ movdqa [EXP_ENC_KEYS + 16*10], xmm1
+
+ EMULATE_AESKEYGENASSIST xmm2, xmm1, 0x10 ; Generating round key 11
+ key_expansion_256_sse_2
+ movdqa [EXP_ENC_KEYS + 16*11], xmm4
+
+ EMULATE_AESKEYGENASSIST xmm2, xmm4, 0x20 ; Generating round key 12
+ key_expansion_256_sse
+ movdqa [EXP_ENC_KEYS + 16*12], xmm1
+
+ EMULATE_AESKEYGENASSIST xmm2, xmm1, 0x20 ; Generating round key 13
+ key_expansion_256_sse_2
+ movdqa [EXP_ENC_KEYS + 16*13], xmm4
+
+ EMULATE_AESKEYGENASSIST xmm2, xmm4, 0x40 ; Generating round key 14
+ key_expansion_256_sse
+ movdqa [EXP_ENC_KEYS + 16*14], xmm1
+
+%ifdef SAFE_DATA
+ clear_scratch_gps_asm
+ clear_scratch_xmms_sse_asm
+%endif
+
+aes_keyexp_256_enc_sse_no_aesni_return:
+ ret
+
+MKGLOBAL(aes_keyexp_256_enc_avx,function,)
+MKGLOBAL(aes_keyexp_256_enc_avx2,function,)
+MKGLOBAL(aes_keyexp_256_enc_avx512,function,)
+aes_keyexp_256_enc_avx:
+aes_keyexp_256_enc_avx2:
+aes_keyexp_256_enc_avx512:
+
+%ifdef SAFE_PARAM
+ cmp KEY, 0
+ jz aes_keyexp_256_enc_avx_return
+ cmp EXP_ENC_KEYS, 0
+ jz aes_keyexp_256_enc_avx_return
+%endif
+
+ vmovdqu xmm1, [KEY] ; loading the AES key
+ vmovdqa [EXP_ENC_KEYS + 16*0], xmm1
+
+ vmovdqu xmm4, [KEY+16] ; loading the AES key
+ vmovdqa [EXP_ENC_KEYS + 16*1], xmm4
+
+ vpxor xmm3, xmm3, xmm3 ; Required for the key_expansion.
+
+ vaeskeygenassist xmm2, xmm4, 0x1 ; Generating round key 2
+ key_expansion_256_avx
+ vmovdqa [EXP_ENC_KEYS + 16*2], xmm1
+
+ vaeskeygenassist xmm2, xmm1, 0x1 ; Generating round key 3
+ key_expansion_256_avx_2
+ vmovdqa [EXP_ENC_KEYS + 16*3], xmm4
+
+ vaeskeygenassist xmm2, xmm4, 0x2 ; Generating round key 4
+ key_expansion_256_avx
+ vmovdqa [EXP_ENC_KEYS + 16*4], xmm1
+
+ vaeskeygenassist xmm2, xmm1, 0x2 ; Generating round key 5
+ key_expansion_256_avx_2
+ vmovdqa [EXP_ENC_KEYS + 16*5], xmm4
+
+ vaeskeygenassist xmm2, xmm4, 0x4 ; Generating round key 6
+ key_expansion_256_avx
+ vmovdqa [EXP_ENC_KEYS + 16*6], xmm1
+
+ vaeskeygenassist xmm2, xmm1, 0x4 ; Generating round key 7
+ key_expansion_256_avx_2
+ vmovdqa [EXP_ENC_KEYS + 16*7], xmm4
+
+ vaeskeygenassist xmm2, xmm4, 0x8 ; Generating round key 8
+ key_expansion_256_avx
+ vmovdqa [EXP_ENC_KEYS + 16*8], xmm1
+
+ vaeskeygenassist xmm2, xmm1, 0x8 ; Generating round key 9
+ key_expansion_256_avx_2
+ vmovdqa [EXP_ENC_KEYS + 16*9], xmm4
+
+ vaeskeygenassist xmm2, xmm4, 0x10 ; Generating round key 10
+ key_expansion_256_avx
+ vmovdqa [EXP_ENC_KEYS + 16*10], xmm1
+
+ vaeskeygenassist xmm2, xmm1, 0x10 ; Generating round key 11
+ key_expansion_256_avx_2
+ vmovdqa [EXP_ENC_KEYS + 16*11], xmm4
+
+ vaeskeygenassist xmm2, xmm4, 0x20 ; Generating round key 12
+ key_expansion_256_avx
+ vmovdqa [EXP_ENC_KEYS + 16*12], xmm1
+
+ vaeskeygenassist xmm2, xmm1, 0x20 ; Generating round key 13
+ key_expansion_256_avx_2
+ vmovdqa [EXP_ENC_KEYS + 16*13], xmm4
+
+ vaeskeygenassist xmm2, xmm4, 0x40 ; Generating round key 14
+ key_expansion_256_avx
+ vmovdqa [EXP_ENC_KEYS + 16*14], xmm1
+
+%ifdef SAFE_DATA
+ clear_scratch_gps_asm
+ clear_scratch_xmms_avx_asm
+%endif
+
+aes_keyexp_256_enc_avx_return:
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/aes_xcbc_expand_key.c b/src/spdk/intel-ipsec-mb/aes_xcbc_expand_key.c
new file mode 100644
index 000000000..7232ea12d
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/aes_xcbc_expand_key.c
@@ -0,0 +1,139 @@
+/*******************************************************************************
+ Copyright (c) 2012-2018, Intel Corporation
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*******************************************************************************/
+
+
+#include <stdio.h>
+#include "intel-ipsec-mb.h"
+
+#include "noaesni.h"
+#include "asm.h"
+#include "include/clear_regs_mem.h"
+
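+/*
+ * The three constant blocks below (0x01.., 0x02.., 0x03..) are the inputs
+ * encrypted with the user-supplied key to derive K1, K2 and K3 of the
+ * AES-XCBC construction (RFC 3566).
+ */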
+static uint32_t in[4*3] = {
+ 0x01010101, 0x01010101, 0x01010101, 0x01010101,
+ 0x02020202, 0x02020202, 0x02020202, 0x02020202,
+ 0x03030303, 0x03030303, 0x03030303, 0x03030303
+};
+
+void
+aes_xcbc_expand_key_sse(const void *key, void *k1_exp, void *k2, void *k3)
+{
+#ifdef SAFE_PARAM
+ if ((key == NULL) || (k1_exp == NULL) ||
+ (k2 == NULL) || (k3 == NULL))
+ return;
+#endif
+ DECLARE_ALIGNED(uint32_t keys_exp_enc[11*4], 16);
+
+ aes_keyexp_128_enc_sse(key, keys_exp_enc);
+
+ aes128_ecbenc_x3_sse(in, keys_exp_enc, k1_exp, k2, k3);
+
+ aes_keyexp_128_enc_sse(k1_exp, k1_exp);
+
+#ifdef SAFE_DATA
+ clear_mem(&keys_exp_enc, sizeof(keys_exp_enc));
+#endif
+}
+
+void
+aes_xcbc_expand_key_sse_no_aesni(const void *key, void *k1_exp,
+ void *k2, void *k3)
+{
+#ifdef SAFE_PARAM
+ if ((key == NULL) || (k1_exp == NULL) ||
+ (k2 == NULL) || (k3 == NULL))
+ return;
+#endif
+ DECLARE_ALIGNED(uint32_t keys_exp_enc[11*4], 16);
+
+ aes_keyexp_128_enc_sse_no_aesni(key, keys_exp_enc);
+
+ aes128_ecbenc_x3_sse_no_aesni(in, keys_exp_enc, k1_exp, k2, k3);
+
+ aes_keyexp_128_enc_sse_no_aesni(k1_exp, k1_exp);
+
+#ifdef SAFE_DATA
+ clear_mem(&keys_exp_enc, sizeof(keys_exp_enc));
+#endif
+}
+
+__forceinline
+void
+aes_xcbc_expand_key_avx_common(const void *key,
+ void *k1_exp, void *k2, void *k3)
+{
+#ifdef SAFE_PARAM
+ if ((key == NULL) || (k1_exp == NULL) ||
+ (k2 == NULL) || (k3 == NULL))
+ return;
+#endif
+ DECLARE_ALIGNED(uint32_t keys_exp_enc[11*4], 16);
+
+ aes_keyexp_128_enc_avx(key, keys_exp_enc);
+
+ aes128_ecbenc_x3_avx(in, keys_exp_enc, k1_exp, k2, k3);
+
+ aes_keyexp_128_enc_avx(k1_exp, k1_exp);
+
+#ifdef SAFE_DATA
+ clear_mem(&keys_exp_enc, sizeof(keys_exp_enc));
+#endif
+}
+
+void
+aes_xcbc_expand_key_avx(const void *key, void *k1_exp, void *k2, void *k3)
+{
+#ifdef SAFE_PARAM
+ if ((key == NULL) || (k1_exp == NULL) ||
+ (k2 == NULL) || (k3 == NULL))
+ return;
+#endif
+ aes_xcbc_expand_key_avx_common(key, k1_exp, k2, k3);
+}
+
+void
+aes_xcbc_expand_key_avx2(const void *key, void *k1_exp, void *k2, void *k3)
+{
+#ifdef SAFE_PARAM
+ if ((key == NULL) || (k1_exp == NULL) ||
+ (k2 == NULL) || (k3 == NULL))
+ return;
+#endif
+ aes_xcbc_expand_key_avx_common(key, k1_exp, k2, k3);
+}
+
+void
+aes_xcbc_expand_key_avx512(const void *key, void *k1_exp, void *k2, void *k3)
+{
+#ifdef SAFE_PARAM
+ if ((key == NULL) || (k1_exp == NULL) ||
+ (k2 == NULL) || (k3 == NULL))
+ return;
+#endif
+ aes_xcbc_expand_key_avx_common(key, k1_exp, k2, k3);
+}
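+
+/*
+ * Illustrative call (sketch only; buffer names are hypothetical): k1_exp
+ * receives the 11 x 16-byte expanded schedule of K1, while k2 and k3 receive
+ * the raw 16-byte K2 and K3.
+ *
+ *   DECLARE_ALIGNED(uint32_t k1_exp[11*4], 16);
+ *   uint8_t k2[16], k3[16];
+ *
+ *   aes_xcbc_expand_key_sse(key, k1_exp, k2, k3);
+ */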
diff --git a/src/spdk/intel-ipsec-mb/alloc.c b/src/spdk/intel-ipsec-mb/alloc.c
new file mode 100644
index 000000000..12f66d9ac
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/alloc.c
@@ -0,0 +1,84 @@
+/*******************************************************************************
+ Copyright (c) 2018, Intel Corporation
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*******************************************************************************/
+
+#include <stdint.h>
+#ifdef LINUX
+#include <stdlib.h> /* posix_memalign() and free() */
+#else
+#include <malloc.h> /* _aligned_malloc() and _aligned_free() */
+#endif
+#include "intel-ipsec-mb.h"
+#include "cpu_feature.h"
+
+/**
+ * @brief Allocates memory for multi-buffer manager instance
+ *
+ * For binary compatibility between library versions
+ * it is recommended to use this API.
+ *
+ * @param flags multi-buffer manager flags
+ *              IMB_FLAG_SHANI_OFF - disable use (and detection) of SHA extensions,
+ * currently SHANI is only available for SSE
+ *
+ * @return Pointer to allocated memory for MB_MGR structure
+ * @retval NULL on allocation error
+ */
+MB_MGR *alloc_mb_mgr(uint64_t flags)
+{
+ const size_t alignment = 64;
+ const size_t size = sizeof(MB_MGR);
+ MB_MGR *ptr = NULL;
+
+#ifdef LINUX
+ if (posix_memalign((void **)&ptr, alignment, size))
+ return NULL;
+#else
+ ptr = _aligned_malloc(size, alignment);
+#endif
+ if (ptr != NULL) {
+ ptr->flags = flags; /* save the flags for future use in init */
+ ptr->features = cpu_feature_adjust(flags, cpu_feature_detect());
+ }
+ IMB_ASSERT(ptr != NULL);
+ return ptr;
+}
+
+/**
+ * @brief Frees memory allocated previously by alloc_mb_mgr()
+ *
+ * @param ptr a pointer to allocated MB_MGR structure
+ *
+ */
+void free_mb_mgr(MB_MGR *ptr)
+{
+ IMB_ASSERT(ptr != NULL);
+#ifdef LINUX
+ free(ptr);
+#else
+ _aligned_free(ptr);
+#endif
+}
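+
+/*
+ * Typical usage sketch (illustrative only; init_mb_mgr_sse/avx/avx2/avx512
+ * are the per-architecture initializers exported by the library):
+ *
+ *   MB_MGR *mgr = alloc_mb_mgr(0);
+ *
+ *   if (mgr == NULL)
+ *           return;
+ *   init_mb_mgr_avx2(mgr);  // pick the variant matching the running CPU
+ *   ...                     // submit/flush jobs through mgr
+ *   free_mb_mgr(mgr);
+ */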
diff --git a/src/spdk/intel-ipsec-mb/asm.h b/src/spdk/intel-ipsec-mb/asm.h
new file mode 100644
index 000000000..ced2ec2f1
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/asm.h
@@ -0,0 +1,212 @@
+/*******************************************************************************
+ Copyright (c) 2012-2018, Intel Corporation
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*******************************************************************************/
+
+/* interface to asm routines */
+
+#ifndef IMB_ASM_H
+#define IMB_ASM_H
+
+#include "intel-ipsec-mb.h"
+
+/* Define interface to base asm code */
+
+/* AES-CBC */
+void aes_cbc_enc_128_x8(AES_ARGS *args, uint64_t len_in_bytes);
+void aes_cbc_enc_192_x8(AES_ARGS *args, uint64_t len_in_bytes);
+void aes_cbc_enc_256_x8(AES_ARGS *args, uint64_t len_in_bytes);
+
+void aes_cbc_dec_128_avx(const void *in, const uint8_t *IV, const void *keys,
+ void *out, uint64_t len_bytes);
+void aes_cbc_dec_192_avx(const void *in, const uint8_t *IV, const void *keys,
+ void *out, uint64_t len_bytes);
+void aes_cbc_dec_256_avx(const void *in, const uint8_t *IV, const void *keys,
+ void *out, uint64_t len_bytes);
+
+void aes_cbc_dec_128_vaes_avx512(const void *in, const uint8_t *IV,
+ const void *keys, void *out,
+ uint64_t len_bytes);
+void aes_cbc_dec_192_vaes_avx512(const void *in, const uint8_t *IV,
+ const void *keys, void *out,
+ uint64_t len_bytes);
+void aes_cbc_dec_256_vaes_avx512(const void *in, const uint8_t *IV,
+ const void *keys, void *out,
+ uint64_t len_bytes);
+
+void aes_cbc_dec_128_sse(const void *in, const uint8_t *IV, const void *keys,
+ void *out, uint64_t len_bytes);
+void aes_cbc_dec_192_sse(const void *in, const uint8_t *IV, const void *keys,
+ void *out, uint64_t len_bytes);
+void aes_cbc_dec_256_sse(const void *in, const uint8_t *IV, const void *keys,
+ void *out, uint64_t len_bytes);
+
+void aes_cbc_dec_128_sse_no_aesni(const void *in, const uint8_t *IV,
+ const void *keys, void *out,
+ uint64_t len_bytes);
+void aes_cbc_dec_192_sse_no_aesni(const void *in, const uint8_t *IV,
+ const void *keys, void *out,
+ uint64_t len_bytes);
+void aes_cbc_dec_256_sse_no_aesni(const void *in, const uint8_t *IV,
+ const void *keys, void *out,
+ uint64_t len_bytes);
+
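+/*
+ * Illustrative direct use of the CBC decrypt path (sketch only; the key
+ * expansion helper and buffer names are assumed from elsewhere in this tree,
+ * and the length must be a multiple of 16 bytes):
+ *
+ *   DECLARE_ALIGNED(uint32_t enc_keys[11*4], 16);
+ *   DECLARE_ALIGNED(uint32_t dec_keys[11*4], 16);
+ *
+ *   aes_keyexp_128_sse(key, enc_keys, dec_keys);
+ *   aes_cbc_dec_128_sse(cipher, iv, dec_keys, plain, len_bytes);
+ */
+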
+/* AES-CTR */
+void aes_cntr_256_sse(const void *in, const void *IV, const void *keys,
+ void *out, uint64_t len_bytes, uint64_t IV_len);
+void aes_cntr_192_sse(const void *in, const void *IV, const void *keys,
+ void *out, uint64_t len_bytes, uint64_t IV_len);
+void aes_cntr_128_sse(const void *in, const void *IV, const void *keys,
+ void *out, uint64_t len_bytes, uint64_t IV_len);
+
+void aes_cntr_256_sse_no_aesni(const void *in, const void *IV, const void *keys,
+ void *out, uint64_t len_bytes, uint64_t IV_len);
+void aes_cntr_192_sse_no_aesni(const void *in, const void *IV, const void *keys,
+ void *out, uint64_t len_bytes, uint64_t IV_len);
+void aes_cntr_128_sse_no_aesni(const void *in, const void *IV, const void *keys,
+ void *out, uint64_t len_bytes, uint64_t IV_len);
+
+
+void aes_cntr_256_avx(const void *in, const void *IV, const void *keys,
+ void *out, uint64_t len_bytes, uint64_t IV_len);
+void aes_cntr_192_avx(const void *in, const void *IV, const void *keys,
+ void *out, uint64_t len_bytes, uint64_t IV_len);
+void aes_cntr_128_avx(const void *in, const void *IV, const void *keys,
+ void *out, uint64_t len_bytes, uint64_t IV_len);
+
+void aes_cntr_128_submit_vaes_avx512(JOB_AES_HMAC *job);
+void aes_cntr_192_submit_vaes_avx512(JOB_AES_HMAC *job);
+void aes_cntr_256_submit_vaes_avx512(JOB_AES_HMAC *job);
+
+/* AES-CTR-BITLEN */
+void aes_cntr_bit_256_sse(const void *in, const void *IV, const void *keys,
+ void *out, uint64_t len_bits, uint64_t IV_len);
+void aes_cntr_bit_192_sse(const void *in, const void *IV, const void *keys,
+ void *out, uint64_t len_bits, uint64_t IV_len);
+void aes_cntr_bit_128_sse(const void *in, const void *IV, const void *keys,
+ void *out, uint64_t len_bits, uint64_t IV_len);
+
+void aes_cntr_bit_256_sse_no_aesni(const void *in, const void *IV,
+ const void *keys, void *out,
+ uint64_t len_bits, uint64_t IV_len);
+void aes_cntr_bit_192_sse_no_aesni(const void *in, const void *IV,
+ const void *keys, void *out,
+ uint64_t len_bits, uint64_t IV_len);
+void aes_cntr_bit_128_sse_no_aesni(const void *in, const void *IV,
+ const void *keys, void *out,
+ uint64_t len_bits, uint64_t IV_len);
+
+void aes_cntr_bit_256_avx(const void *in, const void *IV, const void *keys,
+ void *out, uint64_t len_bits, uint64_t IV_len);
+void aes_cntr_bit_192_avx(const void *in, const void *IV, const void *keys,
+ void *out, uint64_t len_bits, uint64_t IV_len);
+void aes_cntr_bit_128_avx(const void *in, const void *IV, const void *keys,
+ void *out, uint64_t len_bits, uint64_t IV_len);
+
+void aes_cntr_bit_128_submit_vaes_avx512(JOB_AES_HMAC *job);
+void aes_cntr_bit_192_submit_vaes_avx512(JOB_AES_HMAC *job);
+void aes_cntr_bit_256_submit_vaes_avx512(JOB_AES_HMAC *job);
+
+/* AES-CCM */
+JOB_AES_HMAC *aes_cntr_ccm_128_sse(JOB_AES_HMAC *job);
+
+JOB_AES_HMAC *aes_cntr_ccm_128_sse_no_aesni(JOB_AES_HMAC *job);
+
+JOB_AES_HMAC *aes_cntr_ccm_128_avx(JOB_AES_HMAC *job);
+
+/* AES-ECB */
+void aes_ecb_enc_256_sse(const void *in, const void *keys,
+ void *out, uint64_t len_bytes);
+void aes_ecb_enc_192_sse(const void *in, const void *keys,
+ void *out, uint64_t len_bytes);
+void aes_ecb_enc_128_sse(const void *in, const void *keys,
+ void *out, uint64_t len_bytes);
+
+void aes_ecb_enc_256_sse_no_aesni(const void *in, const void *keys,
+ void *out, uint64_t len_bytes);
+void aes_ecb_enc_192_sse_no_aesni(const void *in, const void *keys,
+ void *out, uint64_t len_bytes);
+void aes_ecb_enc_128_sse_no_aesni(const void *in, const void *keys,
+ void *out, uint64_t len_bytes);
+
+
+void aes_ecb_enc_256_avx(const void *in, const void *keys,
+ void *out, uint64_t len_bytes);
+void aes_ecb_enc_192_avx(const void *in, const void *keys,
+ void *out, uint64_t len_bytes);
+void aes_ecb_enc_128_avx(const void *in, const void *keys,
+ void *out, uint64_t len_bytes);
+
+void aes_ecb_dec_256_sse(const void *in, const void *keys,
+ void *out, uint64_t len_bytes);
+void aes_ecb_dec_192_sse(const void *in, const void *keys,
+ void *out, uint64_t len_bytes);
+void aes_ecb_dec_128_sse(const void *in, const void *keys,
+ void *out, uint64_t len_bytes);
+
+void aes_ecb_dec_256_sse_no_aesni(const void *in, const void *keys,
+ void *out, uint64_t len_bytes);
+void aes_ecb_dec_192_sse_no_aesni(const void *in, const void *keys,
+ void *out, uint64_t len_bytes);
+void aes_ecb_dec_128_sse_no_aesni(const void *in, const void *keys,
+ void *out, uint64_t len_bytes);
+
+
+void aes_ecb_dec_256_avx(const void *in, const void *keys,
+ void *out, uint64_t len_bytes);
+void aes_ecb_dec_192_avx(const void *in, const void *keys,
+ void *out, uint64_t len_bytes);
+void aes_ecb_dec_128_avx(const void *in, const void *keys,
+ void *out, uint64_t len_bytes);
+
+/* AES128-ECBENC */
+void aes128_ecbenc_x3_sse(const void *in, void *keys,
+ void *out1, void *out2, void *out3);
+void aes128_ecbenc_x3_sse_no_aesni(const void *in, void *keys,
+ void *out1, void *out2, void *out3);
+void aes128_ecbenc_x3_avx(const void *in, void *keys,
+ void *out1, void *out2, void *out3);
+
+/* stitched AES128-CNTR, CRC32 and BIP */
+JOB_AES_HMAC *submit_job_pon_enc_avx(JOB_AES_HMAC *job);
+JOB_AES_HMAC *submit_job_pon_dec_avx(JOB_AES_HMAC *job);
+
+JOB_AES_HMAC *submit_job_pon_enc_sse(JOB_AES_HMAC *job);
+JOB_AES_HMAC *submit_job_pon_dec_sse(JOB_AES_HMAC *job);
+
+JOB_AES_HMAC *submit_job_pon_enc_sse_no_aesni(JOB_AES_HMAC *job);
+JOB_AES_HMAC *submit_job_pon_dec_sse_no_aesni(JOB_AES_HMAC *job);
+
+JOB_AES_HMAC *submit_job_pon_enc_no_ctr_avx(JOB_AES_HMAC *job);
+JOB_AES_HMAC *submit_job_pon_dec_no_ctr_avx(JOB_AES_HMAC *job);
+
+JOB_AES_HMAC *submit_job_pon_enc_no_ctr_sse(JOB_AES_HMAC *job);
+JOB_AES_HMAC *submit_job_pon_dec_no_ctr_sse(JOB_AES_HMAC *job);
+
+JOB_AES_HMAC *submit_job_pon_enc_no_ctr_sse_no_aesni(JOB_AES_HMAC *job);
+JOB_AES_HMAC *submit_job_pon_dec_no_ctr_sse_no_aesni(JOB_AES_HMAC *job);
+#endif /* IMB_ASM_H */
+
+
diff --git a/src/spdk/intel-ipsec-mb/avx/aes128_cbc_dec_by8_avx.asm b/src/spdk/intel-ipsec-mb/avx/aes128_cbc_dec_by8_avx.asm
new file mode 100644
index 000000000..a4de936ff
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx/aes128_cbc_dec_by8_avx.asm
@@ -0,0 +1,306 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+; routine to do AES128 CBC decrypt "by8"
+
+;; clobbers xmm0-15
+
+%include "include/os.asm"
+
+%define CONCAT(a,b) a %+ b
+%define VMOVDQ vmovdqu
+
+%define xdata0 xmm0
+%define xdata1 xmm1
+%define xdata2 xmm2
+%define xdata3 xmm3
+%define xdata4 xmm4
+%define xdata5 xmm5
+%define xdata6 xmm6
+%define xdata7 xmm7
+%define xIV xmm8
+%define xkey0 xmm9
+%define xkey2 xmm10
+%define xkey4 xmm11
+%define xkey6 xmm12
+%define xkey8 xmm13
+%define xkey10 xmm14
+%define xkeytmp xmm15
+
+%ifdef LINUX
+%define p_in rdi
+%define p_IV rsi
+%define p_keys rdx
+%define p_out rcx
+%define num_bytes r8
+%else
+%define p_in rcx
+%define p_IV rdx
+%define p_keys r8
+%define p_out r9
+%define num_bytes rax
+%endif
+
+%define tmp r10
+
+%macro do_aes_load 1
+ do_aes %1, 1
+%endmacro
+
+%macro do_aes_noload 1
+ do_aes %1, 0
+%endmacro
+
+; do_aes num_in_par load_keys
+; This increments p_in, but not p_out
+%macro do_aes 2
+%define %%by %1
+%define %%load_keys %2
+
+%if (%%load_keys)
+ vmovdqa xkey0, [p_keys + 0*16]
+%endif
+
+%assign i 0
+%rep %%by
+ VMOVDQ CONCAT(xdata,i), [p_in + i*16]
+%assign i (i+1)
+%endrep
+
+%if (%%load_keys)
+ vmovdqa xkey2, [p_keys + 2*16]
+%endif
+%assign i 0
+%rep %%by
+ vpxor CONCAT(xdata,i), CONCAT(xdata,i), xkey0
+%assign i (i+1)
+%endrep
+
+ add p_in, 16*%%by
+
+ vmovdqa xkeytmp, [p_keys + 1*16]
+%assign i 0
+%rep %%by
+ vaesdec CONCAT(xdata,i), CONCAT(xdata,i), xkeytmp
+%assign i (i+1)
+%endrep
+
+%if (%%load_keys)
+ vmovdqa xkey4, [p_keys + 4*16]
+%endif
+%assign i 0
+%rep %%by
+ vaesdec CONCAT(xdata,i), CONCAT(xdata,i), xkey2
+%assign i (i+1)
+%endrep
+
+ vmovdqa xkeytmp, [p_keys + 3*16]
+%assign i 0
+%rep %%by
+ vaesdec CONCAT(xdata,i), CONCAT(xdata,i), xkeytmp
+%assign i (i+1)
+%endrep
+
+%if (%%load_keys)
+ vmovdqa xkey6, [p_keys + 6*16]
+%endif
+%assign i 0
+%rep %%by
+ vaesdec CONCAT(xdata,i), CONCAT(xdata,i), xkey4
+%assign i (i+1)
+%endrep
+
+ vmovdqa xkeytmp, [p_keys + 5*16]
+%assign i 0
+%rep %%by
+ vaesdec CONCAT(xdata,i), CONCAT(xdata,i), xkeytmp
+%assign i (i+1)
+%endrep
+
+%if (%%load_keys)
+ vmovdqa xkey8, [p_keys + 8*16]
+%endif
+%assign i 0
+%rep %%by
+ vaesdec CONCAT(xdata,i), CONCAT(xdata,i), xkey6
+%assign i (i+1)
+%endrep
+
+ vmovdqa xkeytmp, [p_keys + 7*16]
+%assign i 0
+%rep %%by
+ vaesdec CONCAT(xdata,i), CONCAT(xdata,i), xkeytmp
+%assign i (i+1)
+%endrep
+
+%if (%%load_keys)
+ vmovdqa xkey10, [p_keys + 10*16]
+%endif
+%assign i 0
+%rep %%by
+ vaesdec CONCAT(xdata,i), CONCAT(xdata,i), xkey8
+%assign i (i+1)
+%endrep
+
+ vmovdqa xkeytmp, [p_keys + 9*16]
+%assign i 0
+%rep %%by
+ vaesdec CONCAT(xdata,i), CONCAT(xdata,i), xkeytmp
+%assign i (i+1)
+%endrep
+
+%assign i 0
+%rep %%by
+ vaesdeclast CONCAT(xdata,i), CONCAT(xdata,i), xkey10
+%assign i (i+1)
+%endrep
+
+ vpxor xdata0, xdata0, xIV
+%assign i 1
+%if (%%by > 1)
+%rep (%%by - 1)
+ VMOVDQ xIV, [p_in + (i-1)*16 - 16*%%by]
+ vpxor CONCAT(xdata,i), CONCAT(xdata,i), xIV
+%assign i (i+1)
+%endrep
+%endif
+ VMOVDQ xIV, [p_in + (i-1)*16 - 16*%%by]
+
+%assign i 0
+%rep %%by
+ VMOVDQ [p_out + i*16], CONCAT(xdata,i)
+%assign i (i+1)
+%endrep
+%endmacro
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+section .text
+
+;; aes_cbc_dec_128_avx(void *in, void *IV, void *keys, void *out, UINT64 num_bytes)
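+;;
+;; Rough C outline of the block scheduling below (sketch; names illustrative):
+;;   lead = (num_bytes / 16) % 8;              /* 0..7 leading blocks */
+;;   if (lead) decrypt_blocks(lead);           /* do_aes_load N       */
+;;   num_bytes &= ~(8*16 - 1);
+;;   while (num_bytes) { decrypt_blocks(8); num_bytes -= 8*16; } /* noload */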
+MKGLOBAL(aes_cbc_dec_128_avx,function,internal)
+aes_cbc_dec_128_avx:
+
+%ifndef LINUX
+ mov num_bytes, [rsp + 8*5]
+%endif
+
+ vmovdqu xIV, [p_IV]
+
+ mov tmp, num_bytes
+ and tmp, 7*16
+ jz mult_of_8_blks
+
+ ; 1 <= tmp <= 7
+ cmp tmp, 4*16
+ jg gt4
+ je eq4
+
+lt4:
+ cmp tmp, 2*16
+ jg eq3
+ je eq2
+eq1:
+ do_aes_load 1
+ add p_out, 1*16
+ and num_bytes, ~7*16
+ jz do_return2
+ jmp main_loop2
+
+eq2:
+ do_aes_load 2
+ add p_out, 2*16
+ and num_bytes, ~7*16
+ jz do_return2
+ jmp main_loop2
+
+eq3:
+ do_aes_load 3
+ add p_out, 3*16
+ and num_bytes, ~7*16
+ jz do_return2
+ jmp main_loop2
+
+eq4:
+ do_aes_load 4
+ add p_out, 4*16
+ and num_bytes, ~7*16
+ jz do_return2
+ jmp main_loop2
+
+gt4:
+ cmp tmp, 6*16
+ jg eq7
+ je eq6
+
+eq5:
+ do_aes_load 5
+ add p_out, 5*16
+ and num_bytes, ~7*16
+ jz do_return2
+ jmp main_loop2
+
+eq6:
+ do_aes_load 6
+ add p_out, 6*16
+ and num_bytes, ~7*16
+ jz do_return2
+ jmp main_loop2
+
+eq7:
+ do_aes_load 7
+ add p_out, 7*16
+ and num_bytes, ~7*16
+ jz do_return2
+ jmp main_loop2
+
+mult_of_8_blks:
+ vmovdqa xkey0, [p_keys + 0*16]
+ vmovdqa xkey2, [p_keys + 2*16]
+ vmovdqa xkey4, [p_keys + 4*16]
+ vmovdqa xkey6, [p_keys + 6*16]
+ vmovdqa xkey8, [p_keys + 8*16]
+ vmovdqa xkey10, [p_keys + 10*16]
+
+main_loop2:
+	; num_bytes is a multiple of 8 blocks (8*16 bytes) and > 0
+ do_aes_noload 8
+ add p_out, 8*16
+ sub num_bytes, 8*16
+ jne main_loop2
+
+do_return2:
+; Don't write back IV
+; vmovdqu [p_IV], xIV
+
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/avx/aes128_cbc_mac_x8.asm b/src/spdk/intel-ipsec-mb/avx/aes128_cbc_mac_x8.asm
new file mode 100644
index 000000000..4d08bfde5
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx/aes128_cbc_mac_x8.asm
@@ -0,0 +1,31 @@
+;;
+;; Copyright (c) 2017-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+;;; Routine to compute CBC-MAC. It is based on 128 bit CBC AES encrypt code.
+
+%define CBC_MAC 1
+%include "avx/aes_cbc_enc_128_x8.asm"
diff --git a/src/spdk/intel-ipsec-mb/avx/aes128_cntr_by8_avx.asm b/src/spdk/intel-ipsec-mb/avx/aes128_cntr_by8_avx.asm
new file mode 100644
index 000000000..d46a29192
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx/aes128_cntr_by8_avx.asm
@@ -0,0 +1,606 @@
+;;
+;; Copyright (c) 2012-2019, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%include "include/os.asm"
+%include "job_aes_hmac.asm"
+%include "include/memcpy.asm"
+%include "include/const.inc"
+%include "include/reg_sizes.asm"
+
+; routine to do AES128 CNTR enc/decrypt "by8"
+; XMM registers are clobbered. Saving/restoring must be done at a higher level
+section .data
+default rel
+
+%ifndef CNTR_CCM_AVX
+MKGLOBAL(byteswap_const,data,internal)
+MKGLOBAL(set_byte15,data,internal)
+MKGLOBAL(ddq_add_1,data,internal)
+MKGLOBAL(ddq_add_2,data,internal)
+MKGLOBAL(ddq_add_3,data,internal)
+MKGLOBAL(ddq_add_4,data,internal)
+MKGLOBAL(ddq_add_5,data,internal)
+MKGLOBAL(ddq_add_6,data,internal)
+MKGLOBAL(ddq_add_7,data,internal)
+MKGLOBAL(ddq_add_8,data,internal)
+%endif ;; CNTR_CCM_AVX
+
+align 16
+byteswap_const: ;DDQ 0x000102030405060708090A0B0C0D0E0F
+ DQ 0x08090A0B0C0D0E0F, 0x0001020304050607
+set_byte15: DQ 0x0000000000000000, 0x0100000000000000
+
+ddq_add_1: ;DDQ 0x00000000000000000000000000000001
+ DQ 0x0000000000000001, 0x0000000000000000
+ddq_add_2: ;DDQ 0x00000000000000000000000000000002
+ DQ 0x0000000000000002, 0x0000000000000000
+ddq_add_3: ;DDQ 0x00000000000000000000000000000003
+ DQ 0x0000000000000003, 0x0000000000000000
+ddq_add_4: ;DDQ 0x00000000000000000000000000000004
+ DQ 0x0000000000000004, 0x0000000000000000
+ddq_add_5: ;DDQ 0x00000000000000000000000000000005
+ DQ 0x0000000000000005, 0x0000000000000000
+ddq_add_6: ;DDQ 0x00000000000000000000000000000006
+ DQ 0x0000000000000006, 0x0000000000000000
+ddq_add_7: ;DDQ 0x00000000000000000000000000000007
+ DQ 0x0000000000000007, 0x0000000000000000
+ddq_add_8: ;DDQ 0x00000000000000000000000000000008
+ DQ 0x0000000000000008, 0x0000000000000000
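+
+; The ddq_add_N values are 128-bit little-endian increments: block N of a
+; batch uses xcounter + ddq_add_N, and the result is byte-swapped with
+; byteswap_const before encryption, since the AES-CTR counter block is
+; big-endian.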
+
+section .text
+
+%define CONCAT(a,b) a %+ b
+%define VMOVDQ vmovdqu
+
+%define xdata0 xmm0
+%define xdata1 xmm1
+%define xpart xmm1
+%define xdata2 xmm2
+%define xdata3 xmm3
+%define xdata4 xmm4
+%define xdata5 xmm5
+%define xdata6 xmm6
+%define xdata7 xmm7
+%define xcounter xmm8
+%define xtmp xmm8
+%define xbyteswap xmm9
+%define xtmp2 xmm9
+%define xkey0 xmm10
+%define xtmp3 xmm10
+%define xkey3 xmm11
+%define xkey6 xmm12
+%define xkey9 xmm13
+%define xkeyA xmm14
+%define xkeyB xmm15
+
+%ifdef CNTR_CCM_AVX
+%ifdef LINUX
+%define job rdi
+%define p_in rsi
+%define p_keys rdx
+%define p_out rcx
+%define num_bytes r8
+%define p_ivlen r9
+%else ;; LINUX
+%define job rcx
+%define p_in rdx
+%define p_keys r8
+%define p_out r9
+%define num_bytes r10
+%define p_ivlen rax
+%endif ;; LINUX
+%define p_IV r11
+%else ;; CNTR_CCM_AVX
+%ifdef LINUX
+%define p_in rdi
+%define p_IV rsi
+%define p_keys rdx
+%define p_out rcx
+%define num_bytes r8
+%define num_bits r8
+%define p_ivlen r9
+%else ;; LINUX
+%define p_in rcx
+%define p_IV rdx
+%define p_keys r8
+%define p_out r9
+%define num_bytes r10
+%define num_bits r10
+%define p_ivlen qword [rsp + 8*6]
+%endif ;; LINUX
+%endif ;; CNTR_CCM_AVX
+
+%define tmp r11
+%define flags r11
+
+%define r_bits r12
+%define tmp2 r13
+%define mask r14
+
+%macro do_aes_load 2
+ do_aes %1, %2, 1
+%endmacro
+
+%macro do_aes_noload 2
+ do_aes %1, %2, 0
+%endmacro
+
+; do_aes num_in_par cntr_type load_keys
+; This increments p_in, but not p_out
+%macro do_aes 3
+%define %%by %1
+%define %%cntr_type %2
+%define %%load_keys %3
+
+%if (%%load_keys)
+ vmovdqa xkey0, [p_keys + 0*16]
+%endif
+
+ vpshufb xdata0, xcounter, xbyteswap
+%assign i 1
+%rep (%%by - 1)
+ vpaddd CONCAT(xdata,i), xcounter, [rel CONCAT(ddq_add_,i)]
+ vpshufb CONCAT(xdata,i), CONCAT(xdata,i), xbyteswap
+%assign i (i + 1)
+%endrep
+
+ vmovdqa xkeyA, [p_keys + 1*16]
+
+ vpxor xdata0, xkey0
+%ifidn %%cntr_type, CNTR_BIT
+ vpaddd xcounter, xcounter, [rel CONCAT(ddq_add_,%%by)]
+%else
+ vpaddq xcounter, xcounter, [rel CONCAT(ddq_add_,%%by)]
+%endif
+
+%assign i 1
+%rep (%%by - 1)
+ vpxor CONCAT(xdata,i), xkey0
+%assign i (i + 1)
+%endrep
+
+ vmovdqa xkeyB, [p_keys + 2*16]
+%assign i 0
+%rep %%by
+ vaesenc CONCAT(xdata,i), CONCAT(xdata,i), xkeyA ; key 1
+%assign i (i+1)
+%endrep
+
+%if (%%load_keys)
+ vmovdqa xkey3, [p_keys + 3*16]
+%endif
+%assign i 0
+%rep %%by
+ vaesenc CONCAT(xdata,i), CONCAT(xdata,i), xkeyB ; key 2
+%assign i (i+1)
+%endrep
+
+ add p_in, 16*%%by
+
+ vmovdqa xkeyB, [p_keys + 4*16]
+%assign i 0
+%rep %%by
+ vaesenc CONCAT(xdata,i), CONCAT(xdata,i), xkey3 ; key 3
+%assign i (i+1)
+%endrep
+
+ vmovdqa xkeyA, [p_keys + 5*16]
+%assign i 0
+%rep %%by
+ vaesenc CONCAT(xdata,i), CONCAT(xdata,i), xkeyB ; key 4
+%assign i (i+1)
+%endrep
+
+%if (%%load_keys)
+ vmovdqa xkey6, [p_keys + 6*16]
+%endif
+%assign i 0
+%rep %%by
+ vaesenc CONCAT(xdata,i), CONCAT(xdata,i), xkeyA ; key 5
+%assign i (i+1)
+%endrep
+
+ vmovdqa xkeyA, [p_keys + 7*16]
+%assign i 0
+%rep %%by
+ vaesenc CONCAT(xdata,i), CONCAT(xdata,i), xkey6 ; key 6
+%assign i (i+1)
+%endrep
+
+ vmovdqa xkeyB, [p_keys + 8*16]
+%assign i 0
+%rep %%by
+ vaesenc CONCAT(xdata,i), CONCAT(xdata,i), xkeyA ; key 7
+%assign i (i+1)
+%endrep
+
+%if (%%load_keys)
+ vmovdqa xkey9, [p_keys + 9*16]
+%endif
+%assign i 0
+%rep %%by
+ vaesenc CONCAT(xdata,i), CONCAT(xdata,i), xkeyB ; key 8
+%assign i (i+1)
+%endrep
+
+ vmovdqa xkeyB, [p_keys + 10*16]
+%assign i 0
+%rep %%by
+ vaesenc CONCAT(xdata,i), CONCAT(xdata,i), xkey9 ; key 9
+%assign i (i+1)
+%endrep
+
+%assign i 0
+%rep %%by
+ vaesenclast CONCAT(xdata,i), CONCAT(xdata,i), xkeyB ; key 10
+%assign i (i+1)
+%endrep
+
+%assign i 0
+%rep (%%by / 2)
+%assign j (i+1)
+ VMOVDQ xkeyA, [p_in + i*16 - 16*%%by]
+ VMOVDQ xkeyB, [p_in + j*16 - 16*%%by]
+ vpxor CONCAT(xdata,i), CONCAT(xdata,i), xkeyA
+ vpxor CONCAT(xdata,j), CONCAT(xdata,j), xkeyB
+%assign i (i+2)
+%endrep
+%if (i < %%by)
+ VMOVDQ xkeyA, [p_in + i*16 - 16*%%by]
+ vpxor CONCAT(xdata,i), CONCAT(xdata,i), xkeyA
+%endif
+
+%ifidn %%cntr_type, CNTR_BIT
+ ;; check if this is the end of the message
+ mov tmp, num_bytes
+ and tmp, ~(%%by*16)
+ jnz %%skip_preserve
+ ;; Check if there is a partial byte
+ or r_bits, r_bits
+ jz %%skip_preserve
+
+%assign idx (%%by - 1)
+ ;; Load output to get last partial byte
+ vmovdqu xtmp, [p_out + idx * 16]
+
+ ;; Save RCX in temporary GP register
+ mov tmp, rcx
+ mov mask, 0xff
+ mov cl, BYTE(r_bits)
+ shr mask, cl ;; e.g. 3 remaining bits -> mask = 00011111
+ mov rcx, tmp
+
+ vmovq xtmp2, mask
+ vpslldq xtmp2, 15
+ ;; At this point, xtmp2 contains a mask with all 0s, but with some ones
+ ;; in the partial byte
+
+ ;; Clear all the bits that do not need to be preserved from the output
+ vpand xtmp, xtmp, xtmp2
+
+ ;; Clear all bits from the input that are not to be ciphered
+ vpandn CONCAT(xdata,idx), xtmp2, CONCAT(xdata,idx)
+ vpor CONCAT(xdata,idx), xtmp
+
+%%skip_preserve:
+%endif
+
+%assign i 0
+%rep %%by
+ VMOVDQ [p_out + i*16], CONCAT(xdata,i)
+%assign i (i+1)
+%endrep
+%endmacro
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; Macro performing AES-CTR.
+;;
+%macro DO_CNTR 1
+%define %%CNTR_TYPE %1 ; [in] Type of CNTR operation to do (CNTR/CNTR_BIT/CCM)
+
+%ifidn %%CNTR_TYPE, CCM
+ mov p_in, [job + _src]
+ add p_in, [job + _cipher_start_src_offset_in_bytes]
+ mov p_ivlen, [job + _iv_len_in_bytes]
+ mov num_bytes, [job + _msg_len_to_cipher_in_bytes]
+ mov p_keys, [job + _aes_enc_key_expanded]
+ mov p_out, [job + _dst]
+
+ vmovdqa xbyteswap, [rel byteswap_const]
+ ;; Prepare IV ;;
+
+ ;; Byte 0: flags with L'
+ ;; Calculate L' = 15 - Nonce length - 1 = 14 - IV length
+ mov flags, 14
+ sub flags, p_ivlen
+ vmovd xcounter, DWORD(flags)
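+	;; e.g. a 12-byte nonce gives flags = 14 - 12 = 2 (i.e. L' = 2)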
+ ;; Bytes 1 - 13: Nonce (7 - 13 bytes long)
+
+ ;; Bytes 1 - 7 are always copied (first 7 bytes)
+ mov p_IV, [job + _iv]
+ vpinsrb xcounter, [p_IV], 1
+ vpinsrw xcounter, [p_IV + 1], 1
+ vpinsrd xcounter, [p_IV + 3], 1
+
+ cmp p_ivlen, 7
+ je _finish_nonce_move
+
+ cmp p_ivlen, 8
+ je _iv_length_8
+ cmp p_ivlen, 9
+ je _iv_length_9
+ cmp p_ivlen, 10
+ je _iv_length_10
+ cmp p_ivlen, 11
+ je _iv_length_11
+ cmp p_ivlen, 12
+ je _iv_length_12
+
+ ;; Bytes 8 - 13
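+	;; Note: the labels below fall through on purpose - each IV length
+	;; copies only its extra bytes and then reuses the shorter cases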
+_iv_length_13:
+ vpinsrb xcounter, [p_IV + 12], 13
+_iv_length_12:
+ vpinsrb xcounter, [p_IV + 11], 12
+_iv_length_11:
+ vpinsrd xcounter, [p_IV + 7], 2
+ jmp _finish_nonce_move
+_iv_length_10:
+ vpinsrb xcounter, [p_IV + 9], 10
+_iv_length_9:
+ vpinsrb xcounter, [p_IV + 8], 9
+_iv_length_8:
+ vpinsrb xcounter, [p_IV + 7], 8
+
+_finish_nonce_move:
+ ; last byte = 1
+ vpor xcounter, [rel set_byte15]
+%else ;; CNTR/CNTR_BIT
+%ifndef LINUX
+ mov num_bytes, [rsp + 8*5] ; arg5
+%endif
+
+%ifidn %%CNTR_TYPE, CNTR_BIT
+ push r12
+ push r13
+ push r14
+%endif
+
+ vmovdqa xbyteswap, [rel byteswap_const]
+%ifidn %%CNTR_TYPE, CNTR
+ test p_ivlen, 16
+ jnz %%iv_is_16_bytes
+ ; Read 12 bytes: Nonce + ESP IV. Then pad with block counter 0x00000001
+ mov DWORD(tmp), 0x01000000
+ vpinsrq xcounter, [p_IV], 0
+ vpinsrd xcounter, [p_IV + 8], 2
+ vpinsrd xcounter, DWORD(tmp), 3
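+	;; Note: 0x01000000 is the 32-bit counter value 1 in big-endian byte
+	;; order; the full counter block is byte-swapped at %%bswap_iv below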
+
+%else ;; CNTR_BIT
+ ; Read 16 byte IV: Nonce + 8-byte block counter (BE)
+ vmovdqu xcounter, [p_IV]
+%endif
+%endif ;; CNTR/CNTR_BIT/CCM
+%%bswap_iv:
+ vpshufb xcounter, xbyteswap
+
+ ;; calculate len
+ ;; convert bits to bytes (message length in bits for CNTR_BIT)
+%ifidn %%CNTR_TYPE, CNTR_BIT
+ mov r_bits, num_bits
+ add num_bits, 7
+ shr num_bits, 3 ; "num_bits" and "num_bytes" registers are the same
+ and r_bits, 7 ; Check if there are remainder bits (0-7)
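+	;; e.g. num_bits = 20: num_bytes = (20 + 7) >> 3 = 3, r_bits = 4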
+%endif
+
+ mov tmp, num_bytes
+ and tmp, 7*16
+	jz	%%chk	; no 1-7 block remainder - only full groups of 8 blocks (and possibly a final partial block)
+
+ ; 1 <= tmp <= 7
+ cmp tmp, 4*16
+ jg %%gt4
+ je %%eq4
+
+%%lt4:
+ cmp tmp, 2*16
+ jg %%eq3
+ je %%eq2
+%%eq1:
+ do_aes_load 1, %%CNTR_TYPE
+ add p_out, 1*16
+ jmp %%chk
+
+%%eq2:
+ do_aes_load 2, %%CNTR_TYPE
+ add p_out, 2*16
+ jmp %%chk
+
+%%eq3:
+ do_aes_load 3, %%CNTR_TYPE
+ add p_out, 3*16
+ jmp %%chk
+
+%%eq4:
+ do_aes_load 4, %%CNTR_TYPE
+ add p_out, 4*16
+ jmp %%chk
+
+%%gt4:
+ cmp tmp, 6*16
+ jg %%eq7
+ je %%eq6
+
+%%eq5:
+ do_aes_load 5, %%CNTR_TYPE
+ add p_out, 5*16
+ jmp %%chk
+
+%%eq6:
+ do_aes_load 6, %%CNTR_TYPE
+ add p_out, 6*16
+ jmp %%chk
+
+%%eq7:
+ do_aes_load 7, %%CNTR_TYPE
+ add p_out, 7*16
+ ; fall through to chk
+%%chk:
+ and num_bytes, ~(7*16)
+ jz %%do_return2
+
+ cmp num_bytes, 16
+ jb %%last
+
+ ; process multiples of 8 blocks
+ vmovdqa xkey0, [p_keys + 0*16]
+ vmovdqa xkey3, [p_keys + 3*16]
+ vmovdqa xkey6, [p_keys + 6*16]
+ vmovdqa xkey9, [p_keys + 9*16]
+ jmp %%main_loop2
+
+align 32
+%%main_loop2:
+ ; num_bytes is a multiple of 8 blocks + partial bytes
+ do_aes_noload 8, %%CNTR_TYPE
+ add p_out, 8*16
+ sub num_bytes, 8*16
+ cmp num_bytes, 8*16
+ jae %%main_loop2
+
+ ; Check if there is a partial block
+ or num_bytes, num_bytes
+ jnz %%last
+
+%%do_return2:
+%ifidn %%CNTR_TYPE, CCM
+ mov rax, job
+ or dword [rax + _status], STS_COMPLETED_AES
+%endif
+
+%ifidn %%CNTR_TYPE, CNTR_BIT
+ pop r14
+ pop r13
+ pop r12
+%endif
+
+ ret
+
+%%last:
+
+ ; load partial block into XMM register
+ simd_load_avx_15_1 xpart, p_in, num_bytes
+
+%%final_ctr_enc:
+ ; Encryption of a single partial block
+ vpshufb xcounter, xbyteswap
+ vmovdqa xdata0, xcounter
+ vpxor xdata0, [p_keys + 16*0]
+%assign i 1
+%rep 9
+ vaesenc xdata0, [p_keys + 16*i]
+%assign i (i+1)
+%endrep
+ ; created keystream
+ vaesenclast xdata0, [p_keys + 16*i]
+
+ ; xor keystream with the message (scratch)
+ vpxor xdata0, xpart
+
+%ifidn %%CNTR_TYPE, CNTR_BIT
+ ;; Check if there is a partial byte
+ or r_bits, r_bits
+ jz %%store_output
+
+ ;; Load output to get last partial byte
+ simd_load_avx_15_1 xtmp, p_out, num_bytes
+
+ ;; Save RCX in temporary GP register
+ mov tmp, rcx
+ mov mask, 0xff
+%ifidn r_bits, rcx
+%error "r_bits cannot be mapped to rcx!"
+%endif
+ mov cl, BYTE(r_bits)
+ shr mask, cl ;; e.g. 3 remaining bits -> mask = 00011111
+ mov rcx, tmp
+
+ vmovq xtmp2, mask
+
+ ;; Get number of full bytes in last block of 16 bytes
+ mov tmp, num_bytes
+ dec tmp
+ XVPSLLB xtmp2, tmp, xtmp3, tmp2
+ ;; At this point, xtmp2 contains a mask with all 0s, but with some ones
+ ;; in the partial byte
+
+ ;; Clear all the bits that do not need to be preserved from the output
+ vpand xtmp, xtmp, xtmp2
+
+ ;; Clear the bits from the input that are not to be ciphered
+ vpandn xdata0, xtmp2, xdata0
+ vpor xdata0, xtmp
+%endif
+
+%%store_output:
+ ; copy result into the output buffer
+ simd_store_avx_15 p_out, xdata0, num_bytes, tmp, rax
+
+ jmp %%do_return2
+
+%%iv_is_16_bytes:
+ ; Read 16 byte IV: Nonce + ESP IV + block counter (BE)
+ vmovdqu xcounter, [p_IV]
+ jmp %%bswap_iv
+%endmacro
+
+align 32
+%ifdef CNTR_CCM_AVX
+; JOB_AES_HMAC * aes_cntr_ccm_128_avx(JOB_AES_HMAC *job)
+; arg 1 : job
+MKGLOBAL(aes_cntr_ccm_128_avx,function,internal)
+aes_cntr_ccm_128_avx:
+ DO_CNTR CCM
+%else
+;; aes_cntr_128_avx(void *in, void *IV, void *keys, void *out, UINT64 num_bytes,
+;; UINT64 iv_len)
+MKGLOBAL(aes_cntr_128_avx,function,internal)
+aes_cntr_128_avx:
+ DO_CNTR CNTR
+
+;; aes_cntr_bit_128_avx(void *in, void *IV, void *keys, void *out, UINT64 num_bits,
+;; UINT64 iv_len)
+MKGLOBAL(aes_cntr_bit_128_avx,function,internal)
+aes_cntr_bit_128_avx:
+ DO_CNTR CNTR_BIT
+%endif ;; CNTR_CCM_AVX
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/avx/aes128_cntr_ccm_by8_avx.asm b/src/spdk/intel-ipsec-mb/avx/aes128_cntr_ccm_by8_avx.asm
new file mode 100644
index 000000000..1a4c11602
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx/aes128_cntr_ccm_by8_avx.asm
@@ -0,0 +1,32 @@
+;;
+;; Copyright (c) 2019, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
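+;; Thin wrapper: defining CNTR_CCM_AVX before including the AES128 CTR
+;; implementation selects the job-based CCM entry point
+;; (aes_cntr_ccm_128_avx) instead of the plain CTR entry points.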
+%define CNTR_CCM_AVX
+%ifndef AES_CNTR_CCM_128
+%define AES_CNTR_CCM_128 aes_cntr_ccm_128_avx
+%endif
+%include "avx/aes128_cntr_by8_avx.asm"
diff --git a/src/spdk/intel-ipsec-mb/avx/aes192_cbc_dec_by8_avx.asm b/src/spdk/intel-ipsec-mb/avx/aes192_cbc_dec_by8_avx.asm
new file mode 100644
index 000000000..9952c2552
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx/aes192_cbc_dec_by8_avx.asm
@@ -0,0 +1,328 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+; routine to do AES192 CBC decrypt "by8"
+
+; XMM registers are clobbered. Saving/restoring must be done at a higher level
+%include "include/os.asm"
+
+%define CONCAT(a,b) a %+ b
+%define VMOVDQ vmovdqu
+
+%define xdata0 xmm0
+%define xdata1 xmm1
+%define xdata2 xmm2
+%define xdata3 xmm3
+%define xdata4 xmm4
+%define xdata5 xmm5
+%define xdata6 xmm6
+%define xdata7 xmm7
+%define xIV xmm8
+%define xkey0 xmm9
+%define xkey3 xmm10
+%define xkey6 xmm11
+%define xkey9 xmm12
+%define xkey12 xmm13
+%define xkeyA xmm14
+%define xkeyB xmm15
+
+%ifdef LINUX
+%define p_in rdi
+%define p_IV rsi
+%define p_keys rdx
+%define p_out rcx
+%define num_bytes r8
+%else
+%define p_in rcx
+%define p_IV rdx
+%define p_keys r8
+%define p_out r9
+%define num_bytes rax
+%endif
+
+%define tmp r10
+
+%macro do_aes_load 1
+ do_aes %1, 1
+%endmacro
+
+%macro do_aes_noload 1
+ do_aes %1, 0
+%endmacro
+
+; do_aes num_in_par load_keys
+; This increments p_in, but not p_out
+%macro do_aes 2
+%define %%by %1
+%define %%load_keys %2
+
+%if (%%load_keys)
+ vmovdqa xkey0, [p_keys + 0*16]
+%endif
+
+%assign i 0
+%rep %%by
+ VMOVDQ CONCAT(xdata,i), [p_in + i*16]
+%assign i (i+1)
+%endrep
+
+ vmovdqa xkeyA, [p_keys + 1*16]
+
+%assign i 0
+%rep %%by
+ vpxor CONCAT(xdata,i), CONCAT(xdata,i), xkey0
+%assign i (i+1)
+%endrep
+
+ vmovdqa xkeyB, [p_keys + 2*16]
+
+%assign i 0
+%rep %%by
+ vaesdec CONCAT(xdata,i), CONCAT(xdata,i), xkeyA
+%assign i (i+1)
+%endrep
+
+ add p_in, 16*%%by
+
+%if (%%load_keys)
+ vmovdqa xkey3, [p_keys + 3*16]
+%endif
+
+%assign i 0
+%rep %%by
+ vaesdec CONCAT(xdata,i), CONCAT(xdata,i), xkeyB
+%assign i (i+1)
+%endrep
+
+ vmovdqa xkeyA, [p_keys + 4*16]
+
+%assign i 0
+%rep %%by
+ vaesdec CONCAT(xdata,i), CONCAT(xdata,i), xkey3
+%assign i (i+1)
+%endrep
+
+ vmovdqa xkeyB, [p_keys + 5*16]
+
+%assign i 0
+%rep %%by
+ vaesdec CONCAT(xdata,i), CONCAT(xdata,i), xkeyA
+%assign i (i+1)
+%endrep
+
+%if (%%load_keys)
+ vmovdqa xkey6, [p_keys + 6*16]
+%endif
+
+%assign i 0
+%rep %%by
+ vaesdec CONCAT(xdata,i), CONCAT(xdata,i), xkeyB
+%assign i (i+1)
+%endrep
+
+ vmovdqa xkeyA, [p_keys + 7*16]
+
+%assign i 0
+%rep %%by
+ vaesdec CONCAT(xdata,i), CONCAT(xdata,i), xkey6
+%assign i (i+1)
+%endrep
+
+ vmovdqa xkeyB, [p_keys + 8*16]
+
+%assign i 0
+%rep %%by
+ vaesdec CONCAT(xdata,i), CONCAT(xdata,i), xkeyA
+%assign i (i+1)
+%endrep
+
+%if (%%load_keys)
+ vmovdqa xkey9, [p_keys + 9*16]
+%endif
+
+%assign i 0
+%rep %%by
+ vaesdec CONCAT(xdata,i), CONCAT(xdata,i), xkeyB
+%assign i (i+1)
+%endrep
+
+ vmovdqa xkeyA, [p_keys + 10*16]
+
+%assign i 0
+%rep %%by
+ vaesdec CONCAT(xdata,i), CONCAT(xdata,i), xkey9
+%assign i (i+1)
+%endrep
+
+ vmovdqa xkeyB, [p_keys + 11*16]
+
+%assign i 0
+%rep %%by
+ vaesdec CONCAT(xdata,i), CONCAT(xdata,i), xkeyA
+%assign i (i+1)
+%endrep
+
+%if (%%load_keys)
+ vmovdqa xkey12, [p_keys + 12*16]
+%endif
+
+%assign i 0
+%rep %%by
+ vaesdec CONCAT(xdata,i), CONCAT(xdata,i), xkeyB
+%assign i (i+1)
+%endrep
+
+%assign i 0
+%rep %%by
+ vaesdeclast CONCAT(xdata,i), CONCAT(xdata,i), xkey12
+%assign i (i+1)
+%endrep
+
+ vpxor xdata0, xdata0, xIV
+%assign i 1
+%if (%%by > 1)
+%rep (%%by - 1)
+ VMOVDQ xIV, [p_in + (i-1)*16 - 16*%%by]
+ vpxor CONCAT(xdata,i), CONCAT(xdata,i), xIV
+%assign i (i+1)
+%endrep
+%endif
+ VMOVDQ xIV, [p_in + (i-1)*16 - 16*%%by]
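+	; xIV now holds the last ciphertext block of this group - it is the
+	; CBC chaining value for the next do_aes invocation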
+
+%assign i 0
+%rep %%by
+ VMOVDQ [p_out + i*16], CONCAT(xdata,i)
+%assign i (i+1)
+%endrep
+%endmacro
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+section .text
+
+;; aes_cbc_dec_192_avx(void *in, void *IV, void *keys, void *out, UINT64 num_bytes)
+MKGLOBAL(aes_cbc_dec_192_avx,function,internal)
+aes_cbc_dec_192_avx:
+
+%ifndef LINUX
+ mov num_bytes, [rsp + 8*5]
+%endif
+
+ vmovdqu xIV, [p_IV]
+
+ mov tmp, num_bytes
+ and tmp, 7*16
+ jz mult_of_8_blks
+
+ ; 1 <= tmp <= 7
+ cmp tmp, 4*16
+ jg gt4
+ je eq4
+
+lt4:
+ cmp tmp, 2*16
+ jg eq3
+ je eq2
+eq1:
+ do_aes_load 1
+ add p_out, 1*16
+ and num_bytes, ~7*16
+ jz do_return2
+ jmp main_loop2
+
+eq2:
+ do_aes_load 2
+ add p_out, 2*16
+ and num_bytes, ~7*16
+ jz do_return2
+ jmp main_loop2
+
+eq3:
+ do_aes_load 3
+ add p_out, 3*16
+ and num_bytes, ~7*16
+ jz do_return2
+ jmp main_loop2
+
+eq4:
+ do_aes_load 4
+ add p_out, 4*16
+ and num_bytes, ~7*16
+ jz do_return2
+ jmp main_loop2
+
+gt4:
+ cmp tmp, 6*16
+ jg eq7
+ je eq6
+
+eq5:
+ do_aes_load 5
+ add p_out, 5*16
+ and num_bytes, ~7*16
+ jz do_return2
+ jmp main_loop2
+
+eq6:
+ do_aes_load 6
+ add p_out, 6*16
+ and num_bytes, ~7*16
+ jz do_return2
+ jmp main_loop2
+
+eq7:
+ do_aes_load 7
+ add p_out, 7*16
+ and num_bytes, ~7*16
+ jz do_return2
+ jmp main_loop2
+
+mult_of_8_blks:
+ vmovdqa xkey0, [p_keys + 0*16]
+ vmovdqa xkey3, [p_keys + 3*16]
+ vmovdqa xkey6, [p_keys + 6*16]
+ vmovdqa xkey9, [p_keys + 9*16]
+ vmovdqa xkey12, [p_keys + 12*16]
+
+main_loop2:
+	; num_bytes is a multiple of 8 blocks (8*16 bytes) and > 0
+ do_aes_noload 8
+ add p_out, 8*16
+ sub num_bytes, 8*16
+ jne main_loop2
+
+do_return2:
+; Don't write back IV
+; vmovdqu [p_IV], xIV
+
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/avx/aes192_cntr_by8_avx.asm b/src/spdk/intel-ipsec-mb/avx/aes192_cntr_by8_avx.asm
new file mode 100644
index 000000000..e926b4413
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx/aes192_cntr_by8_avx.asm
@@ -0,0 +1,504 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%include "include/os.asm"
+%include "include/memcpy.asm"
+%include "include/const.inc"
+%include "include/reg_sizes.asm"
+
+; routine to do AES192 CNTR enc/decrypt "by8"
+; XMM registers are clobbered. Saving/restoring must be done at a higher level
+
+extern byteswap_const
+extern ddq_add_1, ddq_add_2, ddq_add_3, ddq_add_4
+extern ddq_add_5, ddq_add_6, ddq_add_7, ddq_add_8
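+;; (the constants above are defined and exported by avx/aes128_cntr_by8_avx.asm)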
+
+%define CONCAT(a,b) a %+ b
+%define VMOVDQ vmovdqu
+
+%define xdata0 xmm0
+%define xdata1 xmm1
+%define xpart xmm1
+%define xdata2 xmm2
+%define xdata3 xmm3
+%define xdata4 xmm4
+%define xdata5 xmm5
+%define xdata6 xmm6
+%define xdata7 xmm7
+%define xcounter xmm8
+%define xtmp xmm8
+%define xbyteswap xmm9
+%define xtmp2 xmm9
+%define xkey0 xmm10
+%define xtmp3 xmm10
+%define xkey4 xmm11
+%define xkey8 xmm12
+%define xkey12 xmm13
+%define xkeyA xmm14
+%define xkeyB xmm15
+
+%ifdef LINUX
+%define p_in rdi
+%define p_IV rsi
+%define p_keys rdx
+%define p_out rcx
+%define num_bytes r8
+%define num_bits r8
+%define p_ivlen r9
+%else
+%define p_in rcx
+%define p_IV rdx
+%define p_keys r8
+%define p_out r9
+%define num_bytes r10
+%define num_bits r10
+%define p_ivlen qword [rsp + 8*6]
+%endif
+
+%define tmp r11
+
+%define r_bits r12
+%define tmp2 r13
+%define mask r14
+
+%macro do_aes_load 2
+ do_aes %1, %2, 1
+%endmacro
+
+%macro do_aes_noload 2
+ do_aes %1, %2, 0
+%endmacro
+
+; do_aes num_in_par cntr_type load_keys
+; This increments p_in, but not p_out
+%macro do_aes 3
+%define %%by %1
+%define %%cntr_type %2
+%define %%load_keys %3
+
+%if (%%load_keys)
+ vmovdqa xkey0, [p_keys + 0*16]
+%endif
+
+ vpshufb xdata0, xcounter, xbyteswap
+%assign i 1
+%rep (%%by - 1)
+ vpaddd CONCAT(xdata,i), xcounter, [rel CONCAT(ddq_add_,i)]
+ vpshufb CONCAT(xdata,i), CONCAT(xdata,i), xbyteswap
+%assign i (i + 1)
+%endrep
+
+ vmovdqa xkeyA, [p_keys + 1*16]
+
+ vpxor xdata0, xkey0
+%ifidn %%cntr_type, CNTR_BIT
+ vpaddd xcounter, xcounter, [rel CONCAT(ddq_add_,%%by)]
+%else
+ vpaddq xcounter, xcounter, [rel CONCAT(ddq_add_,%%by)]
+%endif
+
+%assign i 1
+%rep (%%by - 1)
+ vpxor CONCAT(xdata,i), xkey0
+%assign i (i + 1)
+%endrep
+
+ vmovdqa xkeyB, [p_keys + 2*16]
+%assign i 0
+%rep %%by
+ vaesenc CONCAT(xdata,i), CONCAT(xdata,i), xkeyA ; key 1
+%assign i (i+1)
+%endrep
+
+ vmovdqa xkeyA, [p_keys + 3*16]
+%assign i 0
+%rep %%by
+ vaesenc CONCAT(xdata,i), CONCAT(xdata,i), xkeyB ; key 2
+%assign i (i+1)
+%endrep
+
+ add p_in, 16*%%by
+
+%if (%%load_keys)
+ vmovdqa xkey4, [p_keys + 4*16]
+%endif
+%assign i 0
+%rep %%by
+ vaesenc CONCAT(xdata,i), CONCAT(xdata,i), xkeyA ; key 3
+%assign i (i+1)
+%endrep
+
+ vmovdqa xkeyA, [p_keys + 5*16]
+%assign i 0
+%rep %%by
+ vaesenc CONCAT(xdata,i), CONCAT(xdata,i), xkey4 ; key 4
+%assign i (i+1)
+%endrep
+
+ vmovdqa xkeyB, [p_keys + 6*16]
+%assign i 0
+%rep %%by
+ vaesenc CONCAT(xdata,i), CONCAT(xdata,i), xkeyA ; key 5
+%assign i (i+1)
+%endrep
+
+ vmovdqa xkeyA, [p_keys + 7*16]
+%assign i 0
+%rep %%by
+ vaesenc CONCAT(xdata,i), CONCAT(xdata,i), xkeyB ; key 6
+%assign i (i+1)
+%endrep
+
+%if (%%load_keys)
+ vmovdqa xkey8, [p_keys + 8*16]
+%endif
+%assign i 0
+%rep %%by
+ vaesenc CONCAT(xdata,i), CONCAT(xdata,i), xkeyA ; key 7
+%assign i (i+1)
+%endrep
+
+ vmovdqa xkeyA, [p_keys + 9*16]
+%assign i 0
+%rep %%by
+ vaesenc CONCAT(xdata,i), CONCAT(xdata,i), xkey8 ; key 8
+%assign i (i+1)
+%endrep
+
+ vmovdqa xkeyB, [p_keys + 10*16]
+%assign i 0
+%rep %%by
+ vaesenc CONCAT(xdata,i), CONCAT(xdata,i), xkeyA ; key 9
+%assign i (i+1)
+%endrep
+
+ vmovdqa xkeyA, [p_keys + 11*16]
+%assign i 0
+%rep %%by
+ vaesenc CONCAT(xdata,i), CONCAT(xdata,i), xkeyB ; key 10
+%assign i (i+1)
+%endrep
+
+%if (%%load_keys)
+ vmovdqa xkey12, [p_keys + 12*16]
+%endif
+%assign i 0
+%rep %%by
+ vaesenc CONCAT(xdata,i), CONCAT(xdata,i), xkeyA ; key 11
+%assign i (i+1)
+%endrep
+
+
+%assign i 0
+%rep %%by
+ vaesenclast CONCAT(xdata,i), CONCAT(xdata,i), xkey12 ; key 12
+%assign i (i+1)
+%endrep
+
+
+%assign i 0
+%rep (%%by / 2)
+%assign j (i+1)
+ VMOVDQ xkeyA, [p_in + i*16 - 16*%%by]
+ VMOVDQ xkeyB, [p_in + j*16 - 16*%%by]
+ vpxor CONCAT(xdata,i), CONCAT(xdata,i), xkeyA
+ vpxor CONCAT(xdata,j), CONCAT(xdata,j), xkeyB
+%assign i (i+2)
+%endrep
+%if (i < %%by)
+ VMOVDQ xkeyA, [p_in + i*16 - 16*%%by]
+ vpxor CONCAT(xdata,i), CONCAT(xdata,i), xkeyA
+%endif
+
+%ifidn %%cntr_type, CNTR_BIT
+ ;; check if this is the end of the message
+ mov tmp, num_bytes
+ and tmp, ~(%%by*16)
+ jnz %%skip_preserve
+ ;; Check if there is a partial byte
+ or r_bits, r_bits
+ jz %%skip_preserve
+
+%assign idx (%%by - 1)
+ ;; Load output to get last partial byte
+ vmovdqu xtmp, [p_out + idx * 16]
+
+ ;; Save RCX in temporary GP register
+ mov tmp, rcx
+ mov mask, 0xff
+ mov cl, BYTE(r_bits)
+ shr mask, cl ;; e.g. 3 remaining bits -> mask = 00011111
+ mov rcx, tmp
+
+ vmovq xtmp2, mask
+ vpslldq xtmp2, 15
+ ;; At this point, xtmp2 contains a mask with all 0s, but with some ones
+ ;; in the partial byte
+
+ ;; Clear all the bits that do not need to be preserved from the output
+ vpand xtmp, xtmp, xtmp2
+
+ ;; Clear all bits from the input that are not to be ciphered
+ vpandn CONCAT(xdata,idx), xtmp2, CONCAT(xdata,idx)
+ vpor CONCAT(xdata,idx), xtmp
+
+%%skip_preserve:
+%endif
+
+%assign i 0
+%rep %%by
+ VMOVDQ [p_out + i*16], CONCAT(xdata,i)
+%assign i (i+1)
+%endrep
+%endmacro
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+section .text
+;; Macro performing AES-CTR.
+;;
+%macro DO_CNTR 1
+%define %%CNTR_TYPE %1 ; [in] Type of CNTR operation to do (CNTR/CNTR_BIT/CCM)
+
+%ifndef LINUX
+ mov num_bytes, [rsp + 8*5]
+%endif
+
+%ifidn %%CNTR_TYPE, CNTR_BIT
+ push r12
+ push r13
+ push r14
+%endif
+
+ vmovdqa xbyteswap, [rel byteswap_const]
+%ifidn %%CNTR_TYPE, CNTR
+ test p_ivlen, 16
+ jnz %%iv_is_16_bytes
+ ; Read 12 bytes: Nonce + ESP IV. Then pad with block counter 0x00000001
+ mov DWORD(tmp), 0x01000000
+ vpinsrq xcounter, [p_IV], 0
+ vpinsrd xcounter, [p_IV + 8], 2
+ vpinsrd xcounter, DWORD(tmp), 3
+
+%else ;; CNTR_BIT
+ ; Read 16 byte IV: Nonce + 8-byte block counter (BE)
+ vmovdqu xcounter, [p_IV]
+%endif
+%%bswap_iv:
+ vpshufb xcounter, xbyteswap
+
+ ;; calculate len
+ ;; convert bits to bytes (message length in bits for CNTR_BIT)
+%ifidn %%CNTR_TYPE, CNTR_BIT
+ mov r_bits, num_bits
+ add num_bits, 7
+ shr num_bits, 3 ; "num_bits" and "num_bytes" registers are the same
+ and r_bits, 7 ; Check if there are remainder bits (0-7)
+%endif
+
+ mov tmp, num_bytes
+ and tmp, 7*16
+	jz	%%chk	; no 1-7 block remainder - only full groups of 8 blocks (and possibly a final partial block)
+
+ ; 1 <= tmp <= 7
+ cmp tmp, 4*16
+ jg %%gt4
+ je %%eq4
+
+%%lt4:
+ cmp tmp, 2*16
+ jg %%eq3
+ je %%eq2
+%%eq1:
+ do_aes_load 1, %%CNTR_TYPE
+ add p_out, 1*16
+ jmp %%chk
+
+%%eq2:
+ do_aes_load 2, %%CNTR_TYPE
+ add p_out, 2*16
+ jmp %%chk
+
+%%eq3:
+ do_aes_load 3, %%CNTR_TYPE
+ add p_out, 3*16
+ jmp %%chk
+
+%%eq4:
+ do_aes_load 4, %%CNTR_TYPE
+ add p_out, 4*16
+ jmp %%chk
+
+%%gt4:
+ cmp tmp, 6*16
+ jg %%eq7
+ je %%eq6
+
+%%eq5:
+ do_aes_load 5, %%CNTR_TYPE
+ add p_out, 5*16
+ jmp %%chk
+
+%%eq6:
+ do_aes_load 6, %%CNTR_TYPE
+ add p_out, 6*16
+ jmp %%chk
+
+%%eq7:
+ do_aes_load 7, %%CNTR_TYPE
+ add p_out, 7*16
+ ; fall through to chk
+%%chk:
+ and num_bytes, ~(7*16)
+ jz %%do_return2
+
+ cmp num_bytes, 16
+ jb %%last
+
+ ; process multiples of 8 blocks
+ vmovdqa xkey0, [p_keys + 0*16]
+ vmovdqa xkey4, [p_keys + 4*16]
+ vmovdqa xkey8, [p_keys + 8*16]
+ vmovdqa xkey12, [p_keys + 12*16]
+ jmp %%main_loop2
+
+align 32
+%%main_loop2:
+ ; num_bytes is a multiple of 8 blocks + partial bytes
+ do_aes_noload 8, %%CNTR_TYPE
+ add p_out, 8*16
+ sub num_bytes, 8*16
+ cmp num_bytes, 8*16
+ jae %%main_loop2
+
+ ; Check if there is a partial block
+ or num_bytes, num_bytes
+ jnz %%last
+
+%%do_return2:
+%ifidn %%CNTR_TYPE, CNTR_BIT
+ pop r14
+ pop r13
+ pop r12
+%endif
+
+ ret
+
+%%last:
+
+ ; load partial block into XMM register
+ simd_load_avx_15_1 xpart, p_in, num_bytes
+
+%%final_ctr_enc:
+ ; Encryption of a single partial block
+ vpshufb xcounter, xbyteswap
+ vmovdqa xdata0, xcounter
+ vpxor xdata0, [p_keys + 16*0]
+%assign i 1
+%rep 11
+ vaesenc xdata0, [p_keys + 16*i]
+%assign i (i+1)
+%endrep
+ ; created keystream
+ vaesenclast xdata0, [p_keys + 16*i]
+
+ ; xor keystream with the message (scratch)
+ vpxor xdata0, xpart
+
+%ifidn %%CNTR_TYPE, CNTR_BIT
+ ;; Check if there is a partial byte
+ or r_bits, r_bits
+ jz %%store_output
+
+ ;; Load output to get last partial byte
+ simd_load_avx_15_1 xtmp, p_out, num_bytes
+
+ ;; Save RCX in temporary GP register
+ mov tmp, rcx
+ mov mask, 0xff
+%ifidn r_bits, rcx
+%error "r_bits cannot be mapped to rcx!"
+%endif
+ mov cl, BYTE(r_bits)
+ shr mask, cl ;; e.g. 3 remaining bits -> mask = 00011111
+ mov rcx, tmp
+
+ vmovq xtmp2, mask
+
+ ;; Get number of full bytes in last block of 16 bytes
+ mov tmp, num_bytes
+ dec tmp
+ XVPSLLB xtmp2, tmp, xtmp3, tmp2
+ ;; At this point, xtmp2 contains a mask with all 0s, but with some ones
+ ;; in the partial byte
+
+ ;; Clear all the bits that do not need to be preserved from the output
+ vpand xtmp, xtmp, xtmp2
+
+ ;; Clear the bits from the input that are not to be ciphered
+ vpandn xdata0, xtmp2, xdata0
+ vpor xdata0, xtmp
+%endif
+
+%%store_output:
+ ; copy result into the output buffer
+ simd_store_avx_15 p_out, xdata0, num_bytes, tmp, rax
+
+ jmp %%do_return2
+
+%%iv_is_16_bytes:
+ ; Read 16 byte IV: Nonce + ESP IV + block counter (BE)
+ vmovdqu xcounter, [p_IV]
+ jmp %%bswap_iv
+%endmacro
+
+align 32
+%ifdef CNTR_CCM_AVX
+; JOB_AES_HMAC * aes_cntr_ccm_192_avx(JOB_AES_HMAC *job)
+; arg 1 : job
+MKGLOBAL(aes_cntr_ccm_192_avx,function,internal)
+aes_cntr_ccm_192_avx:
+ DO_CNTR CCM
+%else
+;; aes_cntr_192_avx(void *in, void *IV, void *keys, void *out, UINT64 num_bytes,
+;; UINT64 iv_len)
+MKGLOBAL(aes_cntr_192_avx,function,internal)
+aes_cntr_192_avx:
+ DO_CNTR CNTR
+
+;; aes_cntr_bit_192_avx(void *in, void *IV, void *keys, void *out, UINT64 num_bits,
+;; UINT64 iv_len)
+MKGLOBAL(aes_cntr_bit_192_avx,function,internal)
+aes_cntr_bit_192_avx:
+ DO_CNTR CNTR_BIT
+%endif ;; CNTR_CCM_AVX
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/avx/aes256_cbc_dec_by8_avx.asm b/src/spdk/intel-ipsec-mb/avx/aes256_cbc_dec_by8_avx.asm
new file mode 100644
index 000000000..6a8f100ec
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx/aes256_cbc_dec_by8_avx.asm
@@ -0,0 +1,344 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+; routine to do AES256 CBC decrypt "by8"
+
+; XMM registers are clobbered. Saving/restoring must be done at a higher level
+%include "include/os.asm"
+
+%define CONCAT(a,b) a %+ b
+%define VMOVDQ vmovdqu
+
+%define xdata0 xmm0
+%define xdata1 xmm1
+%define xdata2 xmm2
+%define xdata3 xmm3
+%define xdata4 xmm4
+%define xdata5 xmm5
+%define xdata6 xmm6
+%define xdata7 xmm7
+%define xIV xmm8
+%define xkey0 xmm9
+%define xkey3 xmm10
+%define xkey6 xmm11
+%define xkey9 xmm12
+%define xkey12 xmm13
+%define xkeyA xmm14
+%define xkeyB xmm15
+
+%ifdef LINUX
+%define p_in rdi
+%define p_IV rsi
+%define p_keys rdx
+%define p_out rcx
+%define num_bytes r8
+%else
+%define p_in rcx
+%define p_IV rdx
+%define p_keys r8
+%define p_out r9
+%define num_bytes rax
+%endif
+
+%define tmp r10
+
+%macro do_aes_load 1
+ do_aes %1, 1
+%endmacro
+
+%macro do_aes_noload 1
+ do_aes %1, 0
+%endmacro
+
+; do_aes num_in_par load_keys
+; This increments p_in, but not p_out
+%macro do_aes 2
+%define %%by %1
+%define %%load_keys %2
+
+%if (%%load_keys)
+ vmovdqa xkey0, [p_keys + 0*16]
+%endif
+
+%assign i 0
+%rep %%by
+ VMOVDQ CONCAT(xdata,i), [p_in + i*16]
+%assign i (i+1)
+%endrep
+
+ vmovdqa xkeyA, [p_keys + 1*16]
+
+%assign i 0
+%rep %%by
+ vpxor CONCAT(xdata,i), CONCAT(xdata,i), xkey0
+%assign i (i+1)
+%endrep
+
+ vmovdqa xkeyB, [p_keys + 2*16]
+
+%assign i 0
+%rep %%by
+ vaesdec CONCAT(xdata,i), CONCAT(xdata,i), xkeyA
+%assign i (i+1)
+%endrep
+
+ add p_in, 16*%%by
+
+%if (%%load_keys)
+ vmovdqa xkey3, [p_keys + 3*16]
+%endif
+
+%assign i 0
+%rep %%by
+ vaesdec CONCAT(xdata,i), CONCAT(xdata,i), xkeyB
+%assign i (i+1)
+%endrep
+
+ vmovdqa xkeyA, [p_keys + 4*16]
+
+%assign i 0
+%rep %%by
+ vaesdec CONCAT(xdata,i), CONCAT(xdata,i), xkey3
+%assign i (i+1)
+%endrep
+
+ vmovdqa xkeyB, [p_keys + 5*16]
+
+%assign i 0
+%rep %%by
+ vaesdec CONCAT(xdata,i), CONCAT(xdata,i), xkeyA
+%assign i (i+1)
+%endrep
+
+%if (%%load_keys)
+ vmovdqa xkey6, [p_keys + 6*16]
+%endif
+
+%assign i 0
+%rep %%by
+ vaesdec CONCAT(xdata,i), CONCAT(xdata,i), xkeyB
+%assign i (i+1)
+%endrep
+
+ vmovdqa xkeyA, [p_keys + 7*16]
+
+%assign i 0
+%rep %%by
+ vaesdec CONCAT(xdata,i), CONCAT(xdata,i), xkey6
+%assign i (i+1)
+%endrep
+
+ vmovdqa xkeyB, [p_keys + 8*16]
+
+%assign i 0
+%rep %%by
+ vaesdec CONCAT(xdata,i), CONCAT(xdata,i), xkeyA
+%assign i (i+1)
+%endrep
+
+%if (%%load_keys)
+ vmovdqa xkey9, [p_keys + 9*16]
+%endif
+
+%assign i 0
+%rep %%by
+ vaesdec CONCAT(xdata,i), CONCAT(xdata,i), xkeyB
+%assign i (i+1)
+%endrep
+
+ vmovdqa xkeyA, [p_keys + 10*16]
+
+%assign i 0
+%rep %%by
+ vaesdec CONCAT(xdata,i), CONCAT(xdata,i), xkey9
+%assign i (i+1)
+%endrep
+
+ vmovdqa xkeyB, [p_keys + 11*16]
+
+%assign i 0
+%rep %%by
+ vaesdec CONCAT(xdata,i), CONCAT(xdata,i), xkeyA
+%assign i (i+1)
+%endrep
+
+%if (%%load_keys)
+ vmovdqa xkey12, [p_keys + 12*16]
+%endif
+
+%assign i 0
+%rep %%by
+ vaesdec CONCAT(xdata,i), CONCAT(xdata,i), xkeyB
+%assign i (i+1)
+%endrep
+
+ vmovdqa xkeyA, [p_keys + 13*16]
+
+%assign i 0
+%rep %%by
+ vaesdec CONCAT(xdata,i), CONCAT(xdata,i), xkey12
+%assign i (i+1)
+%endrep
+
+ vmovdqa xkeyB, [p_keys + 14*16]
+
+%assign i 0
+%rep %%by
+ vaesdec CONCAT(xdata,i), CONCAT(xdata,i), xkeyA
+%assign i (i+1)
+%endrep
+
+%assign i 0
+%rep %%by
+ vaesdeclast CONCAT(xdata,i), CONCAT(xdata,i), xkeyB
+%assign i (i+1)
+%endrep
+
+ vpxor xdata0, xdata0, xIV
+%assign i 1
+%if (%%by > 1)
+%rep (%%by - 1)
+ VMOVDQ xIV, [p_in + (i-1)*16 - 16*%%by]
+ vpxor CONCAT(xdata,i), CONCAT(xdata,i), xIV
+%assign i (i+1)
+%endrep
+%endif
+ VMOVDQ xIV, [p_in + (i-1)*16 - 16*%%by]
+
+%assign i 0
+%rep %%by
+ VMOVDQ [p_out + i*16], CONCAT(xdata,i)
+%assign i (i+1)
+%endrep
+%endmacro
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+section .text
+
+;; aes_cbc_dec_256_avx(void *in, void *IV, void *keys, void *out, UINT64 num_bytes)
+MKGLOBAL(aes_cbc_dec_256_avx,function,internal)
+aes_cbc_dec_256_avx:
+
+%ifndef LINUX
+ mov num_bytes, [rsp + 8*5]
+%endif
+
+ vmovdqu xIV, [p_IV]
+
+ mov tmp, num_bytes
+ and tmp, 7*16
+ jz mult_of_8_blks
+
+ ; 1 <= tmp <= 7
+ cmp tmp, 4*16
+ jg gt4
+ je eq4
+
+lt4:
+ cmp tmp, 2*16
+ jg eq3
+ je eq2
+eq1:
+ do_aes_load 1
+ add p_out, 1*16
+ and num_bytes, ~7*16
+ jz do_return2
+ jmp main_loop2
+
+eq2:
+ do_aes_load 2
+ add p_out, 2*16
+ and num_bytes, ~7*16
+ jz do_return2
+ jmp main_loop2
+
+eq3:
+ do_aes_load 3
+ add p_out, 3*16
+ and num_bytes, ~7*16
+ jz do_return2
+ jmp main_loop2
+
+eq4:
+ do_aes_load 4
+ add p_out, 4*16
+ and num_bytes, ~7*16
+ jz do_return2
+ jmp main_loop2
+
+gt4:
+ cmp tmp, 6*16
+ jg eq7
+ je eq6
+
+eq5:
+ do_aes_load 5
+ add p_out, 5*16
+ and num_bytes, ~7*16
+ jz do_return2
+ jmp main_loop2
+
+eq6:
+ do_aes_load 6
+ add p_out, 6*16
+ and num_bytes, ~7*16
+ jz do_return2
+ jmp main_loop2
+
+eq7:
+ do_aes_load 7
+ add p_out, 7*16
+ and num_bytes, ~7*16
+ jz do_return2
+ jmp main_loop2
+
+mult_of_8_blks:
+ vmovdqa xkey0, [p_keys + 0*16]
+ vmovdqa xkey3, [p_keys + 3*16]
+ vmovdqa xkey6, [p_keys + 6*16]
+ vmovdqa xkey9, [p_keys + 9*16]
+ vmovdqa xkey12, [p_keys + 12*16]
+
+main_loop2:
+	; num_bytes is a multiple of 8 blocks (8*16 bytes) and > 0
+ do_aes_noload 8
+ add p_out, 8*16
+ sub num_bytes, 8*16
+ jne main_loop2
+
+do_return2:
+; Don't write back IV
+; vmovdqu [p_IV], xIV
+
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/avx/aes256_cntr_by8_avx.asm b/src/spdk/intel-ipsec-mb/avx/aes256_cntr_by8_avx.asm
new file mode 100644
index 000000000..e201339da
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx/aes256_cntr_by8_avx.asm
@@ -0,0 +1,516 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%include "include/os.asm"
+%include "include/memcpy.asm"
+%include "include/const.inc"
+%include "include/reg_sizes.asm"
+
+; routine to do AES256 CNTR enc/decrypt "by8"
+; XMM registers are clobbered. Saving/restoring must be done at a higher level
+
+extern byteswap_const
+extern ddq_add_1, ddq_add_2, ddq_add_3, ddq_add_4
+extern ddq_add_5, ddq_add_6, ddq_add_7, ddq_add_8
+
+%define CONCAT(a,b) a %+ b
+%define VMOVDQ vmovdqu
+
+%define xdata0 xmm0
+%define xdata1 xmm1
+%define xpart xmm1
+%define xdata2 xmm2
+%define xdata3 xmm3
+%define xdata4 xmm4
+%define xdata5 xmm5
+%define xdata6 xmm6
+%define xdata7 xmm7
+%define xcounter xmm8
+%define xtmp xmm8
+%define xbyteswap xmm9
+%define xtmp2 xmm9
+%define xkey0 xmm10
+%define xtmp3 xmm10
+%define xkey4 xmm11
+%define xkey8 xmm12
+%define xkey12 xmm13
+%define xkeyA xmm14
+%define xkeyB xmm15
+
+%ifdef LINUX
+%define p_in rdi
+%define p_IV rsi
+%define p_keys rdx
+%define p_out rcx
+%define num_bytes r8
+%define num_bits r8
+%define p_ivlen r9
+%else
+%define p_in rcx
+%define p_IV rdx
+%define p_keys r8
+%define p_out r9
+%define num_bytes r10
+%define num_bits r10
+%define p_ivlen qword [rsp + 8*6]
+%endif
+
+%define tmp r11
+
+%define r_bits r12
+%define tmp2 r13
+%define mask r14
+
+%macro do_aes_load 2
+ do_aes %1, %2, 1
+%endmacro
+
+%macro do_aes_noload 2
+ do_aes %1, %2, 0
+%endmacro
+
+; do_aes num_in_par cntr_type load_keys
+; This increments p_in, but not p_out
+%macro do_aes 3
+%define %%by %1
+%define %%cntr_type %2
+%define %%load_keys %3
+
+%if (%%load_keys)
+ vmovdqa xkey0, [p_keys + 0*16]
+%endif
+
+ vpshufb xdata0, xcounter, xbyteswap
+%assign i 1
+%rep (%%by - 1)
+ vpaddd CONCAT(xdata,i), xcounter, [rel CONCAT(ddq_add_,i)]
+ vpshufb CONCAT(xdata,i), CONCAT(xdata,i), xbyteswap
+%assign i (i + 1)
+%endrep
+
+ vmovdqa xkeyA, [p_keys + 1*16]
+
+ vpxor xdata0, xkey0
+%ifidn %%cntr_type, CNTR_BIT
+ vpaddd xcounter, xcounter, [rel CONCAT(ddq_add_,%%by)]
+%else
+ vpaddq xcounter, xcounter, [rel CONCAT(ddq_add_,%%by)]
+%endif
+
+%assign i 1
+%rep (%%by - 1)
+ vpxor CONCAT(xdata,i), xkey0
+%assign i (i + 1)
+%endrep
+
+ vmovdqa xkeyB, [p_keys + 2*16]
+%assign i 0
+%rep %%by
+ vaesenc CONCAT(xdata,i), CONCAT(xdata,i), xkeyA ; key 1
+%assign i (i+1)
+%endrep
+
+ vmovdqa xkeyA, [p_keys + 3*16]
+%assign i 0
+%rep %%by
+ vaesenc CONCAT(xdata,i), CONCAT(xdata,i), xkeyB ; key 2
+%assign i (i+1)
+%endrep
+
+ add p_in, 16*%%by
+
+%if (%%load_keys)
+ vmovdqa xkey4, [p_keys + 4*16]
+%endif
+%assign i 0
+%rep %%by
+ vaesenc CONCAT(xdata,i), CONCAT(xdata,i), xkeyA ; key 3
+%assign i (i+1)
+%endrep
+
+ vmovdqa xkeyA, [p_keys + 5*16]
+%assign i 0
+%rep %%by
+ vaesenc CONCAT(xdata,i), CONCAT(xdata,i), xkey4 ; key 4
+%assign i (i+1)
+%endrep
+
+ vmovdqa xkeyB, [p_keys + 6*16]
+%assign i 0
+%rep %%by
+ vaesenc CONCAT(xdata,i), CONCAT(xdata,i), xkeyA ; key 5
+%assign i (i+1)
+%endrep
+
+ vmovdqa xkeyA, [p_keys + 7*16]
+%assign i 0
+%rep %%by
+ vaesenc CONCAT(xdata,i), CONCAT(xdata,i), xkeyB ; key 6
+%assign i (i+1)
+%endrep
+
+%if (%%load_keys)
+ vmovdqa xkey8, [p_keys + 8*16]
+%endif
+%assign i 0
+%rep %%by
+ vaesenc CONCAT(xdata,i), CONCAT(xdata,i), xkeyA ; key 7
+%assign i (i+1)
+%endrep
+
+ vmovdqa xkeyA, [p_keys + 9*16]
+%assign i 0
+%rep %%by
+ vaesenc CONCAT(xdata,i), CONCAT(xdata,i), xkey8 ; key 8
+%assign i (i+1)
+%endrep
+
+ vmovdqa xkeyB, [p_keys + 10*16]
+%assign i 0
+%rep %%by
+ vaesenc CONCAT(xdata,i), CONCAT(xdata,i), xkeyA ; key 9
+%assign i (i+1)
+%endrep
+
+ vmovdqa xkeyA, [p_keys + 11*16]
+%assign i 0
+%rep %%by
+ vaesenc CONCAT(xdata,i), CONCAT(xdata,i), xkeyB ; key 10
+%assign i (i+1)
+%endrep
+
+%if (%%load_keys)
+ vmovdqa xkey12, [p_keys + 12*16]
+%endif
+%assign i 0
+%rep %%by
+ vaesenc CONCAT(xdata,i), CONCAT(xdata,i), xkeyA ; key 11
+%assign i (i+1)
+%endrep
+
+ vmovdqa xkeyA, [p_keys + 13*16]
+%assign i 0
+%rep %%by
+ vaesenc CONCAT(xdata,i), CONCAT(xdata,i), xkey12 ; key 12
+%assign i (i+1)
+%endrep
+
+ vmovdqa xkeyB, [p_keys + 14*16]
+%assign i 0
+%rep %%by
+ vaesenc CONCAT(xdata,i), CONCAT(xdata,i), xkeyA ; key 13
+%assign i (i+1)
+%endrep
+
+%assign i 0
+%rep %%by
+ vaesenclast CONCAT(xdata,i), CONCAT(xdata,i), xkeyB ; key 14
+%assign i (i+1)
+%endrep
+
+%assign i 0
+%rep (%%by / 2)
+%assign j (i+1)
+ VMOVDQ xkeyA, [p_in + i*16 - 16*%%by]
+ VMOVDQ xkeyB, [p_in + j*16 - 16*%%by]
+ vpxor CONCAT(xdata,i), CONCAT(xdata,i), xkeyA
+ vpxor CONCAT(xdata,j), CONCAT(xdata,j), xkeyB
+%assign i (i+2)
+%endrep
+%if (i < %%by)
+ VMOVDQ xkeyA, [p_in + i*16 - 16*%%by]
+ vpxor CONCAT(xdata,i), CONCAT(xdata,i), xkeyA
+%endif
+
+%ifidn %%cntr_type, CNTR_BIT
+ ;; check if this is the end of the message
+ mov tmp, num_bytes
+ and tmp, ~(%%by*16)
+ jnz %%skip_preserve
+ ;; Check if there is a partial byte
+ or r_bits, r_bits
+ jz %%skip_preserve
+
+%assign idx (%%by - 1)
+ ;; Load output to get last partial byte
+ vmovdqu xtmp, [p_out + idx * 16]
+
+ ;; Save RCX in temporary GP register
+ mov tmp, rcx
+ mov mask, 0xff
+ mov cl, BYTE(r_bits)
+ shr mask, cl ;; e.g. 3 remaining bits -> mask = 00011111
+ mov rcx, tmp
+
+ vmovq xtmp2, mask
+ vpslldq xtmp2, 15
+ ;; At this point, xtmp2 contains a mask with all 0s, but with some ones
+ ;; in the partial byte
+
+ ;; Clear all the bits that do not need to be preserved from the output
+ vpand xtmp, xtmp, xtmp2
+
+ ;; Clear all bits from the input that are not to be ciphered
+ vpandn CONCAT(xdata,idx), xtmp2, CONCAT(xdata,idx)
+ vpor CONCAT(xdata,idx), xtmp
+
+%%skip_preserve:
+%endif
+
+%assign i 0
+%rep %%by
+ VMOVDQ [p_out + i*16], CONCAT(xdata,i)
+%assign i (i+1)
+%endrep
+%endmacro
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+section .text
+;; Macro performing AES-CTR.
+;;
+%macro DO_CNTR 1
+%define %%CNTR_TYPE %1 ; [in] Type of CNTR operation to do (CNTR/CNTR_BIT/CCM)
+
+%ifndef LINUX
+ mov num_bytes, [rsp + 8*5]
+%endif
+
+%ifidn %%CNTR_TYPE, CNTR_BIT
+ push r12
+ push r13
+ push r14
+%endif
+
+ vmovdqa xbyteswap, [rel byteswap_const]
+%ifidn %%CNTR_TYPE, CNTR
+ test p_ivlen, 16
+ jnz %%iv_is_16_bytes
+ ; Read 12 bytes: Nonce + ESP IV. Then pad with block counter 0x00000001
+ mov DWORD(tmp), 0x01000000
+ vpinsrq xcounter, [p_IV], 0
+ vpinsrd xcounter, [p_IV + 8], 2
+ vpinsrd xcounter, DWORD(tmp), 3
+
+%else ;; CNTR_BIT
+ ; Read 16 byte IV: Nonce + 8-byte block counter (BE)
+ vmovdqu xcounter, [p_IV]
+%endif
+%%bswap_iv:
+ vpshufb xcounter, xbyteswap
+
+ ;; calculate len
+ ;; convert bits to bytes (message length in bits for CNTR_BIT)
+%ifidn %%CNTR_TYPE, CNTR_BIT
+ mov r_bits, num_bits
+ add num_bits, 7
+ shr num_bits, 3 ; "num_bits" and "num_bytes" registers are the same
+ and r_bits, 7 ; Check if there are remainder bits (0-7)
+%endif
+
+ mov tmp, num_bytes
+ and tmp, 7*16
+	jz	%%chk	; no 1-7 block remainder - only full groups of 8 blocks (and possibly a final partial block)
+
+ ; 1 <= tmp <= 7
+ cmp tmp, 4*16
+ jg %%gt4
+ je %%eq4
+
+%%lt4:
+ cmp tmp, 2*16
+ jg %%eq3
+ je %%eq2
+%%eq1:
+ do_aes_load 1, %%CNTR_TYPE
+ add p_out, 1*16
+ jmp %%chk
+
+%%eq2:
+ do_aes_load 2, %%CNTR_TYPE
+ add p_out, 2*16
+ jmp %%chk
+
+%%eq3:
+ do_aes_load 3, %%CNTR_TYPE
+ add p_out, 3*16
+ jmp %%chk
+
+%%eq4:
+ do_aes_load 4, %%CNTR_TYPE
+ add p_out, 4*16
+ jmp %%chk
+
+%%gt4:
+ cmp tmp, 6*16
+ jg %%eq7
+ je %%eq6
+
+%%eq5:
+ do_aes_load 5, %%CNTR_TYPE
+ add p_out, 5*16
+ jmp %%chk
+
+%%eq6:
+ do_aes_load 6, %%CNTR_TYPE
+ add p_out, 6*16
+ jmp %%chk
+
+%%eq7:
+ do_aes_load 7, %%CNTR_TYPE
+ add p_out, 7*16
+ ; fall through to chk
+%%chk:
+ and num_bytes, ~(7*16)
+ jz %%do_return2
+
+ cmp num_bytes, 16
+ jb %%last
+
+ ; process multiples of 8 blocks
+ vmovdqa xkey0, [p_keys + 0*16]
+ vmovdqa xkey4, [p_keys + 4*16]
+ vmovdqa xkey8, [p_keys + 8*16]
+ vmovdqa xkey12, [p_keys + 12*16]
+ jmp %%main_loop2
+
+align 32
+%%main_loop2:
+ ; num_bytes is a multiple of 8 blocks + partial bytes
+ do_aes_noload 8, %%CNTR_TYPE
+ add p_out, 8*16
+ sub num_bytes, 8*16
+ cmp num_bytes, 8*16
+ jae %%main_loop2
+
+ ; Check if there is a partial block
+ or num_bytes, num_bytes
+ jnz %%last
+
+%%do_return2:
+%ifidn %%CNTR_TYPE, CNTR_BIT
+ pop r14
+ pop r13
+ pop r12
+%endif
+
+ ret
+
+%%last:
+
+ ; load partial block into XMM register
+ simd_load_avx_15_1 xpart, p_in, num_bytes
+
+%%final_ctr_enc:
+ ; Encryption of a single partial block
+ vpshufb xcounter, xbyteswap
+ vmovdqa xdata0, xcounter
+ vpxor xdata0, [p_keys + 16*0]
+%assign i 1
+%rep 13
+ vaesenc xdata0, [p_keys + 16*i]
+%assign i (i+1)
+%endrep
+ ; created keystream
+ vaesenclast xdata0, [p_keys + 16*i]
+
+ ; xor keystream with the message (scratch)
+ vpxor xdata0, xpart
+
+%ifidn %%CNTR_TYPE, CNTR_BIT
+ ;; Check if there is a partial byte
+ or r_bits, r_bits
+ jz %%store_output
+
+ ;; Load output to get last partial byte
+ simd_load_avx_15_1 xtmp, p_out, num_bytes
+
+ ;; Save RCX in temporary GP register
+ mov tmp, rcx
+ mov mask, 0xff
+%ifidn r_bits, rcx
+%error "r_bits cannot be mapped to rcx!"
+%endif
+ mov cl, BYTE(r_bits)
+ shr mask, cl ;; e.g. 3 remaining bits -> mask = 00011111
+ mov rcx, tmp
+
+ vmovq xtmp2, mask
+
+ ;; Get number of full bytes in last block of 16 bytes
+ mov tmp, num_bytes
+ dec tmp
+ XVPSLLB xtmp2, tmp, xtmp3, tmp2
+ ;; At this point, xtmp2 contains a mask with all 0s, but with some ones
+ ;; in the partial byte
+
+ ;; Clear all the bits that do not need to be preserved from the output
+ vpand xtmp, xtmp, xtmp2
+
+ ;; Clear the bits from the input that are not to be ciphered
+ vpandn xdata0, xtmp2, xdata0
+ vpor xdata0, xtmp
+%endif
+
+%%store_output:
+ ; copy result into the output buffer
+ simd_store_avx_15 p_out, xdata0, num_bytes, tmp, rax
+
+ jmp %%do_return2
+
+%%iv_is_16_bytes:
+ ; Read 16 byte IV: Nonce + ESP IV + block counter (BE)
+ vmovdqu xcounter, [p_IV]
+ jmp %%bswap_iv
+%endmacro
+
+align 32
+%ifdef CNTR_CCM_AVX
+; JOB_AES_HMAC * aes_cntr_ccm_256_avx(JOB_AES_HMAC *job)
+; arg 1 : job
+MKGLOBAL(aes_cntr_ccm_256_avx,function,internal)
+aes_cntr_ccm_256_avx:
+ DO_CNTR CCM
+%else
+;; aes_cntr_256_avx(void *in, void *IV, void *keys, void *out, UINT64 num_bytes,
+;; UINT64 iv_len)
+MKGLOBAL(aes_cntr_256_avx,function,internal)
+aes_cntr_256_avx:
+ DO_CNTR CNTR
+
+;; aes_cntr_bit_256_avx(void *in, void *IV, void *keys, void *out, UINT64 num_bits,
+;; UINT64 iv_len)
+MKGLOBAL(aes_cntr_bit_256_avx,function,internal)
+aes_cntr_bit_256_avx:
+ DO_CNTR CNTR_BIT
+%endif ;; CNTR_CCM_AVX
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/avx/aes_cbc_enc_128_x8.asm b/src/spdk/intel-ipsec-mb/avx/aes_cbc_enc_128_x8.asm
new file mode 100644
index 000000000..745a8e4d4
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx/aes_cbc_enc_128_x8.asm
@@ -0,0 +1,494 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+;;; routine to do a 128 bit CBC AES encrypt and CBC MAC
+
+;; clobbers all registers except for ARG1 and rbp
+
+%include "include/os.asm"
+%include "mb_mgr_datastruct.asm"
+
+%define VMOVDQ vmovdqu ;; assume buffers not aligned
+
+%macro VPXOR2 2
+ vpxor %1, %1, %2
+%endm
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; struct AES_ARGS {
+;; void* in[8];
+;; void* out[8];
+;; UINT128* keys[8];
+;; UINT128 IV[8];
+;; }
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; void aes_cbc_enc_128_x8(AES_ARGS *args, UINT64 len);
+;; arg 1: ARG : addr of AES_ARGS structure
+;; arg 2: LEN : len (in units of bytes)
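+;;
+;; Note: the 8 in/out/keys/IV lanes are independent buffers processed in
+;; parallel; the single LEN value is applied to every lane.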
+
+struc STACK
+_gpr_save: resq 8
+_len: resq 1
+endstruc
+
+%define GPR_SAVE_AREA rsp + _gpr_save
+%define LEN_AREA rsp + _len
+
+%ifdef LINUX
+%define arg1 rdi
+%define arg2 rsi
+%define arg3 rcx
+%define arg4 rdx
+%else
+%define arg1 rcx
+%define arg2 rdx
+%define arg3 rdi
+%define arg4 rsi
+%endif
+
+%define ARG arg1
+%define LEN arg2
+
+%define IDX rax
+%define TMP rbx
+
+%define KEYS0 arg3
+%define KEYS1 arg4
+%define KEYS2 rbp
+%define KEYS3 r8
+%define KEYS4 r9
+%define KEYS5 r10
+%define KEYS6 r11
+%define KEYS7 r12
+
+%define IN0 r13
+%define IN2 r14
+%define IN4 r15
+%define IN6 LEN
+
+%define XDATA0 xmm0
+%define XDATA1 xmm1
+%define XDATA2 xmm2
+%define XDATA3 xmm3
+%define XDATA4 xmm4
+%define XDATA5 xmm5
+%define XDATA6 xmm6
+%define XDATA7 xmm7
+
+%define XKEY0_3 xmm8
+%define XKEY1_4 xmm9
+%define XKEY2_5 xmm10
+%define XKEY3_6 xmm11
+%define XKEY4_7 xmm12
+%define XKEY5_8 xmm13
+%define XKEY6_9 xmm14
+%define XTMP xmm15
+
+section .text
+%ifdef CBC_MAC
+MKGLOBAL(aes128_cbc_mac_x8,function,internal)
+aes128_cbc_mac_x8:
+%else
+MKGLOBAL(aes_cbc_enc_128_x8,function,internal)
+aes_cbc_enc_128_x8:
+%endif
+ sub rsp, STACK_size
+ mov [GPR_SAVE_AREA + 8*0], rbp
+%ifdef CBC_MAC
+ mov [GPR_SAVE_AREA + 8*1], rbx
+ mov [GPR_SAVE_AREA + 8*2], r12
+ mov [GPR_SAVE_AREA + 8*3], r13
+ mov [GPR_SAVE_AREA + 8*4], r14
+ mov [GPR_SAVE_AREA + 8*5], r15
+%ifndef LINUX
+ mov [GPR_SAVE_AREA + 8*6], rsi
+ mov [GPR_SAVE_AREA + 8*7], rdi
+%endif
+%endif
+
+ mov IDX, 16
+ mov [LEN_AREA], LEN
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+ mov IN0, [ARG + _aesarg_in + 8*0]
+ mov IN2, [ARG + _aesarg_in + 8*2]
+ mov IN4, [ARG + _aesarg_in + 8*4]
+ mov IN6, [ARG + _aesarg_in + 8*6]
+
+ mov TMP, [ARG + _aesarg_in + 8*1]
+ VMOVDQ XDATA0, [IN0] ; load first block of plain text
+ VMOVDQ XDATA1, [TMP] ; load first block of plain text
+ mov TMP, [ARG + _aesarg_in + 8*3]
+ VMOVDQ XDATA2, [IN2] ; load first block of plain text
+ VMOVDQ XDATA3, [TMP] ; load first block of plain text
+ mov TMP, [ARG + _aesarg_in + 8*5]
+ VMOVDQ XDATA4, [IN4] ; load first block of plain text
+ VMOVDQ XDATA5, [TMP] ; load first block of plain text
+ mov TMP, [ARG + _aesarg_in + 8*7]
+ VMOVDQ XDATA6, [IN6] ; load first block of plain text
+ VMOVDQ XDATA7, [TMP] ; load first block of plain text
+
+
+ VPXOR2 XDATA0, [ARG + _aesarg_IV + 16*0] ; plaintext XOR IV
+ VPXOR2 XDATA1, [ARG + _aesarg_IV + 16*1] ; plaintext XOR IV
+ VPXOR2 XDATA2, [ARG + _aesarg_IV + 16*2] ; plaintext XOR IV
+ VPXOR2 XDATA3, [ARG + _aesarg_IV + 16*3] ; plaintext XOR IV
+ VPXOR2 XDATA4, [ARG + _aesarg_IV + 16*4] ; plaintext XOR IV
+ VPXOR2 XDATA5, [ARG + _aesarg_IV + 16*5] ; plaintext XOR IV
+ VPXOR2 XDATA6, [ARG + _aesarg_IV + 16*6] ; plaintext XOR IV
+ VPXOR2 XDATA7, [ARG + _aesarg_IV + 16*7] ; plaintext XOR IV
+
+ mov KEYS0, [ARG + _aesarg_keys + 8*0]
+ mov KEYS1, [ARG + _aesarg_keys + 8*1]
+ mov KEYS2, [ARG + _aesarg_keys + 8*2]
+ mov KEYS3, [ARG + _aesarg_keys + 8*3]
+ mov KEYS4, [ARG + _aesarg_keys + 8*4]
+ mov KEYS5, [ARG + _aesarg_keys + 8*5]
+ mov KEYS6, [ARG + _aesarg_keys + 8*6]
+ mov KEYS7, [ARG + _aesarg_keys + 8*7]
+
+ VPXOR2 XDATA0, [KEYS0 + 16*0] ; 0. ARK
+ VPXOR2 XDATA1, [KEYS1 + 16*0] ; 0. ARK
+ VPXOR2 XDATA2, [KEYS2 + 16*0] ; 0. ARK
+ VPXOR2 XDATA3, [KEYS3 + 16*0] ; 0. ARK
+ VPXOR2 XDATA4, [KEYS4 + 16*0] ; 0. ARK
+ VPXOR2 XDATA5, [KEYS5 + 16*0] ; 0. ARK
+ VPXOR2 XDATA6, [KEYS6 + 16*0] ; 0. ARK
+ VPXOR2 XDATA7, [KEYS7 + 16*0] ; 0. ARK
+
+ vaesenc XDATA0, [KEYS0 + 16*1] ; 1. ENC
+ vaesenc XDATA1, [KEYS1 + 16*1] ; 1. ENC
+ vaesenc XDATA2, [KEYS2 + 16*1] ; 1. ENC
+ vaesenc XDATA3, [KEYS3 + 16*1] ; 1. ENC
+ vaesenc XDATA4, [KEYS4 + 16*1] ; 1. ENC
+ vaesenc XDATA5, [KEYS5 + 16*1] ; 1. ENC
+ vaesenc XDATA6, [KEYS6 + 16*1] ; 1. ENC
+ vaesenc XDATA7, [KEYS7 + 16*1] ; 1. ENC
+
+ vmovdqa XKEY0_3, [KEYS0 + 16*3] ; load round 3 key
+
+ vaesenc XDATA0, [KEYS0 + 16*2] ; 2. ENC
+ vaesenc XDATA1, [KEYS1 + 16*2] ; 2. ENC
+ vaesenc XDATA2, [KEYS2 + 16*2] ; 2. ENC
+ vaesenc XDATA3, [KEYS3 + 16*2] ; 2. ENC
+ vaesenc XDATA4, [KEYS4 + 16*2] ; 2. ENC
+ vaesenc XDATA5, [KEYS5 + 16*2] ; 2. ENC
+ vaesenc XDATA6, [KEYS6 + 16*2] ; 2. ENC
+ vaesenc XDATA7, [KEYS7 + 16*2] ; 2. ENC
+
+ vmovdqa XKEY1_4, [KEYS1 + 16*4] ; load round 4 key
+
+ vaesenc XDATA0, XKEY0_3 ; 3. ENC
+ vaesenc XDATA1, [KEYS1 + 16*3] ; 3. ENC
+ vaesenc XDATA2, [KEYS2 + 16*3] ; 3. ENC
+ vaesenc XDATA3, [KEYS3 + 16*3] ; 3. ENC
+ vaesenc XDATA4, [KEYS4 + 16*3] ; 3. ENC
+ vaesenc XDATA5, [KEYS5 + 16*3] ; 3. ENC
+ vaesenc XDATA6, [KEYS6 + 16*3] ; 3. ENC
+ vaesenc XDATA7, [KEYS7 + 16*3] ; 3. ENC
+
+ vaesenc XDATA0, [KEYS0 + 16*4] ; 4. ENC
+ vmovdqa XKEY2_5, [KEYS2 + 16*5] ; load round 5 key
+ vaesenc XDATA1, XKEY1_4 ; 4. ENC
+ vaesenc XDATA2, [KEYS2 + 16*4] ; 4. ENC
+ vaesenc XDATA3, [KEYS3 + 16*4] ; 4. ENC
+ vaesenc XDATA4, [KEYS4 + 16*4] ; 4. ENC
+ vaesenc XDATA5, [KEYS5 + 16*4] ; 4. ENC
+ vaesenc XDATA6, [KEYS6 + 16*4] ; 4. ENC
+ vaesenc XDATA7, [KEYS7 + 16*4] ; 4. ENC
+
+ vaesenc XDATA0, [KEYS0 + 16*5] ; 5. ENC
+ vaesenc XDATA1, [KEYS1 + 16*5] ; 5. ENC
+ vmovdqa XKEY3_6, [KEYS3 + 16*6] ; load round 6 key
+ vaesenc XDATA2, XKEY2_5 ; 5. ENC
+ vaesenc XDATA3, [KEYS3 + 16*5] ; 5. ENC
+ vaesenc XDATA4, [KEYS4 + 16*5] ; 5. ENC
+ vaesenc XDATA5, [KEYS5 + 16*5] ; 5. ENC
+ vaesenc XDATA6, [KEYS6 + 16*5] ; 5. ENC
+ vaesenc XDATA7, [KEYS7 + 16*5] ; 5. ENC
+
+ vaesenc XDATA0, [KEYS0 + 16*6] ; 6. ENC
+ vaesenc XDATA1, [KEYS1 + 16*6] ; 6. ENC
+ vaesenc XDATA2, [KEYS2 + 16*6] ; 6. ENC
+ vmovdqa XKEY4_7, [KEYS4 + 16*7] ; load round 7 key
+ vaesenc XDATA3, XKEY3_6 ; 6. ENC
+ vaesenc XDATA4, [KEYS4 + 16*6] ; 6. ENC
+ vaesenc XDATA5, [KEYS5 + 16*6] ; 6. ENC
+ vaesenc XDATA6, [KEYS6 + 16*6] ; 6. ENC
+ vaesenc XDATA7, [KEYS7 + 16*6] ; 6. ENC
+
+ vaesenc XDATA0, [KEYS0 + 16*7] ; 7. ENC
+ vaesenc XDATA1, [KEYS1 + 16*7] ; 7. ENC
+ vaesenc XDATA2, [KEYS2 + 16*7] ; 7. ENC
+ vaesenc XDATA3, [KEYS3 + 16*7] ; 7. ENC
+ vmovdqa XKEY5_8, [KEYS5 + 16*8] ; load round 8 key
+ vaesenc XDATA4, XKEY4_7 ; 7. ENC
+ vaesenc XDATA5, [KEYS5 + 16*7] ; 7. ENC
+ vaesenc XDATA6, [KEYS6 + 16*7] ; 7. ENC
+ vaesenc XDATA7, [KEYS7 + 16*7] ; 7. ENC
+
+ vaesenc XDATA0, [KEYS0 + 16*8] ; 8. ENC
+ vaesenc XDATA1, [KEYS1 + 16*8] ; 8. ENC
+ vaesenc XDATA2, [KEYS2 + 16*8] ; 8. ENC
+ vaesenc XDATA3, [KEYS3 + 16*8] ; 8. ENC
+ vaesenc XDATA4, [KEYS4 + 16*8] ; 8. ENC
+ vmovdqa XKEY6_9, [KEYS6 + 16*9] ; load round 9 key
+ vaesenc XDATA5, XKEY5_8 ; 8. ENC
+ vaesenc XDATA6, [KEYS6 + 16*8] ; 8. ENC
+ vaesenc XDATA7, [KEYS7 + 16*8] ; 8. ENC
+
+ vaesenc XDATA0, [KEYS0 + 16*9] ; 9. ENC
+ vaesenc XDATA1, [KEYS1 + 16*9] ; 9. ENC
+ vaesenc XDATA2, [KEYS2 + 16*9] ; 9. ENC
+ vaesenc XDATA3, [KEYS3 + 16*9] ; 9. ENC
+ vaesenc XDATA4, [KEYS4 + 16*9] ; 9. ENC
+ vaesenc XDATA5, [KEYS5 + 16*9] ; 9. ENC
+ mov TMP, [ARG + _aesarg_out + 8*0]
+ vaesenc XDATA6, XKEY6_9 ; 9. ENC
+ vaesenc XDATA7, [KEYS7 + 16*9] ; 9. ENC
+
+
+ vaesenclast XDATA0, [KEYS0 + 16*10] ; 10. ENC
+ vaesenclast XDATA1, [KEYS1 + 16*10] ; 10. ENC
+ vaesenclast XDATA2, [KEYS2 + 16*10] ; 10. ENC
+ vaesenclast XDATA3, [KEYS3 + 16*10] ; 10. ENC
+ vaesenclast XDATA4, [KEYS4 + 16*10] ; 10. ENC
+ vaesenclast XDATA5, [KEYS5 + 16*10] ; 10. ENC
+ vaesenclast XDATA6, [KEYS6 + 16*10] ; 10. ENC
+ vaesenclast XDATA7, [KEYS7 + 16*10] ; 10. ENC
+
+%ifndef CBC_MAC
+ VMOVDQ [TMP], XDATA0 ; write back ciphertext
+ mov TMP, [ARG + _aesarg_out + 8*1]
+ VMOVDQ [TMP], XDATA1 ; write back ciphertext
+ mov TMP, [ARG + _aesarg_out + 8*2]
+ VMOVDQ [TMP], XDATA2 ; write back ciphertext
+ mov TMP, [ARG + _aesarg_out + 8*3]
+ VMOVDQ [TMP], XDATA3 ; write back ciphertext
+ mov TMP, [ARG + _aesarg_out + 8*4]
+ VMOVDQ [TMP], XDATA4 ; write back ciphertext
+ mov TMP, [ARG + _aesarg_out + 8*5]
+ VMOVDQ [TMP], XDATA5 ; write back ciphertext
+ mov TMP, [ARG + _aesarg_out + 8*6]
+ VMOVDQ [TMP], XDATA6 ; write back ciphertext
+ mov TMP, [ARG + _aesarg_out + 8*7]
+ VMOVDQ [TMP], XDATA7 ; write back ciphertext
+%endif
+ cmp [LEN_AREA], IDX
+ je done
+
+main_loop:
+ mov TMP, [ARG + _aesarg_in + 8*1]
+ VPXOR2 XDATA0, [IN0 + IDX] ; load next block of plain text
+ VPXOR2 XDATA1, [TMP + IDX] ; load next block of plain text
+ mov TMP, [ARG + _aesarg_in + 8*3]
+ VPXOR2 XDATA2, [IN2 + IDX] ; load next block of plain text
+ VPXOR2 XDATA3, [TMP + IDX] ; load next block of plain text
+ mov TMP, [ARG + _aesarg_in + 8*5]
+ VPXOR2 XDATA4, [IN4 + IDX] ; load next block of plain text
+ VPXOR2 XDATA5, [TMP + IDX] ; load next block of plain text
+ mov TMP, [ARG + _aesarg_in + 8*7]
+ VPXOR2 XDATA6, [IN6 + IDX] ; load next block of plain text
+ VPXOR2 XDATA7, [TMP + IDX] ; load next block of plain text
+
+ VPXOR2 XDATA0, [KEYS0 + 16*0] ; 0. ARK
+ VPXOR2 XDATA1, [KEYS1 + 16*0] ; 0. ARK
+ VPXOR2 XDATA2, [KEYS2 + 16*0] ; 0. ARK
+ VPXOR2 XDATA3, [KEYS3 + 16*0] ; 0. ARK
+ VPXOR2 XDATA4, [KEYS4 + 16*0] ; 0. ARK
+ VPXOR2 XDATA5, [KEYS5 + 16*0] ; 0. ARK
+ VPXOR2 XDATA6, [KEYS6 + 16*0] ; 0. ARK
+ VPXOR2 XDATA7, [KEYS7 + 16*0] ; 0. ARK
+
+ vaesenc XDATA0, [KEYS0 + 16*1] ; 1. ENC
+ vaesenc XDATA1, [KEYS1 + 16*1] ; 1. ENC
+ vaesenc XDATA2, [KEYS2 + 16*1] ; 1. ENC
+ vaesenc XDATA3, [KEYS3 + 16*1] ; 1. ENC
+ vaesenc XDATA4, [KEYS4 + 16*1] ; 1. ENC
+ vaesenc XDATA5, [KEYS5 + 16*1] ; 1. ENC
+ vaesenc XDATA6, [KEYS6 + 16*1] ; 1. ENC
+ vaesenc XDATA7, [KEYS7 + 16*1] ; 1. ENC
+
+ vaesenc XDATA0, [KEYS0 + 16*2] ; 2. ENC
+ vaesenc XDATA1, [KEYS1 + 16*2] ; 2. ENC
+ vaesenc XDATA2, [KEYS2 + 16*2] ; 2. ENC
+ vaesenc XDATA3, [KEYS3 + 16*2] ; 2. ENC
+ vaesenc XDATA4, [KEYS4 + 16*2] ; 2. ENC
+ vaesenc XDATA5, [KEYS5 + 16*2] ; 2. ENC
+ vaesenc XDATA6, [KEYS6 + 16*2] ; 2. ENC
+ vaesenc XDATA7, [KEYS7 + 16*2] ; 2. ENC
+
+ vaesenc XDATA0, XKEY0_3 ; 3. ENC
+ vaesenc XDATA1, [KEYS1 + 16*3] ; 3. ENC
+ vaesenc XDATA2, [KEYS2 + 16*3] ; 3. ENC
+ vaesenc XDATA3, [KEYS3 + 16*3] ; 3. ENC
+ vaesenc XDATA4, [KEYS4 + 16*3] ; 3. ENC
+ vaesenc XDATA5, [KEYS5 + 16*3] ; 3. ENC
+ vaesenc XDATA6, [KEYS6 + 16*3] ; 3. ENC
+ vaesenc XDATA7, [KEYS7 + 16*3] ; 3. ENC
+
+ vaesenc XDATA0, [KEYS0 + 16*4] ; 4. ENC
+ vaesenc XDATA1, XKEY1_4 ; 4. ENC
+ vaesenc XDATA2, [KEYS2 + 16*4] ; 4. ENC
+ vaesenc XDATA3, [KEYS3 + 16*4] ; 4. ENC
+ vaesenc XDATA4, [KEYS4 + 16*4] ; 4. ENC
+ vaesenc XDATA5, [KEYS5 + 16*4] ; 4. ENC
+ vaesenc XDATA6, [KEYS6 + 16*4] ; 4. ENC
+ vaesenc XDATA7, [KEYS7 + 16*4] ; 4. ENC
+
+ vaesenc XDATA0, [KEYS0 + 16*5] ; 5. ENC
+ vaesenc XDATA1, [KEYS1 + 16*5] ; 5. ENC
+ vaesenc XDATA2, XKEY2_5 ; 5. ENC
+ vaesenc XDATA3, [KEYS3 + 16*5] ; 5. ENC
+ vaesenc XDATA4, [KEYS4 + 16*5] ; 5. ENC
+ vaesenc XDATA5, [KEYS5 + 16*5] ; 5. ENC
+ vaesenc XDATA6, [KEYS6 + 16*5] ; 5. ENC
+ vaesenc XDATA7, [KEYS7 + 16*5] ; 5. ENC
+
+ vaesenc XDATA0, [KEYS0 + 16*6] ; 6. ENC
+ vaesenc XDATA1, [KEYS1 + 16*6] ; 6. ENC
+ vaesenc XDATA2, [KEYS2 + 16*6] ; 6. ENC
+ vaesenc XDATA3, XKEY3_6 ; 6. ENC
+ vaesenc XDATA4, [KEYS4 + 16*6] ; 6. ENC
+ vaesenc XDATA5, [KEYS5 + 16*6] ; 6. ENC
+ vaesenc XDATA6, [KEYS6 + 16*6] ; 6. ENC
+ vaesenc XDATA7, [KEYS7 + 16*6] ; 6. ENC
+
+ vaesenc XDATA0, [KEYS0 + 16*7] ; 7. ENC
+ vaesenc XDATA1, [KEYS1 + 16*7] ; 7. ENC
+ vaesenc XDATA2, [KEYS2 + 16*7] ; 7. ENC
+ vaesenc XDATA3, [KEYS3 + 16*7] ; 7. ENC
+ vaesenc XDATA4, XKEY4_7 ; 7. ENC
+ vaesenc XDATA5, [KEYS5 + 16*7] ; 7. ENC
+ vaesenc XDATA6, [KEYS6 + 16*7] ; 7. ENC
+ vaesenc XDATA7, [KEYS7 + 16*7] ; 7. ENC
+
+ vaesenc XDATA0, [KEYS0 + 16*8] ; 8. ENC
+ vaesenc XDATA1, [KEYS1 + 16*8] ; 8. ENC
+ vaesenc XDATA2, [KEYS2 + 16*8] ; 8. ENC
+ vaesenc XDATA3, [KEYS3 + 16*8] ; 8. ENC
+ vaesenc XDATA4, [KEYS4 + 16*8] ; 8. ENC
+ vaesenc XDATA5, XKEY5_8 ; 8. ENC
+ vaesenc XDATA6, [KEYS6 + 16*8] ; 8. ENC
+ vaesenc XDATA7, [KEYS7 + 16*8] ; 8. ENC
+
+ vaesenc XDATA0, [KEYS0 + 16*9] ; 9. ENC
+ vaesenc XDATA1, [KEYS1 + 16*9] ; 9. ENC
+ vaesenc XDATA2, [KEYS2 + 16*9] ; 9. ENC
+ vaesenc XDATA3, [KEYS3 + 16*9] ; 9. ENC
+ vaesenc XDATA4, [KEYS4 + 16*9] ; 9. ENC
+ vaesenc XDATA5, [KEYS5 + 16*9] ; 9. ENC
+ mov TMP, [ARG + _aesarg_out + 8*0]
+ vaesenc XDATA6, XKEY6_9 ; 9. ENC
+ vaesenc XDATA7, [KEYS7 + 16*9] ; 9. ENC
+
+
+ vaesenclast XDATA0, [KEYS0 + 16*10] ; 10. ENC
+ vaesenclast XDATA1, [KEYS1 + 16*10] ; 10. ENC
+ vaesenclast XDATA2, [KEYS2 + 16*10] ; 10. ENC
+ vaesenclast XDATA3, [KEYS3 + 16*10] ; 10. ENC
+ vaesenclast XDATA4, [KEYS4 + 16*10] ; 10. ENC
+ vaesenclast XDATA5, [KEYS5 + 16*10] ; 10. ENC
+ vaesenclast XDATA6, [KEYS6 + 16*10] ; 10. ENC
+ vaesenclast XDATA7, [KEYS7 + 16*10] ; 10. ENC
+
+%ifndef CBC_MAC
+ ;; no ciphertext write back for CBC-MAC
+ VMOVDQ [TMP + IDX], XDATA0 ; write back ciphertext
+ mov TMP, [ARG + _aesarg_out + 8*1]
+ VMOVDQ [TMP + IDX], XDATA1 ; write back ciphertext
+ mov TMP, [ARG + _aesarg_out + 8*2]
+ VMOVDQ [TMP + IDX], XDATA2 ; write back ciphertext
+ mov TMP, [ARG + _aesarg_out + 8*3]
+ VMOVDQ [TMP + IDX], XDATA3 ; write back ciphertext
+ mov TMP, [ARG + _aesarg_out + 8*4]
+ VMOVDQ [TMP + IDX], XDATA4 ; write back ciphertext
+ mov TMP, [ARG + _aesarg_out + 8*5]
+ VMOVDQ [TMP + IDX], XDATA5 ; write back ciphertext
+ mov TMP, [ARG + _aesarg_out + 8*6]
+ VMOVDQ [TMP + IDX], XDATA6 ; write back ciphertext
+ mov TMP, [ARG + _aesarg_out + 8*7]
+ VMOVDQ [TMP + IDX], XDATA7 ; write back ciphertext
+%endif
+ add IDX, 16
+ cmp [LEN_AREA], IDX
+ jne main_loop
+
+done:
+ ;; update IV for AES128-CBC / store digest for CBC-MAC
+ vmovdqa [ARG + _aesarg_IV + 16*0], XDATA0
+ vmovdqa [ARG + _aesarg_IV + 16*1], XDATA1
+ vmovdqa [ARG + _aesarg_IV + 16*2], XDATA2
+ vmovdqa [ARG + _aesarg_IV + 16*3], XDATA3
+ vmovdqa [ARG + _aesarg_IV + 16*4], XDATA4
+ vmovdqa [ARG + _aesarg_IV + 16*5], XDATA5
+ vmovdqa [ARG + _aesarg_IV + 16*6], XDATA6
+ vmovdqa [ARG + _aesarg_IV + 16*7], XDATA7
+
+ ;; update IN and OUT
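+	;; broadcast the processed length into both 64-bit lanes of xmm0 so
+	;; that each vpaddq below advances two buffer pointers at once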
+ vmovd xmm0, [LEN_AREA]
+ vpshufd xmm0, xmm0, 0x44
+ vpaddq xmm1, xmm0, [ARG + _aesarg_in + 16*0]
+ vpaddq xmm2, xmm0, [ARG + _aesarg_in + 16*1]
+ vpaddq xmm3, xmm0, [ARG + _aesarg_in + 16*2]
+ vpaddq xmm4, xmm0, [ARG + _aesarg_in + 16*3]
+ vmovdqa [ARG + _aesarg_in + 16*0], xmm1
+ vmovdqa [ARG + _aesarg_in + 16*1], xmm2
+ vmovdqa [ARG + _aesarg_in + 16*2], xmm3
+ vmovdqa [ARG + _aesarg_in + 16*3], xmm4
+%ifndef CBC_MAC
+ vpaddq xmm5, xmm0, [ARG + _aesarg_out + 16*0]
+ vpaddq xmm6, xmm0, [ARG + _aesarg_out + 16*1]
+ vpaddq xmm7, xmm0, [ARG + _aesarg_out + 16*2]
+ vpaddq xmm8, xmm0, [ARG + _aesarg_out + 16*3]
+ vmovdqa [ARG + _aesarg_out + 16*0], xmm5
+ vmovdqa [ARG + _aesarg_out + 16*1], xmm6
+ vmovdqa [ARG + _aesarg_out + 16*2], xmm7
+ vmovdqa [ARG + _aesarg_out + 16*3], xmm8
+%endif
+
+ ;; XMMs are saved at a higher level
+ mov rbp, [GPR_SAVE_AREA + 8*0]
+%ifdef CBC_MAC
+ mov rbx, [GPR_SAVE_AREA + 8*1]
+ mov r12, [GPR_SAVE_AREA + 8*2]
+ mov r13, [GPR_SAVE_AREA + 8*3]
+ mov r14, [GPR_SAVE_AREA + 8*4]
+ mov r15, [GPR_SAVE_AREA + 8*5]
+%ifndef LINUX
+ mov rsi, [GPR_SAVE_AREA + 8*6]
+ mov rdi, [GPR_SAVE_AREA + 8*7]
+%endif
+%endif
+
+ add rsp, STACK_size
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/avx/aes_cbc_enc_192_x8.asm b/src/spdk/intel-ipsec-mb/avx/aes_cbc_enc_192_x8.asm
new file mode 100644
index 000000000..e446f13c3
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx/aes_cbc_enc_192_x8.asm
@@ -0,0 +1,501 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+;;; routine to do a 192 bit CBC AES encrypt
+
+;; clobbers all registers except for ARG1 and rbp
+
+%include "include/os.asm"
+%include "mb_mgr_datastruct.asm"
+
+%define VMOVDQ vmovdqu ;; assume buffers not aligned
+
+%macro VPXOR2 2
+ vpxor %1, %1, %2
+%endm
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; struct AES_ARGS {
+;; void* in[8];
+;; void* out[8];
+;; UINT128* keys[8];
+;; UINT128 IV[8];
+;; }
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; void aes_cbc_enc_192_x8(AES_ARGS *args, UINT64 len);
+;; arg 1: ARG : addr of AES_ARGS structure
+;; arg 2: LEN : len (in units of bytes)
+
+struc STACK
+_gpr_save: resq 1
+_len: resq 1
+endstruc
+
+%define GPR_SAVE_AREA rsp + _gpr_save
+%define LEN_AREA rsp + _len
+
+%ifdef LINUX
+%define ARG rdi
+%define LEN rsi
+%define REG3 rcx
+%define REG4 rdx
+%else
+%define ARG rcx
+%define LEN rdx
+%define REG3 rsi
+%define REG4 rdi
+%endif
+
+%define IDX rax
+%define TMP rbx
+
+%define KEYS0 REG3
+%define KEYS1 REG4
+%define KEYS2 rbp
+%define KEYS3 r8
+%define KEYS4 r9
+%define KEYS5 r10
+%define KEYS6 r11
+%define KEYS7 r12
+
+%define IN0 r13
+%define IN2 r14
+%define IN4 r15
+%define IN6 LEN
+
+%define XDATA0 xmm0
+%define XDATA1 xmm1
+%define XDATA2 xmm2
+%define XDATA3 xmm3
+%define XDATA4 xmm4
+%define XDATA5 xmm5
+%define XDATA6 xmm6
+%define XDATA7 xmm7
+
+%define XKEY0_3 xmm8
+%define XKEY1_4 xmm9
+%define XKEY2_5 xmm10
+%define XKEY3_6 xmm11
+%define XKEY4_7 xmm12
+%define XKEY5_8 xmm13
+%define XKEY6_9 xmm14
+%define XTMP xmm15
+
+section .text
+
+MKGLOBAL(aes_cbc_enc_192_x8,function,internal)
+aes_cbc_enc_192_x8:
+
+ sub rsp, STACK_size
+ mov [GPR_SAVE_AREA + 8*0], rbp
+
+ mov IDX, 16
+ mov [LEN_AREA], LEN
+
+ mov IN0, [ARG + _aesarg_in + 8*0]
+ mov IN2, [ARG + _aesarg_in + 8*2]
+ mov IN4, [ARG + _aesarg_in + 8*4]
+ mov IN6, [ARG + _aesarg_in + 8*6]
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+ mov TMP, [ARG + _aesarg_in + 8*1]
+ VMOVDQ XDATA0, [IN0] ; load first block of plain text
+ VMOVDQ XDATA1, [TMP] ; load first block of plain text
+ mov TMP, [ARG + _aesarg_in + 8*3]
+ VMOVDQ XDATA2, [IN2] ; load first block of plain text
+ VMOVDQ XDATA3, [TMP] ; load first block of plain text
+ mov TMP, [ARG + _aesarg_in + 8*5]
+ VMOVDQ XDATA4, [IN4] ; load first block of plain text
+ VMOVDQ XDATA5, [TMP] ; load first block of plain text
+ mov TMP, [ARG + _aesarg_in + 8*7]
+ VMOVDQ XDATA6, [IN6] ; load first block of plain text
+ VMOVDQ XDATA7, [TMP] ; load first block of plain text
+
+
+ VPXOR2 XDATA0, [ARG + _aesarg_IV + 16*0] ; plaintext XOR IV
+ VPXOR2 XDATA1, [ARG + _aesarg_IV + 16*1] ; plaintext XOR IV
+ VPXOR2 XDATA2, [ARG + _aesarg_IV + 16*2] ; plaintext XOR IV
+ VPXOR2 XDATA3, [ARG + _aesarg_IV + 16*3] ; plaintext XOR IV
+ VPXOR2 XDATA4, [ARG + _aesarg_IV + 16*4] ; plaintext XOR IV
+ VPXOR2 XDATA5, [ARG + _aesarg_IV + 16*5] ; plaintext XOR IV
+ VPXOR2 XDATA6, [ARG + _aesarg_IV + 16*6] ; plaintext XOR IV
+ VPXOR2 XDATA7, [ARG + _aesarg_IV + 16*7] ; plaintext XOR IV
+
+ mov KEYS0, [ARG + _aesarg_keys + 8*0]
+ mov KEYS1, [ARG + _aesarg_keys + 8*1]
+ mov KEYS2, [ARG + _aesarg_keys + 8*2]
+ mov KEYS3, [ARG + _aesarg_keys + 8*3]
+ mov KEYS4, [ARG + _aesarg_keys + 8*4]
+ mov KEYS5, [ARG + _aesarg_keys + 8*5]
+ mov KEYS6, [ARG + _aesarg_keys + 8*6]
+ mov KEYS7, [ARG + _aesarg_keys + 8*7]
+
+ VPXOR2 XDATA0, [KEYS0 + 16*0] ; 0. ARK
+ VPXOR2 XDATA1, [KEYS1 + 16*0] ; 0. ARK
+ VPXOR2 XDATA2, [KEYS2 + 16*0] ; 0. ARK
+ VPXOR2 XDATA3, [KEYS3 + 16*0] ; 0. ARK
+ VPXOR2 XDATA4, [KEYS4 + 16*0] ; 0. ARK
+ VPXOR2 XDATA5, [KEYS5 + 16*0] ; 0. ARK
+ VPXOR2 XDATA6, [KEYS6 + 16*0] ; 0. ARK
+ VPXOR2 XDATA7, [KEYS7 + 16*0] ; 0. ARK
+
+ vaesenc XDATA0, [KEYS0 + 16*1] ; 1. ENC
+ vaesenc XDATA1, [KEYS1 + 16*1] ; 1. ENC
+ vaesenc XDATA2, [KEYS2 + 16*1] ; 1. ENC
+ vaesenc XDATA3, [KEYS3 + 16*1] ; 1. ENC
+ vaesenc XDATA4, [KEYS4 + 16*1] ; 1. ENC
+ vaesenc XDATA5, [KEYS5 + 16*1] ; 1. ENC
+ vaesenc XDATA6, [KEYS6 + 16*1] ; 1. ENC
+ vaesenc XDATA7, [KEYS7 + 16*1] ; 1. ENC
+
+ vmovdqa XKEY0_3, [KEYS0 + 16*3] ; load round 3 key
+
+ vaesenc XDATA0, [KEYS0 + 16*2] ; 2. ENC
+ vaesenc XDATA1, [KEYS1 + 16*2] ; 2. ENC
+ vaesenc XDATA2, [KEYS2 + 16*2] ; 2. ENC
+ vaesenc XDATA3, [KEYS3 + 16*2] ; 2. ENC
+ vaesenc XDATA4, [KEYS4 + 16*2] ; 2. ENC
+ vaesenc XDATA5, [KEYS5 + 16*2] ; 2. ENC
+ vaesenc XDATA6, [KEYS6 + 16*2] ; 2. ENC
+ vaesenc XDATA7, [KEYS7 + 16*2] ; 2. ENC
+
+ vmovdqa XKEY1_4, [KEYS1 + 16*4] ; load round 4 key
+
+ vaesenc XDATA0, XKEY0_3 ; 3. ENC
+ vaesenc XDATA1, [KEYS1 + 16*3] ; 3. ENC
+ vaesenc XDATA2, [KEYS2 + 16*3] ; 3. ENC
+ vaesenc XDATA3, [KEYS3 + 16*3] ; 3. ENC
+ vaesenc XDATA4, [KEYS4 + 16*3] ; 3. ENC
+ vaesenc XDATA5, [KEYS5 + 16*3] ; 3. ENC
+ vaesenc XDATA6, [KEYS6 + 16*3] ; 3. ENC
+ vaesenc XDATA7, [KEYS7 + 16*3] ; 3. ENC
+
+ vaesenc XDATA0, [KEYS0 + 16*4] ; 4. ENC
+ vmovdqa XKEY2_5, [KEYS2 + 16*5] ; load round 5 key
+ vaesenc XDATA1, XKEY1_4 ; 4. ENC
+ vaesenc XDATA2, [KEYS2 + 16*4] ; 4. ENC
+ vaesenc XDATA3, [KEYS3 + 16*4] ; 4. ENC
+ vaesenc XDATA4, [KEYS4 + 16*4] ; 4. ENC
+ vaesenc XDATA5, [KEYS5 + 16*4] ; 4. ENC
+ vaesenc XDATA6, [KEYS6 + 16*4] ; 4. ENC
+ vaesenc XDATA7, [KEYS7 + 16*4] ; 4. ENC
+
+ vaesenc XDATA0, [KEYS0 + 16*5] ; 5. ENC
+ vaesenc XDATA1, [KEYS1 + 16*5] ; 5. ENC
+ vmovdqa XKEY3_6, [KEYS3 + 16*6] ; load round 6 key
+ vaesenc XDATA2, XKEY2_5 ; 5. ENC
+ vaesenc XDATA3, [KEYS3 + 16*5] ; 5. ENC
+ vaesenc XDATA4, [KEYS4 + 16*5] ; 5. ENC
+ vaesenc XDATA5, [KEYS5 + 16*5] ; 5. ENC
+ vaesenc XDATA6, [KEYS6 + 16*5] ; 5. ENC
+ vaesenc XDATA7, [KEYS7 + 16*5] ; 5. ENC
+
+ vaesenc XDATA0, [KEYS0 + 16*6] ; 6. ENC
+ vaesenc XDATA1, [KEYS1 + 16*6] ; 6. ENC
+ vaesenc XDATA2, [KEYS2 + 16*6] ; 6. ENC
+ vmovdqa XKEY4_7, [KEYS4 + 16*7] ; load round 7 key
+ vaesenc XDATA3, XKEY3_6 ; 6. ENC
+ vaesenc XDATA4, [KEYS4 + 16*6] ; 6. ENC
+ vaesenc XDATA5, [KEYS5 + 16*6] ; 6. ENC
+ vaesenc XDATA6, [KEYS6 + 16*6] ; 6. ENC
+ vaesenc XDATA7, [KEYS7 + 16*6] ; 6. ENC
+
+ vaesenc XDATA0, [KEYS0 + 16*7] ; 7. ENC
+ vaesenc XDATA1, [KEYS1 + 16*7] ; 7. ENC
+ vaesenc XDATA2, [KEYS2 + 16*7] ; 7. ENC
+ vaesenc XDATA3, [KEYS3 + 16*7] ; 7. ENC
+ vmovdqa XKEY5_8, [KEYS5 + 16*8] ; load round 8 key
+ vaesenc XDATA4, XKEY4_7 ; 7. ENC
+ vaesenc XDATA5, [KEYS5 + 16*7] ; 7. ENC
+ vaesenc XDATA6, [KEYS6 + 16*7] ; 7. ENC
+ vaesenc XDATA7, [KEYS7 + 16*7] ; 7. ENC
+
+ vaesenc XDATA0, [KEYS0 + 16*8] ; 8. ENC
+ vaesenc XDATA1, [KEYS1 + 16*8] ; 8. ENC
+ vaesenc XDATA2, [KEYS2 + 16*8] ; 8. ENC
+ vaesenc XDATA3, [KEYS3 + 16*8] ; 8. ENC
+ vaesenc XDATA4, [KEYS4 + 16*8] ; 8. ENC
+ vmovdqa XKEY6_9, [KEYS6 + 16*9] ; load round 9 key
+ vaesenc XDATA5, XKEY5_8 ; 8. ENC
+ vaesenc XDATA6, [KEYS6 + 16*8] ; 8. ENC
+ vaesenc XDATA7, [KEYS7 + 16*8] ; 8. ENC
+
+ vaesenc XDATA0, [KEYS0 + 16*9] ; 9. ENC
+ vaesenc XDATA1, [KEYS1 + 16*9] ; 9. ENC
+ vaesenc XDATA2, [KEYS2 + 16*9] ; 9. ENC
+ vaesenc XDATA3, [KEYS3 + 16*9] ; 9. ENC
+ vaesenc XDATA4, [KEYS4 + 16*9] ; 9. ENC
+ vaesenc XDATA5, [KEYS5 + 16*9] ; 9. ENC
+ mov TMP, [ARG + _aesarg_out + 8*0]
+ vaesenc XDATA6, XKEY6_9 ; 9. ENC
+ vaesenc XDATA7, [KEYS7 + 16*9] ; 9. ENC
+
+
+ vaesenc XDATA0, [KEYS0 + 16*10] ; 10. ENC
+ vaesenc XDATA1, [KEYS1 + 16*10] ; 10. ENC
+ vaesenc XDATA2, [KEYS2 + 16*10] ; 10. ENC
+ vaesenc XDATA3, [KEYS3 + 16*10] ; 10. ENC
+ vaesenc XDATA4, [KEYS4 + 16*10] ; 10. ENC
+ vaesenc XDATA5, [KEYS5 + 16*10] ; 10. ENC
+ vaesenc XDATA6, [KEYS6 + 16*10] ; 10. ENC
+ vaesenc XDATA7, [KEYS7 + 16*10] ; 10. ENC
+
+ vaesenc XDATA0, [KEYS0 + 16*11] ; 11. ENC
+ vaesenc XDATA1, [KEYS1 + 16*11] ; 11. ENC
+ vaesenc XDATA2, [KEYS2 + 16*11] ; 11. ENC
+ vaesenc XDATA3, [KEYS3 + 16*11] ; 11. ENC
+ vaesenc XDATA4, [KEYS4 + 16*11] ; 11. ENC
+ vaesenc XDATA5, [KEYS5 + 16*11] ; 11. ENC
+ vaesenc XDATA6, [KEYS6 + 16*11] ; 11. ENC
+ vaesenc XDATA7, [KEYS7 + 16*11] ; 11. ENC
+
+
+ vaesenclast XDATA0, [KEYS0 + 16*12] ; 12. ENC
+ vaesenclast XDATA1, [KEYS1 + 16*12] ; 12. ENC
+ vaesenclast XDATA2, [KEYS2 + 16*12] ; 12. ENC
+ vaesenclast XDATA3, [KEYS3 + 16*12] ; 12. ENC
+ vaesenclast XDATA4, [KEYS4 + 16*12] ; 12. ENC
+ vaesenclast XDATA5, [KEYS5 + 16*12] ; 12. ENC
+ vaesenclast XDATA6, [KEYS6 + 16*12] ; 12. ENC
+ vaesenclast XDATA7, [KEYS7 + 16*12] ; 12. ENC
+
+ VMOVDQ [TMP], XDATA0 ; write back ciphertext
+ mov TMP, [ARG + _aesarg_out + 8*1]
+ VMOVDQ [TMP], XDATA1 ; write back ciphertext
+ mov TMP, [ARG + _aesarg_out + 8*2]
+ VMOVDQ [TMP], XDATA2 ; write back ciphertext
+ mov TMP, [ARG + _aesarg_out + 8*3]
+ VMOVDQ [TMP], XDATA3 ; write back ciphertext
+ mov TMP, [ARG + _aesarg_out + 8*4]
+ VMOVDQ [TMP], XDATA4 ; write back ciphertext
+ mov TMP, [ARG + _aesarg_out + 8*5]
+ VMOVDQ [TMP], XDATA5 ; write back ciphertext
+ mov TMP, [ARG + _aesarg_out + 8*6]
+ VMOVDQ [TMP], XDATA6 ; write back ciphertext
+ mov TMP, [ARG + _aesarg_out + 8*7]
+ VMOVDQ [TMP], XDATA7 ; write back ciphertext
+
+ cmp [LEN_AREA], IDX
+ je done
+
+main_loop:
+ mov TMP, [ARG + _aesarg_in + 8*1]
+ VPXOR2 XDATA0, [IN0 + IDX] ; load next block of plain text
+ VPXOR2 XDATA1, [TMP + IDX] ; load next block of plain text
+ mov TMP, [ARG + _aesarg_in + 8*3]
+ VPXOR2 XDATA2, [IN2 + IDX] ; load next block of plain text
+ VPXOR2 XDATA3, [TMP + IDX] ; load next block of plain text
+ mov TMP, [ARG + _aesarg_in + 8*5]
+ VPXOR2 XDATA4, [IN4 + IDX] ; load next block of plain text
+ VPXOR2 XDATA5, [TMP + IDX] ; load next block of plain text
+ mov TMP, [ARG + _aesarg_in + 8*7]
+ VPXOR2 XDATA6, [IN6 + IDX] ; load next block of plain text
+ VPXOR2 XDATA7, [TMP + IDX] ; load next block of plain text
+
+
+ VPXOR2 XDATA0, [KEYS0 + 16*0] ; 0. ARK
+ VPXOR2 XDATA1, [KEYS1 + 16*0] ; 0. ARK
+ VPXOR2 XDATA2, [KEYS2 + 16*0] ; 0. ARK
+ VPXOR2 XDATA3, [KEYS3 + 16*0] ; 0. ARK
+ VPXOR2 XDATA4, [KEYS4 + 16*0] ; 0. ARK
+ VPXOR2 XDATA5, [KEYS5 + 16*0] ; 0. ARK
+ VPXOR2 XDATA6, [KEYS6 + 16*0] ; 0. ARK
+ VPXOR2 XDATA7, [KEYS7 + 16*0] ; 0. ARK
+
+ vaesenc XDATA0, [KEYS0 + 16*1] ; 1. ENC
+ vaesenc XDATA1, [KEYS1 + 16*1] ; 1. ENC
+ vaesenc XDATA2, [KEYS2 + 16*1] ; 1. ENC
+ vaesenc XDATA3, [KEYS3 + 16*1] ; 1. ENC
+ vaesenc XDATA4, [KEYS4 + 16*1] ; 1. ENC
+ vaesenc XDATA5, [KEYS5 + 16*1] ; 1. ENC
+ vaesenc XDATA6, [KEYS6 + 16*1] ; 1. ENC
+ vaesenc XDATA7, [KEYS7 + 16*1] ; 1. ENC
+
+ vaesenc XDATA0, [KEYS0 + 16*2] ; 2. ENC
+ vaesenc XDATA1, [KEYS1 + 16*2] ; 2. ENC
+ vaesenc XDATA2, [KEYS2 + 16*2] ; 2. ENC
+ vaesenc XDATA3, [KEYS3 + 16*2] ; 2. ENC
+ vaesenc XDATA4, [KEYS4 + 16*2] ; 2. ENC
+ vaesenc XDATA5, [KEYS5 + 16*2] ; 2. ENC
+ vaesenc XDATA6, [KEYS6 + 16*2] ; 2. ENC
+ vaesenc XDATA7, [KEYS7 + 16*2] ; 2. ENC
+
+ vaesenc XDATA0, XKEY0_3 ; 3. ENC
+ vaesenc XDATA1, [KEYS1 + 16*3] ; 3. ENC
+ vaesenc XDATA2, [KEYS2 + 16*3] ; 3. ENC
+ vaesenc XDATA3, [KEYS3 + 16*3] ; 3. ENC
+ vaesenc XDATA4, [KEYS4 + 16*3] ; 3. ENC
+ vaesenc XDATA5, [KEYS5 + 16*3] ; 3. ENC
+ vaesenc XDATA6, [KEYS6 + 16*3] ; 3. ENC
+ vaesenc XDATA7, [KEYS7 + 16*3] ; 3. ENC
+
+ vaesenc XDATA0, [KEYS0 + 16*4] ; 4. ENC
+ vaesenc XDATA1, XKEY1_4 ; 4. ENC
+ vaesenc XDATA2, [KEYS2 + 16*4] ; 4. ENC
+ vaesenc XDATA3, [KEYS3 + 16*4] ; 4. ENC
+ vaesenc XDATA4, [KEYS4 + 16*4] ; 4. ENC
+ vaesenc XDATA5, [KEYS5 + 16*4] ; 4. ENC
+ vaesenc XDATA6, [KEYS6 + 16*4] ; 4. ENC
+ vaesenc XDATA7, [KEYS7 + 16*4] ; 4. ENC
+
+ vaesenc XDATA0, [KEYS0 + 16*5] ; 5. ENC
+ vaesenc XDATA1, [KEYS1 + 16*5] ; 5. ENC
+ vaesenc XDATA2, XKEY2_5 ; 5. ENC
+ vaesenc XDATA3, [KEYS3 + 16*5] ; 5. ENC
+ vaesenc XDATA4, [KEYS4 + 16*5] ; 5. ENC
+ vaesenc XDATA5, [KEYS5 + 16*5] ; 5. ENC
+ vaesenc XDATA6, [KEYS6 + 16*5] ; 5. ENC
+ vaesenc XDATA7, [KEYS7 + 16*5] ; 5. ENC
+
+ vaesenc XDATA0, [KEYS0 + 16*6] ; 6. ENC
+ vaesenc XDATA1, [KEYS1 + 16*6] ; 6. ENC
+ vaesenc XDATA2, [KEYS2 + 16*6] ; 6. ENC
+ vaesenc XDATA3, XKEY3_6 ; 6. ENC
+ vaesenc XDATA4, [KEYS4 + 16*6] ; 6. ENC
+ vaesenc XDATA5, [KEYS5 + 16*6] ; 6. ENC
+ vaesenc XDATA6, [KEYS6 + 16*6] ; 6. ENC
+ vaesenc XDATA7, [KEYS7 + 16*6] ; 6. ENC
+
+ vaesenc XDATA0, [KEYS0 + 16*7] ; 7. ENC
+ vaesenc XDATA1, [KEYS1 + 16*7] ; 7. ENC
+ vaesenc XDATA2, [KEYS2 + 16*7] ; 7. ENC
+ vaesenc XDATA3, [KEYS3 + 16*7] ; 7. ENC
+ vaesenc XDATA4, XKEY4_7 ; 7. ENC
+ vaesenc XDATA5, [KEYS5 + 16*7] ; 7. ENC
+ vaesenc XDATA6, [KEYS6 + 16*7] ; 7. ENC
+ vaesenc XDATA7, [KEYS7 + 16*7] ; 7. ENC
+
+ vaesenc XDATA0, [KEYS0 + 16*8] ; 8. ENC
+ vaesenc XDATA1, [KEYS1 + 16*8] ; 8. ENC
+ vaesenc XDATA2, [KEYS2 + 16*8] ; 8. ENC
+ vaesenc XDATA3, [KEYS3 + 16*8] ; 8. ENC
+ vaesenc XDATA4, [KEYS4 + 16*8] ; 8. ENC
+ vaesenc XDATA5, XKEY5_8 ; 8. ENC
+ vaesenc XDATA6, [KEYS6 + 16*8] ; 8. ENC
+ vaesenc XDATA7, [KEYS7 + 16*8] ; 8. ENC
+
+ vaesenc XDATA0, [KEYS0 + 16*9] ; 9. ENC
+ vaesenc XDATA1, [KEYS1 + 16*9] ; 9. ENC
+ vaesenc XDATA2, [KEYS2 + 16*9] ; 9. ENC
+ vaesenc XDATA3, [KEYS3 + 16*9] ; 9. ENC
+ vaesenc XDATA4, [KEYS4 + 16*9] ; 9. ENC
+ vaesenc XDATA5, [KEYS5 + 16*9] ; 9. ENC
+ mov TMP, [ARG + _aesarg_out + 8*0]
+ vaesenc XDATA6, XKEY6_9 ; 9. ENC
+ vaesenc XDATA7, [KEYS7 + 16*9] ; 9. ENC
+
+
+ vaesenc XDATA0, [KEYS0 + 16*10] ; 10. ENC
+ vaesenc XDATA1, [KEYS1 + 16*10] ; 10. ENC
+ vaesenc XDATA2, [KEYS2 + 16*10] ; 10. ENC
+ vaesenc XDATA3, [KEYS3 + 16*10] ; 10. ENC
+ vaesenc XDATA4, [KEYS4 + 16*10] ; 10. ENC
+ vaesenc XDATA5, [KEYS5 + 16*10] ; 10. ENC
+ vaesenc XDATA6, [KEYS6 + 16*10] ; 10. ENC
+ vaesenc XDATA7, [KEYS7 + 16*10] ; 10. ENC
+
+ vaesenc XDATA0, [KEYS0 + 16*11] ; 11. ENC
+ vaesenc XDATA1, [KEYS1 + 16*11] ; 11. ENC
+ vaesenc XDATA2, [KEYS2 + 16*11] ; 11. ENC
+ vaesenc XDATA3, [KEYS3 + 16*11] ; 11. ENC
+ vaesenc XDATA4, [KEYS4 + 16*11] ; 11. ENC
+ vaesenc XDATA5, [KEYS5 + 16*11] ; 11. ENC
+ vaesenc XDATA6, [KEYS6 + 16*11] ; 11. ENC
+ vaesenc XDATA7, [KEYS7 + 16*11] ; 11. ENC
+
+ vaesenclast XDATA0, [KEYS0 + 16*12] ; 12. ENC
+ vaesenclast XDATA1, [KEYS1 + 16*12] ; 12. ENC
+ vaesenclast XDATA2, [KEYS2 + 16*12] ; 12. ENC
+ vaesenclast XDATA3, [KEYS3 + 16*12] ; 12. ENC
+ vaesenclast XDATA4, [KEYS4 + 16*12] ; 12. ENC
+ vaesenclast XDATA5, [KEYS5 + 16*12] ; 12. ENC
+ vaesenclast XDATA6, [KEYS6 + 16*12] ; 12. ENC
+ vaesenclast XDATA7, [KEYS7 + 16*12] ; 12. ENC
+
+
+ VMOVDQ [TMP + IDX], XDATA0 ; write back ciphertext
+ mov TMP, [ARG + _aesarg_out + 8*1]
+ VMOVDQ [TMP + IDX], XDATA1 ; write back ciphertext
+ mov TMP, [ARG + _aesarg_out + 8*2]
+ VMOVDQ [TMP + IDX], XDATA2 ; write back ciphertext
+ mov TMP, [ARG + _aesarg_out + 8*3]
+ VMOVDQ [TMP + IDX], XDATA3 ; write back ciphertext
+ mov TMP, [ARG + _aesarg_out + 8*4]
+ VMOVDQ [TMP + IDX], XDATA4 ; write back ciphertext
+ mov TMP, [ARG + _aesarg_out + 8*5]
+ VMOVDQ [TMP + IDX], XDATA5 ; write back ciphertext
+ mov TMP, [ARG + _aesarg_out + 8*6]
+ VMOVDQ [TMP + IDX], XDATA6 ; write back ciphertext
+ mov TMP, [ARG + _aesarg_out + 8*7]
+ VMOVDQ [TMP + IDX], XDATA7 ; write back ciphertext
+
+ add IDX, 16
+ cmp [LEN_AREA], IDX
+ jne main_loop
+
+done:
+ ;; update IV
+ vmovdqa [ARG + _aesarg_IV + 16*0], XDATA0
+ vmovdqa [ARG + _aesarg_IV + 16*1], XDATA1
+ vmovdqa [ARG + _aesarg_IV + 16*2], XDATA2
+ vmovdqa [ARG + _aesarg_IV + 16*3], XDATA3
+ vmovdqa [ARG + _aesarg_IV + 16*4], XDATA4
+ vmovdqa [ARG + _aesarg_IV + 16*5], XDATA5
+ vmovdqa [ARG + _aesarg_IV + 16*6], XDATA6
+ vmovdqa [ARG + _aesarg_IV + 16*7], XDATA7
+
+ ;; update IN and OUT
+ vmovd xmm0, [LEN_AREA]
+ vpshufd xmm0, xmm0, 0x44
+ vpaddq xmm1, xmm0, [ARG + _aesarg_in + 16*0]
+ vpaddq xmm2, xmm0, [ARG + _aesarg_in + 16*1]
+ vpaddq xmm3, xmm0, [ARG + _aesarg_in + 16*2]
+ vpaddq xmm4, xmm0, [ARG + _aesarg_in + 16*3]
+ vmovdqa [ARG + _aesarg_in + 16*0], xmm1
+ vmovdqa [ARG + _aesarg_in + 16*1], xmm2
+ vmovdqa [ARG + _aesarg_in + 16*2], xmm3
+ vmovdqa [ARG + _aesarg_in + 16*3], xmm4
+ vpaddq xmm5, xmm0, [ARG + _aesarg_out + 16*0]
+ vpaddq xmm6, xmm0, [ARG + _aesarg_out + 16*1]
+ vpaddq xmm7, xmm0, [ARG + _aesarg_out + 16*2]
+ vpaddq xmm8, xmm0, [ARG + _aesarg_out + 16*3]
+ vmovdqa [ARG + _aesarg_out + 16*0], xmm5
+ vmovdqa [ARG + _aesarg_out + 16*1], xmm6
+ vmovdqa [ARG + _aesarg_out + 16*2], xmm7
+ vmovdqa [ARG + _aesarg_out + 16*3], xmm8
+
+;; XMMs are saved at a higher level
+ mov rbp, [GPR_SAVE_AREA + 8*0]
+
+ add rsp, STACK_size
+
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/avx/aes_cbc_enc_256_x8.asm b/src/spdk/intel-ipsec-mb/avx/aes_cbc_enc_256_x8.asm
new file mode 100644
index 000000000..75cf285d9
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx/aes_cbc_enc_256_x8.asm
@@ -0,0 +1,536 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+;;; routine to do a 256 bit CBC AES encrypt
+
+;; clobbers all registers except for ARG1 and rbp
+
+%include "include/os.asm"
+%include "mb_mgr_datastruct.asm"
+
+%define VMOVDQ vmovdqu ;; assume buffers not aligned
+
+%macro VPXOR2 2
+ vpxor %1, %1, %2
+%endm
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; struct AES_ARGS {
+;; void* in[8];
+;; void* out[8];
+;; UINT128* keys[8];
+;; UINT128 IV[8];
+;; }
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; void aes_cbc_enc_256_x8(AES_ARGS *args, UINT64 len);
+;; arg 1: ARG : addr of AES_ARGS structure
+;; arg 2: LEN : len (in units of bytes)
+
+struc STACK
+_gpr_save: resq 1
+_len: resq 1
+endstruc
+
+%define GPR_SAVE_AREA rsp + _gpr_save
+%define LEN_AREA rsp + _len
+
+%ifdef LINUX
+%define ARG rdi
+%define LEN rsi
+%define REG3 rcx
+%define REG4 rdx
+%else
+%define ARG rcx
+%define LEN rdx
+%define REG3 rsi
+%define REG4 rdi
+%endif
+
+%define IDX rax
+%define TMP rbx
+
+%define KEYS0 REG3
+%define KEYS1 REG4
+%define KEYS2 rbp
+%define KEYS3 r8
+%define KEYS4 r9
+%define KEYS5 r10
+%define KEYS6 r11
+%define KEYS7 r12
+
+%define IN0 r13
+%define IN2 r14
+%define IN4 r15
+%define IN6 LEN
+
+%define XDATA0 xmm0
+%define XDATA1 xmm1
+%define XDATA2 xmm2
+%define XDATA3 xmm3
+%define XDATA4 xmm4
+%define XDATA5 xmm5
+%define XDATA6 xmm6
+%define XDATA7 xmm7
+
+%define XKEY0_3 xmm8
+%define XKEY1_4 xmm9
+%define XKEY2_5 xmm10
+%define XKEY3_6 xmm11
+%define XKEY4_7 xmm12
+%define XKEY5_8 xmm13
+%define XKEY6_9 xmm14
+%define XTMP xmm15
+
+section .text
+MKGLOBAL(aes_cbc_enc_256_x8,function,internal)
+aes_cbc_enc_256_x8:
+
+ sub rsp, STACK_size
+ mov [GPR_SAVE_AREA + 8*0], rbp
+
+ mov IDX, 16
+ mov [LEN_AREA], LEN
+
+ mov IN0, [ARG + _aesarg_in + 8*0]
+ mov IN2, [ARG + _aesarg_in + 8*2]
+ mov IN4, [ARG + _aesarg_in + 8*4]
+ mov IN6, [ARG + _aesarg_in + 8*6]
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+ mov TMP, [ARG + _aesarg_in + 8*1]
+ VMOVDQ XDATA0, [IN0] ; load first block of plain text
+ VMOVDQ XDATA1, [TMP] ; load first block of plain text
+ mov TMP, [ARG + _aesarg_in + 8*3]
+ VMOVDQ XDATA2, [IN2] ; load first block of plain text
+ VMOVDQ XDATA3, [TMP] ; load first block of plain text
+ mov TMP, [ARG + _aesarg_in + 8*5]
+ VMOVDQ XDATA4, [IN4] ; load first block of plain text
+ VMOVDQ XDATA5, [TMP] ; load first block of plain text
+ mov TMP, [ARG + _aesarg_in + 8*7]
+ VMOVDQ XDATA6, [IN6] ; load first block of plain text
+ VMOVDQ XDATA7, [TMP] ; load first block of plain text
+
+
+ VPXOR2 XDATA0, [ARG + _aesarg_IV + 16*0] ; plaintext XOR IV
+ VPXOR2 XDATA1, [ARG + _aesarg_IV + 16*1] ; plaintext XOR IV
+ VPXOR2 XDATA2, [ARG + _aesarg_IV + 16*2] ; plaintext XOR IV
+ VPXOR2 XDATA3, [ARG + _aesarg_IV + 16*3] ; plaintext XOR IV
+ VPXOR2 XDATA4, [ARG + _aesarg_IV + 16*4] ; plaintext XOR IV
+ VPXOR2 XDATA5, [ARG + _aesarg_IV + 16*5] ; plaintext XOR IV
+ VPXOR2 XDATA6, [ARG + _aesarg_IV + 16*6] ; plaintext XOR IV
+ VPXOR2 XDATA7, [ARG + _aesarg_IV + 16*7] ; plaintext XOR IV
+
+ mov KEYS0, [ARG + _aesarg_keys + 8*0]
+ mov KEYS1, [ARG + _aesarg_keys + 8*1]
+ mov KEYS2, [ARG + _aesarg_keys + 8*2]
+ mov KEYS3, [ARG + _aesarg_keys + 8*3]
+ mov KEYS4, [ARG + _aesarg_keys + 8*4]
+ mov KEYS5, [ARG + _aesarg_keys + 8*5]
+ mov KEYS6, [ARG + _aesarg_keys + 8*6]
+ mov KEYS7, [ARG + _aesarg_keys + 8*7]
+
+ VPXOR2 XDATA0, [KEYS0 + 16*0] ; 0. ARK
+ VPXOR2 XDATA1, [KEYS1 + 16*0] ; 0. ARK
+ VPXOR2 XDATA2, [KEYS2 + 16*0] ; 0. ARK
+ VPXOR2 XDATA3, [KEYS3 + 16*0] ; 0. ARK
+ VPXOR2 XDATA4, [KEYS4 + 16*0] ; 0. ARK
+ VPXOR2 XDATA5, [KEYS5 + 16*0] ; 0. ARK
+ VPXOR2 XDATA6, [KEYS6 + 16*0] ; 0. ARK
+ VPXOR2 XDATA7, [KEYS7 + 16*0] ; 0. ARK
+
+ vaesenc XDATA0, [KEYS0 + 16*1] ; 1. ENC
+ vaesenc XDATA1, [KEYS1 + 16*1] ; 1. ENC
+ vaesenc XDATA2, [KEYS2 + 16*1] ; 1. ENC
+ vaesenc XDATA3, [KEYS3 + 16*1] ; 1. ENC
+ vaesenc XDATA4, [KEYS4 + 16*1] ; 1. ENC
+ vaesenc XDATA5, [KEYS5 + 16*1] ; 1. ENC
+ vaesenc XDATA6, [KEYS6 + 16*1] ; 1. ENC
+ vaesenc XDATA7, [KEYS7 + 16*1] ; 1. ENC
+
+ vmovdqa XKEY0_3, [KEYS0 + 16*3] ; load round 3 key
+
+ vaesenc XDATA0, [KEYS0 + 16*2] ; 2. ENC
+ vaesenc XDATA1, [KEYS1 + 16*2] ; 2. ENC
+ vaesenc XDATA2, [KEYS2 + 16*2] ; 2. ENC
+ vaesenc XDATA3, [KEYS3 + 16*2] ; 2. ENC
+ vaesenc XDATA4, [KEYS4 + 16*2] ; 2. ENC
+ vaesenc XDATA5, [KEYS5 + 16*2] ; 2. ENC
+ vaesenc XDATA6, [KEYS6 + 16*2] ; 2. ENC
+ vaesenc XDATA7, [KEYS7 + 16*2] ; 2. ENC
+
+ vmovdqa XKEY1_4, [KEYS1 + 16*4] ; load round 4 key
+
+ vaesenc XDATA0, XKEY0_3 ; 3. ENC
+ vaesenc XDATA1, [KEYS1 + 16*3] ; 3. ENC
+ vaesenc XDATA2, [KEYS2 + 16*3] ; 3. ENC
+ vaesenc XDATA3, [KEYS3 + 16*3] ; 3. ENC
+ vaesenc XDATA4, [KEYS4 + 16*3] ; 3. ENC
+ vaesenc XDATA5, [KEYS5 + 16*3] ; 3. ENC
+ vaesenc XDATA6, [KEYS6 + 16*3] ; 3. ENC
+ vaesenc XDATA7, [KEYS7 + 16*3] ; 3. ENC
+
+ vaesenc XDATA0, [KEYS0 + 16*4] ; 4. ENC
+ vmovdqa XKEY2_5, [KEYS2 + 16*5] ; load round 5 key
+ vaesenc XDATA1, XKEY1_4 ; 4. ENC
+ vaesenc XDATA2, [KEYS2 + 16*4] ; 4. ENC
+ vaesenc XDATA3, [KEYS3 + 16*4] ; 4. ENC
+ vaesenc XDATA4, [KEYS4 + 16*4] ; 4. ENC
+ vaesenc XDATA5, [KEYS5 + 16*4] ; 4. ENC
+ vaesenc XDATA6, [KEYS6 + 16*4] ; 4. ENC
+ vaesenc XDATA7, [KEYS7 + 16*4] ; 4. ENC
+
+ vaesenc XDATA0, [KEYS0 + 16*5] ; 5. ENC
+ vaesenc XDATA1, [KEYS1 + 16*5] ; 5. ENC
+ vmovdqa XKEY3_6, [KEYS3 + 16*6] ; load round 6 key
+ vaesenc XDATA2, XKEY2_5 ; 5. ENC
+ vaesenc XDATA3, [KEYS3 + 16*5] ; 5. ENC
+ vaesenc XDATA4, [KEYS4 + 16*5] ; 5. ENC
+ vaesenc XDATA5, [KEYS5 + 16*5] ; 5. ENC
+ vaesenc XDATA6, [KEYS6 + 16*5] ; 5. ENC
+ vaesenc XDATA7, [KEYS7 + 16*5] ; 5. ENC
+
+ vaesenc XDATA0, [KEYS0 + 16*6] ; 6. ENC
+ vaesenc XDATA1, [KEYS1 + 16*6] ; 6. ENC
+ vaesenc XDATA2, [KEYS2 + 16*6] ; 6. ENC
+ vmovdqa XKEY4_7, [KEYS4 + 16*7] ; load round 7 key
+ vaesenc XDATA3, XKEY3_6 ; 6. ENC
+ vaesenc XDATA4, [KEYS4 + 16*6] ; 6. ENC
+ vaesenc XDATA5, [KEYS5 + 16*6] ; 6. ENC
+ vaesenc XDATA6, [KEYS6 + 16*6] ; 6. ENC
+ vaesenc XDATA7, [KEYS7 + 16*6] ; 6. ENC
+
+ vaesenc XDATA0, [KEYS0 + 16*7] ; 7. ENC
+ vaesenc XDATA1, [KEYS1 + 16*7] ; 7. ENC
+ vaesenc XDATA2, [KEYS2 + 16*7] ; 7. ENC
+ vaesenc XDATA3, [KEYS3 + 16*7] ; 7. ENC
+ vmovdqa XKEY5_8, [KEYS5 + 16*8] ; load round 8 key
+ vaesenc XDATA4, XKEY4_7 ; 7. ENC
+ vaesenc XDATA5, [KEYS5 + 16*7] ; 7. ENC
+ vaesenc XDATA6, [KEYS6 + 16*7] ; 7. ENC
+ vaesenc XDATA7, [KEYS7 + 16*7] ; 7. ENC
+
+ vaesenc XDATA0, [KEYS0 + 16*8] ; 8. ENC
+ vaesenc XDATA1, [KEYS1 + 16*8] ; 8. ENC
+ vaesenc XDATA2, [KEYS2 + 16*8] ; 8. ENC
+ vaesenc XDATA3, [KEYS3 + 16*8] ; 8. ENC
+ vaesenc XDATA4, [KEYS4 + 16*8] ; 8. ENC
+ vmovdqa XKEY6_9, [KEYS6 + 16*9] ; load round 9 key
+ vaesenc XDATA5, XKEY5_8 ; 8. ENC
+ vaesenc XDATA6, [KEYS6 + 16*8] ; 8. ENC
+ vaesenc XDATA7, [KEYS7 + 16*8] ; 8. ENC
+
+ vaesenc XDATA0, [KEYS0 + 16*9] ; 9. ENC
+ vaesenc XDATA1, [KEYS1 + 16*9] ; 9. ENC
+ vaesenc XDATA2, [KEYS2 + 16*9] ; 9. ENC
+ vaesenc XDATA3, [KEYS3 + 16*9] ; 9. ENC
+ vaesenc XDATA4, [KEYS4 + 16*9] ; 9. ENC
+ vaesenc XDATA5, [KEYS5 + 16*9] ; 9. ENC
+ mov TMP, [ARG + _aesarg_out + 8*0]
+ vaesenc XDATA6, XKEY6_9 ; 9. ENC
+ vaesenc XDATA7, [KEYS7 + 16*9] ; 9. ENC
+
+
+ vaesenc XDATA0, [KEYS0 + 16*10] ; 10. ENC
+ vaesenc XDATA1, [KEYS1 + 16*10] ; 10. ENC
+ vaesenc XDATA2, [KEYS2 + 16*10] ; 10. ENC
+ vaesenc XDATA3, [KEYS3 + 16*10] ; 10. ENC
+ vaesenc XDATA4, [KEYS4 + 16*10] ; 10. ENC
+ vaesenc XDATA5, [KEYS5 + 16*10] ; 10. ENC
+ vaesenc XDATA6, [KEYS6 + 16*10] ; 10. ENC
+ vaesenc XDATA7, [KEYS7 + 16*10] ; 10. ENC
+
+ vaesenc XDATA0, [KEYS0 + 16*11] ; 11. ENC
+ vaesenc XDATA1, [KEYS1 + 16*11] ; 11. ENC
+ vaesenc XDATA2, [KEYS2 + 16*11] ; 11. ENC
+ vaesenc XDATA3, [KEYS3 + 16*11] ; 11. ENC
+ vaesenc XDATA4, [KEYS4 + 16*11] ; 11. ENC
+ vaesenc XDATA5, [KEYS5 + 16*11] ; 11. ENC
+ vaesenc XDATA6, [KEYS6 + 16*11] ; 11. ENC
+ vaesenc XDATA7, [KEYS7 + 16*11] ; 11. ENC
+
+
+ vaesenc XDATA0, [KEYS0 + 16*12] ; 12. ENC
+ vaesenc XDATA1, [KEYS1 + 16*12] ; 12. ENC
+ vaesenc XDATA2, [KEYS2 + 16*12] ; 12. ENC
+ vaesenc XDATA3, [KEYS3 + 16*12] ; 12. ENC
+ vaesenc XDATA4, [KEYS4 + 16*12] ; 12. ENC
+ vaesenc XDATA5, [KEYS5 + 16*12] ; 12. ENC
+ vaesenc XDATA6, [KEYS6 + 16*12] ; 12. ENC
+ vaesenc XDATA7, [KEYS7 + 16*12] ; 12. ENC
+
+ vaesenc XDATA0, [KEYS0 + 16*13] ; 13. ENC
+ vaesenc XDATA1, [KEYS1 + 16*13] ; 13. ENC
+ vaesenc XDATA2, [KEYS2 + 16*13] ; 13. ENC
+ vaesenc XDATA3, [KEYS3 + 16*13] ; 13. ENC
+ vaesenc XDATA4, [KEYS4 + 16*13] ; 13. ENC
+ vaesenc XDATA5, [KEYS5 + 16*13] ; 13. ENC
+ vaesenc XDATA6, [KEYS6 + 16*13] ; 13. ENC
+ vaesenc XDATA7, [KEYS7 + 16*13] ; 13. ENC
+
+ vaesenclast XDATA0, [KEYS0 + 16*14] ; 14. ENC
+ vaesenclast XDATA1, [KEYS1 + 16*14] ; 14. ENC
+ vaesenclast XDATA2, [KEYS2 + 16*14] ; 14. ENC
+ vaesenclast XDATA3, [KEYS3 + 16*14] ; 14. ENC
+ vaesenclast XDATA4, [KEYS4 + 16*14] ; 14. ENC
+ vaesenclast XDATA5, [KEYS5 + 16*14] ; 14. ENC
+ vaesenclast XDATA6, [KEYS6 + 16*14] ; 14. ENC
+ vaesenclast XDATA7, [KEYS7 + 16*14] ; 14. ENC
+
+ VMOVDQ [TMP], XDATA0 ; write back ciphertext
+ mov TMP, [ARG + _aesarg_out + 8*1]
+ VMOVDQ [TMP], XDATA1 ; write back ciphertext
+ mov TMP, [ARG + _aesarg_out + 8*2]
+ VMOVDQ [TMP], XDATA2 ; write back ciphertext
+ mov TMP, [ARG + _aesarg_out + 8*3]
+ VMOVDQ [TMP], XDATA3 ; write back ciphertext
+ mov TMP, [ARG + _aesarg_out + 8*4]
+ VMOVDQ [TMP], XDATA4 ; write back ciphertext
+ mov TMP, [ARG + _aesarg_out + 8*5]
+ VMOVDQ [TMP], XDATA5 ; write back ciphertext
+ mov TMP, [ARG + _aesarg_out + 8*6]
+ VMOVDQ [TMP], XDATA6 ; write back ciphertext
+ mov TMP, [ARG + _aesarg_out + 8*7]
+ VMOVDQ [TMP], XDATA7 ; write back ciphertext
+
+ cmp [LEN_AREA], IDX
+ je done
+
+main_loop:
+ mov TMP, [ARG + _aesarg_in + 8*1]
+ VPXOR2 XDATA0, [IN0 + IDX] ; load next block of plain text
+ VPXOR2 XDATA1, [TMP + IDX] ; load next block of plain text
+ mov TMP, [ARG + _aesarg_in + 8*3]
+ VPXOR2 XDATA2, [IN2 + IDX] ; load next block of plain text
+ VPXOR2 XDATA3, [TMP + IDX] ; load next block of plain text
+ mov TMP, [ARG + _aesarg_in + 8*5]
+ VPXOR2 XDATA4, [IN4 + IDX] ; load next block of plain text
+ VPXOR2 XDATA5, [TMP + IDX] ; load next block of plain text
+ mov TMP, [ARG + _aesarg_in + 8*7]
+ VPXOR2 XDATA6, [IN6 + IDX] ; load next block of plain text
+ VPXOR2 XDATA7, [TMP + IDX] ; load next block of plain text
+
+
+ VPXOR2 XDATA0, [KEYS0 + 16*0] ; 0. ARK
+ VPXOR2 XDATA1, [KEYS1 + 16*0] ; 0. ARK
+ VPXOR2 XDATA2, [KEYS2 + 16*0] ; 0. ARK
+ VPXOR2 XDATA3, [KEYS3 + 16*0] ; 0. ARK
+ VPXOR2 XDATA4, [KEYS4 + 16*0] ; 0. ARK
+ VPXOR2 XDATA5, [KEYS5 + 16*0] ; 0. ARK
+ VPXOR2 XDATA6, [KEYS6 + 16*0] ; 0. ARK
+ VPXOR2 XDATA7, [KEYS7 + 16*0] ; 0. ARK
+
+ vaesenc XDATA0, [KEYS0 + 16*1] ; 1. ENC
+ vaesenc XDATA1, [KEYS1 + 16*1] ; 1. ENC
+ vaesenc XDATA2, [KEYS2 + 16*1] ; 1. ENC
+ vaesenc XDATA3, [KEYS3 + 16*1] ; 1. ENC
+ vaesenc XDATA4, [KEYS4 + 16*1] ; 1. ENC
+ vaesenc XDATA5, [KEYS5 + 16*1] ; 1. ENC
+ vaesenc XDATA6, [KEYS6 + 16*1] ; 1. ENC
+ vaesenc XDATA7, [KEYS7 + 16*1] ; 1. ENC
+
+ vaesenc XDATA0, [KEYS0 + 16*2] ; 2. ENC
+ vaesenc XDATA1, [KEYS1 + 16*2] ; 2. ENC
+ vaesenc XDATA2, [KEYS2 + 16*2] ; 2. ENC
+ vaesenc XDATA3, [KEYS3 + 16*2] ; 2. ENC
+ vaesenc XDATA4, [KEYS4 + 16*2] ; 2. ENC
+ vaesenc XDATA5, [KEYS5 + 16*2] ; 2. ENC
+ vaesenc XDATA6, [KEYS6 + 16*2] ; 2. ENC
+ vaesenc XDATA7, [KEYS7 + 16*2] ; 2. ENC
+
+ vaesenc XDATA0, XKEY0_3 ; 3. ENC
+ vaesenc XDATA1, [KEYS1 + 16*3] ; 3. ENC
+ vaesenc XDATA2, [KEYS2 + 16*3] ; 3. ENC
+ vaesenc XDATA3, [KEYS3 + 16*3] ; 3. ENC
+ vaesenc XDATA4, [KEYS4 + 16*3] ; 3. ENC
+ vaesenc XDATA5, [KEYS5 + 16*3] ; 3. ENC
+ vaesenc XDATA6, [KEYS6 + 16*3] ; 3. ENC
+ vaesenc XDATA7, [KEYS7 + 16*3] ; 3. ENC
+
+ vaesenc XDATA0, [KEYS0 + 16*4] ; 4. ENC
+ vaesenc XDATA1, XKEY1_4 ; 4. ENC
+ vaesenc XDATA2, [KEYS2 + 16*4] ; 4. ENC
+ vaesenc XDATA3, [KEYS3 + 16*4] ; 4. ENC
+ vaesenc XDATA4, [KEYS4 + 16*4] ; 4. ENC
+ vaesenc XDATA5, [KEYS5 + 16*4] ; 4. ENC
+ vaesenc XDATA6, [KEYS6 + 16*4] ; 4. ENC
+ vaesenc XDATA7, [KEYS7 + 16*4] ; 4. ENC
+
+ vaesenc XDATA0, [KEYS0 + 16*5] ; 5. ENC
+ vaesenc XDATA1, [KEYS1 + 16*5] ; 5. ENC
+ vaesenc XDATA2, XKEY2_5 ; 5. ENC
+ vaesenc XDATA3, [KEYS3 + 16*5] ; 5. ENC
+ vaesenc XDATA4, [KEYS4 + 16*5] ; 5. ENC
+ vaesenc XDATA5, [KEYS5 + 16*5] ; 5. ENC
+ vaesenc XDATA6, [KEYS6 + 16*5] ; 5. ENC
+ vaesenc XDATA7, [KEYS7 + 16*5] ; 5. ENC
+
+ vaesenc XDATA0, [KEYS0 + 16*6] ; 6. ENC
+ vaesenc XDATA1, [KEYS1 + 16*6] ; 6. ENC
+ vaesenc XDATA2, [KEYS2 + 16*6] ; 6. ENC
+ vaesenc XDATA3, XKEY3_6 ; 6. ENC
+ vaesenc XDATA4, [KEYS4 + 16*6] ; 6. ENC
+ vaesenc XDATA5, [KEYS5 + 16*6] ; 6. ENC
+ vaesenc XDATA6, [KEYS6 + 16*6] ; 6. ENC
+ vaesenc XDATA7, [KEYS7 + 16*6] ; 6. ENC
+
+ vaesenc XDATA0, [KEYS0 + 16*7] ; 7. ENC
+ vaesenc XDATA1, [KEYS1 + 16*7] ; 7. ENC
+ vaesenc XDATA2, [KEYS2 + 16*7] ; 7. ENC
+ vaesenc XDATA3, [KEYS3 + 16*7] ; 7. ENC
+ vaesenc XDATA4, XKEY4_7 ; 7. ENC
+ vaesenc XDATA5, [KEYS5 + 16*7] ; 7. ENC
+ vaesenc XDATA6, [KEYS6 + 16*7] ; 7. ENC
+ vaesenc XDATA7, [KEYS7 + 16*7] ; 7. ENC
+
+ vaesenc XDATA0, [KEYS0 + 16*8] ; 8. ENC
+ vaesenc XDATA1, [KEYS1 + 16*8] ; 8. ENC
+ vaesenc XDATA2, [KEYS2 + 16*8] ; 8. ENC
+ vaesenc XDATA3, [KEYS3 + 16*8] ; 8. ENC
+ vaesenc XDATA4, [KEYS4 + 16*8] ; 8. ENC
+ vaesenc XDATA5, XKEY5_8 ; 8. ENC
+ vaesenc XDATA6, [KEYS6 + 16*8] ; 8. ENC
+ vaesenc XDATA7, [KEYS7 + 16*8] ; 8. ENC
+
+ vaesenc XDATA0, [KEYS0 + 16*9] ; 9. ENC
+ vaesenc XDATA1, [KEYS1 + 16*9] ; 9. ENC
+ vaesenc XDATA2, [KEYS2 + 16*9] ; 9. ENC
+ vaesenc XDATA3, [KEYS3 + 16*9] ; 9. ENC
+ vaesenc XDATA4, [KEYS4 + 16*9] ; 9. ENC
+ vaesenc XDATA5, [KEYS5 + 16*9] ; 9. ENC
+ mov TMP, [ARG + _aesarg_out + 8*0]
+ vaesenc XDATA6, XKEY6_9 ; 9. ENC
+ vaesenc XDATA7, [KEYS7 + 16*9] ; 9. ENC
+
+
+ vaesenc XDATA0, [KEYS0 + 16*10] ; 10. ENC
+ vaesenc XDATA1, [KEYS1 + 16*10] ; 10. ENC
+ vaesenc XDATA2, [KEYS2 + 16*10] ; 10. ENC
+ vaesenc XDATA3, [KEYS3 + 16*10] ; 10. ENC
+ vaesenc XDATA4, [KEYS4 + 16*10] ; 10. ENC
+ vaesenc XDATA5, [KEYS5 + 16*10] ; 10. ENC
+ vaesenc XDATA6, [KEYS6 + 16*10] ; 10. ENC
+ vaesenc XDATA7, [KEYS7 + 16*10] ; 10. ENC
+
+ vaesenc XDATA0, [KEYS0 + 16*11] ; 11. ENC
+ vaesenc XDATA1, [KEYS1 + 16*11] ; 11. ENC
+ vaesenc XDATA2, [KEYS2 + 16*11] ; 11. ENC
+ vaesenc XDATA3, [KEYS3 + 16*11] ; 11. ENC
+ vaesenc XDATA4, [KEYS4 + 16*11] ; 11. ENC
+ vaesenc XDATA5, [KEYS5 + 16*11] ; 11. ENC
+ vaesenc XDATA6, [KEYS6 + 16*11] ; 11. ENC
+ vaesenc XDATA7, [KEYS7 + 16*11] ; 11. ENC
+
+ vaesenc XDATA0, [KEYS0 + 16*12] ; 12. ENC
+ vaesenc XDATA1, [KEYS1 + 16*12] ; 12. ENC
+ vaesenc XDATA2, [KEYS2 + 16*12] ; 12. ENC
+ vaesenc XDATA3, [KEYS3 + 16*12] ; 12. ENC
+ vaesenc XDATA4, [KEYS4 + 16*12] ; 12. ENC
+ vaesenc XDATA5, [KEYS5 + 16*12] ; 12. ENC
+ vaesenc XDATA6, [KEYS6 + 16*12] ; 12. ENC
+ vaesenc XDATA7, [KEYS7 + 16*12] ; 12. ENC
+
+ vaesenc XDATA0, [KEYS0 + 16*13] ; 13. ENC
+ vaesenc XDATA1, [KEYS1 + 16*13] ; 13. ENC
+ vaesenc XDATA2, [KEYS2 + 16*13] ; 13. ENC
+ vaesenc XDATA3, [KEYS3 + 16*13] ; 13. ENC
+ vaesenc XDATA4, [KEYS4 + 16*13] ; 13. ENC
+ vaesenc XDATA5, [KEYS5 + 16*13] ; 13. ENC
+ vaesenc XDATA6, [KEYS6 + 16*13] ; 13. ENC
+ vaesenc XDATA7, [KEYS7 + 16*13] ; 13. ENC
+
+ vaesenclast XDATA0, [KEYS0 + 16*14] ; 14. ENC
+ vaesenclast XDATA1, [KEYS1 + 16*14] ; 14. ENC
+ vaesenclast XDATA2, [KEYS2 + 16*14] ; 14. ENC
+ vaesenclast XDATA3, [KEYS3 + 16*14] ; 14. ENC
+ vaesenclast XDATA4, [KEYS4 + 16*14] ; 14. ENC
+ vaesenclast XDATA5, [KEYS5 + 16*14] ; 14. ENC
+ vaesenclast XDATA6, [KEYS6 + 16*14] ; 14. ENC
+ vaesenclast XDATA7, [KEYS7 + 16*14] ; 14. ENC
+
+
+ VMOVDQ [TMP + IDX], XDATA0 ; write back ciphertext
+ mov TMP, [ARG + _aesarg_out + 8*1]
+ VMOVDQ [TMP + IDX], XDATA1 ; write back ciphertext
+ mov TMP, [ARG + _aesarg_out + 8*2]
+ VMOVDQ [TMP + IDX], XDATA2 ; write back ciphertext
+ mov TMP, [ARG + _aesarg_out + 8*3]
+ VMOVDQ [TMP + IDX], XDATA3 ; write back ciphertext
+ mov TMP, [ARG + _aesarg_out + 8*4]
+ VMOVDQ [TMP + IDX], XDATA4 ; write back ciphertext
+ mov TMP, [ARG + _aesarg_out + 8*5]
+ VMOVDQ [TMP + IDX], XDATA5 ; write back ciphertext
+ mov TMP, [ARG + _aesarg_out + 8*6]
+ VMOVDQ [TMP + IDX], XDATA6 ; write back ciphertext
+ mov TMP, [ARG + _aesarg_out + 8*7]
+ VMOVDQ [TMP + IDX], XDATA7 ; write back ciphertext
+
+ add IDX, 16
+ cmp [LEN_AREA], IDX
+ jne main_loop
+
+done:
+ ;; update IV
+ vmovdqa [ARG + _aesarg_IV + 16*0], XDATA0
+ vmovdqa [ARG + _aesarg_IV + 16*1], XDATA1
+ vmovdqa [ARG + _aesarg_IV + 16*2], XDATA2
+ vmovdqa [ARG + _aesarg_IV + 16*3], XDATA3
+ vmovdqa [ARG + _aesarg_IV + 16*4], XDATA4
+ vmovdqa [ARG + _aesarg_IV + 16*5], XDATA5
+ vmovdqa [ARG + _aesarg_IV + 16*6], XDATA6
+ vmovdqa [ARG + _aesarg_IV + 16*7], XDATA7
+
+ ;; update IN and OUT
+ vmovd xmm0, [LEN_AREA]
+ vpshufd xmm0, xmm0, 0x44
+ vpaddq xmm1, xmm0, [ARG + _aesarg_in + 16*0]
+ vpaddq xmm2, xmm0, [ARG + _aesarg_in + 16*1]
+ vpaddq xmm3, xmm0, [ARG + _aesarg_in + 16*2]
+ vpaddq xmm4, xmm0, [ARG + _aesarg_in + 16*3]
+ vmovdqa [ARG + _aesarg_in + 16*0], xmm1
+ vmovdqa [ARG + _aesarg_in + 16*1], xmm2
+ vmovdqa [ARG + _aesarg_in + 16*2], xmm3
+ vmovdqa [ARG + _aesarg_in + 16*3], xmm4
+ vpaddq xmm5, xmm0, [ARG + _aesarg_out + 16*0]
+ vpaddq xmm6, xmm0, [ARG + _aesarg_out + 16*1]
+ vpaddq xmm7, xmm0, [ARG + _aesarg_out + 16*2]
+ vpaddq xmm8, xmm0, [ARG + _aesarg_out + 16*3]
+ vmovdqa [ARG + _aesarg_out + 16*0], xmm5
+ vmovdqa [ARG + _aesarg_out + 16*1], xmm6
+ vmovdqa [ARG + _aesarg_out + 16*2], xmm7
+ vmovdqa [ARG + _aesarg_out + 16*3], xmm8
+
+;; XMMs are saved at a higher level
+ mov rbp, [GPR_SAVE_AREA + 8*0]
+
+ add rsp, STACK_size
+
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/avx/aes_cfb_128_avx.asm b/src/spdk/intel-ipsec-mb/avx/aes_cfb_128_avx.asm
new file mode 100644
index 000000000..34d03bb99
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx/aes_cfb_128_avx.asm
@@ -0,0 +1,165 @@
+;;
+;; Copyright (c) 2018-2019, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%include "include/os.asm"
+%include "include/memcpy.asm"
+%include "include/clear_regs.asm"
+
+;;; Routine to do 128 bit CFB AES encrypt/decrypt operations on one block only.
+;;; It processes only one buffer at a time.
+;;; It is designed to manage partial blocks of DOCSIS 3.1 SEC BPI
+
+;; In System V AMD64 ABI
+;; callee saves: RBX, RBP, R12-R15
+;; Windows x64 ABI
+;; callee saves: RBX, RBP, RDI, RSI, RSP, R12-R15
+;;
+;; Registers: RAX RBX RCX RDX RBP RSI RDI R8 R9 R10 R11 R12 R13 R14 R15
+;; -----------------------------------------------------------
+;; Windows clobbers: RAX R9 R10 R11
+;; Windows preserves: RBX RCX RDX RBP RSI RDI R8 R12 R13 R14 R15
+;; -----------------------------------------------------------
+;; Linux clobbers: RAX R9 R10
+;; Linux preserves: RBX RCX RDX RBP RSI RDI R8 R11 R12 R13 R14 R15
+;; -----------------------------------------------------------
+;;
+;; Linux/Windows clobbers: xmm0
+;;
+
+%ifdef LINUX
+%define arg1 rdi
+%define arg2 rsi
+%define arg3 rdx
+%define arg4 rcx
+%define arg5 r8
+%else
+%define arg1 rcx
+%define arg2 rdx
+%define arg3 r8
+%define arg4 r9
+%define arg5 [rsp + 5*8]
+%endif
+
+%define OUT arg1
+%define IN arg2
+%define IV arg3
+%define KEYS arg4
+%ifdef LINUX
+%define LEN arg5
+%else
+%define LEN2 arg5
+%define LEN r11
+%endif
+
+%define TMP0 rax
+%define TMP1 r10
+
+%define XDATA xmm0
+%define XIN xmm1
+
+section .text
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; void aes_cfb_128_one(void *out, void *in, void *iv, void *keys, uint64_t len)
+;; arg 1: OUT : addr to put clear/cipher text out
+;; arg 2: IN : addr to take cipher/clear text from
+;; arg 3: IV : initialization vector
+;; arg 4: KEYS: pointer to expanded keys structure (16 byte aligned)
+;; arg 5: LEN: length of the text to encrypt/decrypt (valid range is 0 to 16)
+;;
+;; AES CFB128 one block encrypt/decrypt implementation.
+;; The function does not update the IV. The result of the operation can be found in OUT.
+;;
+;; It is primarily designed to process a partial block of
+;; DOCSIS 3.1 AES Packet PDU Encryption (I.10).
+;;
+;; It processes up to one block only (up to 16 bytes).
+;;
+;; It makes sure not to read more than LEN bytes from IN and
+;; not to store more than LEN bytes to OUT.
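+;;
+;; Illustrative caller-side sketch (not part of this file; hypothetical
+;; variable names, with the expanded AES-128 key schedule assumed to be
+;; prepared elsewhere and 16 byte aligned):
+;;
+;;     uint8_t out[16];
+;;     /* encrypt/decrypt an n-byte partial block, 0 <= n <= 16 */
+;;     aes_cfb_128_one_avx(out, partial_block, prev_cipher_block,
+;;                         expanded_keys, n);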
+MKGLOBAL(aes_cfb_128_one_avx,function,)
+MKGLOBAL(aes_cfb_128_one_avx2,function,)
+MKGLOBAL(aes_cfb_128_one_avx512,function,)
+align 32
+aes_cfb_128_one_avx:
+aes_cfb_128_one_avx2:
+aes_cfb_128_one_avx512:
+%ifndef LINUX
+ mov LEN, LEN2
+%endif
+%ifdef SAFE_PARAM
+ cmp IV, 0
+ jz exit_cfb
+
+ cmp KEYS, 0
+ jz exit_cfb
+
+ cmp LEN, 0
+ jz skip_in_out_check
+
+ cmp OUT, 0
+ jz exit_cfb
+
+ cmp IN, 0
+ jz exit_cfb
+
+skip_in_out_check:
+%endif
+ simd_load_avx_16 XIN, IN, LEN
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+ vmovdqu XDATA, [IV] ; IV (or next to last block)
+ vpxor XDATA, XDATA, [KEYS + 16*0] ; 0. ARK
+ vaesenc XDATA, XDATA, [KEYS + 16*1] ; 1. ENC
+ vaesenc XDATA, XDATA, [KEYS + 16*2] ; 2. ENC
+ vaesenc XDATA, XDATA, [KEYS + 16*3] ; 3. ENC
+ vaesenc XDATA, XDATA, [KEYS + 16*4] ; 4. ENC
+ vaesenc XDATA, XDATA, [KEYS + 16*5] ; 5. ENC
+ vaesenc XDATA, XDATA, [KEYS + 16*6] ; 6. ENC
+ vaesenc XDATA, XDATA, [KEYS + 16*7] ; 7. ENC
+ vaesenc XDATA, XDATA, [KEYS + 16*8] ; 8. ENC
+ vaesenc XDATA, XDATA, [KEYS + 16*9] ; 9. ENC
+ vaesenclast XDATA, XDATA, [KEYS + 16*10] ; 10. ENC
+
+ vpxor XDATA, XIN ; plaintext/ciphertext XOR block cipher encryption
+
+ simd_store_avx OUT, XDATA, LEN, TMP0, TMP1
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+%ifdef SAFE_DATA
+ ;; XDATA and XIN are the only scratch SIMD registers used
+ clear_xmms_avx XDATA, XIN
+ clear_scratch_gps_asm
+%endif
+exit_cfb:
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/avx/aes_ecb_by4_avx.asm b/src/spdk/intel-ipsec-mb/avx/aes_ecb_by4_avx.asm
new file mode 100644
index 000000000..d71bd8c46
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx/aes_ecb_by4_avx.asm
@@ -0,0 +1,654 @@
+;;
+;; Copyright (c) 2019, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+; routine to do AES ECB encrypt/decrypt on 16n bytes doing AES by 4
+
+; XMM registers are clobbered. Saving/restoring must be done at a higher level
+
+; void aes_ecb_x_y_avx(void *in,
+; UINT128 keys[],
+; void *out,
+; UINT64 len_bytes);
+;
+; x = direction (enc/dec)
+; y = key size (128/192/256)
+; arg 1: IN: pointer to input (plain or cipher text)
+; arg 2: KEYS: pointer to keys
+; arg 3: OUT: pointer to output (cipher or plain text)
+; arg 4: LEN: length in bytes (multiple of 16)
+;
+
+%include "include/os.asm"
+
+%ifndef AES_ECB_ENC_128
+%define AES_ECB_ENC_128 aes_ecb_enc_128_avx
+%define AES_ECB_ENC_192 aes_ecb_enc_192_avx
+%define AES_ECB_ENC_256 aes_ecb_enc_256_avx
+%define AES_ECB_DEC_128 aes_ecb_dec_128_avx
+%define AES_ECB_DEC_192 aes_ecb_dec_192_avx
+%define AES_ECB_DEC_256 aes_ecb_dec_256_avx
+%endif
+
+%ifdef LINUX
+%define IN rdi
+%define KEYS rsi
+%define OUT rdx
+%define LEN rcx
+%else
+%define IN rcx
+%define KEYS rdx
+%define OUT r8
+%define LEN r9
+%endif
+
+%define IDX rax
+%define TMP IDX
+%define XDATA0 xmm0
+%define XDATA1 xmm1
+%define XDATA2 xmm2
+%define XDATA3 xmm3
+%define XKEY0 xmm4
+%define XKEY2 xmm5
+%define XKEY4 xmm6
+%define XKEY6 xmm7
+%define XKEY10 xmm8
+%define XKEY_A xmm9
+%define XKEY_B xmm10
+
+section .text
+
+%macro AES_ECB 2
+%define %%NROUNDS %1 ; [in] Number of AES rounds, numerical value
+%define %%DIR %2 ; [in] Direction (encrypt/decrypt)
+
+%ifidn %%DIR, ENC
+%define AES vaesenc
+%define AES_LAST vaesenclast
+%else ; DIR = DEC
+%define AES vaesdec
+%define AES_LAST vaesdeclast
+%endif
+ mov TMP, LEN
+ and TMP, 3*16
+ jz %%initial_4
+ cmp TMP, 2*16
+ jb %%initial_1
+ ja %%initial_3
+
+%%initial_2:
+ ; load plain/cipher text
+ vmovdqu XDATA0, [IN + 0*16]
+ vmovdqu XDATA1, [IN + 1*16]
+
+ vmovdqa XKEY0, [KEYS + 0*16]
+
+ vpxor XDATA0, XKEY0 ; 0. ARK
+ vpxor XDATA1, XKEY0
+
+ vmovdqa XKEY2, [KEYS + 2*16]
+
+ AES XDATA0, [KEYS + 1*16] ; 1. ENC
+ AES XDATA1, [KEYS + 1*16]
+
+ mov IDX, 2*16
+
+ AES XDATA0, XKEY2 ; 2. ENC
+ AES XDATA1, XKEY2
+
+ vmovdqa XKEY4, [KEYS + 4*16]
+
+ AES XDATA0, [KEYS + 3*16] ; 3. ENC
+ AES XDATA1, [KEYS + 3*16]
+
+ AES XDATA0, XKEY4 ; 4. ENC
+ AES XDATA1, XKEY4
+
+ vmovdqa XKEY6, [KEYS + 6*16]
+
+ AES XDATA0, [KEYS + 5*16] ; 5. ENC
+ AES XDATA1, [KEYS + 5*16]
+
+ AES XDATA0, XKEY6 ; 6. ENC
+ AES XDATA1, XKEY6
+
+ vmovdqa XKEY_B, [KEYS + 8*16]
+
+ AES XDATA0, [KEYS + 7*16] ; 7. ENC
+ AES XDATA1, [KEYS + 7*16]
+
+ AES XDATA0, XKEY_B ; 8. ENC
+ AES XDATA1, XKEY_B
+
+ vmovdqa XKEY10, [KEYS + 10*16]
+
+ AES XDATA0, [KEYS + 9*16] ; 9. ENC
+ AES XDATA1, [KEYS + 9*16]
+
+%if %%NROUNDS >= 12
+ AES XDATA0, XKEY10 ; 10. ENC
+ AES XDATA1, XKEY10
+
+ AES XDATA0, [KEYS + 11*16] ; 11. ENC
+ AES XDATA1, [KEYS + 11*16]
+%endif
+
+%if %%NROUNDS == 14
+ AES XDATA0, [KEYS + 12*16] ; 12. ENC
+ AES XDATA1, [KEYS + 12*16]
+
+ AES XDATA0, [KEYS + 13*16] ; 13. ENC
+ AES XDATA1, [KEYS + 13*16]
+%endif
+
+%if %%NROUNDS == 10
+ AES_LAST XDATA0, XKEY10 ; 10. ENC
+ AES_LAST XDATA1, XKEY10
+%elif %%NROUNDS == 12
+ AES_LAST XDATA0, [KEYS + 12*16] ; 12. ENC
+ AES_LAST XDATA1, [KEYS + 12*16]
+%else
+ AES_LAST XDATA0, [KEYS + 14*16] ; 14. ENC
+ AES_LAST XDATA1, [KEYS + 14*16]
+%endif
+ vmovdqu [OUT + 0*16], XDATA0
+ vmovdqu [OUT + 1*16], XDATA1
+
+ cmp LEN, 2*16
+ je %%done
+ jmp %%main_loop
+
+
+ align 16
+%%initial_1:
+ ; load plain/cipher text
+ vmovdqu XDATA0, [IN + 0*16]
+
+ vmovdqa XKEY0, [KEYS + 0*16]
+
+ vpxor XDATA0, XKEY0 ; 0. ARK
+
+ vmovdqa XKEY2, [KEYS + 2*16]
+
+ AES XDATA0, [KEYS + 1*16] ; 1. ENC
+
+ mov IDX, 1*16
+
+ AES XDATA0, XKEY2 ; 2. ENC
+
+ vmovdqa XKEY4, [KEYS + 4*16]
+
+ AES XDATA0, [KEYS + 3*16] ; 3. ENC
+
+ AES XDATA0, XKEY4 ; 4. ENC
+
+ vmovdqa XKEY6, [KEYS + 6*16]
+
+ AES XDATA0, [KEYS + 5*16] ; 5. ENC
+
+ AES XDATA0, XKEY6 ; 6. ENC
+
+ vmovdqa XKEY_B, [KEYS + 8*16]
+
+ AES XDATA0, [KEYS + 7*16] ; 7. ENC
+
+ AES XDATA0, XKEY_B ; 8. ENC
+
+ vmovdqa XKEY10, [KEYS + 10*16]
+
+ AES XDATA0, [KEYS + 9*16] ; 9. ENC
+
+%if %%NROUNDS >= 12
+ AES XDATA0, XKEY10 ; 10. ENC
+
+ AES XDATA0, [KEYS + 11*16] ; 11. ENC
+%endif
+
+%if %%NROUNDS == 14
+ AES XDATA0, [KEYS + 12*16] ; 12. ENC
+
+ AES XDATA0, [KEYS + 13*16] ; 13. ENC
+%endif
+
+%if %%NROUNDS == 10
+
+ AES_LAST XDATA0, XKEY10 ; 10. ENC
+%elif %%NROUNDS == 12
+ AES_LAST XDATA0, [KEYS + 12*16] ; 12. ENC
+%else
+ AES_LAST XDATA0, [KEYS + 14*16] ; 14. ENC
+%endif
+
+ vmovdqu [OUT + 0*16], XDATA0
+
+ cmp LEN, 1*16
+ je %%done
+ jmp %%main_loop
+
+
+%%initial_3:
+ ; load plain/cipher text
+ vmovdqu XDATA0, [IN + 0*16]
+ vmovdqu XDATA1, [IN + 1*16]
+ vmovdqu XDATA2, [IN + 2*16]
+
+ vmovdqa XKEY0, [KEYS + 0*16]
+
+ vmovdqa XKEY_A, [KEYS + 1*16]
+
+ vpxor XDATA0, XKEY0 ; 0. ARK
+ vpxor XDATA1, XKEY0
+ vpxor XDATA2, XKEY0
+
+ vmovdqa XKEY2, [KEYS + 2*16]
+
+ AES XDATA0, XKEY_A ; 1. ENC
+ AES XDATA1, XKEY_A
+ AES XDATA2, XKEY_A
+
+ vmovdqa XKEY_A, [KEYS + 3*16]
+ mov IDX, 3*16
+
+ AES XDATA0, XKEY2 ; 2. ENC
+ AES XDATA1, XKEY2
+ AES XDATA2, XKEY2
+
+ vmovdqa XKEY4, [KEYS + 4*16]
+
+ AES XDATA0, XKEY_A ; 3. ENC
+ AES XDATA1, XKEY_A
+ AES XDATA2, XKEY_A
+
+ vmovdqa XKEY_A, [KEYS + 5*16]
+
+ AES XDATA0, XKEY4 ; 4. ENC
+ AES XDATA1, XKEY4
+ AES XDATA2, XKEY4
+
+ vmovdqa XKEY6, [KEYS + 6*16]
+
+ AES XDATA0, XKEY_A ; 5. ENC
+ AES XDATA1, XKEY_A
+ AES XDATA2, XKEY_A
+
+ vmovdqa XKEY_A, [KEYS + 7*16]
+
+ AES XDATA0, XKEY6 ; 6. ENC
+ AES XDATA1, XKEY6
+ AES XDATA2, XKEY6
+
+ vmovdqa XKEY_B, [KEYS + 8*16]
+
+ AES XDATA0, XKEY_A ; 7. ENC
+ AES XDATA1, XKEY_A
+ AES XDATA2, XKEY_A
+
+ vmovdqa XKEY_A, [KEYS + 9*16]
+
+ AES XDATA0, XKEY_B ; 8. ENC
+ AES XDATA1, XKEY_B
+ AES XDATA2, XKEY_B
+
+ vmovdqa XKEY_B, [KEYS + 10*16]
+
+ AES XDATA0, XKEY_A ; 9. ENC
+ AES XDATA1, XKEY_A
+ AES XDATA2, XKEY_A
+
+%if %%NROUNDS >= 12
+ vmovdqa XKEY_A, [KEYS + 11*16]
+
+ AES XDATA0, XKEY_B ; 10. ENC
+ AES XDATA1, XKEY_B
+ AES XDATA2, XKEY_B
+
+ vmovdqa XKEY_B, [KEYS + 12*16]
+
+ AES XDATA0, XKEY_A ; 11. ENC
+ AES XDATA1, XKEY_A
+ AES XDATA2, XKEY_A
+
+%endif
+
+%if %%NROUNDS == 14
+ vmovdqa XKEY_A, [KEYS + 13*16]
+
+ AES XDATA0, XKEY_B ; 12. ENC
+ AES XDATA1, XKEY_B
+ AES XDATA2, XKEY_B
+
+ vmovdqa XKEY_B, [KEYS + 14*16]
+
+ AES XDATA0, XKEY_A ; 13. ENC
+ AES XDATA1, XKEY_A
+ AES XDATA2, XKEY_A
+%endif
+
+ AES_LAST XDATA0, XKEY_B ; 10/12/14. ENC (depending on key size)
+ AES_LAST XDATA1, XKEY_B
+ AES_LAST XDATA2, XKEY_B
+
+ vmovdqu [OUT + 0*16], XDATA0
+ vmovdqu [OUT + 1*16], XDATA1
+ vmovdqu [OUT + 2*16], XDATA2
+
+ cmp LEN, 3*16
+ je %%done
+ jmp %%main_loop
+
+
+ align 16
+%%initial_4:
+ ; load plain/cipher text
+ vmovdqu XDATA0, [IN + 0*16]
+ vmovdqu XDATA1, [IN + 1*16]
+ vmovdqu XDATA2, [IN + 2*16]
+ vmovdqu XDATA3, [IN + 3*16]
+
+ vmovdqa XKEY0, [KEYS + 0*16]
+
+ vmovdqa XKEY_A, [KEYS + 1*16]
+
+ vpxor XDATA0, XKEY0 ; 0. ARK
+ vpxor XDATA1, XKEY0
+ vpxor XDATA2, XKEY0
+ vpxor XDATA3, XKEY0
+
+ vmovdqa XKEY2, [KEYS + 2*16]
+
+ AES XDATA0, XKEY_A ; 1. ENC
+ AES XDATA1, XKEY_A
+ AES XDATA2, XKEY_A
+ AES XDATA3, XKEY_A
+
+ vmovdqa XKEY_A, [KEYS + 3*16]
+
+ mov IDX, 4*16
+
+ AES XDATA0, XKEY2 ; 2. ENC
+ AES XDATA1, XKEY2
+ AES XDATA2, XKEY2
+ AES XDATA3, XKEY2
+
+ vmovdqa XKEY4, [KEYS + 4*16]
+
+ AES XDATA0, XKEY_A ; 3. ENC
+ AES XDATA1, XKEY_A
+ AES XDATA2, XKEY_A
+ AES XDATA3, XKEY_A
+
+ vmovdqa XKEY_A, [KEYS + 5*16]
+
+ AES XDATA0, XKEY4 ; 4. ENC
+ AES XDATA1, XKEY4
+ AES XDATA2, XKEY4
+ AES XDATA3, XKEY4
+
+ vmovdqa XKEY6, [KEYS + 6*16]
+
+ AES XDATA0, XKEY_A ; 5. ENC
+ AES XDATA1, XKEY_A
+ AES XDATA2, XKEY_A
+ AES XDATA3, XKEY_A
+
+ vmovdqa XKEY_A, [KEYS + 7*16]
+
+ AES XDATA0, XKEY6 ; 6. ENC
+ AES XDATA1, XKEY6
+ AES XDATA2, XKEY6
+ AES XDATA3, XKEY6
+
+ vmovdqa XKEY_B, [KEYS + 8*16]
+
+ AES XDATA0, XKEY_A ; 7. ENC
+ AES XDATA1, XKEY_A
+ AES XDATA2, XKEY_A
+ AES XDATA3, XKEY_A
+
+ vmovdqa XKEY_A, [KEYS + 9*16]
+
+ AES XDATA0, XKEY_B ; 8. ENC
+ AES XDATA1, XKEY_B
+ AES XDATA2, XKEY_B
+ AES XDATA3, XKEY_B
+
+ vmovdqa XKEY_B, [KEYS + 10*16]
+
+ AES XDATA0, XKEY_A ; 9. ENC
+ AES XDATA1, XKEY_A
+ AES XDATA2, XKEY_A
+ AES XDATA3, XKEY_A
+
+%if %%NROUNDS >= 12
+ vmovdqa XKEY_A, [KEYS + 11*16]
+
+ AES XDATA0, XKEY_B ; 10. ENC
+ AES XDATA1, XKEY_B
+ AES XDATA2, XKEY_B
+ AES XDATA3, XKEY_B
+
+ vmovdqa XKEY_B, [KEYS + 12*16]
+
+ AES XDATA0, XKEY_A ; 11. ENC
+ AES XDATA1, XKEY_A
+ AES XDATA2, XKEY_A
+ AES XDATA3, XKEY_A
+%endif
+
+%if %%NROUNDS == 14
+ vmovdqa XKEY_A, [KEYS + 13*16]
+
+ AES XDATA0, XKEY_B ; 12. ENC
+ AES XDATA1, XKEY_B
+ AES XDATA2, XKEY_B
+ AES XDATA3, XKEY_B
+
+ vmovdqa XKEY_B, [KEYS + 14*16]
+
+ AES XDATA0, XKEY_A ; 13. ENC
+ AES XDATA1, XKEY_A
+ AES XDATA2, XKEY_A
+ AES XDATA3, XKEY_A
+%endif
+
+ AES_LAST XDATA0, XKEY_B ; 10/12/14. ENC (depending on key size)
+ AES_LAST XDATA1, XKEY_B
+ AES_LAST XDATA2, XKEY_B
+ AES_LAST XDATA3, XKEY_B
+
+ vmovdqu [OUT + 0*16], XDATA0
+ vmovdqu [OUT + 1*16], XDATA1
+ vmovdqu [OUT + 2*16], XDATA2
+ vmovdqu [OUT + 3*16], XDATA3
+
+ cmp LEN, 4*16
+ jz %%done
+ jmp %%main_loop
+
+ align 16
+%%main_loop:
+ ; load plain/cipher text
+ vmovdqu XDATA0, [IN + IDX + 0*16]
+ vmovdqu XDATA1, [IN + IDX + 1*16]
+ vmovdqu XDATA2, [IN + IDX + 2*16]
+ vmovdqu XDATA3, [IN + IDX + 3*16]
+
+ vmovdqa XKEY_A, [KEYS + 1*16]
+
+ vpxor XDATA0, XKEY0 ; 0. ARK
+ vpxor XDATA1, XKEY0
+ vpxor XDATA2, XKEY0
+ vpxor XDATA3, XKEY0
+
+ add IDX, 4*16
+
+ AES XDATA0, XKEY_A ; 1. ENC
+ AES XDATA1, XKEY_A
+ AES XDATA2, XKEY_A
+ AES XDATA3, XKEY_A
+
+ vmovdqa XKEY_A, [KEYS + 3*16]
+
+ AES XDATA0, XKEY2 ; 2. ENC
+ AES XDATA1, XKEY2
+ AES XDATA2, XKEY2
+ AES XDATA3, XKEY2
+
+ AES XDATA0, XKEY_A ; 3. ENC
+ AES XDATA1, XKEY_A
+ AES XDATA2, XKEY_A
+ AES XDATA3, XKEY_A
+
+ vmovdqa XKEY_A, [KEYS + 5*16]
+
+ AES XDATA0, XKEY4 ; 4. ENC
+ AES XDATA1, XKEY4
+ AES XDATA2, XKEY4
+ AES XDATA3, XKEY4
+
+ AES XDATA0, XKEY_A ; 5. ENC
+ AES XDATA1, XKEY_A
+ AES XDATA2, XKEY_A
+ AES XDATA3, XKEY_A
+
+ vmovdqa XKEY_A, [KEYS + 7*16]
+
+ AES XDATA0, XKEY6 ; 6. ENC
+ AES XDATA1, XKEY6
+ AES XDATA2, XKEY6
+ AES XDATA3, XKEY6
+
+ vmovdqa XKEY_B, [KEYS + 8*16]
+
+ AES XDATA0, XKEY_A ; 7. ENC
+ AES XDATA1, XKEY_A
+ AES XDATA2, XKEY_A
+ AES XDATA3, XKEY_A
+
+ vmovdqa XKEY_A, [KEYS + 9*16]
+
+ AES XDATA0, XKEY_B ; 8. ENC
+ AES XDATA1, XKEY_B
+ AES XDATA2, XKEY_B
+ AES XDATA3, XKEY_B
+
+ vmovdqa XKEY_B, [KEYS + 10*16]
+
+ AES XDATA0, XKEY_A ; 9. ENC
+ AES XDATA1, XKEY_A
+ AES XDATA2, XKEY_A
+ AES XDATA3, XKEY_A
+
+%if %%NROUNDS >= 12
+ vmovdqa XKEY_A, [KEYS + 11*16]
+
+ AES XDATA0, XKEY_B ; 10. ENC
+ AES XDATA1, XKEY_B
+ AES XDATA2, XKEY_B
+ AES XDATA3, XKEY_B
+
+ vmovdqa XKEY_B, [KEYS + 12*16]
+
+ AES XDATA0, XKEY_A ; 11. ENC
+ AES XDATA1, XKEY_A
+ AES XDATA2, XKEY_A
+ AES XDATA3, XKEY_A
+%endif
+
+%if %%NROUNDS == 14
+ vmovdqa XKEY_A, [KEYS + 13*16]
+
+ AES XDATA0, XKEY_B ; 12. ENC
+ AES XDATA1, XKEY_B
+ AES XDATA2, XKEY_B
+ AES XDATA3, XKEY_B
+
+ vmovdqa XKEY_B, [KEYS + 14*16]
+
+ AES XDATA0, XKEY_A ; 13. ENC
+ AES XDATA1, XKEY_A
+ AES XDATA2, XKEY_A
+ AES XDATA3, XKEY_A
+%endif
+
+ AES_LAST XDATA0, XKEY_B ; 10/12/14. ENC (depending on key size)
+ AES_LAST XDATA1, XKEY_B
+ AES_LAST XDATA2, XKEY_B
+ AES_LAST XDATA3, XKEY_B
+
+ vmovdqu [OUT + IDX + 0*16 - 4*16], XDATA0
+ vmovdqu [OUT + IDX + 1*16 - 4*16], XDATA1
+ vmovdqu [OUT + IDX + 2*16 - 4*16], XDATA2
+ vmovdqu [OUT + IDX + 3*16 - 4*16], XDATA3
+
+ cmp IDX, LEN
+ jne %%main_loop
+
+%%done:
+
+ ret
+
+%endmacro
+
+align 16
+MKGLOBAL(AES_ECB_ENC_128,function,internal)
+AES_ECB_ENC_128:
+
+ AES_ECB 10, ENC
+
+align 16
+MKGLOBAL(AES_ECB_ENC_192,function,internal)
+AES_ECB_ENC_192:
+
+ AES_ECB 12, ENC
+
+align 16
+MKGLOBAL(AES_ECB_ENC_256,function,internal)
+AES_ECB_ENC_256:
+
+ AES_ECB 14, ENC
+
+align 16
+MKGLOBAL(AES_ECB_DEC_128,function,internal)
+AES_ECB_DEC_128:
+
+ AES_ECB 10, DEC
+
+align 16
+MKGLOBAL(AES_ECB_DEC_192,function,internal)
+AES_ECB_DEC_192:
+
+ AES_ECB 12, DEC
+
+align 16
+MKGLOBAL(AES_ECB_DEC_256,function,internal)
+AES_ECB_DEC_256:
+
+ AES_ECB 14, DEC
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
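
The block scheduling used by the AES_ECB macro above can be summarized in C as follows; this is a sketch of the control flow only, with aes_ecb_block() as an assumed single-block ECB helper that stands in for the unrolled AES rounds.

    /*
     * Sketch of the %%initial_1/2/3/4 + %%main_loop scheduling: handle
     * len % 64 bytes first (1-3 blocks), then 4 blocks per iteration.
     */
    #include <stdint.h>

    extern void aes_ecb_block(const void *keys, const void *in, void *out);  /* assumed */

    static void aes_ecb_by4_ref(const void *keys, const uint8_t *in,
                                uint8_t *out, uint64_t len_bytes)
    {
            uint64_t idx = len_bytes & (3 * 16);    /* 0, 16, 32 or 48 leading bytes */
            uint64_t i;

            for (i = 0; i < idx; i += 16)           /* %%initial_1/2/3 */
                    aes_ecb_block(keys, &in[i], &out[i]);

            while (idx < len_bytes) {               /* %%initial_4 and %%main_loop */
                    for (i = 0; i < 4; i++)
                            aes_ecb_block(keys, &in[idx + i * 16],
                                          &out[idx + i * 16]);
                    idx += 4 * 16;
            }
    }

Because len_bytes is a multiple of 16, len_bytes & (3*16) equals len_bytes modulo 64, which is why the macro only needs the 1/2/3-block prologues before settling into the 4-block main loop.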
diff --git a/src/spdk/intel-ipsec-mb/avx/aes_xcbc_mac_128_x8.asm b/src/spdk/intel-ipsec-mb/avx/aes_xcbc_mac_128_x8.asm
new file mode 100644
index 000000000..615e19050
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx/aes_xcbc_mac_128_x8.asm
@@ -0,0 +1,418 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+;;; routine to do 128 bit AES XCBC
+
+;; clobbers all registers except for ARG1 and rbp
+
+%include "include/os.asm"
+%include "mb_mgr_datastruct.asm"
+
+%define VMOVDQ vmovdqu ;; assume buffers not aligned
+
+%macro VPXOR2 2
+ vpxor %1, %1, %2
+%endm
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; struct AES_XCBC_ARGS_x8 {
+;; void* in[8];
+;; UINT128* keys[8];
+;; UINT128 ICV[8];
+;; }
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; void aes_xcbc_mac_128_x8(AES_XCBC_ARGS_x8 *args, UINT64 len);
+;; arg 1: ARG : addr of AES_XCBC_ARGS_x8 structure
+;; arg 2: LEN : len (in units of bytes)
+
+struc STACK
+_gpr_save: resq 1
+_len: resq 1
+endstruc
+
+%define GPR_SAVE_AREA rsp + _gpr_save
+%define LEN_AREA rsp + _len
+
+%ifdef LINUX
+%define ARG rdi
+%define LEN rsi
+%define REG3 rcx
+%define REG4 rdx
+%else
+%define ARG rcx
+%define LEN rdx
+%define REG3 rsi
+%define REG4 rdi
+%endif
+
+%define IDX rax
+%define TMP rbx
+
+%define KEYS0 REG3
+%define KEYS1 REG4
+%define KEYS2 rbp
+%define KEYS3 r8
+%define KEYS4 r9
+%define KEYS5 r10
+%define KEYS6 r11
+%define KEYS7 r12
+
+%define IN0 r13
+%define IN2 r14
+%define IN4 r15
+%define IN6 LEN
+
+%define XDATA0 xmm0
+%define XDATA1 xmm1
+%define XDATA2 xmm2
+%define XDATA3 xmm3
+%define XDATA4 xmm4
+%define XDATA5 xmm5
+%define XDATA6 xmm6
+%define XDATA7 xmm7
+
+%define XKEY0_3 xmm8
+%define XKEY1_4 xmm9
+%define XKEY2_5 xmm10
+%define XKEY3_6 xmm11
+%define XKEY4_7 xmm12
+%define XKEY5_8 xmm13
+%define XKEY6_9 xmm14
+%define XTMP xmm15
+
+section .text
+MKGLOBAL(aes_xcbc_mac_128_x8,function,internal)
+aes_xcbc_mac_128_x8:
+
+ sub rsp, STACK_size
+ mov [GPR_SAVE_AREA + 8*0], rbp
+
+ mov IDX, 16
+ mov [LEN_AREA], LEN
+
+ mov IN0, [ARG + _aesxcbcarg_in + 8*0]
+ mov IN2, [ARG + _aesxcbcarg_in + 8*2]
+ mov IN4, [ARG + _aesxcbcarg_in + 8*4]
+ mov IN6, [ARG + _aesxcbcarg_in + 8*6]
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+ mov TMP, [ARG + _aesxcbcarg_in + 8*1]
+ VMOVDQ XDATA0, [IN0] ; load first block of plain text
+ VMOVDQ XDATA1, [TMP] ; load first block of plain text
+ mov TMP, [ARG + _aesxcbcarg_in + 8*3]
+ VMOVDQ XDATA2, [IN2] ; load first block of plain text
+ VMOVDQ XDATA3, [TMP] ; load first block of plain text
+ mov TMP, [ARG + _aesxcbcarg_in + 8*5]
+ VMOVDQ XDATA4, [IN4] ; load first block of plain text
+ VMOVDQ XDATA5, [TMP] ; load first block of plain text
+ mov TMP, [ARG + _aesxcbcarg_in + 8*7]
+ VMOVDQ XDATA6, [IN6] ; load first block of plain text
+ VMOVDQ XDATA7, [TMP] ; load first block of plain text
+
+
+ VPXOR2 XDATA0, [ARG + _aesxcbcarg_ICV + 16*0] ; plaintext XOR ICV
+ VPXOR2 XDATA1, [ARG + _aesxcbcarg_ICV + 16*1] ; plaintext XOR ICV
+ VPXOR2 XDATA2, [ARG + _aesxcbcarg_ICV + 16*2] ; plaintext XOR ICV
+ VPXOR2 XDATA3, [ARG + _aesxcbcarg_ICV + 16*3] ; plaintext XOR ICV
+ VPXOR2 XDATA4, [ARG + _aesxcbcarg_ICV + 16*4] ; plaintext XOR ICV
+ VPXOR2 XDATA5, [ARG + _aesxcbcarg_ICV + 16*5] ; plaintext XOR ICV
+ VPXOR2 XDATA6, [ARG + _aesxcbcarg_ICV + 16*6] ; plaintext XOR ICV
+ VPXOR2 XDATA7, [ARG + _aesxcbcarg_ICV + 16*7] ; plaintext XOR ICV
+
+ mov KEYS0, [ARG + _aesxcbcarg_keys + 8*0]
+ mov KEYS1, [ARG + _aesxcbcarg_keys + 8*1]
+ mov KEYS2, [ARG + _aesxcbcarg_keys + 8*2]
+ mov KEYS3, [ARG + _aesxcbcarg_keys + 8*3]
+ mov KEYS4, [ARG + _aesxcbcarg_keys + 8*4]
+ mov KEYS5, [ARG + _aesxcbcarg_keys + 8*5]
+ mov KEYS6, [ARG + _aesxcbcarg_keys + 8*6]
+ mov KEYS7, [ARG + _aesxcbcarg_keys + 8*7]
+
+ VPXOR2 XDATA0, [KEYS0 + 16*0] ; 0. ARK
+ VPXOR2 XDATA1, [KEYS1 + 16*0] ; 0. ARK
+ VPXOR2 XDATA2, [KEYS2 + 16*0] ; 0. ARK
+ VPXOR2 XDATA3, [KEYS3 + 16*0] ; 0. ARK
+ VPXOR2 XDATA4, [KEYS4 + 16*0] ; 0. ARK
+ VPXOR2 XDATA5, [KEYS5 + 16*0] ; 0. ARK
+ VPXOR2 XDATA6, [KEYS6 + 16*0] ; 0. ARK
+ VPXOR2 XDATA7, [KEYS7 + 16*0] ; 0. ARK
+
+ vaesenc XDATA0, [KEYS0 + 16*1] ; 1. ENC
+ vaesenc XDATA1, [KEYS1 + 16*1] ; 1. ENC
+ vaesenc XDATA2, [KEYS2 + 16*1] ; 1. ENC
+ vaesenc XDATA3, [KEYS3 + 16*1] ; 1. ENC
+ vaesenc XDATA4, [KEYS4 + 16*1] ; 1. ENC
+ vaesenc XDATA5, [KEYS5 + 16*1] ; 1. ENC
+ vaesenc XDATA6, [KEYS6 + 16*1] ; 1. ENC
+ vaesenc XDATA7, [KEYS7 + 16*1] ; 1. ENC
+
+ vmovdqa XKEY0_3, [KEYS0 + 16*3] ; load round 3 key
+
+ vaesenc XDATA0, [KEYS0 + 16*2] ; 2. ENC
+ vaesenc XDATA1, [KEYS1 + 16*2] ; 2. ENC
+ vaesenc XDATA2, [KEYS2 + 16*2] ; 2. ENC
+ vaesenc XDATA3, [KEYS3 + 16*2] ; 2. ENC
+ vaesenc XDATA4, [KEYS4 + 16*2] ; 2. ENC
+ vaesenc XDATA5, [KEYS5 + 16*2] ; 2. ENC
+ vaesenc XDATA6, [KEYS6 + 16*2] ; 2. ENC
+ vaesenc XDATA7, [KEYS7 + 16*2] ; 2. ENC
+
+ vmovdqa XKEY1_4, [KEYS1 + 16*4] ; load round 4 key
+
+ vaesenc XDATA0, XKEY0_3 ; 3. ENC
+ vaesenc XDATA1, [KEYS1 + 16*3] ; 3. ENC
+ vaesenc XDATA2, [KEYS2 + 16*3] ; 3. ENC
+ vaesenc XDATA3, [KEYS3 + 16*3] ; 3. ENC
+ vaesenc XDATA4, [KEYS4 + 16*3] ; 3. ENC
+ vaesenc XDATA5, [KEYS5 + 16*3] ; 3. ENC
+ vaesenc XDATA6, [KEYS6 + 16*3] ; 3. ENC
+ vaesenc XDATA7, [KEYS7 + 16*3] ; 3. ENC
+
+ vaesenc XDATA0, [KEYS0 + 16*4] ; 4. ENC
+ vmovdqa XKEY2_5, [KEYS2 + 16*5] ; load round 5 key
+ vaesenc XDATA1, XKEY1_4 ; 4. ENC
+ vaesenc XDATA2, [KEYS2 + 16*4] ; 4. ENC
+ vaesenc XDATA3, [KEYS3 + 16*4] ; 4. ENC
+ vaesenc XDATA4, [KEYS4 + 16*4] ; 4. ENC
+ vaesenc XDATA5, [KEYS5 + 16*4] ; 4. ENC
+ vaesenc XDATA6, [KEYS6 + 16*4] ; 4. ENC
+ vaesenc XDATA7, [KEYS7 + 16*4] ; 4. ENC
+
+ vaesenc XDATA0, [KEYS0 + 16*5] ; 5. ENC
+ vaesenc XDATA1, [KEYS1 + 16*5] ; 5. ENC
+ vmovdqa XKEY3_6, [KEYS3 + 16*6] ; load round 6 key
+ vaesenc XDATA2, XKEY2_5 ; 5. ENC
+ vaesenc XDATA3, [KEYS3 + 16*5] ; 5. ENC
+ vaesenc XDATA4, [KEYS4 + 16*5] ; 5. ENC
+ vaesenc XDATA5, [KEYS5 + 16*5] ; 5. ENC
+ vaesenc XDATA6, [KEYS6 + 16*5] ; 5. ENC
+ vaesenc XDATA7, [KEYS7 + 16*5] ; 5. ENC
+
+ vaesenc XDATA0, [KEYS0 + 16*6] ; 6. ENC
+ vaesenc XDATA1, [KEYS1 + 16*6] ; 6. ENC
+ vaesenc XDATA2, [KEYS2 + 16*6] ; 6. ENC
+ vmovdqa XKEY4_7, [KEYS4 + 16*7] ; load round 7 key
+ vaesenc XDATA3, XKEY3_6 ; 6. ENC
+ vaesenc XDATA4, [KEYS4 + 16*6] ; 6. ENC
+ vaesenc XDATA5, [KEYS5 + 16*6] ; 6. ENC
+ vaesenc XDATA6, [KEYS6 + 16*6] ; 6. ENC
+ vaesenc XDATA7, [KEYS7 + 16*6] ; 6. ENC
+
+ vaesenc XDATA0, [KEYS0 + 16*7] ; 7. ENC
+ vaesenc XDATA1, [KEYS1 + 16*7] ; 7. ENC
+ vaesenc XDATA2, [KEYS2 + 16*7] ; 7. ENC
+ vaesenc XDATA3, [KEYS3 + 16*7] ; 7. ENC
+ vmovdqa XKEY5_8, [KEYS5 + 16*8] ; load round 8 key
+ vaesenc XDATA4, XKEY4_7 ; 7. ENC
+ vaesenc XDATA5, [KEYS5 + 16*7] ; 7. ENC
+ vaesenc XDATA6, [KEYS6 + 16*7] ; 7. ENC
+ vaesenc XDATA7, [KEYS7 + 16*7] ; 7. ENC
+
+ vaesenc XDATA0, [KEYS0 + 16*8] ; 8. ENC
+ vaesenc XDATA1, [KEYS1 + 16*8] ; 8. ENC
+ vaesenc XDATA2, [KEYS2 + 16*8] ; 8. ENC
+ vaesenc XDATA3, [KEYS3 + 16*8] ; 8. ENC
+ vaesenc XDATA4, [KEYS4 + 16*8] ; 8. ENC
+ vmovdqa XKEY6_9, [KEYS6 + 16*9] ; load round 9 key
+ vaesenc XDATA5, XKEY5_8 ; 8. ENC
+ vaesenc XDATA6, [KEYS6 + 16*8] ; 8. ENC
+ vaesenc XDATA7, [KEYS7 + 16*8] ; 8. ENC
+
+ vaesenc XDATA0, [KEYS0 + 16*9] ; 9. ENC
+ vaesenc XDATA1, [KEYS1 + 16*9] ; 9. ENC
+ vaesenc XDATA2, [KEYS2 + 16*9] ; 9. ENC
+ vaesenc XDATA3, [KEYS3 + 16*9] ; 9. ENC
+ vaesenc XDATA4, [KEYS4 + 16*9] ; 9. ENC
+ vaesenc XDATA5, [KEYS5 + 16*9] ; 9. ENC
+ vaesenc XDATA6, XKEY6_9 ; 9. ENC
+ vaesenc XDATA7, [KEYS7 + 16*9] ; 9. ENC
+
+ vaesenclast XDATA0, [KEYS0 + 16*10] ; 10. ENC
+ vaesenclast XDATA1, [KEYS1 + 16*10] ; 10. ENC
+ vaesenclast XDATA2, [KEYS2 + 16*10] ; 10. ENC
+ vaesenclast XDATA3, [KEYS3 + 16*10] ; 10. ENC
+ vaesenclast XDATA4, [KEYS4 + 16*10] ; 10. ENC
+ vaesenclast XDATA5, [KEYS5 + 16*10] ; 10. ENC
+ vaesenclast XDATA6, [KEYS6 + 16*10] ; 10. ENC
+ vaesenclast XDATA7, [KEYS7 + 16*10] ; 10. ENC
+
+ cmp [LEN_AREA], IDX
+ je done
+
+main_loop:
+ mov TMP, [ARG + _aesxcbcarg_in + 8*1]
+ VPXOR2 XDATA0, [IN0 + IDX] ; load next block of plain text
+ VPXOR2 XDATA1, [TMP + IDX] ; load next block of plain text
+ mov TMP, [ARG + _aesxcbcarg_in + 8*3]
+ VPXOR2 XDATA2, [IN2 + IDX] ; load next block of plain text
+ VPXOR2 XDATA3, [TMP + IDX] ; load next block of plain text
+ mov TMP, [ARG + _aesxcbcarg_in + 8*5]
+ VPXOR2 XDATA4, [IN4 + IDX] ; load next block of plain text
+ VPXOR2 XDATA5, [TMP + IDX] ; load next block of plain text
+ mov TMP, [ARG + _aesxcbcarg_in + 8*7]
+ VPXOR2 XDATA6, [IN6 + IDX] ; load next block of plain text
+ VPXOR2 XDATA7, [TMP + IDX] ; load next block of plain text
+
+
+ VPXOR2 XDATA0, [KEYS0 + 16*0] ; 0. ARK
+ VPXOR2 XDATA1, [KEYS1 + 16*0] ; 0. ARK
+ VPXOR2 XDATA2, [KEYS2 + 16*0] ; 0. ARK
+ VPXOR2 XDATA3, [KEYS3 + 16*0] ; 0. ARK
+ VPXOR2 XDATA4, [KEYS4 + 16*0] ; 0. ARK
+ VPXOR2 XDATA5, [KEYS5 + 16*0] ; 0. ARK
+ VPXOR2 XDATA6, [KEYS6 + 16*0] ; 0. ARK
+ VPXOR2 XDATA7, [KEYS7 + 16*0] ; 0. ARK
+
+ vaesenc XDATA0, [KEYS0 + 16*1] ; 1. ENC
+ vaesenc XDATA1, [KEYS1 + 16*1] ; 1. ENC
+ vaesenc XDATA2, [KEYS2 + 16*1] ; 1. ENC
+ vaesenc XDATA3, [KEYS3 + 16*1] ; 1. ENC
+ vaesenc XDATA4, [KEYS4 + 16*1] ; 1. ENC
+ vaesenc XDATA5, [KEYS5 + 16*1] ; 1. ENC
+ vaesenc XDATA6, [KEYS6 + 16*1] ; 1. ENC
+ vaesenc XDATA7, [KEYS7 + 16*1] ; 1. ENC
+
+ vaesenc XDATA0, [KEYS0 + 16*2] ; 2. ENC
+ vaesenc XDATA1, [KEYS1 + 16*2] ; 2. ENC
+ vaesenc XDATA2, [KEYS2 + 16*2] ; 2. ENC
+ vaesenc XDATA3, [KEYS3 + 16*2] ; 2. ENC
+ vaesenc XDATA4, [KEYS4 + 16*2] ; 2. ENC
+ vaesenc XDATA5, [KEYS5 + 16*2] ; 2. ENC
+ vaesenc XDATA6, [KEYS6 + 16*2] ; 2. ENC
+ vaesenc XDATA7, [KEYS7 + 16*2] ; 2. ENC
+
+ vaesenc XDATA0, XKEY0_3 ; 3. ENC
+ vaesenc XDATA1, [KEYS1 + 16*3] ; 3. ENC
+ vaesenc XDATA2, [KEYS2 + 16*3] ; 3. ENC
+ vaesenc XDATA3, [KEYS3 + 16*3] ; 3. ENC
+ vaesenc XDATA4, [KEYS4 + 16*3] ; 3. ENC
+ vaesenc XDATA5, [KEYS5 + 16*3] ; 3. ENC
+ vaesenc XDATA6, [KEYS6 + 16*3] ; 3. ENC
+ vaesenc XDATA7, [KEYS7 + 16*3] ; 3. ENC
+
+ vaesenc XDATA0, [KEYS0 + 16*4] ; 4. ENC
+ vaesenc XDATA1, XKEY1_4 ; 4. ENC
+ vaesenc XDATA2, [KEYS2 + 16*4] ; 4. ENC
+ vaesenc XDATA3, [KEYS3 + 16*4] ; 4. ENC
+ vaesenc XDATA4, [KEYS4 + 16*4] ; 4. ENC
+ vaesenc XDATA5, [KEYS5 + 16*4] ; 4. ENC
+ vaesenc XDATA6, [KEYS6 + 16*4] ; 4. ENC
+ vaesenc XDATA7, [KEYS7 + 16*4] ; 4. ENC
+
+ vaesenc XDATA0, [KEYS0 + 16*5] ; 5. ENC
+ vaesenc XDATA1, [KEYS1 + 16*5] ; 5. ENC
+ vaesenc XDATA2, XKEY2_5 ; 5. ENC
+ vaesenc XDATA3, [KEYS3 + 16*5] ; 5. ENC
+ vaesenc XDATA4, [KEYS4 + 16*5] ; 5. ENC
+ vaesenc XDATA5, [KEYS5 + 16*5] ; 5. ENC
+ vaesenc XDATA6, [KEYS6 + 16*5] ; 5. ENC
+ vaesenc XDATA7, [KEYS7 + 16*5] ; 5. ENC
+
+ vaesenc XDATA0, [KEYS0 + 16*6] ; 6. ENC
+ vaesenc XDATA1, [KEYS1 + 16*6] ; 6. ENC
+ vaesenc XDATA2, [KEYS2 + 16*6] ; 6. ENC
+ vaesenc XDATA3, XKEY3_6 ; 6. ENC
+ vaesenc XDATA4, [KEYS4 + 16*6] ; 6. ENC
+ vaesenc XDATA5, [KEYS5 + 16*6] ; 6. ENC
+ vaesenc XDATA6, [KEYS6 + 16*6] ; 6. ENC
+ vaesenc XDATA7, [KEYS7 + 16*6] ; 6. ENC
+
+ vaesenc XDATA0, [KEYS0 + 16*7] ; 7. ENC
+ vaesenc XDATA1, [KEYS1 + 16*7] ; 7. ENC
+ vaesenc XDATA2, [KEYS2 + 16*7] ; 7. ENC
+ vaesenc XDATA3, [KEYS3 + 16*7] ; 7. ENC
+ vaesenc XDATA4, XKEY4_7 ; 7. ENC
+ vaesenc XDATA5, [KEYS5 + 16*7] ; 7. ENC
+ vaesenc XDATA6, [KEYS6 + 16*7] ; 7. ENC
+ vaesenc XDATA7, [KEYS7 + 16*7] ; 7. ENC
+
+ vaesenc XDATA0, [KEYS0 + 16*8] ; 8. ENC
+ vaesenc XDATA1, [KEYS1 + 16*8] ; 8. ENC
+ vaesenc XDATA2, [KEYS2 + 16*8] ; 8. ENC
+ vaesenc XDATA3, [KEYS3 + 16*8] ; 8. ENC
+ vaesenc XDATA4, [KEYS4 + 16*8] ; 8. ENC
+ vaesenc XDATA5, XKEY5_8 ; 8. ENC
+ vaesenc XDATA6, [KEYS6 + 16*8] ; 8. ENC
+ vaesenc XDATA7, [KEYS7 + 16*8] ; 8. ENC
+
+ vaesenc XDATA0, [KEYS0 + 16*9] ; 9. ENC
+ vaesenc XDATA1, [KEYS1 + 16*9] ; 9. ENC
+ vaesenc XDATA2, [KEYS2 + 16*9] ; 9. ENC
+ vaesenc XDATA3, [KEYS3 + 16*9] ; 9. ENC
+ vaesenc XDATA4, [KEYS4 + 16*9] ; 9. ENC
+ vaesenc XDATA5, [KEYS5 + 16*9] ; 9. ENC
+ vaesenc XDATA6, XKEY6_9 ; 9. ENC
+ vaesenc XDATA7, [KEYS7 + 16*9] ; 9. ENC
+
+
+ vaesenclast XDATA0, [KEYS0 + 16*10] ; 10. ENC
+ vaesenclast XDATA1, [KEYS1 + 16*10] ; 10. ENC
+ vaesenclast XDATA2, [KEYS2 + 16*10] ; 10. ENC
+ vaesenclast XDATA3, [KEYS3 + 16*10] ; 10. ENC
+ vaesenclast XDATA4, [KEYS4 + 16*10] ; 10. ENC
+ vaesenclast XDATA5, [KEYS5 + 16*10] ; 10. ENC
+ vaesenclast XDATA6, [KEYS6 + 16*10] ; 10. ENC
+ vaesenclast XDATA7, [KEYS7 + 16*10] ; 10. ENC
+
+ add IDX, 16
+ cmp [LEN_AREA], IDX
+ jne main_loop
+
+done:
+ ;; update ICV
+ vmovdqa [ARG + _aesxcbcarg_ICV + 16*0], XDATA0
+ vmovdqa [ARG + _aesxcbcarg_ICV + 16*1], XDATA1
+ vmovdqa [ARG + _aesxcbcarg_ICV + 16*2], XDATA2
+ vmovdqa [ARG + _aesxcbcarg_ICV + 16*3], XDATA3
+ vmovdqa [ARG + _aesxcbcarg_ICV + 16*4], XDATA4
+ vmovdqa [ARG + _aesxcbcarg_ICV + 16*5], XDATA5
+ vmovdqa [ARG + _aesxcbcarg_ICV + 16*6], XDATA6
+ vmovdqa [ARG + _aesxcbcarg_ICV + 16*7], XDATA7
+
+ ;; update IN
+ vmovd xmm0, [LEN_AREA]
+ vpshufd xmm0, xmm0, 0x44
+ vpaddq xmm1, xmm0, [ARG + _aesxcbcarg_in + 16*0]
+ vpaddq xmm2, xmm0, [ARG + _aesxcbcarg_in + 16*1]
+ vpaddq xmm3, xmm0, [ARG + _aesxcbcarg_in + 16*2]
+ vpaddq xmm4, xmm0, [ARG + _aesxcbcarg_in + 16*3]
+ vmovdqa [ARG + _aesxcbcarg_in + 16*0], xmm1
+ vmovdqa [ARG + _aesxcbcarg_in + 16*1], xmm2
+ vmovdqa [ARG + _aesxcbcarg_in + 16*2], xmm3
+ vmovdqa [ARG + _aesxcbcarg_in + 16*3], xmm4
+
+;; XMMs are saved at a higher level
+ mov rbp, [GPR_SAVE_AREA + 8*0]
+
+ add rsp, STACK_size
+
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
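
Per lane, the routine above is a plain CBC-MAC update: ICV = E(K, ICV XOR block) for each 16-byte block, with the 8 lanes interleaved for throughput and the in[] pointers advanced by len at the end. A minimal C sketch, assuming a hypothetical aes128_enc_block() single-block helper and a simplified argument struct mirroring AES_XCBC_ARGS_x8:

    #include <stdint.h>

    extern void aes128_enc_block(const void *keys, const void *in, void *out); /* assumed */

    struct xcbc_args_x8_ref {
            const uint8_t *in[8];      /* input buffers            */
            const void *keys[8];       /* expanded AES-128 keys    */
            uint8_t icv[8][16];        /* running ICV per lane     */
    };

    static void aes_xcbc_mac_128_x8_ref(struct xcbc_args_x8_ref *args, uint64_t len)
    {
            uint64_t lane, off, i;
            uint8_t block[16];

            for (lane = 0; lane < 8; lane++) {
                    for (off = 0; off < len; off += 16) {
                            for (i = 0; i < 16; i++)    /* ICV XOR next block */
                                    block[i] = args->icv[lane][i] ^
                                               args->in[lane][off + i];
                            aes128_enc_block(args->keys[lane], block,
                                             args->icv[lane]);
                    }
                    args->in[lane] += len;              /* asm advances in[] too */
            }
    }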
diff --git a/src/spdk/intel-ipsec-mb/avx/gcm128_avx_gen2.asm b/src/spdk/intel-ipsec-mb/avx/gcm128_avx_gen2.asm
new file mode 100644
index 000000000..1bb601e4f
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx/gcm128_avx_gen2.asm
@@ -0,0 +1,31 @@
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; Copyright(c) 2011-2018 Intel Corporation All rights reserved.
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions
+; are met:
+; * Redistributions of source code must retain the above copyright
+; notice, this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright
+; notice, this list of conditions and the following disclaimer in
+; the documentation and/or other materials provided with the
+; distribution.
+; * Neither the name of Intel Corporation nor the names of its
+; contributors may be used to endorse or promote products derived
+; from this software without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+%define GCM128_MODE 1
+%include "avx/gcm_avx_gen2.asm"
diff --git a/src/spdk/intel-ipsec-mb/avx/gcm192_avx_gen2.asm b/src/spdk/intel-ipsec-mb/avx/gcm192_avx_gen2.asm
new file mode 100644
index 000000000..4de59d5bf
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx/gcm192_avx_gen2.asm
@@ -0,0 +1,31 @@
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; Copyright(c) 2018 Intel Corporation All rights reserved.
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions
+; are met:
+; * Redistributions of source code must retain the above copyright
+; notice, this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright
+; notice, this list of conditions and the following disclaimer in
+; the documentation and/or other materials provided with the
+; distribution.
+; * Neither the name of Intel Corporation nor the names of its
+; contributors may be used to endorse or promote products derived
+; from this software without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+%define GCM192_MODE 1
+%include "avx/gcm_avx_gen2.asm"
diff --git a/src/spdk/intel-ipsec-mb/avx/gcm256_avx_gen2.asm b/src/spdk/intel-ipsec-mb/avx/gcm256_avx_gen2.asm
new file mode 100644
index 000000000..de8eadf4c
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx/gcm256_avx_gen2.asm
@@ -0,0 +1,30 @@
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; Copyright(c) 2011-2018 Intel Corporation All rights reserved.
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions
+; are met:
+; * Redistributions of source code must retain the above copyright
+; notice, this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright
+; notice, this list of conditions and the following disclaimer in
+; the documentation and/or other materials provided with the
+; distribution.
+; * Neither the name of Intel Corporation nor the names of its
+; contributors may be used to endorse or promote products derived
+; from this software without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+%define GCM256_MODE 1
+%include "avx/gcm_avx_gen2.asm"
diff --git a/src/spdk/intel-ipsec-mb/avx/gcm_avx_gen2.asm b/src/spdk/intel-ipsec-mb/avx/gcm_avx_gen2.asm
new file mode 100644
index 000000000..2aa3a162d
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx/gcm_avx_gen2.asm
@@ -0,0 +1,2515 @@
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; Copyright(c) 2011-2019 Intel Corporation All rights reserved.
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions
+; are met:
+; * Redistributions of source code must retain the above copyright
+; notice, this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright
+; notice, this list of conditions and the following disclaimer in
+; the documentation and/or other materials provided with the
+; distribution.
+; * Neither the name of Intel Corporation nor the names of its
+; contributors may be used to endorse or promote products derived
+; from this software without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;
+; Authors:
+; Erdinc Ozturk
+; Vinodh Gopal
+; James Guilford
+;
+;
+; References:
+; This code was derived and highly optimized from the code described in paper:
+; Vinodh Gopal et al. Optimized Galois-Counter-Mode Implementation on Intel Architecture Processors. August, 2010
+;
+; For the shift-based reductions used in this code, we used the method described in paper:
+; Shay Gueron, Michael E. Kounavis. Intel Carry-Less Multiplication Instruction and its Usage for Computing the GCM Mode. January, 2010.
+;
+;
+;
+;
+; Assumptions:
+;
+;
+;
+; iv:
+; 0 1 2 3
+; 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+; +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+; | Salt (From the SA) |
+; +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+; | Initialization Vector |
+; | (This is the sequence number from IPSec header) |
+; +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+; | 0x1 |
+; +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+;
+;
+;
+; AAD:
+; AAD will be padded with 0 to the next 16-byte multiple
+; for example, assume AAD is a u32 vector
+;
+; if AAD is 8 bytes:
+; AAD[3] = {A0, A1};
+; padded AAD in xmm register = {A1 A0 0 0}
+;
+; 0 1 2 3
+; 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+; +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+; | SPI (A1) |
+; +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+; | 32-bit Sequence Number (A0) |
+; +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+; | 0x0 |
+; +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+;
+; AAD Format with 32-bit Sequence Number
+;
+; if AAD is 12 bytes:
+; AAD[3] = {A0, A1, A2};
+; padded AAD in xmm register = {A2 A1 A0 0}
+;
+; 0 1 2 3
+; 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+; +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+; | SPI (A2) |
+; +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+; | 64-bit Extended Sequence Number {A1,A0} |
+; | |
+; +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+; | 0x0 |
+; +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+;
+; AAD Format with 64-bit Extended Sequence Number
+;
+;
+; aadLen:
+; From the definition of the spec, aadLen must be a multiple of 4 bytes.
+; The code additionally supports any aadLen.
+;
+; TLen:
+; from the definition of the spec, TLen can only be 8, 12 or 16 bytes.
+;
+; poly = x^128 + x^127 + x^126 + x^121 + 1
+; throughout the code, one-tab and two-tab indentations are used: one tab is for the GHASH part, two tabs are for the AES part.
+;
+
+%include "include/os.asm"
+%include "include/reg_sizes.asm"
+%include "include/clear_regs.asm"
+%include "include/gcm_defines.asm"
+%include "include/gcm_keys_sse_avx.asm"
+%include "include/memcpy.asm"
+
+%ifndef GCM128_MODE
+%ifndef GCM192_MODE
+%ifndef GCM256_MODE
+%error "No GCM mode selected for gcm_avx_gen2.asm!"
+%endif
+%endif
+%endif
+
+%ifdef GCM128_MODE
+%define FN_NAME(x,y) aes_gcm_ %+ x %+ _128 %+ y %+ avx_gen2
+%define NROUNDS 9
+%endif
+
+%ifdef GCM192_MODE
+%define FN_NAME(x,y) aes_gcm_ %+ x %+ _192 %+ y %+ avx_gen2
+%define NROUNDS 11
+%endif
+
+%ifdef GCM256_MODE
+%define FN_NAME(x,y) aes_gcm_ %+ x %+ _256 %+ y %+ avx_gen2
+%define NROUNDS 13
+%endif
+
+default rel
+; need to push 4 registers into stack to maintain
+%define STACK_OFFSET 8*4
+
+%define TMP2 16*0 ; Temporary storage for AES State 2 (State 1 is stored in an XMM register)
+%define TMP3 16*1 ; Temporary storage for AES State 3
+%define TMP4 16*2 ; Temporary storage for AES State 4
+%define TMP5 16*3 ; Temporary storage for AES State 5
+%define TMP6 16*4 ; Temporary storage for AES State 6
+%define TMP7 16*5 ; Temporary storage for AES State 7
+%define TMP8 16*6 ; Temporary storage for AES State 8
+
+%define LOCAL_STORAGE 16*7
+
+%ifidn __OUTPUT_FORMAT__, win64
+ %define XMM_STORAGE 16*10
+%else
+ %define XMM_STORAGE 0
+%endif
+
+%define VARIABLE_OFFSET LOCAL_STORAGE + XMM_STORAGE
+
+section .text
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; Utility Macros
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; GHASH_MUL MACRO to implement: Data*HashKey mod (128,127,126,121,0)
+; Input: A and B (128-bits each, bit-reflected)
+; Output: C = A*B*x mod poly, (i.e. >>1 )
+; To compute GH = GH*HashKey mod poly, give HK = HashKey<<1 mod poly as input
+; GH = GH * HK * x mod poly which is equivalent to GH*HashKey mod poly.
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
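+; Karatsuba detail: splitting GH = a1:a0 and HK = b1:b0 into 64-bit halves,
+;     GH x HK = a1*b1*x^128 + [(a1+a0)*(b1+b0) + a1*b1 + a0*b0]*x^64 + a0*b0
+; (additions are XORs in GF(2)), so only three carry-less multiplies are
+; needed instead of four. The two reduction phases below then fold the
+; 256-bit product modulo poly = x^128 + x^127 + x^126 + x^121 + 1
+; using shifts and XORs.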
+%macro GHASH_MUL 7
+%define %%GH %1 ; 16 Bytes
+%define %%HK %2 ; 16 Bytes
+%define %%T1 %3
+%define %%T2 %4
+%define %%T3 %5
+%define %%T4 %6
+%define %%T5 %7
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;; Karatsuba
+ vpshufd %%T2, %%GH, 01001110b
+ vpshufd %%T3, %%HK, 01001110b
+ vpxor %%T2, %%T2, %%GH ; %%T2 = (a1+a0)
+ vpxor %%T3, %%T3, %%HK ; %%T3 = (b1+b0)
+
+ vpclmulqdq %%T1, %%GH, %%HK, 0x11 ; %%T1 = a1*b1
+ vpclmulqdq %%GH, %%HK, 0x00 ; %%GH = a0*b0
+ vpclmulqdq %%T2, %%T3, 0x00 ; %%T2 = (a1+a0)*(b1+b0)
+ vpxor %%T2, %%T2, %%GH
+ vpxor %%T2, %%T2, %%T1 ; %%T2 = a0*b1+a1*b0
+
+ vpslldq %%T3, %%T2, 8 ; shift-L %%T3 2 DWs
+ vpsrldq %%T2, %%T2, 8 ; shift-R %%T2 2 DWs
+ vpxor %%GH, %%GH, %%T3
+ vpxor %%T1, %%T1, %%T2 ; <%%T1:%%GH> = %%GH x %%HK
+
+ ;first phase of the reduction
+ vpslld %%T2, %%GH, 31 ; packed right shifting << 31
+ vpslld %%T3, %%GH, 30 ; packed right shifting << 30
+ vpslld %%T4, %%GH, 25 ; packed right shifting << 25
+
+ vpxor %%T2, %%T2, %%T3 ; xor the shifted versions
+ vpxor %%T2, %%T2, %%T4
+
+ vpsrldq %%T5, %%T2, 4 ; shift-R %%T5 1 DW
+
+ vpslldq %%T2, %%T2, 12 ; shift-L %%T2 3 DWs
+ vpxor %%GH, %%GH, %%T2 ; first phase of the reduction complete
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+ ;second phase of the reduction
+
+ vpsrld %%T2,%%GH,1 ; packed left shifting >> 1
+ vpsrld %%T3,%%GH,2 ; packed left shifting >> 2
+ vpsrld %%T4,%%GH,7 ; packed left shifting >> 7
+ vpxor %%T2, %%T2, %%T3 ; xor the shifted versions
+ vpxor %%T2, %%T2, %%T4
+
+ vpxor %%T2, %%T2, %%T5
+ vpxor %%GH, %%GH, %%T2
+ vpxor %%GH, %%GH, %%T1 ; the result is in %%GH
+
+
+%endmacro
+
+
+%macro PRECOMPUTE 8
+%define %%GDATA %1
+%define %%HK %2
+%define %%T1 %3
+%define %%T2 %4
+%define %%T3 %5
+%define %%T4 %6
+%define %%T5 %7
+%define %%T6 %8
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; HashKey_i_k holds XORed values of the low and high parts of HashKey_i
+ vmovdqa %%T5, %%HK
+
+ vpshufd %%T1, %%T5, 01001110b
+ vpxor %%T1, %%T5
+ vmovdqu [%%GDATA + HashKey_k], %%T1
+
+ GHASH_MUL %%T5, %%HK, %%T1, %%T3, %%T4, %%T6, %%T2 ; %%T5 = HashKey^2<<1 mod poly
+ vmovdqu [%%GDATA + HashKey_2], %%T5 ; [HashKey_2] = HashKey^2<<1 mod poly
+ vpshufd %%T1, %%T5, 01001110b
+ vpxor %%T1, %%T5
+ vmovdqu [%%GDATA + HashKey_2_k], %%T1
+
+ GHASH_MUL %%T5, %%HK, %%T1, %%T3, %%T4, %%T6, %%T2 ; %%T5 = HashKey^3<<1 mod poly
+ vmovdqu [%%GDATA + HashKey_3], %%T5
+ vpshufd %%T1, %%T5, 01001110b
+ vpxor %%T1, %%T5
+ vmovdqu [%%GDATA + HashKey_3_k], %%T1
+
+ GHASH_MUL %%T5, %%HK, %%T1, %%T3, %%T4, %%T6, %%T2 ; %%T5 = HashKey^4<<1 mod poly
+ vmovdqu [%%GDATA + HashKey_4], %%T5
+ vpshufd %%T1, %%T5, 01001110b
+ vpxor %%T1, %%T5
+ vmovdqu [%%GDATA + HashKey_4_k], %%T1
+
+ GHASH_MUL %%T5, %%HK, %%T1, %%T3, %%T4, %%T6, %%T2 ; %%T5 = HashKey^5<<1 mod poly
+ vmovdqu [%%GDATA + HashKey_5], %%T5
+ vpshufd %%T1, %%T5, 01001110b
+ vpxor %%T1, %%T5
+ vmovdqu [%%GDATA + HashKey_5_k], %%T1
+
+ GHASH_MUL %%T5, %%HK, %%T1, %%T3, %%T4, %%T6, %%T2 ; %%T5 = HashKey^6<<1 mod poly
+ vmovdqu [%%GDATA + HashKey_6], %%T5
+ vpshufd %%T1, %%T5, 01001110b
+ vpxor %%T1, %%T5
+ vmovdqu [%%GDATA + HashKey_6_k], %%T1
+
+ GHASH_MUL %%T5, %%HK, %%T1, %%T3, %%T4, %%T6, %%T2 ; %%T5 = HashKey^7<<1 mod poly
+ vmovdqu [%%GDATA + HashKey_7], %%T5
+ vpshufd %%T1, %%T5, 01001110b
+ vpxor %%T1, %%T5
+ vmovdqu [%%GDATA + HashKey_7_k], %%T1
+
+ GHASH_MUL %%T5, %%HK, %%T1, %%T3, %%T4, %%T6, %%T2 ; %%T5 = HashKey^8<<1 mod poly
+ vmovdqu [%%GDATA + HashKey_8], %%T5
+ vpshufd %%T1, %%T5, 01001110b
+ vpxor %%T1, %%T5
+ vmovdqu [%%GDATA + HashKey_8_k], %%T1
+%endmacro
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; READ_SMALL_DATA_INPUT: Packs xmm register with data when data input is less than 16 bytes.
+; Returns 0 if data has length 0.
+; Input: The input data (INPUT), that data's length (LENGTH).
+; Output: The packed xmm register (OUTPUT).
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
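+; Implementation note: when LENGTH >= 8 the low quadword is read directly from
+; INPUT, then any remaining bytes are gathered one at a time backwards from
+; INPUT + LENGTH - 1, so the macro never reads past INPUT + LENGTH.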
+%macro READ_SMALL_DATA_INPUT 6
+%define %%OUTPUT %1 ; %%OUTPUT is an xmm register
+%define %%INPUT %2
+%define %%LENGTH %3
+%define %%END_READ_LOCATION %4 ; All this and the lower inputs are temp registers
+%define %%COUNTER %5
+%define %%TMP1 %6
+
+ vpxor %%OUTPUT, %%OUTPUT
+ mov %%COUNTER, %%LENGTH
+ mov %%END_READ_LOCATION, %%INPUT
+ add %%END_READ_LOCATION, %%LENGTH
+ xor %%TMP1, %%TMP1
+
+
+ cmp %%COUNTER, 8
+ jl %%_byte_loop_2
+ vpinsrq %%OUTPUT, [%%INPUT],0 ;Read in 8 bytes if they exist
+ je %%_done
+
+ sub %%COUNTER, 8
+
+%%_byte_loop_1: ;Read in data 1 byte at a time while data is left
+ shl %%TMP1, 8 ;This loop handles when 8 bytes were already read in
+ dec %%END_READ_LOCATION
+ mov BYTE(%%TMP1), BYTE [%%END_READ_LOCATION]
+ dec %%COUNTER
+ jg %%_byte_loop_1
+ vpinsrq %%OUTPUT, %%TMP1, 1
+ jmp %%_done
+
+%%_byte_loop_2: ;Read in data 1 byte at a time while data is left
+ cmp %%COUNTER, 0
+ je %%_done
+ shl %%TMP1, 8 ;This loop handles when no bytes were already read in
+ dec %%END_READ_LOCATION
+ mov BYTE(%%TMP1), BYTE [%%END_READ_LOCATION]
+ dec %%COUNTER
+ jg %%_byte_loop_2
+ vpinsrq %%OUTPUT, %%TMP1, 0
+%%_done:
+
+%endmacro ; READ_SMALL_DATA_INPUT
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; CALC_AAD_HASH: Calculates the hash of the data which will not be encrypted.
+; Input: The input data (A_IN), that data's length (A_LEN), and the hash key (HASH_KEY).
+; Output: The hash of the data (AAD_HASH).
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
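+; Note: AAD is consumed 128 bytes at a time first (using HashKey_8 .. HashKey_1),
+; then 16 bytes at a time, and a final partial block (if any) is read with
+; READ_SMALL_DATA_INPUT and folded in with a single GHASH_MUL.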
+%macro CALC_AAD_HASH 15
+%define %%A_IN %1
+%define %%A_LEN %2
+%define %%AAD_HASH %3
+%define %%GDATA_KEY %4
+%define %%XTMP0 %5 ; xmm temp reg 5
+%define %%XTMP1 %6 ; xmm temp reg 6
+%define %%XTMP2 %7
+%define %%XTMP3 %8
+%define %%XTMP4 %9
+%define %%XTMP5 %10 ; xmm temp reg 10
+%define %%T1 %11 ; temp reg 1
+%define %%T2 %12
+%define %%T3 %13
+%define %%T4 %14
+%define %%T5 %15 ; temp reg 5
+
+
+ mov %%T1, %%A_IN ; T1 = AAD
+ mov %%T2, %%A_LEN ; T2 = aadLen
+ vpxor %%AAD_HASH, %%AAD_HASH
+
+%%_get_AAD_loop128:
+ cmp %%T2, 128
+ jl %%_exit_AAD_loop128
+
+ vmovdqu %%XTMP0, [%%T1 + 16*0]
+ vpshufb %%XTMP0, [rel SHUF_MASK]
+
+ vpxor %%XTMP0, %%AAD_HASH
+
+ vmovdqu %%XTMP5, [%%GDATA_KEY + HashKey_8]
+ vpclmulqdq %%XTMP1, %%XTMP0, %%XTMP5, 0x11 ; %%T1 = a1*b1
+ vpclmulqdq %%XTMP2, %%XTMP0, %%XTMP5, 0x00 ; %%T2 = a0*b0
+ vpclmulqdq %%XTMP3, %%XTMP0, %%XTMP5, 0x01 ; %%T3 = a1*b0
+ vpclmulqdq %%XTMP4, %%XTMP0, %%XTMP5, 0x10 ; %%T4 = a0*b1
+ vpxor %%XTMP3, %%XTMP3, %%XTMP4 ; %%T3 = a1*b0 + a0*b1
+
+%assign i 1
+%assign j 7
+%rep 7
+ vmovdqu %%XTMP0, [%%T1 + 16*i]
+ vpshufb %%XTMP0, [rel SHUF_MASK]
+
+ vmovdqu %%XTMP5, [%%GDATA_KEY + HashKey_ %+ j]
+ vpclmulqdq %%XTMP4, %%XTMP0, %%XTMP5, 0x11 ; %%T1 = T1 + a1*b1
+ vpxor %%XTMP1, %%XTMP1, %%XTMP4
+
+ vpclmulqdq %%XTMP4, %%XTMP0, %%XTMP5, 0x00 ; %%T2 = T2 + a0*b0
+ vpxor %%XTMP2, %%XTMP2, %%XTMP4
+
+ vpclmulqdq %%XTMP4, %%XTMP0, %%XTMP5, 0x01 ; %%T3 = T3 + a1*b0 + a0*b1
+ vpxor %%XTMP3, %%XTMP3, %%XTMP4
+ vpclmulqdq %%XTMP4, %%XTMP0, %%XTMP5, 0x10
+ vpxor %%XTMP3, %%XTMP3, %%XTMP4
+%assign i (i + 1)
+%assign j (j - 1)
+%endrep
+
+ vpslldq %%XTMP4, %%XTMP3, 8 ; shift-L 2 DWs
+ vpsrldq %%XTMP3, %%XTMP3, 8 ; shift-R 2 DWs
+ vpxor %%XTMP2, %%XTMP2, %%XTMP4
+ vpxor %%XTMP1, %%XTMP1, %%XTMP3 ; accumulate the results in %%T1(M):%%T2(L)
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;first phase of the reduction
+ vmovdqa %%XTMP5, [rel POLY2]
+ vpclmulqdq %%XTMP0, %%XTMP5, %%XTMP2, 0x01
+ vpslldq %%XTMP0, %%XTMP0, 8 ; shift-L xmm2 2 DWs
+ vpxor %%XTMP2, %%XTMP2, %%XTMP0 ; first phase of the reduction complete
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;second phase of the reduction
+ vpclmulqdq %%XTMP3, %%XTMP5, %%XTMP2, 0x00
+ vpsrldq %%XTMP3, %%XTMP3, 4 ; shift-R 1 DW (Shift-R only 1-DW to obtain 2-DWs shift-R)
+
+ vpclmulqdq %%XTMP4, %%XTMP5, %%XTMP2, 0x10
+ vpslldq %%XTMP4, %%XTMP4, 4 ; shift-L 1 DW (Shift-L 1-DW to obtain result with no shifts)
+
+ vpxor %%XTMP4, %%XTMP4, %%XTMP3 ; second phase of the reduction complete
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ vpxor %%AAD_HASH, %%XTMP1, %%XTMP4 ; the result is in %%T1
+
+ sub %%T2, 128
+ je %%_CALC_AAD_done
+
+ add %%T1, 128
+ jmp %%_get_AAD_loop128
+
+%%_exit_AAD_loop128:
+ cmp %%T2, 16
+ jl %%_get_small_AAD_block
+
+ ;; calculate hash_key position to start with
+ mov %%T3, %%T2
+ and %%T3, -16 ; 1 to 7 blocks possible here
+ neg %%T3
+ add %%T3, HashKey_1 + 16
+ lea %%T3, [%%GDATA_KEY + %%T3]
+
+ vmovdqu %%XTMP0, [%%T1]
+ vpshufb %%XTMP0, [rel SHUF_MASK]
+
+ vpxor %%XTMP0, %%AAD_HASH
+
+ vmovdqu %%XTMP5, [%%T3]
+ vpclmulqdq %%XTMP1, %%XTMP0, %%XTMP5, 0x11 ; %%T1 = a1*b1
+ vpclmulqdq %%XTMP2, %%XTMP0, %%XTMP5, 0x00 ; %%T2 = a0*b0
+ vpclmulqdq %%XTMP3, %%XTMP0, %%XTMP5, 0x01 ; %%T3 = a1*b0
+ vpclmulqdq %%XTMP4, %%XTMP0, %%XTMP5, 0x10 ; %%T4 = a0*b1
+ vpxor %%XTMP3, %%XTMP3, %%XTMP4 ; %%T3 = a1*b0 + a0*b1
+
+ add %%T3, 16 ; move to next hashkey
+ add %%T1, 16 ; move to next data block
+ sub %%T2, 16
+ cmp %%T2, 16
+ jl %%_AAD_reduce
+
+%%_AAD_blocks:
+ vmovdqu %%XTMP0, [%%T1]
+ vpshufb %%XTMP0, [rel SHUF_MASK]
+
+ vmovdqu %%XTMP5, [%%T3]
+ vpclmulqdq %%XTMP4, %%XTMP0, %%XTMP5, 0x11 ; %%T1 = T1 + a1*b1
+ vpxor %%XTMP1, %%XTMP1, %%XTMP4
+
+ vpclmulqdq %%XTMP4, %%XTMP0, %%XTMP5, 0x00 ; %%T2 = T2 + a0*b0
+ vpxor %%XTMP2, %%XTMP2, %%XTMP4
+
+ vpclmulqdq %%XTMP4, %%XTMP0, %%XTMP5, 0x01 ; %%T3 = T3 + a1*b0 + a0*b1
+ vpxor %%XTMP3, %%XTMP3, %%XTMP4
+ vpclmulqdq %%XTMP4, %%XTMP0, %%XTMP5, 0x10
+ vpxor %%XTMP3, %%XTMP3, %%XTMP4
+
+ add %%T3, 16 ; move to next hashkey
+ add %%T1, 16
+ sub %%T2, 16
+ cmp %%T2, 16
+ jl %%_AAD_reduce
+ jmp %%_AAD_blocks
+
+%%_AAD_reduce:
+ vpslldq %%XTMP4, %%XTMP3, 8 ; shift-L 2 DWs
+ vpsrldq %%XTMP3, %%XTMP3, 8 ; shift-R 2 DWs
+ vpxor %%XTMP2, %%XTMP2, %%XTMP4
+ vpxor %%XTMP1, %%XTMP1, %%XTMP3 ; accumulate the results in %%T1(M):%%T2(L)
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;first phase of the reduction
+ vmovdqa %%XTMP5, [rel POLY2]
+ vpclmulqdq %%XTMP0, %%XTMP5, %%XTMP2, 0x01
+ vpslldq %%XTMP0, %%XTMP0, 8 ; shift-L xmm2 2 DWs
+ vpxor %%XTMP2, %%XTMP2, %%XTMP0 ; first phase of the reduction complete
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;second phase of the reduction
+ vpclmulqdq %%XTMP3, %%XTMP5, %%XTMP2, 0x00
+ vpsrldq %%XTMP3, %%XTMP3, 4 ; shift-R 1 DW (Shift-R only 1-DW to obtain 2-DWs shift-R)
+
+ vpclmulqdq %%XTMP4, %%XTMP5, %%XTMP2, 0x10
+ vpslldq %%XTMP4, %%XTMP4, 4 ; shift-L 1 DW (Shift-L 1-DW to obtain result with no shifts)
+
+ vpxor %%XTMP4, %%XTMP4, %%XTMP3 ; second phase of the reduction complete
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ vpxor %%AAD_HASH, %%XTMP1, %%XTMP4 ; the result is in %%T1
+
+ or %%T2, %%T2
+ je %%_CALC_AAD_done
+
+%%_get_small_AAD_block:
+ vmovdqu %%XTMP0, [%%GDATA_KEY + HashKey]
+ READ_SMALL_DATA_INPUT %%XTMP1, %%T1, %%T2, %%T3, %%T4, %%T5
+ ;byte-reflect the AAD data
+ vpshufb %%XTMP1, [rel SHUF_MASK]
+ vpxor %%AAD_HASH, %%XTMP1
+ GHASH_MUL %%AAD_HASH, %%XTMP0, %%XTMP1, %%XTMP2, %%XTMP3, %%XTMP4, %%XTMP5
+
+%%_CALC_AAD_done:
+
+%endmacro ; CALC_AAD_HASH
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; PARTIAL_BLOCK: Handles encryption/decryption and the tag partial blocks between update calls.
+; Requires the input data be at least 1 byte long.
+; Input:
+; GDATA_KEY - struct gcm_key_data *
+; GDATA_CTX - struct gcm_context_data *
+; PLAIN_CYPH_IN - input text
+; PLAIN_CYPH_LEN - input text length
+; DATA_OFFSET - the current data offset
+; ENC_DEC - whether encoding or decoding
+; Output: A cypher of the first partial block (CYPH_PLAIN_OUT), and updated GDATA_CTX
+; Clobbers rax, r10, r12, r13, r15, xmm0, xmm1, xmm2, xmm3, xmm5, xmm6, xmm9, xmm10, xmm11, xmm13
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+%macro PARTIAL_BLOCK 8
+%define %%GDATA_KEY %1
+%define %%GDATA_CTX %2
+%define %%CYPH_PLAIN_OUT %3
+%define %%PLAIN_CYPH_IN %4
+%define %%PLAIN_CYPH_LEN %5
+%define %%DATA_OFFSET %6
+%define %%AAD_HASH %7
+%define %%ENC_DEC %8
+ mov r13, [%%GDATA_CTX + PBlockLen]
+ cmp r13, 0
+ je %%_partial_block_done ;Leave Macro if no partial blocks
+
+ cmp %%PLAIN_CYPH_LEN, 16 ;Read in input data without over reading
+ jl %%_fewer_than_16_bytes
+ VXLDR xmm1, [%%PLAIN_CYPH_IN] ;If more than 16 bytes of data, just fill the xmm register
+ jmp %%_data_read
+
+%%_fewer_than_16_bytes:
+ lea r10, [%%PLAIN_CYPH_IN + %%DATA_OFFSET]
+ READ_SMALL_DATA_INPUT xmm1, r10, %%PLAIN_CYPH_LEN, rax, r12, r15
+
+%%_data_read: ;Finished reading in data
+
+
+ vmovdqu xmm9, [%%GDATA_CTX + PBlockEncKey] ;xmm9 = my_ctx_data.partial_block_enc_key
+ vmovdqu xmm13, [%%GDATA_KEY + HashKey]
+
+ lea r12, [SHIFT_MASK]
+
+ cmp r13, rax
+ add r12, r13 ; adjust the shuffle mask pointer to be able to shift r13 bytes (16-r13 is the number of bytes in plaintext mod 16)
+ vmovdqu xmm2, [r12] ; get the appropriate shuffle mask
+ vpshufb xmm9, xmm2 ;shift right r13 bytes
+
+%ifidn %%ENC_DEC, DEC
+ vmovdqa xmm3, xmm1
+ vpxor xmm9, xmm1 ; Cyphertext XOR E(K, Yn)
+
+ mov r15, %%PLAIN_CYPH_LEN
+ add r15, r13
+ sub r15, 16 ;Set r15 to be the amount of data left in CYPH_PLAIN_IN after filling the block
+ jge %%_no_extra_mask_1 ;Determine if partial block is not being filled and shift mask accordingly
+ sub r12, r15
+%%_no_extra_mask_1:
+
+ vmovdqu xmm1, [r12 + ALL_F-SHIFT_MASK] ; get the appropriate mask to mask out bottom r13 bytes of xmm9
+ vpand xmm9, xmm1 ; mask out bottom r13 bytes of xmm9
+
+ vpand xmm3, xmm1
+ vpshufb xmm3, [SHUF_MASK]
+ vpshufb xmm3, xmm2
+ vpxor %%AAD_HASH, xmm3
+
+
+ cmp r15,0
+ jl %%_partial_incomplete_1
+
+ GHASH_MUL %%AAD_HASH, xmm13, xmm0, xmm10, xmm11, xmm5, xmm6 ;GHASH computation for the last <16 Byte block
+ xor rax,rax
+ mov [%%GDATA_CTX + PBlockLen], rax
+ jmp %%_dec_done
+%%_partial_incomplete_1:
+%ifidn __OUTPUT_FORMAT__, win64
+ mov rax, %%PLAIN_CYPH_LEN
+ add [%%GDATA_CTX + PBlockLen], rax
+%else
+ add [%%GDATA_CTX + PBlockLen], %%PLAIN_CYPH_LEN
+%endif
+%%_dec_done:
+ vmovdqu [%%GDATA_CTX + AadHash], %%AAD_HASH
+
+%else
+ vpxor xmm9, xmm1 ; Plaintext XOR E(K, Yn)
+
+ mov r15, %%PLAIN_CYPH_LEN
+ add r15, r13
+ sub r15, 16 ;Set r15 to be the amount of data left in CYPH_PLAIN_IN after filling the block
+ jge %%_no_extra_mask_2 ;Determine if partial block is not being filled and shift mask accordingly
+ sub r12, r15
+%%_no_extra_mask_2:
+
+ vmovdqu xmm1, [r12 + ALL_F-SHIFT_MASK] ; get the appropriate mask to mask out bottom r13 bytes of xmm9
+ vpand xmm9, xmm1 ; mask out bottom r13 bytes of xmm9
+
+ vpshufb xmm9, [SHUF_MASK]
+ vpshufb xmm9, xmm2
+ vpxor %%AAD_HASH, xmm9
+
+ cmp r15,0
+ jl %%_partial_incomplete_2
+
+ GHASH_MUL %%AAD_HASH, xmm13, xmm0, xmm10, xmm11, xmm5, xmm6 ;GHASH computation for the last <16 Byte block
+ xor rax,rax
+ mov [%%GDATA_CTX + PBlockLen], rax
+ jmp %%_encode_done
+%%_partial_incomplete_2:
+%ifidn __OUTPUT_FORMAT__, win64
+ mov rax, %%PLAIN_CYPH_LEN
+ add [%%GDATA_CTX + PBlockLen], rax
+%else
+ add [%%GDATA_CTX + PBlockLen], %%PLAIN_CYPH_LEN
+%endif
+%%_encode_done:
+ vmovdqu [%%GDATA_CTX + AadHash], %%AAD_HASH
+
+ vpshufb xmm9, [SHUF_MASK] ; shuffle xmm9 back to output as ciphertext
+ vpshufb xmm9, xmm2
+%endif
+
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ; output encrypted Bytes
+ cmp r15,0
+ jl %%_partial_fill
+ mov r12, r13
+ mov r13, 16
+ sub r13, r12 ; Set r13 to be the number of bytes to write out
+ jmp %%_count_set
+%%_partial_fill:
+ mov r13, %%PLAIN_CYPH_LEN
+%%_count_set:
+ vmovq rax, xmm9
+ cmp r13, 8
+ jle %%_less_than_8_bytes_left
+
+ mov [%%CYPH_PLAIN_OUT+ %%DATA_OFFSET], rax
+ add %%DATA_OFFSET, 8
+ vpsrldq xmm9, xmm9, 8
+ vmovq rax, xmm9
+ sub r13, 8
+%%_less_than_8_bytes_left:
+ mov BYTE [%%CYPH_PLAIN_OUT + %%DATA_OFFSET], al
+ add %%DATA_OFFSET, 1
+ shr rax, 8
+ sub r13, 1
+ jne %%_less_than_8_bytes_left
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+%%_partial_block_done:
+%endmacro ; PARTIAL_BLOCK
+
+
+; if a = number of total plaintext bytes
+; b = floor(a/16)
+; %%num_initial_blocks = b mod 8;
+; encrypt the initial %%num_initial_blocks blocks and apply ghash on the ciphertext
+; %%GDATA_CTX, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, r14 are used as pointers only, not modified.
+; Updated AAD_HASH is returned in %%T3
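+; (e.g. a = 100 plaintext bytes -> b = floor(100/16) = 6 -> %%num_initial_blocks = 6)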
+
+%macro INITIAL_BLOCKS 24
+%define %%GDATA_KEY %1
+%define %%GDATA_CTX %2
+%define %%CYPH_PLAIN_OUT %3
+%define %%PLAIN_CYPH_IN %4
+%define %%LENGTH %5
+%define %%DATA_OFFSET %6
+%define %%num_initial_blocks %7 ; can be 0, 1, 2, 3, 4, 5, 6 or 7
+%define %%T1 %8
+%define %%HASH_KEY %9
+%define %%T3 %10
+%define %%T4 %11
+%define %%T5 %12
+%define %%CTR %13
+%define %%XMM1 %14
+%define %%XMM2 %15
+%define %%XMM3 %16
+%define %%XMM4 %17
+%define %%XMM5 %18
+%define %%XMM6 %19
+%define %%XMM7 %20
+%define %%XMM8 %21
+%define %%T6 %22
+%define %%T_key %23
+%define %%ENC_DEC %24
+
+%assign i (8-%%num_initial_blocks)
+ vmovdqu reg(i), %%XMM8 ; move AAD_HASH to temp reg
+ ; start AES for %%num_initial_blocks blocks
+ vmovdqu %%CTR, [%%GDATA_CTX + CurCount] ; %%CTR = Y0
+
+
+%assign i (9-%%num_initial_blocks)
+%rep %%num_initial_blocks
+ vpaddd %%CTR, [ONE] ; INCR Y0
+ vmovdqa reg(i), %%CTR
+ vpshufb reg(i), [SHUF_MASK] ; perform a 16Byte swap
+%assign i (i+1)
+%endrep
+
+ vmovdqu %%T_key, [%%GDATA_KEY+16*0]
+%assign i (9-%%num_initial_blocks)
+%rep %%num_initial_blocks
+ vpxor reg(i),%%T_key
+%assign i (i+1)
+%endrep
+
+%assign j 1
+%rep NROUNDS
+ vmovdqu %%T_key, [%%GDATA_KEY+16*j]
+%assign i (9-%%num_initial_blocks)
+%rep %%num_initial_blocks
+ vaesenc reg(i),%%T_key
+%assign i (i+1)
+%endrep
+
+%assign j (j+1)
+%endrep ; NROUNDS
+
+
+vmovdqu %%T_key, [%%GDATA_KEY+16*j]
+%assign i (9-%%num_initial_blocks)
+%rep %%num_initial_blocks
+ vaesenclast reg(i),%%T_key
+%assign i (i+1)
+%endrep
+
+%assign i (9-%%num_initial_blocks)
+%rep %%num_initial_blocks
+ VXLDR %%T1, [%%PLAIN_CYPH_IN + %%DATA_OFFSET]
+ vpxor reg(i), %%T1
+ VXSTR [%%CYPH_PLAIN_OUT + %%DATA_OFFSET], reg(i) ; write back ciphertext for %%num_initial_blocks blocks
+ add %%DATA_OFFSET, 16
+ %ifidn %%ENC_DEC, DEC
+ vmovdqa reg(i), %%T1
+ %endif
+ vpshufb reg(i), [SHUF_MASK] ; prepare ciphertext for GHASH computations
+%assign i (i+1)
+%endrep
+
+
+%assign i (8-%%num_initial_blocks)
+%assign j (9-%%num_initial_blocks)
+
+%rep %%num_initial_blocks
+ vpxor reg(j), reg(i)
+ GHASH_MUL reg(j), %%HASH_KEY, %%T1, %%T3, %%T4, %%T5, %%T6 ; apply GHASH on %%num_initial_blocks blocks
+%assign i (i+1)
+%assign j (j+1)
+%endrep
+ ; %%XMM8 has the current Hash Value
+ vmovdqa %%T3, %%XMM8
+
+ cmp %%LENGTH, 128
+ jl %%_initial_blocks_done
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; Prepare 8 counter blocks and perform rounds of AES cipher on them, load plain/cipher text and
+; store cipher/plain text.
+; Keep 8 cipher text blocks for further GHASH computations (XMM1 - XMM8)
+; - combine current GHASH value into block 0 (XMM1)
+
+ vpaddd %%CTR, [ONE] ; INCR Y0
+ vmovdqa %%XMM1, %%CTR
+ vpshufb %%XMM1, [SHUF_MASK] ; perform a 16Byte swap
+
+ vpaddd %%CTR, [ONE] ; INCR Y0
+ vmovdqa %%XMM2, %%CTR
+ vpshufb %%XMM2, [SHUF_MASK] ; perform a 16Byte swap
+
+ vpaddd %%CTR, [ONE] ; INCR Y0
+ vmovdqa %%XMM3, %%CTR
+ vpshufb %%XMM3, [SHUF_MASK] ; perform a 16Byte swap
+
+ vpaddd %%CTR, [ONE] ; INCR Y0
+ vmovdqa %%XMM4, %%CTR
+ vpshufb %%XMM4, [SHUF_MASK] ; perform a 16Byte swap
+
+ vpaddd %%CTR, [ONE] ; INCR Y0
+ vmovdqa %%XMM5, %%CTR
+ vpshufb %%XMM5, [SHUF_MASK] ; perform a 16Byte swap
+
+ vpaddd %%CTR, [ONE] ; INCR Y0
+ vmovdqa %%XMM6, %%CTR
+ vpshufb %%XMM6, [SHUF_MASK] ; perform a 16Byte swap
+
+ vpaddd %%CTR, [ONE] ; INCR Y0
+ vmovdqa %%XMM7, %%CTR
+ vpshufb %%XMM7, [SHUF_MASK] ; perform a 16Byte swap
+
+ vpaddd %%CTR, [ONE] ; INCR Y0
+ vmovdqa %%XMM8, %%CTR
+ vpshufb %%XMM8, [SHUF_MASK] ; perform a 16Byte swap
+
+ vmovdqu %%T_key, [%%GDATA_KEY+16*0]
+ vpxor %%XMM1, %%T_key
+ vpxor %%XMM2, %%T_key
+ vpxor %%XMM3, %%T_key
+ vpxor %%XMM4, %%T_key
+ vpxor %%XMM5, %%T_key
+ vpxor %%XMM6, %%T_key
+ vpxor %%XMM7, %%T_key
+ vpxor %%XMM8, %%T_key
+
+
+%assign i 1
+%rep NROUNDS
+ vmovdqu %%T_key, [%%GDATA_KEY+16*i]
+ vaesenc %%XMM1, %%T_key
+ vaesenc %%XMM2, %%T_key
+ vaesenc %%XMM3, %%T_key
+ vaesenc %%XMM4, %%T_key
+ vaesenc %%XMM5, %%T_key
+ vaesenc %%XMM6, %%T_key
+ vaesenc %%XMM7, %%T_key
+ vaesenc %%XMM8, %%T_key
+%assign i (i+1)
+%endrep
+
+
+ vmovdqu %%T_key, [%%GDATA_KEY+16*i]
+ vaesenclast %%XMM1, %%T_key
+ vaesenclast %%XMM2, %%T_key
+ vaesenclast %%XMM3, %%T_key
+ vaesenclast %%XMM4, %%T_key
+ vaesenclast %%XMM5, %%T_key
+ vaesenclast %%XMM6, %%T_key
+ vaesenclast %%XMM7, %%T_key
+ vaesenclast %%XMM8, %%T_key
+
+ VXLDR %%T1, [%%PLAIN_CYPH_IN + %%DATA_OFFSET + 16*0]
+ vpxor %%XMM1, %%T1
+ VXSTR [%%CYPH_PLAIN_OUT + %%DATA_OFFSET + 16*0], %%XMM1
+ %ifidn %%ENC_DEC, DEC
+ vmovdqa %%XMM1, %%T1
+ %endif
+
+ VXLDR %%T1, [%%PLAIN_CYPH_IN + %%DATA_OFFSET + 16*1]
+ vpxor %%XMM2, %%T1
+ VXSTR [%%CYPH_PLAIN_OUT + %%DATA_OFFSET + 16*1], %%XMM2
+ %ifidn %%ENC_DEC, DEC
+ vmovdqa %%XMM2, %%T1
+ %endif
+
+ VXLDR %%T1, [%%PLAIN_CYPH_IN + %%DATA_OFFSET + 16*2]
+ vpxor %%XMM3, %%T1
+ VXSTR [%%CYPH_PLAIN_OUT + %%DATA_OFFSET + 16*2], %%XMM3
+ %ifidn %%ENC_DEC, DEC
+ vmovdqa %%XMM3, %%T1
+ %endif
+
+ VXLDR %%T1, [%%PLAIN_CYPH_IN + %%DATA_OFFSET + 16*3]
+ vpxor %%XMM4, %%T1
+ VXSTR [%%CYPH_PLAIN_OUT + %%DATA_OFFSET + 16*3], %%XMM4
+ %ifidn %%ENC_DEC, DEC
+ vmovdqa %%XMM4, %%T1
+ %endif
+
+ VXLDR %%T1, [%%PLAIN_CYPH_IN + %%DATA_OFFSET + 16*4]
+ vpxor %%XMM5, %%T1
+ VXSTR [%%CYPH_PLAIN_OUT + %%DATA_OFFSET + 16*4], %%XMM5
+ %ifidn %%ENC_DEC, DEC
+ vmovdqa %%XMM5, %%T1
+ %endif
+
+ VXLDR %%T1, [%%PLAIN_CYPH_IN + %%DATA_OFFSET + 16*5]
+ vpxor %%XMM6, %%T1
+ VXSTR [%%CYPH_PLAIN_OUT + %%DATA_OFFSET + 16*5], %%XMM6
+ %ifidn %%ENC_DEC, DEC
+ vmovdqa %%XMM6, %%T1
+ %endif
+
+ VXLDR %%T1, [%%PLAIN_CYPH_IN + %%DATA_OFFSET + 16*6]
+ vpxor %%XMM7, %%T1
+ VXSTR [%%CYPH_PLAIN_OUT + %%DATA_OFFSET + 16*6], %%XMM7
+ %ifidn %%ENC_DEC, DEC
+ vmovdqa %%XMM7, %%T1
+ %endif
+
+ VXLDR %%T1, [%%PLAIN_CYPH_IN + %%DATA_OFFSET + 16*7]
+ vpxor %%XMM8, %%T1
+ VXSTR [%%CYPH_PLAIN_OUT + %%DATA_OFFSET + 16*7], %%XMM8
+ %ifidn %%ENC_DEC, DEC
+ vmovdqa %%XMM8, %%T1
+ %endif
+
+ add %%DATA_OFFSET, 128
+
+ vpshufb %%XMM1, [SHUF_MASK] ; perform a 16Byte swap
+ vpxor %%XMM1, %%T3 ; combine GHASHed value with the corresponding ciphertext
+ vpshufb %%XMM2, [SHUF_MASK] ; perform a 16Byte swap
+ vpshufb %%XMM3, [SHUF_MASK] ; perform a 16Byte swap
+ vpshufb %%XMM4, [SHUF_MASK] ; perform a 16Byte swap
+ vpshufb %%XMM5, [SHUF_MASK] ; perform a 16Byte swap
+ vpshufb %%XMM6, [SHUF_MASK] ; perform a 16Byte swap
+ vpshufb %%XMM7, [SHUF_MASK] ; perform a 16Byte swap
+ vpshufb %%XMM8, [SHUF_MASK] ; perform a 16Byte swap
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+%%_initial_blocks_done:
+
+
+%endmacro
+
+
+; encrypt 8 blocks at a time
+; ghash the 8 previously encrypted ciphertext blocks
+; %%GDATA - (GCM key data), %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN are used as pointers only, not modified
+; r11 is the data offset value
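+; The GHASH of the 8 previously produced blocks is interleaved with the AES rounds of the next
+; 8 counter blocks so the pclmul latency overlaps the AES work. Each block i is multiplied by
+; HashKey_i; the HashKey_i_k constants (presumably precomputed elsewhere as the XOR of the two
+; 64-bit halves of HashKey^i) supply the middle term of the Karatsuba multiplication.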
+%macro GHASH_8_ENCRYPT_8_PARALLEL 22
+%define %%GDATA %1
+%define %%CYPH_PLAIN_OUT %2
+%define %%PLAIN_CYPH_IN %3
+%define %%DATA_OFFSET %4
+%define %%T1 %5
+%define %%T2 %6
+%define %%T3 %7
+%define %%T4 %8
+%define %%T5 %9
+%define %%T6 %10
+%define %%CTR %11
+%define %%XMM1 %12
+%define %%XMM2 %13
+%define %%XMM3 %14
+%define %%XMM4 %15
+%define %%XMM5 %16
+%define %%XMM6 %17
+%define %%XMM7 %18
+%define %%XMM8 %19
+%define %%T7 %20
+%define %%loop_idx %21
+%define %%ENC_DEC %22
+
+ vmovdqa %%T2, %%XMM1
+ vmovdqu [rsp + TMP2], %%XMM2
+ vmovdqu [rsp + TMP3], %%XMM3
+ vmovdqu [rsp + TMP4], %%XMM4
+ vmovdqu [rsp + TMP5], %%XMM5
+ vmovdqu [rsp + TMP6], %%XMM6
+ vmovdqu [rsp + TMP7], %%XMM7
+ vmovdqu [rsp + TMP8], %%XMM8
+
+%ifidn %%loop_idx, in_order
+ vpaddd %%XMM1, %%CTR, [ONE] ; INCR CNT
+ vpaddd %%XMM2, %%XMM1, [ONE]
+ vpaddd %%XMM3, %%XMM2, [ONE]
+ vpaddd %%XMM4, %%XMM3, [ONE]
+ vpaddd %%XMM5, %%XMM4, [ONE]
+ vpaddd %%XMM6, %%XMM5, [ONE]
+ vpaddd %%XMM7, %%XMM6, [ONE]
+ vpaddd %%XMM8, %%XMM7, [ONE]
+ vmovdqa %%CTR, %%XMM8
+
+ vpshufb %%XMM1, [SHUF_MASK] ; perform a 16Byte swap
+ vpshufb %%XMM2, [SHUF_MASK] ; perform a 16Byte swap
+ vpshufb %%XMM3, [SHUF_MASK] ; perform a 16Byte swap
+ vpshufb %%XMM4, [SHUF_MASK] ; perform a 16Byte swap
+ vpshufb %%XMM5, [SHUF_MASK] ; perform a 16Byte swap
+ vpshufb %%XMM6, [SHUF_MASK] ; perform a 16Byte swap
+ vpshufb %%XMM7, [SHUF_MASK] ; perform a 16Byte swap
+ vpshufb %%XMM8, [SHUF_MASK] ; perform a 16Byte swap
+%else
+ vpaddd %%XMM1, %%CTR, [ONEf] ; INCR CNT
+ vpaddd %%XMM2, %%XMM1, [ONEf]
+ vpaddd %%XMM3, %%XMM2, [ONEf]
+ vpaddd %%XMM4, %%XMM3, [ONEf]
+ vpaddd %%XMM5, %%XMM4, [ONEf]
+ vpaddd %%XMM6, %%XMM5, [ONEf]
+ vpaddd %%XMM7, %%XMM6, [ONEf]
+ vpaddd %%XMM8, %%XMM7, [ONEf]
+ vmovdqa %%CTR, %%XMM8
+%endif
+
+
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+ vmovdqu %%T1, [%%GDATA + 16*0]
+ vpxor %%XMM1, %%T1
+ vpxor %%XMM2, %%T1
+ vpxor %%XMM3, %%T1
+ vpxor %%XMM4, %%T1
+ vpxor %%XMM5, %%T1
+ vpxor %%XMM6, %%T1
+ vpxor %%XMM7, %%T1
+ vpxor %%XMM8, %%T1
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+ vmovdqu %%T1, [%%GDATA + 16*1]
+ vaesenc %%XMM1, %%T1
+ vaesenc %%XMM2, %%T1
+ vaesenc %%XMM3, %%T1
+ vaesenc %%XMM4, %%T1
+ vaesenc %%XMM5, %%T1
+ vaesenc %%XMM6, %%T1
+ vaesenc %%XMM7, %%T1
+ vaesenc %%XMM8, %%T1
+
+
+ vmovdqu %%T1, [%%GDATA + 16*2]
+ vaesenc %%XMM1, %%T1
+ vaesenc %%XMM2, %%T1
+ vaesenc %%XMM3, %%T1
+ vaesenc %%XMM4, %%T1
+ vaesenc %%XMM5, %%T1
+ vaesenc %%XMM6, %%T1
+ vaesenc %%XMM7, %%T1
+ vaesenc %%XMM8, %%T1
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+ vmovdqu %%T5, [%%GDATA + HashKey_8]
+ vpclmulqdq %%T4, %%T2, %%T5, 0x11 ; %%T4 = a1*b1
+ vpclmulqdq %%T7, %%T2, %%T5, 0x00 ; %%T7 = a0*b0
+
+ vpshufd %%T6, %%T2, 01001110b
+ vpxor %%T6, %%T2
+
+ vmovdqu %%T5, [%%GDATA + HashKey_8_k]
+ vpclmulqdq %%T6, %%T6, %%T5, 0x00 ;
+
+
+ vmovdqu %%T1, [%%GDATA + 16*3]
+ vaesenc %%XMM1, %%T1
+ vaesenc %%XMM2, %%T1
+ vaesenc %%XMM3, %%T1
+ vaesenc %%XMM4, %%T1
+ vaesenc %%XMM5, %%T1
+ vaesenc %%XMM6, %%T1
+ vaesenc %%XMM7, %%T1
+ vaesenc %%XMM8, %%T1
+
+ vmovdqu %%T1, [rsp + TMP2]
+ vmovdqu %%T5, [%%GDATA + HashKey_7]
+ vpclmulqdq %%T3, %%T1, %%T5, 0x11
+ vpxor %%T4, %%T4, %%T3
+ vpclmulqdq %%T3, %%T1, %%T5, 0x00
+ vpxor %%T7, %%T7, %%T3
+
+ vpshufd %%T3, %%T1, 01001110b
+ vpxor %%T3, %%T1
+ vmovdqu %%T5, [%%GDATA + HashKey_7_k]
+ vpclmulqdq %%T3, %%T3, %%T5, 0x10
+ vpxor %%T6, %%T6, %%T3
+
+ vmovdqu %%T1, [%%GDATA + 16*4]
+ vaesenc %%XMM1, %%T1
+ vaesenc %%XMM2, %%T1
+ vaesenc %%XMM3, %%T1
+ vaesenc %%XMM4, %%T1
+ vaesenc %%XMM5, %%T1
+ vaesenc %%XMM6, %%T1
+ vaesenc %%XMM7, %%T1
+ vaesenc %%XMM8, %%T1
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ vmovdqu %%T1, [rsp + TMP3]
+ vmovdqu %%T5, [%%GDATA + HashKey_6]
+ vpclmulqdq %%T3, %%T1, %%T5, 0x11
+ vpxor %%T4, %%T4, %%T3
+ vpclmulqdq %%T3, %%T1, %%T5, 0x00
+ vpxor %%T7, %%T7, %%T3
+
+ vpshufd %%T3, %%T1, 01001110b
+ vpxor %%T3, %%T1
+ vmovdqu %%T5, [%%GDATA + HashKey_6_k]
+ vpclmulqdq %%T3, %%T3, %%T5, 0x10
+ vpxor %%T6, %%T6, %%T3
+
+ vmovdqu %%T1, [%%GDATA + 16*5]
+ vaesenc %%XMM1, %%T1
+ vaesenc %%XMM2, %%T1
+ vaesenc %%XMM3, %%T1
+ vaesenc %%XMM4, %%T1
+ vaesenc %%XMM5, %%T1
+ vaesenc %%XMM6, %%T1
+ vaesenc %%XMM7, %%T1
+ vaesenc %%XMM8, %%T1
+
+
+ vmovdqu %%T1, [rsp + TMP4]
+ vmovdqu %%T5, [%%GDATA + HashKey_5]
+ vpclmulqdq %%T3, %%T1, %%T5, 0x11
+ vpxor %%T4, %%T4, %%T3
+ vpclmulqdq %%T3, %%T1, %%T5, 0x00
+ vpxor %%T7, %%T7, %%T3
+
+ vpshufd %%T3, %%T1, 01001110b
+ vpxor %%T3, %%T1
+ vmovdqu %%T5, [%%GDATA + HashKey_5_k]
+ vpclmulqdq %%T3, %%T3, %%T5, 0x10
+ vpxor %%T6, %%T6, %%T3
+
+ vmovdqu %%T1, [%%GDATA + 16*6]
+ vaesenc %%XMM1, %%T1
+ vaesenc %%XMM2, %%T1
+ vaesenc %%XMM3, %%T1
+ vaesenc %%XMM4, %%T1
+ vaesenc %%XMM5, %%T1
+ vaesenc %%XMM6, %%T1
+ vaesenc %%XMM7, %%T1
+ vaesenc %%XMM8, %%T1
+
+ vmovdqu %%T1, [rsp + TMP5]
+ vmovdqu %%T5, [%%GDATA + HashKey_4]
+ vpclmulqdq %%T3, %%T1, %%T5, 0x11
+ vpxor %%T4, %%T4, %%T3
+ vpclmulqdq %%T3, %%T1, %%T5, 0x00
+ vpxor %%T7, %%T7, %%T3
+
+ vpshufd %%T3, %%T1, 01001110b
+ vpxor %%T3, %%T1
+ vmovdqu %%T5, [%%GDATA + HashKey_4_k]
+ vpclmulqdq %%T3, %%T3, %%T5, 0x10
+ vpxor %%T6, %%T6, %%T3
+
+
+ vmovdqu %%T1, [%%GDATA + 16*7]
+ vaesenc %%XMM1, %%T1
+ vaesenc %%XMM2, %%T1
+ vaesenc %%XMM3, %%T1
+ vaesenc %%XMM4, %%T1
+ vaesenc %%XMM5, %%T1
+ vaesenc %%XMM6, %%T1
+ vaesenc %%XMM7, %%T1
+ vaesenc %%XMM8, %%T1
+
+ vmovdqu %%T1, [rsp + TMP6]
+ vmovdqu %%T5, [%%GDATA + HashKey_3]
+ vpclmulqdq %%T3, %%T1, %%T5, 0x11
+ vpxor %%T4, %%T4, %%T3
+ vpclmulqdq %%T3, %%T1, %%T5, 0x00
+ vpxor %%T7, %%T7, %%T3
+
+ vpshufd %%T3, %%T1, 01001110b
+ vpxor %%T3, %%T1
+ vmovdqu %%T5, [%%GDATA + HashKey_3_k]
+ vpclmulqdq %%T3, %%T3, %%T5, 0x10
+ vpxor %%T6, %%T6, %%T3
+
+ vmovdqu %%T1, [%%GDATA + 16*8]
+ vaesenc %%XMM1, %%T1
+ vaesenc %%XMM2, %%T1
+ vaesenc %%XMM3, %%T1
+ vaesenc %%XMM4, %%T1
+ vaesenc %%XMM5, %%T1
+ vaesenc %%XMM6, %%T1
+ vaesenc %%XMM7, %%T1
+ vaesenc %%XMM8, %%T1
+
+ vmovdqu %%T1, [rsp + TMP7]
+ vmovdqu %%T5, [%%GDATA + HashKey_2]
+ vpclmulqdq %%T3, %%T1, %%T5, 0x11
+ vpxor %%T4, %%T4, %%T3
+ vpclmulqdq %%T3, %%T1, %%T5, 0x00
+ vpxor %%T7, %%T7, %%T3
+
+ vpshufd %%T3, %%T1, 01001110b
+ vpxor %%T3, %%T1
+ vmovdqu %%T5, [%%GDATA + HashKey_2_k]
+ vpclmulqdq %%T3, %%T3, %%T5, 0x10
+ vpxor %%T6, %%T6, %%T3
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+ vmovdqu %%T5, [%%GDATA + 16*9]
+ vaesenc %%XMM1, %%T5
+ vaesenc %%XMM2, %%T5
+ vaesenc %%XMM3, %%T5
+ vaesenc %%XMM4, %%T5
+ vaesenc %%XMM5, %%T5
+ vaesenc %%XMM6, %%T5
+ vaesenc %%XMM7, %%T5
+ vaesenc %%XMM8, %%T5
+
+ vmovdqu %%T1, [rsp + TMP8]
+ vmovdqu %%T5, [%%GDATA + HashKey]
+ vpclmulqdq %%T3, %%T1, %%T5, 0x11
+ vpxor %%T4, %%T4, %%T3
+ vpclmulqdq %%T3, %%T1, %%T5, 0x00
+ vpxor %%T7, %%T7, %%T3
+
+ vpshufd %%T3, %%T1, 01001110b
+ vpxor %%T3, %%T1
+ vmovdqu %%T5, [%%GDATA + HashKey_k]
+ vpclmulqdq %%T3, %%T3, %%T5, 0x10
+ vpxor %%T6, %%T6, %%T3
+
+ vpxor %%T6, %%T4
+ vpxor %%T6, %%T7
+
+%ifdef GCM128_MODE
+ vmovdqu %%T5, [%%GDATA + 16*10]
+%endif
+%ifdef GCM192_MODE
+ vmovdqu %%T5, [%%GDATA + 16*10]
+ vaesenc %%XMM1, %%T5
+ vaesenc %%XMM2, %%T5
+ vaesenc %%XMM3, %%T5
+ vaesenc %%XMM4, %%T5
+ vaesenc %%XMM5, %%T5
+ vaesenc %%XMM6, %%T5
+ vaesenc %%XMM7, %%T5
+ vaesenc %%XMM8, %%T5
+
+ vmovdqu %%T5, [%%GDATA + 16*11]
+ vaesenc %%XMM1, %%T5
+ vaesenc %%XMM2, %%T5
+ vaesenc %%XMM3, %%T5
+ vaesenc %%XMM4, %%T5
+ vaesenc %%XMM5, %%T5
+ vaesenc %%XMM6, %%T5
+ vaesenc %%XMM7, %%T5
+ vaesenc %%XMM8, %%T5
+
+ vmovdqu %%T5, [%%GDATA + 16*12]
+%endif
+%ifdef GCM256_MODE
+ vmovdqu %%T5, [%%GDATA + 16*10]
+ vaesenc %%XMM1, %%T5
+ vaesenc %%XMM2, %%T5
+ vaesenc %%XMM3, %%T5
+ vaesenc %%XMM4, %%T5
+ vaesenc %%XMM5, %%T5
+ vaesenc %%XMM6, %%T5
+ vaesenc %%XMM7, %%T5
+ vaesenc %%XMM8, %%T5
+
+ vmovdqu %%T5, [%%GDATA + 16*11]
+ vaesenc %%XMM1, %%T5
+ vaesenc %%XMM2, %%T5
+ vaesenc %%XMM3, %%T5
+ vaesenc %%XMM4, %%T5
+ vaesenc %%XMM5, %%T5
+ vaesenc %%XMM6, %%T5
+ vaesenc %%XMM7, %%T5
+ vaesenc %%XMM8, %%T5
+
+ vmovdqu %%T5, [%%GDATA + 16*12]
+ vaesenc %%XMM1, %%T5
+ vaesenc %%XMM2, %%T5
+ vaesenc %%XMM3, %%T5
+ vaesenc %%XMM4, %%T5
+ vaesenc %%XMM5, %%T5
+ vaesenc %%XMM6, %%T5
+ vaesenc %%XMM7, %%T5
+ vaesenc %%XMM8, %%T5
+
+ vmovdqu %%T5, [%%GDATA + 16*13]
+ vaesenc %%XMM1, %%T5
+ vaesenc %%XMM2, %%T5
+ vaesenc %%XMM3, %%T5
+ vaesenc %%XMM4, %%T5
+ vaesenc %%XMM5, %%T5
+ vaesenc %%XMM6, %%T5
+ vaesenc %%XMM7, %%T5
+ vaesenc %%XMM8, %%T5
+
+ vmovdqu %%T5, [%%GDATA + 16*14]
+%endif
+
+%assign i 0
+%assign j 1
+%rep 8
+
+%ifidn %%ENC_DEC, ENC
+%ifdef NT_LD
+ VXLDR %%T2, [%%PLAIN_CYPH_IN + %%DATA_OFFSET + 16*i]
+ vpxor %%T2, %%T2, %%T5
+%else
+ vpxor %%T2, %%T5, [%%PLAIN_CYPH_IN + %%DATA_OFFSET + 16*i]
+%endif ; NT_LD
+ vaesenclast reg(j), reg(j), %%T2
+%else
+ VXLDR %%T2, [%%PLAIN_CYPH_IN + %%DATA_OFFSET + 16*i]
+ vpxor %%T2, %%T2, %%T5
+ vaesenclast %%T3, reg(j), %%T2
+ vpxor reg(j), %%T2, %%T5
+ VXSTR [%%CYPH_PLAIN_OUT + %%DATA_OFFSET + 16*i], %%T3
+%endif ; %%ENC_DEC
+
+%assign i (i+1)
+%assign j (j+1)
+%endrep
+
+ vpslldq %%T3, %%T6, 8 ; shift-L %%T3 2 DWs
+        vpsrldq %%T6, %%T6, 8                   ; shift-R %%T6 2 DWs
+ vpxor %%T7, %%T3
+ vpxor %%T6, %%T4 ; accumulate the results in %%T6:%%T7
+
+
+ ;first phase of the reduction
+
+        vpslld  %%T2, %%T7, 31                  ; packed left shift << 31
+        vpslld  %%T3, %%T7, 30                  ; packed left shift << 30
+        vpslld  %%T4, %%T7, 25                  ; packed left shift << 25
+
+ vpxor %%T2, %%T2, %%T3 ; xor the shifted versions
+ vpxor %%T2, %%T2, %%T4
+
+ vpsrldq %%T1, %%T2, 4 ; shift-R %%T1 1 DW
+
+ vpslldq %%T2, %%T2, 12 ; shift-L %%T2 3 DWs
+ vpxor %%T7, %%T2 ; first phase of the reduction complete
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ %ifidn %%ENC_DEC, ENC
+ VXSTR [%%CYPH_PLAIN_OUT+%%DATA_OFFSET+16*0], %%XMM1 ; Write to the Ciphertext buffer
+ VXSTR [%%CYPH_PLAIN_OUT+%%DATA_OFFSET+16*1], %%XMM2 ; Write to the Ciphertext buffer
+ VXSTR [%%CYPH_PLAIN_OUT+%%DATA_OFFSET+16*2], %%XMM3 ; Write to the Ciphertext buffer
+ VXSTR [%%CYPH_PLAIN_OUT+%%DATA_OFFSET+16*3], %%XMM4 ; Write to the Ciphertext buffer
+ VXSTR [%%CYPH_PLAIN_OUT+%%DATA_OFFSET+16*4], %%XMM5 ; Write to the Ciphertext buffer
+ VXSTR [%%CYPH_PLAIN_OUT+%%DATA_OFFSET+16*5], %%XMM6 ; Write to the Ciphertext buffer
+ VXSTR [%%CYPH_PLAIN_OUT+%%DATA_OFFSET+16*6], %%XMM7 ; Write to the Ciphertext buffer
+ VXSTR [%%CYPH_PLAIN_OUT+%%DATA_OFFSET+16*7], %%XMM8 ; Write to the Ciphertext buffer
+ %endif
+
+ ;second phase of the reduction
+
+        vpsrld  %%T2,%%T7,1                     ; packed right shift >> 1
+        vpsrld  %%T3,%%T7,2                     ; packed right shift >> 2
+        vpsrld  %%T4,%%T7,7                     ; packed right shift >> 7
+ vpxor %%T2, %%T2,%%T3 ; xor the shifted versions
+ vpxor %%T2, %%T2,%%T4
+
+ vpxor %%T2, %%T2, %%T1
+ vpxor %%T7, %%T7, %%T2
+ vpxor %%T6, %%T6, %%T7 ; the result is in %%T6
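+        ; Note: the two-phase shift/XOR sequence above reduces the 256-bit carry-less product
+        ; modulo the GCM polynomial x^128 + x^7 + x^2 + x + 1 in its bit-reflected form,
+        ; which is where the shift counts 31/30/25 and 1/2/7 come from.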
+
+
+
+ vpshufb %%XMM1, [SHUF_MASK] ; perform a 16Byte swap
+ vpshufb %%XMM2, [SHUF_MASK]
+ vpshufb %%XMM3, [SHUF_MASK]
+ vpshufb %%XMM4, [SHUF_MASK]
+ vpshufb %%XMM5, [SHUF_MASK]
+ vpshufb %%XMM6, [SHUF_MASK]
+ vpshufb %%XMM7, [SHUF_MASK]
+ vpshufb %%XMM8, [SHUF_MASK]
+
+
+ vpxor %%XMM1, %%T6
+
+%endmacro
+
+
+; GHASH the last 8 ciphertext blocks.
+; %%GDATA is GCM key data
+%macro GHASH_LAST_8 16
+%define %%GDATA %1
+%define %%T1 %2
+%define %%T2 %3
+%define %%T3 %4
+%define %%T4 %5
+%define %%T5 %6
+%define %%T6 %7
+%define %%T7 %8
+%define %%XMM1 %9
+%define %%XMM2 %10
+%define %%XMM3 %11
+%define %%XMM4 %12
+%define %%XMM5 %13
+%define %%XMM6 %14
+%define %%XMM7 %15
+%define %%XMM8 %16
+ ;; Karatsuba Method
+
+
+ vpshufd %%T2, %%XMM1, 01001110b
+ vpxor %%T2, %%XMM1
+ vmovdqu %%T5, [%%GDATA + HashKey_8]
+ vpclmulqdq %%T6, %%XMM1, %%T5, 0x11
+ vpclmulqdq %%T7, %%XMM1, %%T5, 0x00
+
+ vmovdqu %%T3, [%%GDATA + HashKey_8_k]
+ vpclmulqdq %%XMM1, %%T2, %%T3, 0x00
+
+
+ ;;;;;;;;;;;;;;;;;;;;;;
+
+
+ vpshufd %%T2, %%XMM2, 01001110b
+ vpxor %%T2, %%XMM2
+ vmovdqu %%T5, [%%GDATA + HashKey_7]
+ vpclmulqdq %%T4, %%XMM2, %%T5, 0x11
+ vpxor %%T6, %%T6, %%T4
+
+ vpclmulqdq %%T4, %%XMM2, %%T5, 0x00
+ vpxor %%T7, %%T7, %%T4
+
+ vmovdqu %%T3, [%%GDATA + HashKey_7_k]
+ vpclmulqdq %%T2, %%T2, %%T3, 0x00
+ vpxor %%XMM1, %%XMM1, %%T2
+
+ ;;;;;;;;;;;;;;;;;;;;;;
+
+
+ vpshufd %%T2, %%XMM3, 01001110b
+ vpxor %%T2, %%XMM3
+ vmovdqu %%T5, [%%GDATA + HashKey_6]
+ vpclmulqdq %%T4, %%XMM3, %%T5, 0x11
+ vpxor %%T6, %%T6, %%T4
+
+ vpclmulqdq %%T4, %%XMM3, %%T5, 0x00
+ vpxor %%T7, %%T7, %%T4
+
+ vmovdqu %%T3, [%%GDATA + HashKey_6_k]
+ vpclmulqdq %%T2, %%T2, %%T3, 0x00
+ vpxor %%XMM1, %%XMM1, %%T2
+
+ ;;;;;;;;;;;;;;;;;;;;;;
+
+
+ vpshufd %%T2, %%XMM4, 01001110b
+ vpxor %%T2, %%XMM4
+ vmovdqu %%T5, [%%GDATA + HashKey_5]
+ vpclmulqdq %%T4, %%XMM4, %%T5, 0x11
+ vpxor %%T6, %%T6, %%T4
+
+ vpclmulqdq %%T4, %%XMM4, %%T5, 0x00
+ vpxor %%T7, %%T7, %%T4
+
+ vmovdqu %%T3, [%%GDATA + HashKey_5_k]
+ vpclmulqdq %%T2, %%T2, %%T3, 0x00
+ vpxor %%XMM1, %%XMM1, %%T2
+
+ ;;;;;;;;;;;;;;;;;;;;;;
+
+ vpshufd %%T2, %%XMM5, 01001110b
+ vpxor %%T2, %%XMM5
+ vmovdqu %%T5, [%%GDATA + HashKey_4]
+ vpclmulqdq %%T4, %%XMM5, %%T5, 0x11
+ vpxor %%T6, %%T6, %%T4
+
+ vpclmulqdq %%T4, %%XMM5, %%T5, 0x00
+ vpxor %%T7, %%T7, %%T4
+
+ vmovdqu %%T3, [%%GDATA + HashKey_4_k]
+ vpclmulqdq %%T2, %%T2, %%T3, 0x00
+ vpxor %%XMM1, %%XMM1, %%T2
+
+ ;;;;;;;;;;;;;;;;;;;;;;
+
+ vpshufd %%T2, %%XMM6, 01001110b
+ vpxor %%T2, %%XMM6
+ vmovdqu %%T5, [%%GDATA + HashKey_3]
+
+ vpclmulqdq %%T4, %%XMM6, %%T5, 0x11
+ vpxor %%T6, %%T6, %%T4
+
+ vpclmulqdq %%T4, %%XMM6, %%T5, 0x00
+ vpxor %%T7, %%T7, %%T4
+
+ vmovdqu %%T3, [%%GDATA + HashKey_3_k]
+ vpclmulqdq %%T2, %%T2, %%T3, 0x00
+ vpxor %%XMM1, %%XMM1, %%T2
+
+ ;;;;;;;;;;;;;;;;;;;;;;
+
+ vpshufd %%T2, %%XMM7, 01001110b
+ vpxor %%T2, %%XMM7
+ vmovdqu %%T5, [%%GDATA + HashKey_2]
+ vpclmulqdq %%T4, %%XMM7, %%T5, 0x11
+ vpxor %%T6, %%T6, %%T4
+
+ vpclmulqdq %%T4, %%XMM7, %%T5, 0x00
+ vpxor %%T7, %%T7, %%T4
+
+ vmovdqu %%T3, [%%GDATA + HashKey_2_k]
+ vpclmulqdq %%T2, %%T2, %%T3, 0x00
+ vpxor %%XMM1, %%XMM1, %%T2
+
+ ;;;;;;;;;;;;;;;;;;;;;;
+
+ vpshufd %%T2, %%XMM8, 01001110b
+ vpxor %%T2, %%XMM8
+ vmovdqu %%T5, [%%GDATA + HashKey]
+ vpclmulqdq %%T4, %%XMM8, %%T5, 0x11
+ vpxor %%T6, %%T6, %%T4
+
+ vpclmulqdq %%T4, %%XMM8, %%T5, 0x00
+ vpxor %%T7, %%T7, %%T4
+
+ vmovdqu %%T3, [%%GDATA + HashKey_k]
+ vpclmulqdq %%T2, %%T2, %%T3, 0x00
+
+ vpxor %%XMM1, %%XMM1, %%T2
+ vpxor %%XMM1, %%XMM1, %%T6
+ vpxor %%T2, %%XMM1, %%T7
+
+
+
+
+ vpslldq %%T4, %%T2, 8
+ vpsrldq %%T2, %%T2, 8
+
+ vpxor %%T7, %%T4
+ vpxor %%T6, %%T2 ; <%%T6:%%T7> holds the result of the accumulated carry-less multiplications
+
+ ;first phase of the reduction
+
+        vpslld  %%T2, %%T7, 31                  ; packed left shift << 31
+        vpslld  %%T3, %%T7, 30                  ; packed left shift << 30
+        vpslld  %%T4, %%T7, 25                  ; packed left shift << 25
+
+ vpxor %%T2, %%T2, %%T3 ; xor the shifted versions
+ vpxor %%T2, %%T2, %%T4
+
+ vpsrldq %%T1, %%T2, 4 ; shift-R %%T1 1 DW
+
+ vpslldq %%T2, %%T2, 12 ; shift-L %%T2 3 DWs
+ vpxor %%T7, %%T2 ; first phase of the reduction complete
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+ ;second phase of the reduction
+
+        vpsrld  %%T2,%%T7,1                     ; packed right shift >> 1
+        vpsrld  %%T3,%%T7,2                     ; packed right shift >> 2
+        vpsrld  %%T4,%%T7,7                     ; packed right shift >> 7
+ vpxor %%T2, %%T2,%%T3 ; xor the shifted versions
+ vpxor %%T2, %%T2,%%T4
+
+ vpxor %%T2, %%T2, %%T1
+ vpxor %%T7, %%T7, %%T2
+ vpxor %%T6, %%T6, %%T7 ; the result is in %%T6
+
+
+%endmacro
+
+
+; Encryption of a single block
+; %%GDATA is GCM key data
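+; NROUNDS is expected to be defined per key size by the including file (9, 11 or 13 for
+; AES-128/192/256, matching the GCM128/192/256_MODE round-key offsets used above); the
+; final round is applied separately with vaesenclast, for 10/12/14 rounds in total.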
+%macro ENCRYPT_SINGLE_BLOCK 2
+%define %%GDATA %1
+%define %%XMM0 %2
+
+ vpxor %%XMM0, [%%GDATA+16*0]
+%assign i 1
+%rep NROUNDS
+ vaesenc %%XMM0, [%%GDATA+16*i]
+%assign i (i+1)
+%endrep ; NROUNDS
+ vaesenclast %%XMM0, [%%GDATA+16*i]
+%endmacro
+
+
+;; Start of Stack Setup
+
+%macro FUNC_SAVE 0
+        ;; Required for Update/GCM_ENC
+ ;the number of pushes must equal STACK_OFFSET
+ push r12
+ push r13
+ push r14
+ push r15
+ mov r14, rsp
+
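+        ;; carve out VARIABLE_OFFSET bytes of local storage and align it to 64 bytes;
+        ;; the original rsp is kept in r14 and restored by FUNC_RESTORE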
+ sub rsp, VARIABLE_OFFSET
+ and rsp, ~63
+
+%ifidn __OUTPUT_FORMAT__, win64
+ ; xmm6:xmm15 need to be maintained for Windows
+ vmovdqu [rsp + LOCAL_STORAGE + 0*16],xmm6
+ vmovdqu [rsp + LOCAL_STORAGE + 1*16],xmm7
+ vmovdqu [rsp + LOCAL_STORAGE + 2*16],xmm8
+ vmovdqu [rsp + LOCAL_STORAGE + 3*16],xmm9
+ vmovdqu [rsp + LOCAL_STORAGE + 4*16],xmm10
+ vmovdqu [rsp + LOCAL_STORAGE + 5*16],xmm11
+ vmovdqu [rsp + LOCAL_STORAGE + 6*16],xmm12
+ vmovdqu [rsp + LOCAL_STORAGE + 7*16],xmm13
+ vmovdqu [rsp + LOCAL_STORAGE + 8*16],xmm14
+ vmovdqu [rsp + LOCAL_STORAGE + 9*16],xmm15
+%endif
+%endmacro
+
+
+%macro FUNC_RESTORE 0
+
+%ifdef SAFE_DATA
+ clear_scratch_gps_asm
+ clear_scratch_xmms_avx_asm
+%endif
+%ifidn __OUTPUT_FORMAT__, win64
+ vmovdqu xmm15 , [rsp + LOCAL_STORAGE + 9*16]
+ vmovdqu xmm14 , [rsp + LOCAL_STORAGE + 8*16]
+ vmovdqu xmm13 , [rsp + LOCAL_STORAGE + 7*16]
+ vmovdqu xmm12 , [rsp + LOCAL_STORAGE + 6*16]
+ vmovdqu xmm11 , [rsp + LOCAL_STORAGE + 5*16]
+ vmovdqu xmm10 , [rsp + LOCAL_STORAGE + 4*16]
+ vmovdqu xmm9 , [rsp + LOCAL_STORAGE + 3*16]
+ vmovdqu xmm8 , [rsp + LOCAL_STORAGE + 2*16]
+ vmovdqu xmm7 , [rsp + LOCAL_STORAGE + 1*16]
+ vmovdqu xmm6 , [rsp + LOCAL_STORAGE + 0*16]
+%endif
+
+;; Required for Update/GCM_ENC
+ mov rsp, r14
+ pop r15
+ pop r14
+ pop r13
+ pop r12
+%endmacro
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; GCM_INIT initializes a gcm_context_data struct to prepare for encoding/decoding.
+; Input: struct gcm_key_data *(GDATA_KEY), struct gcm_context_data *(GDATA_CTX),
+; IV, Additional Authentication data (A_IN), Additional
+; Data length (A_LEN)
+; Output: Updated GDATA_CTX with the hash of A_IN (AadHash) and its other fields initialized.
+; Clobbers rax, r10-r13, and xmm0-xmm6
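+; Note: the IV is assumed to be 96 bits long; Y0 is formed as IV || 0x00000001 (see the ONEf load below).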
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+%macro GCM_INIT 5
+%define %%GDATA_KEY %1
+%define %%GDATA_CTX %2
+%define %%IV %3
+%define %%A_IN %4
+%define %%A_LEN %5
+%define %%AAD_HASH xmm0
+
+ CALC_AAD_HASH %%A_IN, %%A_LEN, %%AAD_HASH, %%GDATA_KEY, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, r10, r11, r12, r13, rax
+ vpxor xmm2, xmm3
+ mov r10, %%A_LEN
+
+ vmovdqu [%%GDATA_CTX + AadHash], %%AAD_HASH ; ctx_data.aad hash = aad_hash
+ mov [%%GDATA_CTX + AadLen], r10 ; ctx_data.aad_length = aad_length
+ xor r10, r10
+ mov [%%GDATA_CTX + InLen], r10 ; ctx_data.in_length = 0
+ mov [%%GDATA_CTX + PBlockLen], r10 ; ctx_data.partial_block_length = 0
+ vmovdqu [%%GDATA_CTX + PBlockEncKey], xmm2 ; ctx_data.partial_block_enc_key = 0
+ mov r10, %%IV
+ vmovdqa xmm2, [rel ONEf] ; read 12 IV bytes and pad with 0x00000001
+ vpinsrq xmm2, [r10], 0
+ vpinsrd xmm2, [r10+8], 2
+ vmovdqu [%%GDATA_CTX + OrigIV], xmm2 ; ctx_data.orig_IV = iv
+
+ vpshufb xmm2, [rel SHUF_MASK]
+
+ vmovdqu [%%GDATA_CTX + CurCount], xmm2 ; ctx_data.current_counter = iv
+%endmacro
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; GCM_ENC_DEC Encodes/Decodes given data. Assumes that the passed gcm_context_data struct
+; has been initialized by GCM_INIT
+; Requires that the input data be at least 1 byte long because of READ_SMALL_DATA_INPUT.
+; Input: struct gcm_key_data* (GDATA_KEY), struct gcm_context_data * (GDATA_CTX),
+; input text (PLAIN_CYPH_IN), input text length (PLAIN_CYPH_LEN),
+; and whether encoding or decoding (ENC_DEC)
+; Output: A cypher of the given plain text (CYPH_PLAIN_OUT), and updated GDATA_CTX
+; Clobbers rax, r10-r15, and xmm0-xmm15
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+%macro GCM_ENC_DEC 6
+%define %%GDATA_KEY %1
+%define %%GDATA_CTX %2
+%define %%CYPH_PLAIN_OUT %3
+%define %%PLAIN_CYPH_IN %4
+%define %%PLAIN_CYPH_LEN %5
+%define %%ENC_DEC %6
+%define %%DATA_OFFSET r11
+
+; Macro flow:
+; calculate the number of 16-byte blocks in the message
+; process (number of 16-byte blocks) mod 8 '%%_initial_num_blocks_is_# .. %%_initial_blocks_encrypted'
+; process 8 16-byte blocks at a time until all are done '%%_encrypt_by_8_new .. %%_eight_cipher_left'
+; if there is a block of less than 16 bytes, process it '%%_zero_cipher_left .. %%_multiple_of_16_bytes'
+ cmp %%PLAIN_CYPH_LEN, 0
+ je %%_multiple_of_16_bytes
+
+ xor %%DATA_OFFSET, %%DATA_OFFSET
+%ifidn __OUTPUT_FORMAT__, win64
+ mov rax, %%PLAIN_CYPH_LEN
+ add [%%GDATA_CTX + InLen], rax ; Update length of data processed
+%else
+ add [%%GDATA_CTX + InLen], %%PLAIN_CYPH_LEN ; Update length of data processed
+%endif
+ vmovdqu xmm13, [%%GDATA_KEY + HashKey] ; xmm13 = HashKey
+ vmovdqu xmm8, [%%GDATA_CTX + AadHash]
+
+
+ PARTIAL_BLOCK %%GDATA_KEY, %%GDATA_CTX, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, %%PLAIN_CYPH_LEN, %%DATA_OFFSET, xmm8, %%ENC_DEC
+
+
+ mov r13, %%PLAIN_CYPH_LEN
+ sub r13, %%DATA_OFFSET
+ mov r10, r13 ; save the amount of data left to process in r10
+ and r13, -16 ; r13 = r13 - (r13 mod 16)
+
+ mov r12, r13
+ shr r12, 4
+ and r12, 7
+
+ jz %%_initial_num_blocks_is_0
+
+ cmp r12, 7
+ je %%_initial_num_blocks_is_7
+ cmp r12, 6
+ je %%_initial_num_blocks_is_6
+ cmp r12, 5
+ je %%_initial_num_blocks_is_5
+ cmp r12, 4
+ je %%_initial_num_blocks_is_4
+ cmp r12, 3
+ je %%_initial_num_blocks_is_3
+ cmp r12, 2
+ je %%_initial_num_blocks_is_2
+
+ jmp %%_initial_num_blocks_is_1
+
+%%_initial_num_blocks_is_7:
+ INITIAL_BLOCKS %%GDATA_KEY, %%GDATA_CTX, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, r13, %%DATA_OFFSET, 7, xmm12, xmm13, xmm14, xmm15, xmm11, xmm9, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm10, xmm0, %%ENC_DEC
+ sub r13, 16*7
+ jmp %%_initial_blocks_encrypted
+
+%%_initial_num_blocks_is_6:
+ INITIAL_BLOCKS %%GDATA_KEY, %%GDATA_CTX, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, r13, %%DATA_OFFSET, 6, xmm12, xmm13, xmm14, xmm15, xmm11, xmm9, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm10, xmm0, %%ENC_DEC
+ sub r13, 16*6
+ jmp %%_initial_blocks_encrypted
+
+%%_initial_num_blocks_is_5:
+ INITIAL_BLOCKS %%GDATA_KEY, %%GDATA_CTX, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, r13, %%DATA_OFFSET, 5, xmm12, xmm13, xmm14, xmm15, xmm11, xmm9, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm10, xmm0, %%ENC_DEC
+ sub r13, 16*5
+ jmp %%_initial_blocks_encrypted
+
+%%_initial_num_blocks_is_4:
+ INITIAL_BLOCKS %%GDATA_KEY, %%GDATA_CTX, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, r13, %%DATA_OFFSET, 4, xmm12, xmm13, xmm14, xmm15, xmm11, xmm9, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm10, xmm0, %%ENC_DEC
+ sub r13, 16*4
+ jmp %%_initial_blocks_encrypted
+
+
+%%_initial_num_blocks_is_3:
+ INITIAL_BLOCKS %%GDATA_KEY, %%GDATA_CTX, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, r13, %%DATA_OFFSET, 3, xmm12, xmm13, xmm14, xmm15, xmm11, xmm9, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm10, xmm0, %%ENC_DEC
+ sub r13, 16*3
+ jmp %%_initial_blocks_encrypted
+%%_initial_num_blocks_is_2:
+ INITIAL_BLOCKS %%GDATA_KEY, %%GDATA_CTX, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, r13, %%DATA_OFFSET, 2, xmm12, xmm13, xmm14, xmm15, xmm11, xmm9, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm10, xmm0, %%ENC_DEC
+ sub r13, 16*2
+ jmp %%_initial_blocks_encrypted
+
+%%_initial_num_blocks_is_1:
+ INITIAL_BLOCKS %%GDATA_KEY, %%GDATA_CTX, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, r13, %%DATA_OFFSET, 1, xmm12, xmm13, xmm14, xmm15, xmm11, xmm9, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm10, xmm0, %%ENC_DEC
+ sub r13, 16
+ jmp %%_initial_blocks_encrypted
+
+%%_initial_num_blocks_is_0:
+ INITIAL_BLOCKS %%GDATA_KEY, %%GDATA_CTX, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, r13, %%DATA_OFFSET, 0, xmm12, xmm13, xmm14, xmm15, xmm11, xmm9, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm10, xmm0, %%ENC_DEC
+
+
+%%_initial_blocks_encrypted:
+ cmp r13, 0
+ je %%_zero_cipher_left
+
+ sub r13, 128
+ je %%_eight_cipher_left
+
+
+
+
+ vmovd r15d, xmm9
+ and r15d, 255
+ vpshufb xmm9, [SHUF_MASK]
+
+
+%%_encrypt_by_8_new:
+ cmp r15d, 255-8
+ jg %%_encrypt_by_8
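+        ;; If the low byte of the counter will not wrap within the next 8 blocks, the increment
+        ;; can be applied directly to the byte-swapped counter (out_order path below); otherwise
+        ;; %%_encrypt_by_8 swaps back to native order so the carry propagates (in_order path).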
+
+
+
+ add r15b, 8
+ GHASH_8_ENCRYPT_8_PARALLEL %%GDATA_KEY, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, %%DATA_OFFSET, xmm0, xmm10, xmm11, xmm12, xmm13, xmm14, xmm9, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm15, out_order, %%ENC_DEC
+ add %%DATA_OFFSET, 128
+ sub r13, 128
+ jne %%_encrypt_by_8_new
+
+ vpshufb xmm9, [SHUF_MASK]
+ jmp %%_eight_cipher_left
+
+%%_encrypt_by_8:
+ vpshufb xmm9, [SHUF_MASK]
+ add r15b, 8
+ GHASH_8_ENCRYPT_8_PARALLEL %%GDATA_KEY, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN,%%DATA_OFFSET, xmm0, xmm10, xmm11, xmm12, xmm13, xmm14, xmm9, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm15, in_order, %%ENC_DEC
+ vpshufb xmm9, [SHUF_MASK]
+ add %%DATA_OFFSET, 128
+ sub r13, 128
+ jne %%_encrypt_by_8_new
+
+ vpshufb xmm9, [SHUF_MASK]
+
+
+
+
+%%_eight_cipher_left:
+ GHASH_LAST_8 %%GDATA_KEY, xmm0, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8
+
+
+%%_zero_cipher_left:
+ vmovdqu [%%GDATA_CTX + AadHash], xmm14 ; ctx_data.aad hash = xmm14
+ vmovdqu [%%GDATA_CTX + CurCount], xmm9 ; ctx_data.current_counter = xmm9
+
+ mov r13, r10
+ and r13, 15 ; r13 = (%%PLAIN_CYPH_LEN mod 16)
+
+ je %%_multiple_of_16_bytes
+
+        mov     [%%GDATA_CTX + PBlockLen], r13          ; ctx_data.partial_block_length = r13
+        ; handle the last <16 Byte block separately
+
+ vpaddd xmm9, [ONE] ; INCR CNT to get Yn
+ vmovdqu [%%GDATA_CTX + CurCount], xmm9 ; my_ctx_data.current_counter = xmm9
+ vpshufb xmm9, [SHUF_MASK]
+ ENCRYPT_SINGLE_BLOCK %%GDATA_KEY, xmm9 ; E(K, Yn)
+ vmovdqu [%%GDATA_CTX + PBlockEncKey], xmm9 ; ctx_data.partial_block_enc_key = xmm9
+
+ cmp %%PLAIN_CYPH_LEN, 16
+ jge %%_large_enough_update
+
+ lea r10, [%%PLAIN_CYPH_IN + %%DATA_OFFSET]
+ READ_SMALL_DATA_INPUT xmm1, r10, r13, r12, r15, rax
+ lea r12, [SHIFT_MASK + 16]
+ sub r12, r13
+ jmp %%_data_read
+
+%%_large_enough_update:
+ sub %%DATA_OFFSET, 16
+ add %%DATA_OFFSET, r13
+
+ vmovdqu xmm1, [%%PLAIN_CYPH_IN+%%DATA_OFFSET] ; receive the last <16 Byte block
+
+ sub %%DATA_OFFSET, r13
+ add %%DATA_OFFSET, 16
+
+
+ lea r12, [SHIFT_MASK + 16]
+ sub r12, r13 ; adjust the shuffle mask pointer to be able to shift 16-r13 bytes (r13 is the number of bytes in plaintext mod 16)
+
+ vmovdqu xmm2, [r12] ; get the appropriate shuffle mask
+ vpshufb xmm1, xmm2 ; shift right 16-r13 bytes
+%%_data_read:
+%ifidn %%ENC_DEC, DEC
+ vmovdqa xmm2, xmm1
+ vpxor xmm9, xmm1 ; Plaintext XOR E(K, Yn)
+ vmovdqu xmm1, [r12 + ALL_F - SHIFT_MASK] ; get the appropriate mask to mask out top 16-r13 bytes of xmm9
+ vpand xmm9, xmm1 ; mask out top 16-r13 bytes of xmm9
+ vpand xmm2, xmm1
+ vpshufb xmm2, [SHUF_MASK]
+ vpxor xmm14, xmm2
+ vmovdqu [%%GDATA_CTX + AadHash], xmm14
+
+%else
+ vpxor xmm9, xmm1 ; Plaintext XOR E(K, Yn)
+ vmovdqu xmm1, [r12 + ALL_F - SHIFT_MASK] ; get the appropriate mask to mask out top 16-r13 bytes of xmm9
+ vpand xmm9, xmm1 ; mask out top 16-r13 bytes of xmm9
+ vpshufb xmm9, [SHUF_MASK]
+ vpxor xmm14, xmm9
+ vmovdqu [%%GDATA_CTX + AadHash], xmm14
+
+ vpshufb xmm9, [SHUF_MASK] ; shuffle xmm9 back to output as ciphertext
+%endif
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ; output r13 Bytes
+ vmovq rax, xmm9
+ cmp r13, 8
+ jle %%_less_than_8_bytes_left
+
+ mov [%%CYPH_PLAIN_OUT + %%DATA_OFFSET], rax
+ add %%DATA_OFFSET, 8
+ vpsrldq xmm9, xmm9, 8
+ vmovq rax, xmm9
+ sub r13, 8
+
+%%_less_than_8_bytes_left:
+ mov BYTE [%%CYPH_PLAIN_OUT + %%DATA_OFFSET], al
+ add %%DATA_OFFSET, 1
+ shr rax, 8
+ sub r13, 1
+ jne %%_less_than_8_bytes_left
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+%%_multiple_of_16_bytes:
+
+
+
+%endmacro
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; GCM_COMPLETE Finishes Encryption/Decryption of last partial block after GCM_UPDATE finishes.
+; Input: struct gcm_key_data* (GDATA_KEY), struct gcm_context_data *(GDATA_CTX) and
+; whether encoding or decoding (ENC_DEC).
+; Output: Authentication Tag (AUTH_TAG) of length AUTH_TAG_LEN
+; Clobbers rax, r10-r12, and xmm0, xmm1, xmm5, xmm6, xmm9, xmm11, xmm14, xmm15
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+%macro GCM_COMPLETE 5
+%define %%GDATA_KEY %1
+%define %%GDATA_CTX %2
+%define %%AUTH_TAG %3
+%define %%AUTH_TAG_LEN %4
+%define %%ENC_DEC %5
+%define %%PLAIN_CYPH_LEN rax
+
+ mov r12, [%%GDATA_CTX + PBlockLen]
+ vmovdqu xmm14, [%%GDATA_CTX + AadHash]
+ vmovdqu xmm13, [%%GDATA_KEY + HashKey]
+
+ cmp r12, 0
+
+ je %%_partial_done
+
+ GHASH_MUL xmm14, xmm13, xmm0, xmm10, xmm11, xmm5, xmm6 ;GHASH computation for the last <16 Byte block
+ vmovdqu [%%GDATA_CTX + AadHash], xmm14
+
+%%_partial_done:
+
+ mov r12, [%%GDATA_CTX + AadLen] ; r12 = aadLen (number of bytes)
+ mov %%PLAIN_CYPH_LEN, [%%GDATA_CTX + InLen]
+
+ shl r12, 3 ; convert into number of bits
+ vmovd xmm15, r12d ; len(A) in xmm15
+
+ shl %%PLAIN_CYPH_LEN, 3 ; len(C) in bits (*128)
+ vmovq xmm1, %%PLAIN_CYPH_LEN
+ vpslldq xmm15, xmm15, 8 ; xmm15 = len(A)|| 0x0000000000000000
+ vpxor xmm15, xmm1 ; xmm15 = len(A)||len(C)
+
+ vpxor xmm14, xmm15
+ GHASH_MUL xmm14, xmm13, xmm0, xmm10, xmm11, xmm5, xmm6 ; final GHASH computation
+ vpshufb xmm14, [SHUF_MASK] ; perform a 16Byte swap
+
+ vmovdqu xmm9, [%%GDATA_CTX + OrigIV] ; xmm9 = Y0
+
+ ENCRYPT_SINGLE_BLOCK %%GDATA_KEY, xmm9 ; E(K, Y0)
+
+ vpxor xmm9, xmm14
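+        ; xmm9 now holds the full 16-byte tag T = E(K, Y0) XOR GHASH(H, A, C);
+        ; it is truncated to AUTH_TAG_LEN below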
+
+
+%%_return_T:
+ mov r10, %%AUTH_TAG ; r10 = authTag
+ mov r11, %%AUTH_TAG_LEN ; r11 = auth_tag_len
+
+ cmp r11, 16
+ je %%_T_16
+
+ cmp r11, 12
+ je %%_T_12
+
+ cmp r11, 8
+ je %%_T_8
+
+ simd_store_avx r10, xmm9, r11, r12, rax
+ jmp %%_return_T_done
+%%_T_8:
+ vmovq rax, xmm9
+ mov [r10], rax
+ jmp %%_return_T_done
+%%_T_12:
+ vmovq rax, xmm9
+ mov [r10], rax
+ vpsrldq xmm9, xmm9, 8
+ vmovd eax, xmm9
+ mov [r10 + 8], eax
+ jmp %%_return_T_done
+%%_T_16:
+ vmovdqu [r10], xmm9
+
+%%_return_T_done:
+
+%ifdef SAFE_DATA
+ ;; Clear sensitive data from context structure
+ vpxor xmm0, xmm0
+ vmovdqu [%%GDATA_CTX + AadHash], xmm0
+ vmovdqu [%%GDATA_CTX + PBlockEncKey], xmm0
+%endif
+%endmacro ; GCM_COMPLETE
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;void aes_gcm_precomp_128_avx_gen2
+; (struct gcm_key_data *key_data);
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+MKGLOBAL(FN_NAME(precomp,_),function,)
+FN_NAME(precomp,_):
+
+%ifdef SAFE_PARAM
+ ;; Check key_data != NULL
+ cmp arg1, 0
+ jz exit_precomp
+%endif
+
+ push r12
+ push r13
+ push r14
+ push r15
+
+ mov r14, rsp
+
+
+
+ sub rsp, VARIABLE_OFFSET
+ and rsp, ~63 ; align rsp to 64 bytes
+
+%ifidn __OUTPUT_FORMAT__, win64
+ ; only xmm6 needs to be maintained
+ vmovdqu [rsp + LOCAL_STORAGE + 0*16],xmm6
+%endif
+
+ vpxor xmm6, xmm6
+ ENCRYPT_SINGLE_BLOCK arg1, xmm6 ; xmm6 = HashKey
+
+ vpshufb xmm6, [SHUF_MASK]
+ ;;;;;;;;;;;;;;; PRECOMPUTATION of HashKey<<1 mod poly from the HashKey;;;;;;;;;;;;;;;
+ vmovdqa xmm2, xmm6
+ vpsllq xmm6, 1
+ vpsrlq xmm2, 63
+ vmovdqa xmm1, xmm2
+ vpslldq xmm2, xmm2, 8
+ vpsrldq xmm1, xmm1, 8
+ vpor xmm6, xmm2
+ ;reduction
+ vpshufd xmm2, xmm1, 00100100b
+ vpcmpeqd xmm2, [TWOONE]
+ vpand xmm2, [POLY]
+ vpxor xmm6, xmm2 ; xmm6 holds the HashKey<<1 mod poly
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ vmovdqu [arg1 + HashKey], xmm6 ; store HashKey<<1 mod poly
+
+
+ PRECOMPUTE arg1, xmm6, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5
+
+%ifidn __OUTPUT_FORMAT__, win64
+ vmovdqu xmm6, [rsp + LOCAL_STORAGE + 0*16]
+%endif
+ mov rsp, r14
+
+ pop r15
+ pop r14
+ pop r13
+ pop r12
+
+%ifdef SAFE_DATA
+ clear_scratch_gps_asm
+ clear_scratch_xmms_avx_asm
+%endif
+exit_precomp:
+
+ ret
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;void aes_gcm_init_128_avx_gen2(
+; const struct gcm_key_data *key_data,
+; struct gcm_context_data *context_data,
+; u8 *iv,
+; const u8 *aad,
+; u64 aad_len);
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+MKGLOBAL(FN_NAME(init,_),function,)
+FN_NAME(init,_):
+ push r12
+ push r13
+%ifidn __OUTPUT_FORMAT__, win64
+ push r14
+ push r15
+ mov r14, rsp
+ ; xmm6:xmm15 need to be maintained for Windows
+ sub rsp, 1*16
+ movdqu [rsp + 0*16], xmm6
+%endif
+
+%ifdef SAFE_PARAM
+ ;; Check key_data != NULL
+ cmp arg1, 0
+ jz exit_init
+
+ ;; Check context_data != NULL
+ cmp arg2, 0
+ jz exit_init
+
+ ;; Check IV != NULL
+ cmp arg3, 0
+ jz exit_init
+
+ ;; Check if aad_len == 0
+ cmp arg5, 0
+ jz skip_aad_check_init
+
+ ;; Check aad != NULL (aad_len != 0)
+ cmp arg4, 0
+ jz exit_init
+
+skip_aad_check_init:
+%endif
+ GCM_INIT arg1, arg2, arg3, arg4, arg5
+
+%ifdef SAFE_DATA
+ clear_scratch_gps_asm
+ clear_scratch_xmms_avx_asm
+%endif
+exit_init:
+
+%ifidn __OUTPUT_FORMAT__, win64
+ movdqu xmm6 , [rsp + 0*16]
+ mov rsp, r14
+ pop r15
+ pop r14
+%endif
+ pop r13
+ pop r12
+ ret
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;void aes_gcm_enc_128_update_avx_gen2(
+; const struct gcm_key_data *key_data,
+; struct gcm_context_data *context_data,
+; u8 *out,
+; const u8 *in,
+; u64 plaintext_len);
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+MKGLOBAL(FN_NAME(enc,_update_),function,)
+FN_NAME(enc,_update_):
+
+ FUNC_SAVE
+
+%ifdef SAFE_PARAM
+ ;; Check key_data != NULL
+ cmp arg1, 0
+ jz exit_update_enc
+
+ ;; Check context_data != NULL
+ cmp arg2, 0
+ jz exit_update_enc
+
+ ;; Check if plaintext_len == 0
+ cmp arg5, 0
+ jz skip_in_out_check_update_enc
+
+ ;; Check out != NULL (plaintext_len != 0)
+ cmp arg3, 0
+ jz exit_update_enc
+
+ ;; Check in != NULL (plaintext_len != 0)
+ cmp arg4, 0
+ jz exit_update_enc
+
+skip_in_out_check_update_enc:
+%endif
+ GCM_ENC_DEC arg1, arg2, arg3, arg4, arg5, ENC
+
+exit_update_enc:
+ FUNC_RESTORE
+
+ ret
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;void aes_gcm_dec_128_update_avx_gen2(
+; const struct gcm_key_data *key_data,
+; struct gcm_context_data *context_data,
+; u8 *out,
+; const u8 *in,
+; u64 plaintext_len);
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+MKGLOBAL(FN_NAME(dec,_update_),function,)
+FN_NAME(dec,_update_):
+
+ FUNC_SAVE
+
+%ifdef SAFE_PARAM
+ ;; Check key_data != NULL
+ cmp arg1, 0
+ jz exit_update_dec
+
+ ;; Check context_data != NULL
+ cmp arg2, 0
+ jz exit_update_dec
+
+ ;; Check if plaintext_len == 0
+ cmp arg5, 0
+ jz skip_in_out_check_update_dec
+
+ ;; Check out != NULL (plaintext_len != 0)
+ cmp arg3, 0
+ jz exit_update_dec
+
+ ;; Check in != NULL (plaintext_len != 0)
+ cmp arg4, 0
+ jz exit_update_dec
+
+skip_in_out_check_update_dec:
+%endif
+
+ GCM_ENC_DEC arg1, arg2, arg3, arg4, arg5, DEC
+
+exit_update_dec:
+ FUNC_RESTORE
+
+ ret
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;void aes_gcm_enc_128_finalize_avx_gen2(
+; const struct gcm_key_data *key_data,
+; struct gcm_context_data *context_data,
+; u8 *auth_tag,
+; u64 auth_tag_len);
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+MKGLOBAL(FN_NAME(enc,_finalize_),function,)
+FN_NAME(enc,_finalize_):
+
+%ifdef SAFE_PARAM
+ ;; Check key_data != NULL
+ cmp arg1, 0
+ jz exit_enc_fin
+
+ ;; Check context_data != NULL
+ cmp arg2, 0
+ jz exit_enc_fin
+
+ ;; Check auth_tag != NULL
+ cmp arg3, 0
+ jz exit_enc_fin
+
+ ;; Check auth_tag_len == 0 or > 16
+ cmp arg4, 0
+ jz exit_enc_fin
+
+ cmp arg4, 16
+ ja exit_enc_fin
+%endif
+ push r12
+
+%ifidn __OUTPUT_FORMAT__, win64
+ ; xmm6:xmm15 need to be maintained for Windows
+ sub rsp, 5*16
+ vmovdqu [rsp + 0*16],xmm6
+ vmovdqu [rsp + 1*16],xmm9
+ vmovdqu [rsp + 2*16],xmm11
+ vmovdqu [rsp + 3*16],xmm14
+ vmovdqu [rsp + 4*16],xmm15
+%endif
+ GCM_COMPLETE arg1, arg2, arg3, arg4, ENC
+
+%ifidn __OUTPUT_FORMAT__, win64
+ vmovdqu xmm15 , [rsp + 4*16]
+ vmovdqu xmm14 , [rsp + 3*16]
+ vmovdqu xmm11 , [rsp + 2*16]
+ vmovdqu xmm9 , [rsp + 1*16]
+ vmovdqu xmm6 , [rsp + 0*16]
+ add rsp, 5*16
+%endif
+
+ pop r12
+
+%ifdef SAFE_DATA
+ clear_scratch_gps_asm
+ clear_scratch_xmms_avx_asm
+%endif
+exit_enc_fin:
+ ret
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;void aes_gcm_dec_128_finalize_avx_gen2(
+; const struct gcm_key_data *key_data,
+; struct gcm_context_data *context_data,
+; u8 *auth_tag,
+; u64 auth_tag_len);
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+MKGLOBAL(FN_NAME(dec,_finalize_),function,)
+FN_NAME(dec,_finalize_):
+
+%ifdef SAFE_PARAM
+ ;; Check key_data != NULL
+ cmp arg1, 0
+ jz exit_dec_fin
+
+ ;; Check context_data != NULL
+ cmp arg2, 0
+ jz exit_dec_fin
+
+ ;; Check auth_tag != NULL
+ cmp arg3, 0
+ jz exit_dec_fin
+
+ ;; Check auth_tag_len == 0 or > 16
+ cmp arg4, 0
+ jz exit_dec_fin
+
+ cmp arg4, 16
+ ja exit_dec_fin
+%endif
+
+ push r12
+
+%ifidn __OUTPUT_FORMAT__, win64
+ ; xmm6:xmm15 need to be maintained for Windows
+ sub rsp, 5*16
+ vmovdqu [rsp + 0*16],xmm6
+ vmovdqu [rsp + 1*16],xmm9
+ vmovdqu [rsp + 2*16],xmm11
+ vmovdqu [rsp + 3*16],xmm14
+ vmovdqu [rsp + 4*16],xmm15
+%endif
+ GCM_COMPLETE arg1, arg2, arg3, arg4, DEC
+
+%ifidn __OUTPUT_FORMAT__, win64
+ vmovdqu xmm15 , [rsp + 4*16]
+ vmovdqu xmm14 , [rsp + 3*16]
+ vmovdqu xmm11 , [rsp + 2*16]
+ vmovdqu xmm9 , [rsp + 1*16]
+ vmovdqu xmm6 , [rsp + 0*16]
+ add rsp, 5*16
+%endif
+
+ pop r12
+
+%ifdef SAFE_DATA
+ clear_scratch_gps_asm
+ clear_scratch_xmms_avx_asm
+%endif
+exit_dec_fin:
+ ret
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;void aes_gcm_enc_128_avx_gen2(
+; const struct gcm_key_data *key_data,
+; struct gcm_context_data *context_data,
+; u8 *out,
+; const u8 *in,
+; u64 plaintext_len,
+; u8 *iv,
+; const u8 *aad,
+; u64 aad_len,
+; u8 *auth_tag,
+; u64 auth_tag_len);
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
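+; A minimal usage sketch in C (illustrative only; key expansion/precomputation is assumed to
+; have been done through the library's key-setup path, which is not part of this file):
+;
+;     struct gcm_key_data key;          /* expanded AES round keys + hash key tables */
+;     struct gcm_context_data ctx;      /* per-message GCM state                     */
+;     uint8_t tag[16];
+;
+;     aes_gcm_enc_128_avx_gen2(&key, &ctx, out, in, plaintext_len,
+;                              iv, aad, aad_len, tag, sizeof(tag));
+;
+; The same operation can also be split across the init/update/finalize entry points above
+; when the message arrives in pieces.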
+MKGLOBAL(FN_NAME(enc,_),function,)
+FN_NAME(enc,_):
+
+ FUNC_SAVE
+
+%ifdef SAFE_PARAM
+ ;; Check key_data != NULL
+ cmp arg1, 0
+ jz exit_enc
+
+ ;; Check context_data != NULL
+ cmp arg2, 0
+ jz exit_enc
+
+ ;; Check IV != NULL
+ cmp arg6, 0
+ jz exit_enc
+
+ ;; Check auth_tag != NULL
+ cmp arg9, 0
+ jz exit_enc
+
+ ;; Check auth_tag_len == 0 or > 16
+ cmp arg10, 0
+ jz exit_enc
+
+ cmp arg10, 16
+ ja exit_enc
+
+ ;; Check if plaintext_len == 0
+ cmp arg5, 0
+ jz skip_in_out_check_enc
+
+ ;; Check out != NULL (plaintext_len != 0)
+ cmp arg3, 0
+ jz exit_enc
+
+ ;; Check in != NULL (plaintext_len != 0)
+ cmp arg4, 0
+ jz exit_enc
+
+skip_in_out_check_enc:
+ ;; Check if aad_len == 0
+ cmp arg8, 0
+ jz skip_aad_check_enc
+
+ ;; Check aad != NULL (aad_len != 0)
+ cmp arg7, 0
+ jz exit_enc
+
+skip_aad_check_enc:
+%endif
+ GCM_INIT arg1, arg2, arg6, arg7, arg8
+
+ GCM_ENC_DEC arg1, arg2, arg3, arg4, arg5, ENC
+
+ GCM_COMPLETE arg1, arg2, arg9, arg10, ENC
+
+exit_enc:
+ FUNC_RESTORE
+
+ ret
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;void aes_gcm_dec_128_avx_gen2(
+; const struct gcm_key_data *key_data,
+; struct gcm_context_data *context_data,
+; u8 *out,
+; const u8 *in,
+; u64 plaintext_len,
+; u8 *iv,
+; const u8 *aad,
+; u64 aad_len,
+; u8 *auth_tag,
+; u64 auth_tag_len);
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+MKGLOBAL(FN_NAME(dec,_),function,)
+FN_NAME(dec,_):
+
+ FUNC_SAVE
+
+%ifdef SAFE_PARAM
+ ;; Check key_data != NULL
+ cmp arg1, 0
+ jz exit_dec
+
+ ;; Check context_data != NULL
+ cmp arg2, 0
+ jz exit_dec
+
+ ;; Check IV != NULL
+ cmp arg6, 0
+ jz exit_dec
+
+ ;; Check auth_tag != NULL
+ cmp arg9, 0
+ jz exit_dec
+
+ ;; Check auth_tag_len == 0 or > 16
+ cmp arg10, 0
+ jz exit_dec
+
+ cmp arg10, 16
+ ja exit_dec
+
+ ;; Check if plaintext_len == 0
+ cmp arg5, 0
+ jz skip_in_out_check_dec
+
+ ;; Check out != NULL (plaintext_len != 0)
+ cmp arg3, 0
+ jz exit_dec
+
+ ;; Check in != NULL (plaintext_len != 0)
+ cmp arg4, 0
+ jz exit_dec
+
+skip_in_out_check_dec:
+ ;; Check if aad_len == 0
+ cmp arg8, 0
+ jz skip_aad_check_dec
+
+ ;; Check aad != NULL (aad_len != 0)
+ cmp arg7, 0
+ jz exit_dec
+
+skip_aad_check_dec:
+%endif
+
+ GCM_INIT arg1, arg2, arg6, arg7, arg8
+
+ GCM_ENC_DEC arg1, arg2, arg3, arg4, arg5, DEC
+
+ GCM_COMPLETE arg1, arg2, arg9, arg10, DEC
+
+exit_dec:
+ FUNC_RESTORE
+
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/avx/kasumi_avx.c b/src/spdk/intel-ipsec-mb/avx/kasumi_avx.c
new file mode 100644
index 000000000..4739191ac
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx/kasumi_avx.c
@@ -0,0 +1,386 @@
+/*******************************************************************************
+ Copyright (c) 2009-2019, Intel Corporation
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*******************************************************************************/
+
+#include <limits.h>
+
+#define CLEAR_SCRATCH_SIMD_REGS clear_scratch_xmms_avx
+
+#include "include/save_xmms.h"
+#include "include/kasumi_internal.h"
+#include "include/save_xmms.h"
+#include "include/clear_regs_mem.h"
+
+#define SAVE_XMMS save_xmms_avx
+#define RESTORE_XMMS restore_xmms_avx
+
+void
+kasumi_f8_1_buffer_avx(const kasumi_key_sched_t *pCtx, const uint64_t IV,
+ const void *pBufferIn, void *pBufferOut,
+ const uint32_t cipherLengthInBytes)
+{
+#ifndef LINUX
+ DECLARE_ALIGNED(uint128_t xmm_save[10], 16);
+
+ SAVE_XMMS(xmm_save);
+#endif
+#ifdef SAFE_PARAM
+ /* Check for NULL pointers */
+ if (pCtx == NULL || pBufferIn == NULL || pBufferOut == NULL)
+ return;
+
+ /* Check input data is in range of supported length */
+ if (cipherLengthInBytes == 0 ||
+ cipherLengthInBytes > (KASUMI_MAX_LEN / CHAR_BIT))
+ return;
+#endif
+ kasumi_f8_1_buffer(pCtx, IV, pBufferIn, pBufferOut,
+ cipherLengthInBytes);
+#ifdef SAFE_DATA
+ /* Clear sensitive data in registers */
+ CLEAR_SCRATCH_GPS();
+ CLEAR_SCRATCH_SIMD_REGS();
+#endif
+#ifndef LINUX
+ RESTORE_XMMS(xmm_save);
+#endif
+}
+
+void
+kasumi_f8_1_buffer_bit_avx(const kasumi_key_sched_t *pCtx, const uint64_t IV,
+ const void *pBufferIn,
+ void *pBufferOut,
+ const uint32_t cipherLengthInBits,
+ const uint32_t offsetInBits)
+{
+#ifndef LINUX
+ DECLARE_ALIGNED(uint128_t xmm_save[10], 16);
+
+ SAVE_XMMS(xmm_save);
+#endif
+#ifdef SAFE_PARAM
+ /* Check for NULL pointers */
+ if (pCtx == NULL || pBufferIn == NULL || pBufferOut == NULL)
+ return;
+
+ /* Check input data is in range of supported length */
+ if (cipherLengthInBits == 0 ||
+ cipherLengthInBits > KASUMI_MAX_LEN)
+ return;
+#endif
+ kasumi_f8_1_buffer_bit(pCtx, IV, pBufferIn, pBufferOut,
+ cipherLengthInBits, offsetInBits);
+#ifdef SAFE_DATA
+ /* Clear sensitive data in registers */
+ CLEAR_SCRATCH_GPS();
+ CLEAR_SCRATCH_SIMD_REGS();
+#endif
+#ifndef LINUX
+ RESTORE_XMMS(xmm_save);
+#endif
+}
+
+void
+kasumi_f8_2_buffer_avx(const kasumi_key_sched_t *pCtx, const uint64_t IV1,
+ const uint64_t IV2, const void *pBufferIn1,
+ void *pBufferOut1, const uint32_t lengthInBytes1,
+ const void *pBufferIn2, void *pBufferOut2,
+ const uint32_t lengthInBytes2)
+{
+#ifndef LINUX
+ DECLARE_ALIGNED(uint128_t xmm_save[10], 16);
+
+ SAVE_XMMS(xmm_save);
+#endif
+#ifdef SAFE_PARAM
+ /* Check for NULL pointers */
+ if (pCtx == NULL)
+ return;
+
+ if (pBufferIn1 == NULL || pBufferOut1 == NULL)
+ return;
+
+ if (pBufferIn2 == NULL || pBufferOut2 == NULL)
+ return;
+
+ /* Check input data is in range of supported length */
+ if (lengthInBytes1 == 0 || lengthInBytes1 > (KASUMI_MAX_LEN / CHAR_BIT))
+ return;
+
+ if (lengthInBytes2 == 0 || lengthInBytes2 > (KASUMI_MAX_LEN / CHAR_BIT))
+ return;
+#endif
+ kasumi_f8_2_buffer(pCtx, IV1, IV2,
+ pBufferIn1, pBufferOut1, lengthInBytes1,
+ pBufferIn2, pBufferOut2, lengthInBytes2);
+#ifdef SAFE_DATA
+ /* Clear sensitive data in registers */
+ CLEAR_SCRATCH_GPS();
+ CLEAR_SCRATCH_SIMD_REGS();
+#endif
+#ifndef LINUX
+ RESTORE_XMMS(xmm_save);
+#endif
+}
+
+void
+kasumi_f8_3_buffer_avx(const kasumi_key_sched_t *pCtx, const uint64_t IV1,
+ const uint64_t IV2, const uint64_t IV3,
+ const void *pBufferIn1, void *pBufferOut1,
+ const void *pBufferIn2, void *pBufferOut2,
+ const void *pBufferIn3, void *pBufferOut3,
+ const uint32_t lengthInBytes)
+{
+#ifndef LINUX
+ DECLARE_ALIGNED(uint128_t xmm_save[10], 16);
+
+ SAVE_XMMS(xmm_save);
+#endif
+#ifdef SAFE_PARAM
+ /* Check for NULL pointers */
+ if (pCtx == NULL)
+ return;
+
+ if (pBufferIn1 == NULL || pBufferOut1 == NULL)
+ return;
+
+ if (pBufferIn2 == NULL || pBufferOut2 == NULL)
+ return;
+
+ if (pBufferIn3 == NULL || pBufferOut3 == NULL)
+ return;
+
+ /* Check input data is in range of supported length */
+ if (lengthInBytes == 0 || lengthInBytes > (KASUMI_MAX_LEN / CHAR_BIT))
+ return;
+#endif
+ kasumi_f8_3_buffer(pCtx, IV1, IV2, IV3,
+ pBufferIn1, pBufferOut1,
+ pBufferIn2, pBufferOut2,
+ pBufferIn3, pBufferOut3, lengthInBytes);
+#ifdef SAFE_DATA
+ /* Clear sensitive data in registers */
+ CLEAR_SCRATCH_GPS();
+ CLEAR_SCRATCH_SIMD_REGS();
+#endif
+#ifndef LINUX
+ RESTORE_XMMS(xmm_save);
+#endif
+}
+
+void
+kasumi_f8_4_buffer_avx(const kasumi_key_sched_t *pCtx,
+ const uint64_t IV1, const uint64_t IV2,
+ const uint64_t IV3, const uint64_t IV4,
+ const void *pBufferIn1, void *pBufferOut1,
+ const void *pBufferIn2, void *pBufferOut2,
+ const void *pBufferIn3, void *pBufferOut3,
+ const void *pBufferIn4, void *pBufferOut4,
+ const uint32_t lengthInBytes)
+{
+#ifndef LINUX
+ DECLARE_ALIGNED(uint128_t xmm_save[10], 16);
+
+ SAVE_XMMS(xmm_save);
+#endif
+#ifdef SAFE_PARAM
+ /* Check for NULL pointers */
+ if (pCtx == NULL)
+ return;
+
+ if (pBufferIn1 == NULL || pBufferOut1 == NULL)
+ return;
+
+ if (pBufferIn2 == NULL || pBufferOut2 == NULL)
+ return;
+
+ if (pBufferIn3 == NULL || pBufferOut3 == NULL)
+ return;
+
+ if (pBufferIn4 == NULL || pBufferOut4 == NULL)
+ return;
+
+ /* Check input data is in range of supported length */
+ if (lengthInBytes == 0 || lengthInBytes > (KASUMI_MAX_LEN / CHAR_BIT))
+ return;
+#endif
+ kasumi_f8_4_buffer(pCtx, IV1, IV2, IV3, IV4,
+ pBufferIn1, pBufferOut1,
+ pBufferIn2, pBufferOut2,
+ pBufferIn3, pBufferOut3,
+ pBufferIn4, pBufferOut4,
+ lengthInBytes);
+#ifdef SAFE_DATA
+ /* Clear sensitive data in registers */
+ CLEAR_SCRATCH_GPS();
+ CLEAR_SCRATCH_SIMD_REGS();
+#endif
+#ifndef LINUX
+ RESTORE_XMMS(xmm_save);
+#endif
+}
+
+void
+kasumi_f8_n_buffer_avx(const kasumi_key_sched_t *pKeySchedule,
+ const uint64_t IV[],
+ const void * const pDataIn[], void *pDataOut[],
+ const uint32_t dataLen[], const uint32_t dataCount)
+{
+#ifndef LINUX
+ DECLARE_ALIGNED(uint128_t xmm_save[10], 16);
+
+ SAVE_XMMS(xmm_save);
+#endif
+ uint32_t numLeft = dataCount;
+ const uint64_t *IVPtr;
+ const void * const *pDataInPtr;
+ void **pDataOutPtr;
+ const uint32_t *dataLenPtr;
+ uint32_t i = 0;
+ uint32_t numBuffs;
+
+#ifdef SAFE_PARAM
+ /* Check for NULL pointers */
+ if (pKeySchedule == NULL || pDataIn == NULL || pDataOut == NULL ||
+ dataLen == NULL || IV == NULL)
+ return;
+
+ for (i = 0; i < dataCount; i++) {
+ /* Check for NULL pointers */
+ if (pDataIn[i] == NULL || pDataOut[i] == NULL)
+ return;
+
+ /* Check input data is in range of supported length */
+ if (dataLen[i] == 0 || dataLen[i] > (KASUMI_MAX_LEN / CHAR_BIT))
+ return;
+ }
+#endif
+
+ i = 0;
+
+ /* KASUMI F8 n buffer function can handle up to 16 buffers */
+ while (numLeft > 0) {
+ IVPtr = &IV[i];
+ pDataInPtr = &pDataIn[i];
+ pDataOutPtr = &pDataOut[i];
+ dataLenPtr = &dataLen[i];
+ numBuffs = (numLeft > 16) ? 16 : numLeft;
+
+ kasumi_f8_n_buffer(pKeySchedule, IVPtr, pDataInPtr, pDataOutPtr,
+ dataLenPtr, numBuffs);
+ i += numBuffs;
+ numLeft -= numBuffs;
+ }
+#ifdef SAFE_DATA
+ /* Clear sensitive data in registers */
+ CLEAR_SCRATCH_GPS();
+ CLEAR_SCRATCH_SIMD_REGS();
+#endif
+#ifndef LINUX
+ RESTORE_XMMS(xmm_save);
+#endif
+}
+
+
+void
+kasumi_f9_1_buffer_avx(const kasumi_key_sched_t *pCtx, const void *pBufferIn,
+ const uint32_t lengthInBytes, void *pDigest)
+{
+#ifndef LINUX
+ DECLARE_ALIGNED(uint128_t xmm_save[10], 16);
+
+ SAVE_XMMS(xmm_save);
+#endif
+#ifdef SAFE_PARAM
+ /* Check for NULL pointers */
+ if (pCtx == NULL || pBufferIn == NULL || pDigest == NULL)
+ return;
+
+ /* Check input data is in range of supported length */
+ if (lengthInBytes == 0 || lengthInBytes > (KASUMI_MAX_LEN / CHAR_BIT))
+ return;
+#endif
+ kasumi_f9_1_buffer(pCtx, pBufferIn, lengthInBytes, pDigest);
+#ifdef SAFE_DATA
+ /* Clear sensitive data in registers */
+ CLEAR_SCRATCH_GPS();
+ CLEAR_SCRATCH_SIMD_REGS();
+#endif
+#ifndef LINUX
+ RESTORE_XMMS(xmm_save);
+#endif
+}
+
+void
+kasumi_f9_1_buffer_user_avx(const kasumi_key_sched_t *pCtx, const uint64_t IV,
+ const void *pBufferIn, const uint32_t lengthInBits,
+ void *pDigest, const uint32_t direction)
+{
+#ifndef LINUX
+ DECLARE_ALIGNED(uint128_t xmm_save[10], 16);
+
+ SAVE_XMMS(xmm_save);
+#endif
+#ifdef SAFE_PARAM
+ /* Check for NULL pointers */
+ if (pCtx == NULL || pBufferIn == NULL || pDigest == NULL)
+ return;
+
+ /* Check input data is in range of supported length */
+ if (lengthInBits == 0 || lengthInBits > KASUMI_MAX_LEN)
+ return;
+#endif
+ kasumi_f9_1_buffer_user(pCtx, IV, pBufferIn, lengthInBits,
+ pDigest, direction);
+#ifdef SAFE_DATA
+ /* Clear sensitive data in registers */
+ CLEAR_SCRATCH_GPS();
+ CLEAR_SCRATCH_SIMD_REGS();
+#endif
+#ifndef LINUX
+ RESTORE_XMMS(xmm_save);
+#endif
+}
+
+int
+kasumi_init_f8_key_sched_avx(const void *const pKey,
+ kasumi_key_sched_t *pCtx)
+{
+ return kasumi_init_f8_key_sched(pKey, pCtx);
+}
+
+int
+kasumi_init_f9_key_sched_avx(const void *const pKey,
+ kasumi_key_sched_t *pCtx)
+{
+ return kasumi_init_f9_key_sched(pKey, pCtx);
+}
+
+size_t
+kasumi_key_sched_size_avx(void)
+{
+ return kasumi_key_sched_size();
+}
diff --git a/src/spdk/intel-ipsec-mb/avx/mb_mgr_aes192_flush_avx.asm b/src/spdk/intel-ipsec-mb/avx/mb_mgr_aes192_flush_avx.asm
new file mode 100644
index 000000000..3e3de0492
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx/mb_mgr_aes192_flush_avx.asm
@@ -0,0 +1,30 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%define AES_CBC_ENC_X8 aes_cbc_enc_192_x8
+%define FLUSH_JOB_AES_ENC flush_job_aes192_enc_avx
+%include "avx/mb_mgr_aes_flush_avx.asm"
diff --git a/src/spdk/intel-ipsec-mb/avx/mb_mgr_aes192_submit_avx.asm b/src/spdk/intel-ipsec-mb/avx/mb_mgr_aes192_submit_avx.asm
new file mode 100644
index 000000000..57fae603c
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx/mb_mgr_aes192_submit_avx.asm
@@ -0,0 +1,30 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%define AES_CBC_ENC_X8 aes_cbc_enc_192_x8
+%define SUBMIT_JOB_AES_ENC submit_job_aes192_enc_avx
+%include "avx/mb_mgr_aes_submit_avx.asm"
diff --git a/src/spdk/intel-ipsec-mb/avx/mb_mgr_aes256_flush_avx.asm b/src/spdk/intel-ipsec-mb/avx/mb_mgr_aes256_flush_avx.asm
new file mode 100644
index 000000000..04c4824d7
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx/mb_mgr_aes256_flush_avx.asm
@@ -0,0 +1,30 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%define AES_CBC_ENC_X8 aes_cbc_enc_256_x8
+%define FLUSH_JOB_AES_ENC flush_job_aes256_enc_avx
+%include "avx/mb_mgr_aes_flush_avx.asm"
diff --git a/src/spdk/intel-ipsec-mb/avx/mb_mgr_aes256_submit_avx.asm b/src/spdk/intel-ipsec-mb/avx/mb_mgr_aes256_submit_avx.asm
new file mode 100644
index 000000000..ee1de7165
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx/mb_mgr_aes256_submit_avx.asm
@@ -0,0 +1,30 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%define AES_CBC_ENC_X8 aes_cbc_enc_256_x8
+%define SUBMIT_JOB_AES_ENC submit_job_aes256_enc_avx
+%include "avx/mb_mgr_aes_submit_avx.asm"
diff --git a/src/spdk/intel-ipsec-mb/avx/mb_mgr_aes_ccm_auth_submit_flush_avx.asm b/src/spdk/intel-ipsec-mb/avx/mb_mgr_aes_ccm_auth_submit_flush_avx.asm
new file mode 100644
index 000000000..9d132ec5f
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx/mb_mgr_aes_ccm_auth_submit_flush_avx.asm
@@ -0,0 +1,537 @@
+;;
+;; Copyright (c) 2019, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+
+%include "include/os.asm"
+%include "job_aes_hmac.asm"
+%include "mb_mgr_datastruct.asm"
+
+%include "include/reg_sizes.asm"
+%include "include/const.inc"
+%include "include/memcpy.asm"
+
+%ifndef AES128_CBC_MAC
+
+%define AES128_CBC_MAC aes128_cbc_mac_x8
+%define SUBMIT_JOB_AES_CCM_AUTH submit_job_aes_ccm_auth_avx
+%define FLUSH_JOB_AES_CCM_AUTH flush_job_aes_ccm_auth_avx
+
+%endif
+
+extern AES128_CBC_MAC
+
+section .data
+default rel
+
+align 16
+len_mask:
+ dq 0xFFFFFFFFFFFFFFF0
+align 16
+len_masks:
+ dq 0x000000000000FFFF, 0x0000000000000000
+ dq 0x00000000FFFF0000, 0x0000000000000000
+ dq 0x0000FFFF00000000, 0x0000000000000000
+ dq 0xFFFF000000000000, 0x0000000000000000
+ dq 0x0000000000000000, 0x000000000000FFFF
+ dq 0x0000000000000000, 0x00000000FFFF0000
+ dq 0x0000000000000000, 0x0000FFFF00000000
+ dq 0x0000000000000000, 0xFFFF000000000000
+dupw:
+ dq 0x0100010001000100, 0x0100010001000100
+counter_mask:
+ dq 0xFFFFFFFFFFFFFF07, 0x0000FFFFFFFFFFFF
+one: dq 1
+two: dq 2
+three: dq 3
+four: dq 4
+five: dq 5
+six: dq 6
+seven: dq 7
+
+section .text
+
+%define APPEND(a,b) a %+ b
+
+%define NROUNDS 9 ; AES-CCM-128
+%ifdef LINUX
+%define arg1 rdi
+%define arg2 rsi
+%else
+%define arg1 rcx
+%define arg2 rdx
+%endif
+
+%define state arg1
+%define job arg2
+%define len2 arg2
+
+%define job_rax rax
+%define tmp4 rax
+%define auth_len_aad rax
+
+%define min_idx rbp
+%define flags rbp
+
+%define lane r8
+
+%define iv_len r9
+%define auth_len r9
+
+%define aad_len r10
+%define init_block_addr r11
+
+%define unused_lanes rbx
+%define r rbx
+
+%define tmp r12
+%define tmp2 r13
+%define tmp3 r14
+
+%define good_lane r15
+%define min_job r15
+
+%define init_block0 xmm0
+%define ccm_lens xmm1
+%define min_len_idx xmm2
+%define xtmp0 xmm3
+%define xtmp1 xmm4
+%define xtmp2 xmm5
+%define xtmp3 xmm6
+
+; STACK_SPACE needs to be an odd multiple of 8
+; This routine and its callee clobbers all GPRs
+struc STACK
+_gpr_save: resq 8
+_rsp_save: resq 1
+endstruc
+
+;;; ===========================================================================
+;;; ===========================================================================
+;;; MACROS
+;;; ===========================================================================
+;;; ===========================================================================
+
+%macro ENCRYPT_SINGLE_BLOCK 2
+%define %%GDATA %1
+%define %%XMM0 %2
+
+ vpxor %%XMM0, [%%GDATA+16*0]
+%assign i 1
+%rep NROUNDS
+ vaesenc %%XMM0, [%%GDATA+16*i]
+%assign i (i+1)
+%endrep
+ vaesenclast %%XMM0, [%%GDATA+16*i]
+%endmacro
+
+;;; ===========================================================================
+;;; AES CCM auth job submit & flush
+;;; ===========================================================================
+;;; SUBMIT_FLUSH [in] - SUBMIT, FLUSH job selection
+%macro GENERIC_SUBMIT_FLUSH_JOB_AES_CCM_AUTH_AVX 1
+%define %%SUBMIT_FLUSH %1
+
+ mov rax, rsp
+ sub rsp, STACK_size
+ and rsp, -16
+
+ mov [rsp + _gpr_save + 8*0], rbx
+ mov [rsp + _gpr_save + 8*1], rbp
+ mov [rsp + _gpr_save + 8*2], r12
+ mov [rsp + _gpr_save + 8*3], r13
+ mov [rsp + _gpr_save + 8*4], r14
+ mov [rsp + _gpr_save + 8*5], r15
+%ifndef LINUX
+ mov [rsp + _gpr_save + 8*6], rsi
+ mov [rsp + _gpr_save + 8*7], rdi
+%endif
+ mov [rsp + _rsp_save], rax ; original SP
+
+ ;; Find free lane
+ mov unused_lanes, [state + _aes_ccm_unused_lanes]
+
+%ifidn %%SUBMIT_FLUSH, SUBMIT
+
+ mov lane, unused_lanes
+ and lane, 15
+ shr unused_lanes, 4
+ mov [state + _aes_ccm_unused_lanes], unused_lanes
+
+ ;; Copy job info into lane
+ mov [state + _aes_ccm_job_in_lane + lane*8], job
+ ;; Copy keys into lane args
+ mov tmp, [job + _aes_enc_key_expanded]
+ mov [state + _aes_ccm_args_keys + lane*8], tmp
+ ;; init_done = 0
+ mov word [state + _aes_ccm_init_done + lane*2], 0
+ lea tmp, [lane * 8]
+
+ vpxor init_block0, init_block0
+ vmovdqa [state + _aes_ccm_args_IV + tmp*2], init_block0
+
+ ;; Prepare initial Block 0 for CBC-MAC-128
+
+ ;; Byte 0: flags with L' and M' (AAD later)
+ ;; Calculate L' = 15 - IV length - 1 = 14 - IV length
+ mov flags, 14
+ mov iv_len, [job + _iv_len_in_bytes]
+ sub flags, iv_len
+ ;; Calculate M' = (Digest length - 2) / 2
+ mov tmp, [job + _auth_tag_output_len_in_bytes]
+ sub tmp, 2
+
+ shl tmp, 2 ; M' << 3, folded: (x >> 1) << 3 == x << 2
+ or flags, tmp
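+ ;; e.g. 12-byte nonce and 8-byte tag: L' = 14 - 12 = 2,
+ ;; M' = (8 - 2)/2 = 3, so flags = (3 << 3) | 2 = 0x1A
+ ;; (the Adata bit is ORed in later when AAD is present)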
+
+ ;; Bytes 1 - 13: Nonce (7 - 13 bytes long)
+
+ ;; Bytes 1 - 7 are always copied (first 7 bytes)
+ mov tmp, [job + _iv]
+ vpinsrb init_block0, [tmp], 1
+ vpinsrw init_block0, [tmp + 1], 1
+ vpinsrd init_block0, [tmp + 3], 1
+
+ cmp iv_len, 7
+ je %%_finish_nonce_move
+
+ cmp iv_len, 8
+ je %%_iv_length_8
+ cmp iv_len, 9
+ je %%_iv_length_9
+ cmp iv_len, 10
+ je %%_iv_length_10
+ cmp iv_len, 11
+ je %%_iv_length_11
+ cmp iv_len, 12
+ je %%_iv_length_12
+
+ ;; Bytes 8 - 13
+%%_iv_length_13:
+ vpinsrb init_block0, [tmp + 12], 13
+%%_iv_length_12:
+ vpinsrb init_block0, [tmp + 11], 12
+%%_iv_length_11:
+ vpinsrd init_block0, [tmp + 7], 2
+ jmp %%_finish_nonce_move
+%%_iv_length_10:
+ vpinsrb init_block0, [tmp + 9], 10
+%%_iv_length_9:
+ vpinsrb init_block0, [tmp + 8], 9
+%%_iv_length_8:
+ vpinsrb init_block0, [tmp + 7], 8
+
+%%_finish_nonce_move:
+
+ ;; Bytes 14 & 15 (message length), in Big Endian
+ mov ax, [job + _msg_len_to_hash_in_bytes]
+ xchg al, ah
+ vpinsrw init_block0, ax, 7
+
+ mov aad_len, [job + _cbcmac_aad_len]
+ ;; Initial length to authenticate (Block 0)
+ mov auth_len, 16
+ ;; Length to authenticate (Block 0 + len(AAD) (2B) + AAD padded,
+ ;; so length is a multiple of 16B)
+ lea auth_len_aad, [aad_len + (2 + 15) + 16]
+ and auth_len_aad, -16
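+ ;; e.g. aad_len = 20: 16 + ((20 + 2 + 15) & ~15) = 16 + 32 = 48 bytes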
+
+ or aad_len, aad_len
+ cmovne auth_len, auth_len_aad
+ ;; Update lengths to authenticate and find min length
+ vmovdqa ccm_lens, [state + _aes_ccm_lens]
+ XVPINSRW ccm_lens, xtmp0, tmp2, lane, auth_len, scale_x16
+ vmovdqa [state + _aes_ccm_lens], ccm_lens
+ vphminposuw min_len_idx, ccm_lens
+
+ mov tmp, lane
+ shl tmp, 6
+ lea init_block_addr, [state + _aes_ccm_init_blocks + tmp]
+ or aad_len, aad_len
+ je %%_aad_complete
+
+ or flags, (1 << 6) ; Set Adata bit in flags
+
+ ;; Copy AAD
+ ;; Set all 0s in last block (padding)
+ lea tmp, [init_block_addr + auth_len]
+ sub tmp, 16
+ vpxor xtmp0, xtmp0
+ vmovdqa [tmp], xtmp0
+
+ ;; Start copying from second block
+ lea tmp, [init_block_addr+16]
+ mov rax, aad_len
+ xchg al, ah
+ mov [tmp], ax
+ add tmp, 2
+ mov tmp2, [job + _cbcmac_aad]
+ memcpy_avx_64_1 tmp, tmp2, aad_len, tmp3, tmp4, xtmp0, xtmp1, xtmp2, xtmp3
+
+%%_aad_complete:
+
+ ;; Finish Block 0 with Byte 0
+ vpinsrb init_block0, BYTE(flags), 0
+ vmovdqa [init_block_addr], init_block0
+
+ mov [state + _aes_ccm_args_in + lane * 8], init_block_addr
+
+ cmp byte [state + _aes_ccm_unused_lanes], 0xf
+ jne %%_return_null
+
+%else ; end SUBMIT
+
+ ;; Check at least one job
+ bt unused_lanes, 35
+ jc %%_return_null
+
+ ;; Find a lane with a non-null job
+ xor good_lane, good_lane
+ cmp QWORD [state + _aes_ccm_job_in_lane + 1*8], 0
+ cmovne good_lane, [rel one]
+ cmp QWORD [state + _aes_ccm_job_in_lane + 2*8], 0
+ cmovne good_lane, [rel two]
+ cmp QWORD [state + _aes_ccm_job_in_lane + 3*8], 0
+ cmovne good_lane, [rel three]
+ cmp qword [state + _aes_ccm_job_in_lane + 4*8], 0
+ cmovne good_lane, [rel four]
+ cmp qword [state + _aes_ccm_job_in_lane + 5*8], 0
+ cmovne good_lane, [rel five]
+ cmp qword [state + _aes_ccm_job_in_lane + 6*8], 0
+ cmovne good_lane, [rel six]
+ cmp qword [state + _aes_ccm_job_in_lane + 7*8], 0
+ cmovne good_lane, [rel seven]
+
+ ; Copy good_lane to empty lanes
+ movzx tmp, word [state + _aes_ccm_init_done + good_lane*2]
+ mov tmp2, [state + _aes_ccm_args_in + good_lane*8]
+ mov tmp3, [state + _aes_ccm_args_keys + good_lane*8]
+ shl good_lane, 4 ; multiply by 16
+ vmovdqa xtmp0, [state + _aes_ccm_args_IV + good_lane]
+ vmovdqa ccm_lens, [state + _aes_ccm_lens]
+
+%assign I 0
+%rep 8
+ cmp qword [state + _aes_ccm_job_in_lane + I*8], 0
+ jne APPEND(skip_,I)
+ vpor ccm_lens, [rel len_masks + 16*I]
+ mov [state + _aes_ccm_init_done + I*2], WORD(tmp)
+ mov [state + _aes_ccm_args_in + I*8], tmp2
+ mov [state + _aes_ccm_args_keys + I*8], tmp3
+ vmovdqa [state + _aes_ccm_args_IV + I*16], xtmp0
+APPEND(skip_,I):
+%assign I (I+1)
+%endrep
+ vmovdqa [state + _aes_ccm_lens], ccm_lens
+ ;; Find min length
+ vphminposuw min_len_idx, ccm_lens
+
+%endif ; end FLUSH
+
+%%_ccm_round:
+ vpextrw len2, min_len_idx, 0 ; min value
+ vpextrw min_idx, min_len_idx, 1 ; min index (0...7)
+
+ mov min_job, [state + _aes_ccm_job_in_lane + min_idx*8]
+
+ or len2, len2
+ je %%_len_is_0
+ ;; subtract min length from all lengths
+ vpshufb min_len_idx, min_len_idx, [rel dupw] ; broadcast min length
+ vpsubw ccm_lens, min_len_idx
+ vmovdqa [state + _aes_ccm_lens], ccm_lens
+
+ ; "state" and "args" are the same address, arg1
+ ; len2 is arg2
+ call AES128_CBC_MAC
+ ; state and min_idx are intact
+
+%%_len_is_0:
+
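+ ;; init_done tracks per-lane progress: 0 = B_0/AAD queued,
+ ;; 1 = full payload blocks queued, 2 = final partial block queued,
+ ;; so falling through below encrypts the digest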
+ movzx tmp, WORD [state + _aes_ccm_init_done + min_idx*2]
+ cmp WORD(tmp), 0
+ je %%_prepare_full_blocks_to_auth
+ cmp WORD(tmp), 1
+ je %%_prepare_partial_block_to_auth
+
+%%_encrypt_digest:
+
+ ;; Set counter block 0 (reusing previous initial block 0)
+ mov tmp, min_idx
+ shl tmp, 3
+ vmovdqa init_block0, [state + _aes_ccm_init_blocks + tmp * 8]
+
+ vpand init_block0, [rel counter_mask]
+
+ mov tmp2, [state + _aes_ccm_args_keys + tmp]
+ ENCRYPT_SINGLE_BLOCK tmp2, init_block0
+ vpxor init_block0, [state + _aes_ccm_args_IV + tmp * 2]
+
+ ;; Copy Mlen bytes into auth_tag_output (Mlen = 4,6,8,10,12,14,16)
+ mov min_job, [state + _aes_ccm_job_in_lane + tmp]
+ mov tmp3, [min_job + _auth_tag_output_len_in_bytes]
+ mov tmp2, [min_job + _auth_tag_output]
+
+ simd_store_avx tmp2, init_block0, tmp3, tmp, tmp4
+%%_update_lanes:
+ ; Update unused lanes
+ mov unused_lanes, [state + _aes_ccm_unused_lanes]
+ shl unused_lanes, 4
+ or unused_lanes, min_idx
+ mov [state + _aes_ccm_unused_lanes], unused_lanes
+
+ ; Set return job
+ mov job_rax, min_job
+
+ mov qword [state + _aes_ccm_job_in_lane + min_idx*8], 0
+ or dword [job_rax + _status], STS_COMPLETED_HMAC
+
+%ifdef SAFE_DATA
+ vpxor xtmp0, xtmp0
+%ifidn %%SUBMIT_FLUSH, SUBMIT
+ shl min_idx, 3
+ ;; Clear digest (in memory for CBC IV), counter block 0 and AAD of returned job
+ vmovdqa [state + _aes_ccm_args_IV + min_idx * 2], xtmp0
+ vmovdqa [state + _aes_ccm_init_blocks + min_idx * 8], xtmp0
+ vmovdqa [state + _aes_ccm_init_blocks + min_idx * 8 + 16], xtmp0
+ vmovdqa [state + _aes_ccm_init_blocks + min_idx * 8 + 32], xtmp0
+ vmovdqa [state + _aes_ccm_init_blocks + min_idx * 8 + 48], xtmp0
+ mov qword [state + _aes_ccm_args_keys + min_idx], 0
+%else
+ ;; Clear digest (in memory for CBC IV), counter block 0 and AAD
+ ;; of returned job and "NULL lanes"
+%assign I 0
+%rep 8
+ cmp qword [state + _aes_ccm_job_in_lane + I*8], 0
+ jne APPEND(skip_clear_,I)
+ vmovdqa [state + _aes_ccm_args_IV + I*16], xtmp0
+ vmovdqa [state + _aes_ccm_init_blocks + I*64], xtmp0
+ vmovdqa [state + _aes_ccm_init_blocks + I*64 + 16], xtmp0
+ vmovdqa [state + _aes_ccm_init_blocks + I*64 + 32], xtmp0
+ vmovdqa [state + _aes_ccm_init_blocks + I*64 + 48], xtmp0
+ mov qword [state + _aes_ccm_args_keys + I*8], 0
+APPEND(skip_clear_,I):
+%assign I (I+1)
+%endrep
+
+%endif ;; SUBMIT
+%endif ;; SAFE_DATA
+
+%%_return:
+ mov rbx, [rsp + _gpr_save + 8*0]
+ mov rbp, [rsp + _gpr_save + 8*1]
+ mov r12, [rsp + _gpr_save + 8*2]
+ mov r13, [rsp + _gpr_save + 8*3]
+ mov r14, [rsp + _gpr_save + 8*4]
+ mov r15, [rsp + _gpr_save + 8*5]
+%ifndef LINUX
+ mov rsi, [rsp + _gpr_save + 8*6]
+ mov rdi, [rsp + _gpr_save + 8*7]
+%endif
+ mov rsp, [rsp + _rsp_save] ; original SP
+ ret
+
+%%_return_null:
+ xor job_rax, job_rax
+ jmp %%_return
+
+%%_prepare_full_blocks_to_auth:
+
+ cmp dword [min_job + _cipher_direction], 2 ; DECRYPT
+ je %%_decrypt
+
+%%_encrypt:
+ mov tmp, [min_job + _src]
+ add tmp, [min_job + _hash_start_src_offset_in_bytes]
+ jmp %%_set_init_done_1
+
+%%_decrypt:
+ mov tmp, [min_job + _dst]
+
+%%_set_init_done_1:
+ mov [state + _aes_ccm_args_in + min_idx*8], tmp
+ mov word [state + _aes_ccm_init_done + min_idx*2], 1
+
+ ; Check if there are full blocks to hash
+ mov tmp, [min_job + _msg_len_to_hash_in_bytes]
+ and tmp, -16
+ je %%_prepare_partial_block_to_auth
+
+ ;; Update lengths to authenticate and find min length
+ vmovdqa ccm_lens, [state + _aes_ccm_lens]
+ XVPINSRW ccm_lens, xtmp0, tmp2, min_idx, tmp, scale_x16
+ vphminposuw min_len_idx, ccm_lens
+ vmovdqa [state + _aes_ccm_lens], ccm_lens
+
+ jmp %%_ccm_round
+
+%%_prepare_partial_block_to_auth:
+ ; Check if partial block needs to be hashed
+ mov auth_len, [min_job + _msg_len_to_hash_in_bytes]
+ and auth_len, 15
+ je %%_encrypt_digest
+
+ mov word [state + _aes_ccm_init_done + min_idx * 2], 2
+ ;; Update lengths to authenticate and find min length
+ vmovdqa ccm_lens, [state + _aes_ccm_lens]
+ XVPINSRW ccm_lens, xtmp0, tmp2, min_idx, 16, scale_x16
+ vphminposuw min_len_idx, ccm_lens
+ vmovdqa [state + _aes_ccm_lens], ccm_lens
+
+ mov tmp2, min_idx
+ shl tmp2, 6
+ add tmp2, 16 ; pb[AES_BLOCK_SIZE]
+ lea init_block_addr, [state + _aes_ccm_init_blocks + tmp2]
+ mov tmp2, [state + _aes_ccm_args_in + min_idx * 8]
+
+ simd_load_avx_15_1 xtmp0, tmp2, auth_len
+
+%%_finish_partial_block_copy:
+ vmovdqa [init_block_addr], xtmp0
+ mov [state + _aes_ccm_args_in + min_idx * 8], init_block_addr
+
+ jmp %%_ccm_round
+%endmacro
+
+
+align 64
+; JOB_AES_HMAC * submit_job_aes_ccm_auth_avx(MB_MGR_CCM_OOO *state, JOB_AES_HMAC *job)
+; arg 1 : state
+; arg 2 : job
+MKGLOBAL(SUBMIT_JOB_AES_CCM_AUTH,function,internal)
+SUBMIT_JOB_AES_CCM_AUTH:
+ GENERIC_SUBMIT_FLUSH_JOB_AES_CCM_AUTH_AVX SUBMIT
+
+; JOB_AES_HMAC * flush_job_aes_ccm_auth_avx(MB_MGR_CCM_OOO *state)
+; arg 1 : state
+MKGLOBAL(FLUSH_JOB_AES_CCM_AUTH,function,internal)
+FLUSH_JOB_AES_CCM_AUTH:
+ GENERIC_SUBMIT_FLUSH_JOB_AES_CCM_AUTH_AVX FLUSH
+
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/avx/mb_mgr_aes_cmac_submit_flush_avx.asm b/src/spdk/intel-ipsec-mb/avx/mb_mgr_aes_cmac_submit_flush_avx.asm
new file mode 100644
index 000000000..e17023004
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx/mb_mgr_aes_cmac_submit_flush_avx.asm
@@ -0,0 +1,518 @@
+;;
+;; Copyright (c) 2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+
+%include "include/os.asm"
+%include "job_aes_hmac.asm"
+%include "mb_mgr_datastruct.asm"
+
+%include "include/reg_sizes.asm"
+%include "include/memcpy.asm"
+%include "include/const.inc"
+;%define DO_DBGPRINT
+%include "include/dbgprint.asm"
+
+%define AES128_CBC_MAC aes128_cbc_mac_x8
+%define SUBMIT_JOB_AES_CMAC_AUTH submit_job_aes_cmac_auth_avx
+%define FLUSH_JOB_AES_CMAC_AUTH flush_job_aes_cmac_auth_avx
+
+extern AES128_CBC_MAC
+
+section .data
+default rel
+
+align 16
+len_masks:
+ ;ddq 0x0000000000000000000000000000FFFF
+ dq 0x000000000000FFFF, 0x0000000000000000
+ ;ddq 0x000000000000000000000000FFFF0000
+ dq 0x00000000FFFF0000, 0x0000000000000000
+ ;ddq 0x00000000000000000000FFFF00000000
+ dq 0x0000FFFF00000000, 0x0000000000000000
+ ;ddq 0x0000000000000000FFFF000000000000
+ dq 0xFFFF000000000000, 0x0000000000000000
+ ;ddq 0x000000000000FFFF0000000000000000
+ dq 0x0000000000000000, 0x000000000000FFFF
+ ;ddq 0x00000000FFFF00000000000000000000
+ dq 0x0000000000000000, 0x00000000FFFF0000
+ ;ddq 0x0000FFFF000000000000000000000000
+ dq 0x0000000000000000, 0x0000FFFF00000000
+ ;ddq 0xFFFF0000000000000000000000000000
+ dq 0x0000000000000000, 0xFFFF000000000000
+dupw:
+ ;ddq 0x01000100010001000100010001000100
+ dq 0x0100010001000100, 0x0100010001000100
+one: dq 1
+two: dq 2
+three: dq 3
+four: dq 4
+five: dq 5
+six: dq 6
+seven: dq 7
+
+section .text
+
+%define APPEND(a,b) a %+ b
+
+%ifdef LINUX
+%define arg1 rdi
+%define arg2 rsi
+%else
+%define arg1 rcx
+%define arg2 rdx
+%endif
+
+%define state arg1
+%define job arg2
+%define len2 arg2
+
+%define job_rax rax
+
+; idx needs to be in rbp
+%define len rbp
+%define idx rbp
+%define tmp rbp
+
+%define lane r8
+
+%define iv r9
+%define m_last r10
+%define n r11
+
+%define unused_lanes rbx
+%define r rbx
+
+%define tmp3 r12
+%define tmp4 r13
+%define tmp2 r14
+
+%define good_lane r15
+%define rbits r15
+
+; STACK_SPACE needs to be an odd multiple of 8
+; This routine and its callee clobbers all GPRs
+struc STACK
+_gpr_save: resq 8
+_rsp_save: resq 1
+endstruc
+
+;;; ===========================================================================
+;;; ===========================================================================
+;;; MACROS
+;;; ===========================================================================
+;;; ===========================================================================
+
+;;; ===========================================================================
+;;; AES CMAC job submit & flush
+;;; ===========================================================================
+;;; SUBMIT_FLUSH [in] - SUBMIT, FLUSH job selection
+%macro GENERIC_SUBMIT_FLUSH_JOB_AES_CMAC_AVX 1
+%define %%SUBMIT_FLUSH %1
+
+ mov rax, rsp
+ sub rsp, STACK_size
+ and rsp, -16
+
+ mov [rsp + _gpr_save + 8*0], rbx
+ mov [rsp + _gpr_save + 8*1], rbp
+ mov [rsp + _gpr_save + 8*2], r12
+ mov [rsp + _gpr_save + 8*3], r13
+ mov [rsp + _gpr_save + 8*4], r14
+ mov [rsp + _gpr_save + 8*5], r15
+%ifndef LINUX
+ mov [rsp + _gpr_save + 8*6], rsi
+ mov [rsp + _gpr_save + 8*7], rdi
+%endif
+ mov [rsp + _rsp_save], rax ; original SP
+
+ ;; Find free lane
+ mov unused_lanes, [state + _aes_cmac_unused_lanes]
+
+%ifidn %%SUBMIT_FLUSH, SUBMIT
+
+ mov lane, unused_lanes
+ and lane, 0xF
+ shr unused_lanes, 4
+ mov [state + _aes_cmac_unused_lanes], unused_lanes
+
+ ;; Copy job info into lane
+ mov [state + _aes_cmac_job_in_lane + lane*8], job
+ ;; Copy keys into lane args
+ mov tmp, [job + _key_expanded]
+ mov [state + _aes_cmac_args_keys + lane*8], tmp
+ mov tmp, lane
+ shl tmp, 4 ; lane*16
+
+ ;; Zero IV to store digest
+ vpxor xmm0, xmm0
+ vmovdqa [state + _aes_cmac_args_IV + tmp], xmm0
+
+ lea m_last, [state + _aes_cmac_scratch + tmp]
+
+ ;; calculate len
+ ;; convert bits to bytes (message length in bits for CMAC)
+ mov len, [job + _msg_len_to_hash_in_bits]
+ mov rbits, len
+ add len, 7 ; inc len if there are remainder bits
+ shr len, 3
+ and rbits, 7
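+ ;; e.g. 65-bit message: len = (65 + 7) >> 3 = 9 bytes, rbits = 1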
+
+ ;; Check number of blocks and for partial block
+ mov r, len ; set remainder
+ and r, 0xf
+
+ lea n, [len + 0xf] ; set num blocks
+ shr n, 4
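+ ;; e.g. len = 9: r = 9 (partial block), n = 1;
+ ;; len = 0 gives n = 0 and takes %%_lt_one_block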
+
+ jz %%_lt_one_block ; check one or more blocks?
+
+ ;; One or more blocks, potentially partial
+ mov word [state + _aes_cmac_init_done + lane*2], 0
+
+ mov tmp2, [job + _src]
+ add tmp2, [job + _hash_start_src_offset_in_bytes]
+ mov [state + _aes_cmac_args_in + lane*8], tmp2
+
+ ;; len = (n-1)*16
+ lea tmp2, [n - 1]
+ shl tmp2, 4
+ vmovdqa xmm0, [state + _aes_cmac_lens]
+ XVPINSRW xmm0, xmm1, tmp, lane, tmp2, scale_x16
+ vmovdqa [state + _aes_cmac_lens], xmm0
+
+ ;; check remainder bits
+ or rbits, rbits
+ jnz %%_not_complete_block_3gpp
+
+ ;; check if complete block
+ or r, r
+ jz %%_complete_block
+
+%%_not_complete_block:
+ ;; M_last = padding(M_n) XOR K2
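+ ;; (the table load below yields 16 bytes with 0x80 at offset r and
+ ;;  zeros above it; the first r bytes are then overwritten with
+ ;;  message data by memcpy_avx_16)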
+ lea tmp, [rel padding_0x80_tab16 + 16]
+ sub tmp, r
+ vmovdqu xmm0, [tmp]
+ vmovdqa [m_last], xmm0
+
+ mov tmp, [job + _src]
+ add tmp, [job + _hash_start_src_offset_in_bytes]
+ lea tmp3, [n - 1]
+ shl tmp3, 4
+ add tmp, tmp3
+
+ memcpy_avx_16 m_last, tmp, r, tmp4, iv
+
+ ;; XOR padded last block (M_last) with subkey K2
+ mov tmp3, [job + _skey2]
+ vmovdqa xmm1, [m_last]
+ vmovdqu xmm0, [tmp3]
+ vpxor xmm0, xmm1
+ vmovdqa [m_last], xmm0
+
+%%_step_5:
+ ;; Find min length
+ vmovdqa xmm0, [state + _aes_cmac_lens]
+ vphminposuw xmm1, xmm0
+
+ cmp byte [state + _aes_cmac_unused_lanes], 0xf
+ jne %%_return_null
+
+%else ; end SUBMIT
+
+ ;; Check at least one job
+ bt unused_lanes, 35
+ jc %%_return_null
+
+ ;; Find a lane with a non-null job
+ xor good_lane, good_lane
+ cmp qword [state + _aes_cmac_job_in_lane + 1*8], 0
+ cmovne good_lane, [rel one]
+ cmp qword [state + _aes_cmac_job_in_lane + 2*8], 0
+ cmovne good_lane, [rel two]
+ cmp qword [state + _aes_cmac_job_in_lane + 3*8], 0
+ cmovne good_lane, [rel three]
+ cmp qword [state + _aes_cmac_job_in_lane + 4*8], 0
+ cmovne good_lane, [rel four]
+ cmp qword [state + _aes_cmac_job_in_lane + 5*8], 0
+ cmovne good_lane, [rel five]
+ cmp qword [state + _aes_cmac_job_in_lane + 6*8], 0
+ cmovne good_lane, [rel six]
+ cmp qword [state + _aes_cmac_job_in_lane + 7*8], 0
+ cmovne good_lane, [rel seven]
+
+ ; Copy good_lane to empty lanes
+ mov tmp2, [state + _aes_cmac_args_in + good_lane*8]
+ mov tmp3, [state + _aes_cmac_args_keys + good_lane*8]
+ shl good_lane, 4 ; multiply by 16
+ vmovdqa xmm2, [state + _aes_cmac_args_IV + good_lane]
+ vmovdqa xmm0, [state + _aes_cmac_lens]
+
+%assign I 0
+%rep 8
+ cmp qword [state + _aes_cmac_job_in_lane + I*8], 0
+ jne APPEND(skip_,I)
+ mov [state + _aes_cmac_args_in + I*8], tmp2
+ mov [state + _aes_cmac_args_keys + I*8], tmp3
+ vmovdqa [state + _aes_cmac_args_IV + I*16], xmm2
+ vpor xmm0, [rel len_masks + 16*I]
+APPEND(skip_,I):
+%assign I (I+1)
+%endrep
+ ;; Find min length
+ vphminposuw xmm1, xmm0
+
+%endif ; end FLUSH
+
+%%_cmac_round:
+ vpextrw DWORD(len2), xmm1, 0 ; min value
+ vpextrw DWORD(idx), xmm1, 1 ; min index (0...7)
+ cmp len2, 0
+ je %%_len_is_0
+ vpshufb xmm1, xmm1, [rel dupw] ; duplicate words across all lanes
+ vpsubw xmm0, xmm1
+ vmovdqa [state + _aes_cmac_lens], xmm0
+
+ ; "state" and "args" are the same address, arg1
+ ; len2 is arg2
+ call AES128_CBC_MAC
+ ; state and idx are intact
+
+ vmovdqa xmm0, [state + _aes_cmac_lens] ; preload lens
+%%_len_is_0:
+ ; Check if job complete
+ test word [state + _aes_cmac_init_done + idx*2], 0xffff
+ jnz %%_copy_complete_digest
+
+ ; Finish step 6
+ mov word [state + _aes_cmac_init_done + idx*2], 1
+
+ XVPINSRW xmm0, xmm1, tmp3, idx, 16, scale_x16
+ vmovdqa [state + _aes_cmac_lens], xmm0
+
+ vphminposuw xmm1, xmm0 ; find min length
+
+ mov tmp3, idx
+ shl tmp3, 4 ; idx*16
+ lea m_last, [state + _aes_cmac_scratch + tmp3]
+ mov [state + _aes_cmac_args_in + idx*8], m_last
+
+ jmp %%_cmac_round
+
+%%_copy_complete_digest:
+ ; Job complete, copy digest to AT output
+ mov job_rax, [state + _aes_cmac_job_in_lane + idx*8]
+
+ mov tmp4, idx
+ shl tmp4, 4
+ lea tmp3, [state + _aes_cmac_args_IV + tmp4]
+ mov tmp4, [job_rax + _auth_tag_output_len_in_bytes]
+ mov tmp2, [job_rax + _auth_tag_output]
+
+ cmp tmp4, 16
+ jne %%_ne_16_copy
+
+ ;; 16 byte AT copy
+ vmovdqa xmm0, [tmp3]
+ vmovdqu [tmp2], xmm0
+ jmp %%_update_lanes
+
+%%_ne_16_copy:
+ memcpy_avx_16 tmp2, tmp3, tmp4, lane, iv
+
+%%_update_lanes:
+ ; Update unused lanes
+ mov unused_lanes, [state + _aes_cmac_unused_lanes]
+ shl unused_lanes, 4
+ or unused_lanes, idx
+ mov [state + _aes_cmac_unused_lanes], unused_lanes
+
+ ; Set return job
+ mov job_rax, [state + _aes_cmac_job_in_lane + idx*8]
+
+ mov qword [state + _aes_cmac_job_in_lane + idx*8], 0
+ or dword [job_rax + _status], STS_COMPLETED_HMAC
+
+%ifdef SAFE_DATA
+ vpxor xmm0, xmm0
+%ifidn %%SUBMIT_FLUSH, SUBMIT
+ ;; Clear digest (in memory for IV) and scratch memory of returned job
+ vmovdqa [tmp3], xmm0
+
+ shl idx, 4
+ vmovdqa [state + _aes_cmac_scratch + idx], xmm0
+
+%else
+ ;; Clear digest and scratch memory of returned job and "NULL lanes"
+%assign I 0
+%rep 8
+ cmp qword [state + _aes_cmac_job_in_lane + I*8], 0
+ jne APPEND(skip_clear_,I)
+ vmovdqa [state + _aes_cmac_args_IV + I*16], xmm0
+ vmovdqa [state + _aes_cmac_scratch + I*16], xmm0
+APPEND(skip_clear_,I):
+%assign I (I+1)
+%endrep
+%endif ;; SUBMIT
+
+%endif ;; SAFE_DATA
+
+%%_return:
+ mov rbx, [rsp + _gpr_save + 8*0]
+ mov rbp, [rsp + _gpr_save + 8*1]
+ mov r12, [rsp + _gpr_save + 8*2]
+ mov r13, [rsp + _gpr_save + 8*3]
+ mov r14, [rsp + _gpr_save + 8*4]
+ mov r15, [rsp + _gpr_save + 8*5]
+%ifndef LINUX
+ mov rsi, [rsp + _gpr_save + 8*6]
+ mov rdi, [rsp + _gpr_save + 8*7]
+%endif
+ mov rsp, [rsp + _rsp_save] ; original SP
+ ret
+
+%%_return_null:
+ xor job_rax, job_rax
+ jmp %%_return
+
+%ifidn %%SUBMIT_FLUSH, SUBMIT
+%%_complete_block:
+
+ ;; Block size aligned
+ mov tmp2, [job + _src]
+ add tmp2, [job + _hash_start_src_offset_in_bytes]
+ lea tmp3, [n - 1]
+ shl tmp3, 4
+ add tmp2, tmp3
+
+ ;; M_last = M_n XOR K1
+ mov tmp3, [job + _skey1]
+ vmovdqu xmm0, [tmp3]
+ vmovdqu xmm1, [tmp2]
+ vpxor xmm0, xmm1
+ vmovdqa [m_last], xmm0
+
+ jmp %%_step_5
+
+%%_lt_one_block:
+ ;; Single partial block
+ mov word [state + _aes_cmac_init_done + lane*2], 1
+ mov [state + _aes_cmac_args_in + lane*8], m_last
+
+ vmovdqa xmm0, [state + _aes_cmac_lens]
+ XVPINSRW xmm0, xmm1, tmp2, lane, 16, scale_x16
+ vmovdqa [state + _aes_cmac_lens], xmm0
+
+ mov n, 1
+ jmp %%_not_complete_block
+
+%%_not_complete_block_3gpp:
+ ;; bit pad last block
+ ;; xor with skey2
+ ;; copy to m_last
+
+ ;; load pointer to src
+ mov tmp, [job + _src]
+ add tmp, [job + _hash_start_src_offset_in_bytes]
+ lea tmp3, [n - 1]
+ shl tmp3, 4
+ add tmp, tmp3
+
+ ;; check if partial block
+ or r, r
+ jz %%_load_full_block_3gpp
+
+ simd_load_avx_15_1 xmm0, tmp, r
+ dec r
+
+%%_update_mlast_3gpp:
+ ;; set last byte padding mask
+ ;; shift into correct xmm idx
+
+ ;; save and restore rcx on windows
+%ifndef LINUX
+ mov tmp, rcx
+%endif
+ mov rcx, rbits
+ mov tmp3, 0xff
+ shr tmp3, cl
+ movq xmm2, tmp3
+ XVPSLLB xmm2, r, xmm1, tmp2
+
+ ;; pad final byte
+ vpandn xmm2, xmm0
+%ifndef LINUX
+ mov rcx, tmp
+%endif
+ ;; set OR mask to pad final bit
+ mov tmp2, tmp3
+ shr tmp2, 1
+ xor tmp2, tmp3 ; XOR to get OR mask
+ movq xmm3, tmp2
+ ;; xmm1 contains shift table from previous shift
+ vpshufb xmm3, xmm1
+
+ ;; load skey2 address
+ mov tmp3, [job + _skey2]
+ vmovdqu xmm1, [tmp3]
+
+ ;; set final padding bit
+ vpor xmm2, xmm3
+
+ ;; XOR last partial block with skey2
+ ;; update mlast
+ vpxor xmm2, xmm1
+ vmovdqa [m_last], xmm2
+
+ jmp %%_step_5
+
+%%_load_full_block_3gpp:
+ vmovdqu xmm0, [tmp]
+ mov r, 0xf
+ jmp %%_update_mlast_3gpp
+%endif
+%endmacro
+
+
+align 64
+; JOB_AES_HMAC * submit_job_aes_cmac_auth_avx(MB_MGR_CMAC_OOO *state, JOB_AES_HMAC *job)
+; arg 1 : state
+; arg 2 : job
+MKGLOBAL(SUBMIT_JOB_AES_CMAC_AUTH,function,internal)
+SUBMIT_JOB_AES_CMAC_AUTH:
+ GENERIC_SUBMIT_FLUSH_JOB_AES_CMAC_AVX SUBMIT
+
+; JOB_AES_HMAC * flush_job_aes_cmac_auth_avx(MB_MGR_CMAC_OOO *state)
+; arg 1 : state
+MKGLOBAL(FLUSH_JOB_AES_CMAC_AUTH,function,internal)
+FLUSH_JOB_AES_CMAC_AUTH:
+ GENERIC_SUBMIT_FLUSH_JOB_AES_CMAC_AVX FLUSH
+
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/avx/mb_mgr_aes_flush_avx.asm b/src/spdk/intel-ipsec-mb/avx/mb_mgr_aes_flush_avx.asm
new file mode 100644
index 000000000..dbd2a4547
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx/mb_mgr_aes_flush_avx.asm
@@ -0,0 +1,239 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%include "include/os.asm"
+%include "job_aes_hmac.asm"
+%include "mb_mgr_datastruct.asm"
+
+%include "include/reg_sizes.asm"
+
+%ifndef AES_CBC_ENC_X8
+%define AES_CBC_ENC_X8 aes_cbc_enc_128_x8
+%define FLUSH_JOB_AES_ENC flush_job_aes128_enc_avx
+%endif
+
+; void AES_CBC_ENC_X8(AES_ARGS *args, UINT64 len_in_bytes);
+extern AES_CBC_ENC_X8
+
+section .data
+default rel
+align 16
+len_masks:
+ ;ddq 0x0000000000000000000000000000FFFF
+ dq 0x000000000000FFFF, 0x0000000000000000
+ ;ddq 0x000000000000000000000000FFFF0000
+ dq 0x00000000FFFF0000, 0x0000000000000000
+ ;ddq 0x00000000000000000000FFFF00000000
+ dq 0x0000FFFF00000000, 0x0000000000000000
+ ;ddq 0x0000000000000000FFFF000000000000
+ dq 0xFFFF000000000000, 0x0000000000000000
+ ;ddq 0x000000000000FFFF0000000000000000
+ dq 0x0000000000000000, 0x000000000000FFFF
+ ;ddq 0x00000000FFFF00000000000000000000
+ dq 0x0000000000000000, 0x00000000FFFF0000
+ ;ddq 0x0000FFFF000000000000000000000000
+ dq 0x0000000000000000, 0x0000FFFF00000000
+ ;ddq 0xFFFF0000000000000000000000000000
+ dq 0x0000000000000000, 0xFFFF000000000000
+dupw:
+ ;ddq 0x01000100010001000100010001000100
+ dq 0x0100010001000100, 0x0100010001000100
+one: dq 1
+two: dq 2
+three: dq 3
+four: dq 4
+five: dq 5
+six: dq 6
+seven: dq 7
+
+section .text
+
+%define APPEND(a,b) a %+ b
+
+%ifdef LINUX
+%define arg1 rdi
+%define arg2 rsi
+%else
+%define arg1 rcx
+%define arg2 rdx
+%endif
+
+%define state arg1
+%define job arg2
+%define len2 arg2
+
+%define job_rax rax
+
+%if 1
+%define unused_lanes rbx
+%define tmp1 rbx
+
+%define good_lane rdx
+%define iv rdx
+
+%define tmp2 rax
+
+; idx needs to be in rbp
+%define tmp rbp
+%define idx rbp
+
+%define tmp3 r8
+%endif
+
+; STACK_SPACE needs to be an odd multiple of 8
+; This routine and its callee clobbers all GPRs
+struc STACK
+_gpr_save: resq 8
+_rsp_save: resq 1
+endstruc
+
+; JOB* FLUSH_JOB_AES_ENC(MB_MGR_AES_OOO *state, JOB_AES_HMAC *job)
+; arg 1 : state
+; arg 2 : job
+MKGLOBAL(FLUSH_JOB_AES_ENC,function,internal)
+FLUSH_JOB_AES_ENC:
+
+ mov rax, rsp
+ sub rsp, STACK_size
+ and rsp, -16
+
+ mov [rsp + _gpr_save + 8*0], rbx
+ mov [rsp + _gpr_save + 8*1], rbp
+ mov [rsp + _gpr_save + 8*2], r12
+ mov [rsp + _gpr_save + 8*3], r13
+ mov [rsp + _gpr_save + 8*4], r14
+ mov [rsp + _gpr_save + 8*5], r15
+%ifndef LINUX
+ mov [rsp + _gpr_save + 8*6], rsi
+ mov [rsp + _gpr_save + 8*7], rdi
+%endif
+ mov [rsp + _rsp_save], rax ; original SP
+
+ ; check for empty
+ mov unused_lanes, [state + _aes_unused_lanes]
+ bt unused_lanes, 32+3
+ jc return_null
+
+ ; find a lane with a non-null job
+ xor good_lane, good_lane
+ cmp qword [state + _aes_job_in_lane + 1*8], 0
+ cmovne good_lane, [rel one]
+ cmp qword [state + _aes_job_in_lane + 2*8], 0
+ cmovne good_lane, [rel two]
+ cmp qword [state + _aes_job_in_lane + 3*8], 0
+ cmovne good_lane, [rel three]
+ cmp qword [state + _aes_job_in_lane + 4*8], 0
+ cmovne good_lane, [rel four]
+ cmp qword [state + _aes_job_in_lane + 5*8], 0
+ cmovne good_lane, [rel five]
+ cmp qword [state + _aes_job_in_lane + 6*8], 0
+ cmovne good_lane, [rel six]
+ cmp qword [state + _aes_job_in_lane + 7*8], 0
+ cmovne good_lane, [rel seven]
+
+ ; copy good_lane to empty lanes
+ mov tmp1, [state + _aes_args_in + good_lane*8]
+ mov tmp2, [state + _aes_args_out + good_lane*8]
+ mov tmp3, [state + _aes_args_keys + good_lane*8]
+ shl good_lane, 4 ; multiply by 16
+ vmovdqa xmm2, [state + _aes_args_IV + good_lane]
+ vmovdqa xmm0, [state + _aes_lens]
+
+%assign I 0
+%rep 8
+ cmp qword [state + _aes_job_in_lane + I*8], 0
+ jne APPEND(skip_,I)
+ mov [state + _aes_args_in + I*8], tmp1
+ mov [state + _aes_args_out + I*8], tmp2
+ mov [state + _aes_args_keys + I*8], tmp3
+ vmovdqa [state + _aes_args_IV + I*16], xmm2
+ vpor xmm0, xmm0, [rel len_masks + 16*I]
+APPEND(skip_,I):
+%assign I (I+1)
+%endrep
+
+ ; Find min length
+ vphminposuw xmm1, xmm0
+ vpextrw DWORD(len2), xmm1, 0 ; min value
+ vpextrw DWORD(idx), xmm1, 1 ; min index (0...7)
+ cmp len2, 0
+ je len_is_0
+
+ vpshufb xmm1, xmm1, [rel dupw] ; duplicate words across all lanes
+ vpsubw xmm0, xmm0, xmm1
+ vmovdqa [state + _aes_lens], xmm0
+
+ ; "state" and "args" are the same address, arg1
+ ; len is arg2
+ call AES_CBC_ENC_X8
+ ; state and idx are intact
+
+len_is_0:
+ ; process completed job "idx"
+ mov job_rax, [state + _aes_job_in_lane + idx*8]
+ mov unused_lanes, [state + _aes_unused_lanes]
+ mov qword [state + _aes_job_in_lane + idx*8], 0
+ or dword [job_rax + _status], STS_COMPLETED_AES
+ shl unused_lanes, 4
+ or unused_lanes, idx
+ mov [state + _aes_unused_lanes], unused_lanes
+%ifdef SAFE_DATA
+ ;; Clear IVs of returned job and "NULL lanes"
+ vpxor xmm0, xmm0
+%assign I 0
+%rep 8
+ cmp qword [state + _aes_job_in_lane + I*8], 0
+ jne APPEND(skip_clear_,I)
+ vmovdqa [state + _aes_args_IV + I*16], xmm0
+APPEND(skip_clear_,I):
+%assign I (I+1)
+%endrep
+%endif
+
+return:
+
+ mov rbx, [rsp + _gpr_save + 8*0]
+ mov rbp, [rsp + _gpr_save + 8*1]
+ mov r12, [rsp + _gpr_save + 8*2]
+ mov r13, [rsp + _gpr_save + 8*3]
+ mov r14, [rsp + _gpr_save + 8*4]
+ mov r15, [rsp + _gpr_save + 8*5]
+%ifndef LINUX
+ mov rsi, [rsp + _gpr_save + 8*6]
+ mov rdi, [rsp + _gpr_save + 8*7]
+%endif
+ mov rsp, [rsp + _rsp_save] ; original SP
+
+ ret
+
+return_null:
+ xor job_rax, job_rax
+ jmp return
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/avx/mb_mgr_aes_submit_avx.asm b/src/spdk/intel-ipsec-mb/avx/mb_mgr_aes_submit_avx.asm
new file mode 100644
index 000000000..c95fa1f6c
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx/mb_mgr_aes_submit_avx.asm
@@ -0,0 +1,194 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%include "include/os.asm"
+%include "job_aes_hmac.asm"
+%include "mb_mgr_datastruct.asm"
+
+%include "include/reg_sizes.asm"
+%include "include/const.inc"
+
+%ifndef AES_CBC_ENC_X8
+%define AES_CBC_ENC_X8 aes_cbc_enc_128_x8
+%define SUBMIT_JOB_AES_ENC submit_job_aes128_enc_avx
+%endif
+
+; void AES_CBC_ENC_X8(AES_ARGS *args, UINT64 len_in_bytes);
+extern AES_CBC_ENC_X8
+
+section .data
+default rel
+
+align 16
+dupw:
+ ;ddq 0x01000100010001000100010001000100
+ dq 0x0100010001000100, 0x0100010001000100
+
+section .text
+
+%ifdef LINUX
+%define arg1 rdi
+%define arg2 rsi
+%else
+%define arg1 rcx
+%define arg2 rdx
+%endif
+
+%define state arg1
+%define job arg2
+%define len2 arg2
+
+%define job_rax rax
+
+%if 1
+; idx needs to be in rbp
+%define len rbp
+%define idx rbp
+%define tmp rbp
+
+%define lane r8
+
+%define iv r9
+
+%define unused_lanes rbx
+%endif
+
+; STACK_SPACE needs to be an odd multiple of 8
+; This routine and its callee clobbers all GPRs
+struc STACK
+_gpr_save: resq 8
+_rsp_save: resq 1
+endstruc
+
+; JOB* SUBMIT_JOB_AES_ENC(MB_MGR_AES_OOO *state, JOB_AES_HMAC *job)
+; arg 1 : state
+; arg 2 : job
+MKGLOBAL(SUBMIT_JOB_AES_ENC,function,internal)
+SUBMIT_JOB_AES_ENC:
+
+ mov rax, rsp
+ sub rsp, STACK_size
+ and rsp, -16
+
+ mov [rsp + _gpr_save + 8*0], rbx
+ mov [rsp + _gpr_save + 8*1], rbp
+ mov [rsp + _gpr_save + 8*2], r12
+ mov [rsp + _gpr_save + 8*3], r13
+ mov [rsp + _gpr_save + 8*4], r14
+ mov [rsp + _gpr_save + 8*5], r15
+%ifndef LINUX
+ mov [rsp + _gpr_save + 8*6], rsi
+ mov [rsp + _gpr_save + 8*7], rdi
+%endif
+ mov [rsp + _rsp_save], rax ; original SP
+
+ mov unused_lanes, [state + _aes_unused_lanes]
+ mov lane, unused_lanes
+ and lane, 0xF
+ shr unused_lanes, 4
+ mov len, [job + _msg_len_to_cipher_in_bytes]
+ and len, -16 ; DOCSIS may pass size unaligned to block size
+ mov iv, [job + _iv]
+ mov [state + _aes_unused_lanes], unused_lanes
+
+ mov [state + _aes_job_in_lane + lane*8], job
+
+ vmovdqa xmm0, [state + _aes_lens]
+ XVPINSRW xmm0, xmm1, tmp, lane, len, scale_x16
+ vmovdqa [state + _aes_lens], xmm0
+
+ mov tmp, [job + _src]
+ add tmp, [job + _cipher_start_src_offset_in_bytes]
+ vmovdqu xmm0, [iv]
+ mov [state + _aes_args_in + lane*8], tmp
+ mov tmp, [job + _aes_enc_key_expanded]
+ mov [state + _aes_args_keys + lane*8], tmp
+ mov tmp, [job + _dst]
+ mov [state + _aes_args_out + lane*8], tmp
+ shl lane, 4 ; multiply by 16
+ vmovdqa [state + _aes_args_IV + lane], xmm0
+
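+ ; process jobs only once all 8 lanes are in use
+ ; (unused_lanes == 0xF means only the terminator nibble remains)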
+ cmp unused_lanes, 0xf
+ jne return_null
+
+ ; Find min length
+ vmovdqa xmm0, [state + _aes_lens]
+ vphminposuw xmm1, xmm0
+ vpextrw DWORD(len2), xmm1, 0 ; min value
+ vpextrw DWORD(idx), xmm1, 1 ; min index (0...7)
+ cmp len2, 0
+ je len_is_0
+
+ vpshufb xmm1, xmm1, [rel dupw] ; duplicate words across all lanes
+ vpsubw xmm0, xmm0, xmm1
+ vmovdqa [state + _aes_lens], xmm0
+
+ ; "state" and "args" are the same address, arg1
+ ; len is arg2
+ call AES_CBC_ENC_X8
+ ; state and idx are intact
+
+len_is_0:
+ ; process completed job "idx"
+ mov job_rax, [state + _aes_job_in_lane + idx*8]
+ mov unused_lanes, [state + _aes_unused_lanes]
+ mov qword [state + _aes_job_in_lane + idx*8], 0
+ or dword [job_rax + _status], STS_COMPLETED_AES
+ shl unused_lanes, 4
+ or unused_lanes, idx
+ mov [state + _aes_unused_lanes], unused_lanes
+%ifdef SAFE_DATA
+ ;; Clear IV
+ vpxor xmm0, xmm0
+ shl idx, 3 ; multiply by 8
+ vmovdqa [state + _aes_args_IV + idx*2], xmm0
+ mov qword [state + _aes_args_keys + idx], 0
+%endif
+
+return:
+
+ mov rbx, [rsp + _gpr_save + 8*0]
+ mov rbp, [rsp + _gpr_save + 8*1]
+ mov r12, [rsp + _gpr_save + 8*2]
+ mov r13, [rsp + _gpr_save + 8*3]
+ mov r14, [rsp + _gpr_save + 8*4]
+ mov r15, [rsp + _gpr_save + 8*5]
+%ifndef LINUX
+ mov rsi, [rsp + _gpr_save + 8*6]
+ mov rdi, [rsp + _gpr_save + 8*7]
+%endif
+ mov rsp, [rsp + _rsp_save] ; original SP
+
+ ret
+
+return_null:
+ xor job_rax, job_rax
+ jmp return
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/avx/mb_mgr_aes_xcbc_flush_avx.asm b/src/spdk/intel-ipsec-mb/avx/mb_mgr_aes_xcbc_flush_avx.asm
new file mode 100644
index 000000000..a810842a9
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx/mb_mgr_aes_xcbc_flush_avx.asm
@@ -0,0 +1,264 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%include "include/os.asm"
+%include "job_aes_hmac.asm"
+%include "mb_mgr_datastruct.asm"
+
+%include "include/reg_sizes.asm"
+
+%ifndef AES_XCBC_X8
+%define AES_XCBC_X8 aes_xcbc_mac_128_x8
+%define FLUSH_JOB_AES_XCBC flush_job_aes_xcbc_avx
+%endif
+
+; void AES_XCBC_X8(AES_XCBC_ARGS_x8 *args, UINT64 len_in_bytes);
+extern AES_XCBC_X8
+
+section .data
+default rel
+
+align 16
+len_masks:
+ ;ddq 0x0000000000000000000000000000FFFF
+ dq 0x000000000000FFFF, 0x0000000000000000
+ ;ddq 0x000000000000000000000000FFFF0000
+ dq 0x00000000FFFF0000, 0x0000000000000000
+ ;ddq 0x00000000000000000000FFFF00000000
+ dq 0x0000FFFF00000000, 0x0000000000000000
+ ;ddq 0x0000000000000000FFFF000000000000
+ dq 0xFFFF000000000000, 0x0000000000000000
+ ;ddq 0x000000000000FFFF0000000000000000
+ dq 0x0000000000000000, 0x000000000000FFFF
+ ;ddq 0x00000000FFFF00000000000000000000
+ dq 0x0000000000000000, 0x00000000FFFF0000
+ ;ddq 0x0000FFFF000000000000000000000000
+ dq 0x0000000000000000, 0x0000FFFF00000000
+ ;ddq 0xFFFF0000000000000000000000000000
+ dq 0x0000000000000000, 0xFFFF000000000000
+dupw:
+ ;ddq 0x01000100010001000100010001000100
+ dq 0x0100010001000100, 0x0100010001000100
+one: dq 1
+two: dq 2
+three: dq 3
+four: dq 4
+five: dq 5
+six: dq 6
+seven: dq 7
+
+section .text
+
+%define APPEND(a,b) a %+ b
+
+%ifdef LINUX
+%define arg1 rdi
+%define arg2 rsi
+%else
+%define arg1 rcx
+%define arg2 rdx
+%endif
+
+%define state arg1
+%define job arg2
+%define len2 arg2
+
+%define job_rax rax
+
+%if 1
+%define unused_lanes rbx
+%define tmp1 rbx
+
+%define icv rdx
+
+%define tmp2 rax
+
+; idx needs to be in rbp
+%define tmp r10
+%define idx rbp
+
+%define tmp3 r8
+%define lane_data r9
+%endif
+
+; STACK_SPACE needs to be an odd multiple of 8
+; This routine and its callee clobbers all GPRs
+struc STACK
+_gpr_save: resq 8
+_rsp_save: resq 1
+endstruc
+
+; JOB* FLUSH_JOB_AES_XCBC(MB_MGR_AES_XCBC_OOO *state, JOB_AES_HMAC *job)
+; arg 1 : state
+; arg 2 : job
+MKGLOBAL(FLUSH_JOB_AES_XCBC,function,internal)
+FLUSH_JOB_AES_XCBC:
+
+ mov rax, rsp
+ sub rsp, STACK_size
+ and rsp, -16
+
+ mov [rsp + _gpr_save + 8*0], rbx
+ mov [rsp + _gpr_save + 8*1], rbp
+ mov [rsp + _gpr_save + 8*2], r12
+ mov [rsp + _gpr_save + 8*3], r13
+ mov [rsp + _gpr_save + 8*4], r14
+ mov [rsp + _gpr_save + 8*5], r15
+%ifndef LINUX
+ mov [rsp + _gpr_save + 8*6], rsi
+ mov [rsp + _gpr_save + 8*7], rdi
+%endif
+ mov [rsp + _rsp_save], rax ; original SP
+
+ ; check for empty
+ mov unused_lanes, [state + _aes_xcbc_unused_lanes]
+ bt unused_lanes, 32+3
+ jc return_null
+
+ ; find a lane with a non-null job
+ xor idx, idx
+ cmp qword [state + _aes_xcbc_ldata + 1 * _XCBC_LANE_DATA_size + _xcbc_job_in_lane], 0
+ cmovne idx, [rel one]
+ cmp qword [state + _aes_xcbc_ldata + 2 * _XCBC_LANE_DATA_size + _xcbc_job_in_lane], 0
+ cmovne idx, [rel two]
+ cmp qword [state + _aes_xcbc_ldata + 3 * _XCBC_LANE_DATA_size + _xcbc_job_in_lane], 0
+ cmovne idx, [rel three]
+ cmp qword [state + _aes_xcbc_ldata + 4 * _XCBC_LANE_DATA_size + _xcbc_job_in_lane], 0
+ cmovne idx, [rel four]
+ cmp qword [state + _aes_xcbc_ldata + 5 * _XCBC_LANE_DATA_size + _xcbc_job_in_lane], 0
+ cmovne idx, [rel five]
+ cmp qword [state + _aes_xcbc_ldata + 6 * _XCBC_LANE_DATA_size + _xcbc_job_in_lane], 0
+ cmovne idx, [rel six]
+ cmp qword [state + _aes_xcbc_ldata + 7 * _XCBC_LANE_DATA_size + _xcbc_job_in_lane], 0
+ cmovne idx, [rel seven]
+
+copy_lane_data:
+ ; copy idx to empty lanes
+ mov tmp1, [state + _aes_xcbc_args_in + idx*8]
+ mov tmp3, [state + _aes_xcbc_args_keys + idx*8]
+ shl idx, 4 ; multiply by 16
+ vmovdqa xmm2, [state + _aes_xcbc_args_ICV + idx]
+ vmovdqa xmm0, [state + _aes_xcbc_lens]
+
+%assign I 0
+%rep 8
+ cmp qword [state + _aes_xcbc_ldata + I * _XCBC_LANE_DATA_size + _xcbc_job_in_lane], 0
+ jne APPEND(skip_,I)
+ mov [state + _aes_xcbc_args_in + I*8], tmp1
+ mov [state + _aes_xcbc_args_keys + I*8], tmp3
+ vmovdqa [state + _aes_xcbc_args_ICV + I*16], xmm2
+ vpor xmm0, xmm0, [rel len_masks + 16*I]
+APPEND(skip_,I):
+%assign I (I+1)
+%endrep
+
+ vmovdqa [state + _aes_xcbc_lens], xmm0
+
+ ; Find min length
+ vphminposuw xmm1, xmm0
+ vpextrw DWORD(len2), xmm1, 0 ; min value
+	vpextrw	DWORD(idx), xmm1, 1	; min index (0...7)
+ cmp len2, 0
+ je len_is_0
+
+ vpshufb xmm1, xmm1, [rel dupw] ; duplicate words across all lanes
+ vpsubw xmm0, xmm0, xmm1
+ vmovdqa [state + _aes_xcbc_lens], xmm0
+
+ ; "state" and "args" are the same address, arg1
+ ; len is arg2
+ call AES_XCBC_X8
+ ; state and idx are intact
+
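+	; A lane reaching length 0 has consumed the message body.  The first
+	; time through, its pre-built final block (last block XORed with
+	; K2/K3 by the submit path) is queued as one extra 16 byte pass;
+	; _xcbc_final_done then marks the lane as complete on the next pass.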
+len_is_0:
+ ; process completed job "idx"
+ imul lane_data, idx, _XCBC_LANE_DATA_size
+ lea lane_data, [state + _aes_xcbc_ldata + lane_data]
+ cmp dword [lane_data + _xcbc_final_done], 0
+ jne end_loop
+
+ mov dword [lane_data + _xcbc_final_done], 1
+ mov word [state + _aes_xcbc_lens + 2*idx], 16
+ lea tmp, [lane_data + _xcbc_final_block]
+ mov [state + _aes_xcbc_args_in + 8*idx], tmp
+ jmp copy_lane_data
+
+end_loop:
+ mov job_rax, [lane_data + _xcbc_job_in_lane]
+ mov icv, [job_rax + _auth_tag_output]
+ mov unused_lanes, [state + _aes_xcbc_unused_lanes]
+ mov qword [lane_data + _xcbc_job_in_lane], 0
+ or dword [job_rax + _status], STS_COMPLETED_HMAC
+ shl unused_lanes, 4
+ or unused_lanes, idx
+ shl idx, 4 ; multiply by 16
+ mov [state + _aes_xcbc_unused_lanes], unused_lanes
+
+ ; copy 12 bytes
+ vmovdqa xmm0, [state + _aes_xcbc_args_ICV + idx]
+ vmovq [icv], xmm0
+ vpextrd [icv + 8], xmm0, 2
+
+%ifdef SAFE_DATA
+ vpxor xmm0, xmm0
+	;; Clear ICVs and final blocks of returned job and NULL lanes
+%assign I 0
+%rep 8
+ cmp qword [state + _aes_xcbc_ldata + I * _XCBC_LANE_DATA_size + _xcbc_job_in_lane], 0
+ jne APPEND(skip_clear_,I)
+ vmovdqa [state + _aes_xcbc_args_ICV + I*16], xmm0
+ lea lane_data, [state + _aes_xcbc_ldata + (I * _XCBC_LANE_DATA_size)]
+ vmovdqa [lane_data + _xcbc_final_block], xmm0
+ vmovdqa [lane_data + _xcbc_final_block + 16], xmm0
+APPEND(skip_clear_,I):
+%assign I (I+1)
+%endrep
+%endif
+
+return:
+
+ mov rbx, [rsp + _gpr_save + 8*0]
+ mov rbp, [rsp + _gpr_save + 8*1]
+ mov r12, [rsp + _gpr_save + 8*2]
+ mov r13, [rsp + _gpr_save + 8*3]
+ mov r14, [rsp + _gpr_save + 8*4]
+ mov r15, [rsp + _gpr_save + 8*5]
+%ifndef LINUX
+ mov rsi, [rsp + _gpr_save + 8*6]
+ mov rdi, [rsp + _gpr_save + 8*7]
+%endif
+ mov rsp, [rsp + _rsp_save] ; original SP
+
+ ret
+
+return_null:
+ xor job_rax, job_rax
+ jmp return
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/avx/mb_mgr_aes_xcbc_submit_avx.asm b/src/spdk/intel-ipsec-mb/avx/mb_mgr_aes_xcbc_submit_avx.asm
new file mode 100644
index 000000000..38f6a6470
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx/mb_mgr_aes_xcbc_submit_avx.asm
@@ -0,0 +1,272 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%include "include/os.asm"
+%include "job_aes_hmac.asm"
+%include "mb_mgr_datastruct.asm"
+
+%include "include/reg_sizes.asm"
+
+%include "include/memcpy.asm"
+%include "include/const.inc"
+
+%ifndef AES_XCBC_X8
+%define AES_XCBC_X8 aes_xcbc_mac_128_x8
+%define SUBMIT_JOB_AES_XCBC submit_job_aes_xcbc_avx
+%endif
+
+; void AES_XCBC_X8(AES_XCBC_ARGS_x8 *args, UINT64 len_in_bytes);
+extern AES_XCBC_X8
+
+
+section .data
+default rel
+
+align 16
+dupw: ;ddq 0x01000100010001000100010001000100
+ dq 0x0100010001000100, 0x0100010001000100
+x80: ;ddq 0x00000000000000000000000000000080
+ dq 0x0000000000000080, 0x0000000000000000
+
+section .text
+
+%ifdef LINUX
+%define arg1 rdi
+%define arg2 rsi
+%else
+%define arg1 rcx
+%define arg2 rdx
+%endif
+
+%define state arg1
+%define job arg2
+%define len2 arg2
+
+%define job_rax rax
+
+%if 1
+; idx needs to be in rbp
+%define len r11
+%define idx rbp
+%define tmp2 rbp
+%define tmp r14
+
+%define lane r8
+%define icv r9
+%define p2 r9
+
+%define last_len r10
+
+%define lane_data r12
+%define p r13
+
+%define unused_lanes rbx
+%endif
+
+; STACK_SPACE needs to be an odd multiple of 8
+; This routine and its callee clobber all GPRs
+struc STACK
+_gpr_save: resq 8
+_rsp_save: resq 1
+endstruc
+
+; JOB* SUBMIT_JOB_AES_XCBC(MB_MGR_AES_XCBC_OOO *state, JOB_AES_HMAC *job)
+; arg 1 : state
+; arg 2 : job
+MKGLOBAL(SUBMIT_JOB_AES_XCBC,function,internal)
+SUBMIT_JOB_AES_XCBC:
+
+ mov rax, rsp
+ sub rsp, STACK_size
+ and rsp, -16
+
+ mov [rsp + _gpr_save + 8*0], rbx
+ mov [rsp + _gpr_save + 8*1], rbp
+ mov [rsp + _gpr_save + 8*2], r12
+ mov [rsp + _gpr_save + 8*3], r13
+ mov [rsp + _gpr_save + 8*4], r14
+ mov [rsp + _gpr_save + 8*5], r15
+%ifndef LINUX
+ mov [rsp + _gpr_save + 8*6], rsi
+ mov [rsp + _gpr_save + 8*7], rdi
+%endif
+ mov [rsp + _rsp_save], rax ; original SP
+
+ mov unused_lanes, [state + _aes_xcbc_unused_lanes]
+ mov lane, unused_lanes
+ and lane, 0xF
+ shr unused_lanes, 4
+ imul lane_data, lane, _XCBC_LANE_DATA_size
+ lea lane_data, [state + _aes_xcbc_ldata + lane_data]
+ mov len, [job + _msg_len_to_hash_in_bytes]
+ mov [state + _aes_xcbc_unused_lanes], unused_lanes
+ mov [lane_data + _xcbc_job_in_lane], job
+ mov dword [lane_data + _xcbc_final_done], 0
+ mov tmp, [job + _k1_expanded]
+ mov [state + _aes_xcbc_args_keys + lane*8], tmp
+ mov p, [job + _src]
+ add p, [job + _hash_start_src_offset_in_bytes]
+
+ mov last_len, len
+
+ cmp len, 16
+ jle small_buffer
+
+ mov [state + _aes_xcbc_args_in + lane*8], p
+	add	p, len		; set pointer to end of data
+
+ and last_len, 15 ; Check lsbs of msg len
+ jnz slow_copy ; if not 16B mult, do slow copy
+
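+	; Final block handling per the AES-XCBC-MAC construction:
+	; - message length a non-zero multiple of 16 bytes: the last block
+	;   is XORed with K2 (fast_copy)
+	; - any other length: the partial last block is padded with 0x80 and
+	;   zeros and XORed with K3 (slow_copy)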
+fast_copy:
+ vmovdqu xmm0, [p - 16] ; load last block M[n]
+ mov tmp, [job + _k2] ; load K2 address
+ vmovdqu xmm1, [tmp] ; load K2
+ vpxor xmm0, xmm0, xmm1 ; M[n] XOR K2
+ vmovdqa [lane_data + _xcbc_final_block], xmm0
+ sub len, 16 ; take last block off length
+end_fast_copy:
+ vpxor xmm0, xmm0, xmm0
+ shl lane, 4 ; multiply by 16
+ vmovdqa [state + _aes_xcbc_args_ICV + lane], xmm0
+
+ vmovdqa xmm0, [state + _aes_xcbc_lens]
+ XVPINSRW xmm0, xmm1, tmp, lane, len, no_scale
+ vmovdqa [state + _aes_xcbc_lens], xmm0
+
+ cmp unused_lanes, 0xf
+ jne return_null
+
+start_loop:
+ ; Find min length
+ vphminposuw xmm1, xmm0
+ vpextrw DWORD(len2), xmm1, 0 ; min value
+ vpextrw DWORD(idx), xmm1, 1 ; min index (0...7)
+ cmp len2, 0
+ je len_is_0
+
+ vpshufb xmm1, xmm1, [rel dupw] ; duplicate words across all lanes
+ vpsubw xmm0, xmm0, xmm1
+ vmovdqa [state + _aes_xcbc_lens], xmm0
+
+ ; "state" and "args" are the same address, arg1
+ ; len is arg2
+ call AES_XCBC_X8
+ ; state and idx are intact
+
+len_is_0:
+ ; process completed job "idx"
+ imul lane_data, idx, _XCBC_LANE_DATA_size
+ lea lane_data, [state + _aes_xcbc_ldata + lane_data]
+ cmp dword [lane_data + _xcbc_final_done], 0
+ jne end_loop
+
+ mov dword [lane_data + _xcbc_final_done], 1
+
+ vmovdqa xmm0, [state + _aes_xcbc_lens]
+ XVPINSRW xmm0, xmm1, tmp, idx, 16, scale_x16
+ vmovdqa [state + _aes_xcbc_lens], xmm0
+
+ lea tmp, [lane_data + _xcbc_final_block]
+ mov [state + _aes_xcbc_args_in + 8*idx], tmp
+ jmp start_loop
+
+end_loop:
+ ; process completed job "idx"
+ mov job_rax, [lane_data + _xcbc_job_in_lane]
+ mov icv, [job_rax + _auth_tag_output]
+ mov unused_lanes, [state + _aes_xcbc_unused_lanes]
+ mov qword [lane_data + _xcbc_job_in_lane], 0
+ or dword [job_rax + _status], STS_COMPLETED_HMAC
+ shl unused_lanes, 4
+ or unused_lanes, idx
+ shl idx, 4 ; multiply by 16
+ mov [state + _aes_xcbc_unused_lanes], unused_lanes
+
+ ; copy 12 bytes
+ vmovdqa xmm0, [state + _aes_xcbc_args_ICV + idx]
+ vmovq [icv], xmm0
+ vpextrd [icv + 8], xmm0, 2
+
+%ifdef SAFE_DATA
+ ;; Clear ICV
+ vpxor xmm0, xmm0
+ vmovdqa [state + _aes_xcbc_args_ICV + idx], xmm0
+
+ ;; Clear final block (32 bytes)
+ vmovdqa [lane_data + _xcbc_final_block], xmm0
+ vmovdqa [lane_data + _xcbc_final_block + 16], xmm0
+%endif
+
+return:
+
+ mov rbx, [rsp + _gpr_save + 8*0]
+ mov rbp, [rsp + _gpr_save + 8*1]
+ mov r12, [rsp + _gpr_save + 8*2]
+ mov r13, [rsp + _gpr_save + 8*3]
+ mov r14, [rsp + _gpr_save + 8*4]
+ mov r15, [rsp + _gpr_save + 8*5]
+%ifndef LINUX
+ mov rsi, [rsp + _gpr_save + 8*6]
+ mov rdi, [rsp + _gpr_save + 8*7]
+%endif
+ mov rsp, [rsp + _rsp_save] ; original SP
+
+ ret
+
+small_buffer:
+	; For buffers <= 16 bytes
+	; the input pointer is set to the final block
+ lea tmp, [lane_data + _xcbc_final_block] ; final block
+ mov [state + _aes_xcbc_args_in + lane*8], tmp
+	add	p, len		; set pointer to end of data
+ cmp len, 16
+ je fast_copy
+
+slow_copy:
+ and len, ~15 ; take final block off len
+ sub p, last_len ; adjust data pointer
+ lea p2, [lane_data + _xcbc_final_block + 16] ; upper part of final
+ sub p2, last_len ; adjust data pointer backwards
+ memcpy_avx_16_1 p2, p, last_len, tmp, tmp2
+ vmovdqa xmm0, [rel x80] ; fill reg with padding
+ vmovdqu [lane_data + _xcbc_final_block + 16], xmm0 ; add padding
+ vmovdqu xmm0, [p2] ; load final block to process
+ mov tmp, [job + _k3] ; load K3 address
+ vmovdqu xmm1, [tmp] ; load K3
+ vpxor xmm0, xmm0, xmm1 ; M[n] XOR K3
+ vmovdqu [lane_data + _xcbc_final_block], xmm0 ; write final block
+ jmp end_fast_copy
+
+return_null:
+ xor job_rax, job_rax
+ jmp return
+
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/avx/mb_mgr_avx.c b/src/spdk/intel-ipsec-mb/avx/mb_mgr_avx.c
new file mode 100644
index 000000000..29cf2a308
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx/mb_mgr_avx.c
@@ -0,0 +1,733 @@
+/*******************************************************************************
+ Copyright (c) 2012-2018, Intel Corporation
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*******************************************************************************/
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#define CLEAR_SCRATCH_SIMD_REGS clear_scratch_xmms_avx
+
+#include "intel-ipsec-mb.h"
+#include "include/kasumi_internal.h"
+#include "include/zuc_internal.h"
+#include "include/snow3g.h"
+
+#include "save_xmms.h"
+#include "asm.h"
+#include "des.h"
+#include "cpu_feature.h"
+#include "noaesni.h"
+
+JOB_AES_HMAC *submit_job_aes128_enc_avx(MB_MGR_AES_OOO *state,
+ JOB_AES_HMAC *job);
+JOB_AES_HMAC *flush_job_aes128_enc_avx(MB_MGR_AES_OOO *state);
+
+JOB_AES_HMAC *submit_job_aes192_enc_avx(MB_MGR_AES_OOO *state,
+ JOB_AES_HMAC *job);
+JOB_AES_HMAC *flush_job_aes192_enc_avx(MB_MGR_AES_OOO *state);
+
+JOB_AES_HMAC *submit_job_aes256_enc_avx(MB_MGR_AES_OOO *state,
+ JOB_AES_HMAC *job);
+JOB_AES_HMAC *flush_job_aes256_enc_avx(MB_MGR_AES_OOO *state);
+
+JOB_AES_HMAC *submit_job_aes_xcbc_avx(MB_MGR_AES_XCBC_OOO *state,
+ JOB_AES_HMAC *job);
+JOB_AES_HMAC *flush_job_aes_xcbc_avx(MB_MGR_AES_XCBC_OOO *state);
+
+JOB_AES_HMAC *submit_job_aes_cntr_avx(JOB_AES_HMAC *job);
+
+JOB_AES_HMAC *submit_job_aes_cntr_bit_avx(JOB_AES_HMAC *job);
+
+#define SAVE_XMMS save_xmms_avx
+#define RESTORE_XMMS restore_xmms_avx
+
+#define SUBMIT_JOB_AES128_ENC submit_job_aes128_enc_avx
+#define SUBMIT_JOB_AES128_DEC submit_job_aes128_dec_avx
+#define FLUSH_JOB_AES128_ENC flush_job_aes128_enc_avx
+#define SUBMIT_JOB_AES192_ENC submit_job_aes192_enc_avx
+#define SUBMIT_JOB_AES192_DEC submit_job_aes192_dec_avx
+#define FLUSH_JOB_AES192_ENC flush_job_aes192_enc_avx
+#define SUBMIT_JOB_AES256_ENC submit_job_aes256_enc_avx
+#define SUBMIT_JOB_AES256_DEC submit_job_aes256_dec_avx
+#define FLUSH_JOB_AES256_ENC flush_job_aes256_enc_avx
+#define SUBMIT_JOB_AES_ECB_128_ENC submit_job_aes_ecb_128_enc_avx
+#define SUBMIT_JOB_AES_ECB_128_DEC submit_job_aes_ecb_128_dec_avx
+#define SUBMIT_JOB_AES_ECB_192_ENC submit_job_aes_ecb_192_enc_avx
+#define SUBMIT_JOB_AES_ECB_192_DEC submit_job_aes_ecb_192_dec_avx
+#define SUBMIT_JOB_AES_ECB_256_ENC submit_job_aes_ecb_256_enc_avx
+#define SUBMIT_JOB_AES_ECB_256_DEC submit_job_aes_ecb_256_dec_avx
+
+#define SUBMIT_JOB_AES_CNTR submit_job_aes_cntr_avx
+#define SUBMIT_JOB_AES_CNTR_BIT submit_job_aes_cntr_bit_avx
+
+#define AES_CBC_DEC_128 aes_cbc_dec_128_avx
+#define AES_CBC_DEC_192 aes_cbc_dec_192_avx
+#define AES_CBC_DEC_256 aes_cbc_dec_256_avx
+
+#define AES_CNTR_128 aes_cntr_128_avx
+#define AES_CNTR_192 aes_cntr_192_avx
+#define AES_CNTR_256 aes_cntr_256_avx
+
+#define AES_CNTR_CCM_128 aes_cntr_ccm_128_avx
+
+#define AES_ECB_ENC_128 aes_ecb_enc_128_avx
+#define AES_ECB_ENC_192 aes_ecb_enc_192_avx
+#define AES_ECB_ENC_256 aes_ecb_enc_256_avx
+#define AES_ECB_DEC_128 aes_ecb_dec_128_avx
+#define AES_ECB_DEC_192 aes_ecb_dec_192_avx
+#define AES_ECB_DEC_256 aes_ecb_dec_256_avx
+
+#define SUBMIT_JOB_PON_ENC submit_job_pon_enc_avx
+#define SUBMIT_JOB_PON_DEC submit_job_pon_dec_avx
+#define SUBMIT_JOB_PON_ENC_NO_CTR submit_job_pon_enc_no_ctr_avx
+#define SUBMIT_JOB_PON_DEC_NO_CTR submit_job_pon_dec_no_ctr_avx
+
+#ifndef NO_GCM
+#define AES_GCM_DEC_128 aes_gcm_dec_128_avx_gen2
+#define AES_GCM_ENC_128 aes_gcm_enc_128_avx_gen2
+#define AES_GCM_DEC_192 aes_gcm_dec_192_avx_gen2
+#define AES_GCM_ENC_192 aes_gcm_enc_192_avx_gen2
+#define AES_GCM_DEC_256 aes_gcm_dec_256_avx_gen2
+#define AES_GCM_ENC_256 aes_gcm_enc_256_avx_gen2
+
+#define SUBMIT_JOB_AES_GCM_DEC submit_job_aes_gcm_dec_avx
+#define FLUSH_JOB_AES_GCM_DEC flush_job_aes_gcm_dec_avx
+#define SUBMIT_JOB_AES_GCM_ENC submit_job_aes_gcm_enc_avx
+#define FLUSH_JOB_AES_GCM_ENC flush_job_aes_gcm_enc_avx
+#endif
+
+#define SUBMIT_JOB_AES_XCBC submit_job_aes_xcbc_avx
+#define FLUSH_JOB_AES_XCBC flush_job_aes_xcbc_avx
+
+#define SUBMIT_JOB_AES128_DEC submit_job_aes128_dec_avx
+#define SUBMIT_JOB_AES192_DEC submit_job_aes192_dec_avx
+#define SUBMIT_JOB_AES256_DEC submit_job_aes256_dec_avx
+#define QUEUE_SIZE queue_size_avx
+
+#define SUBMIT_JOB_AES_ENC SUBMIT_JOB_AES_ENC_AVX
+#define FLUSH_JOB_AES_ENC FLUSH_JOB_AES_ENC_AVX
+#define SUBMIT_JOB_AES_DEC SUBMIT_JOB_AES_DEC_AVX
+#define FLUSH_JOB_AES_DEC FLUSH_JOB_AES_DEC_AVX
+
+
+
+JOB_AES_HMAC *submit_job_hmac_avx(MB_MGR_HMAC_SHA_1_OOO *state,
+ JOB_AES_HMAC *job);
+JOB_AES_HMAC *flush_job_hmac_avx(MB_MGR_HMAC_SHA_1_OOO *state);
+
+JOB_AES_HMAC *submit_job_hmac_sha_224_avx(MB_MGR_HMAC_SHA_256_OOO *state,
+ JOB_AES_HMAC *job);
+JOB_AES_HMAC *flush_job_hmac_sha_224_avx(MB_MGR_HMAC_SHA_256_OOO *state);
+
+JOB_AES_HMAC *submit_job_hmac_sha_256_avx(MB_MGR_HMAC_SHA_256_OOO *state,
+ JOB_AES_HMAC *job);
+JOB_AES_HMAC *flush_job_hmac_sha_256_avx(MB_MGR_HMAC_SHA_256_OOO *state);
+
+JOB_AES_HMAC *submit_job_hmac_sha_384_avx(MB_MGR_HMAC_SHA_512_OOO *state,
+ JOB_AES_HMAC *job);
+JOB_AES_HMAC *flush_job_hmac_sha_384_avx(MB_MGR_HMAC_SHA_512_OOO *state);
+
+JOB_AES_HMAC *submit_job_hmac_sha_512_avx(MB_MGR_HMAC_SHA_512_OOO *state,
+ JOB_AES_HMAC *job);
+JOB_AES_HMAC *flush_job_hmac_sha_512_avx(MB_MGR_HMAC_SHA_512_OOO *state);
+
+JOB_AES_HMAC *submit_job_hmac_md5_avx(MB_MGR_HMAC_MD5_OOO *state,
+ JOB_AES_HMAC *job);
+JOB_AES_HMAC *flush_job_hmac_md5_avx(MB_MGR_HMAC_MD5_OOO *state);
+
+JOB_AES_HMAC *submit_job_aes_cmac_auth_avx(MB_MGR_CMAC_OOO *state,
+ JOB_AES_HMAC *job);
+
+JOB_AES_HMAC *flush_job_aes_cmac_auth_avx(MB_MGR_CMAC_OOO *state);
+
+JOB_AES_HMAC *submit_job_aes_ccm_auth_avx(MB_MGR_CCM_OOO *state,
+ JOB_AES_HMAC *job);
+
+JOB_AES_HMAC *flush_job_aes_ccm_auth_avx(MB_MGR_CCM_OOO *state);
+
+#define SUBMIT_JOB_HMAC submit_job_hmac_avx
+#define FLUSH_JOB_HMAC flush_job_hmac_avx
+#define SUBMIT_JOB_HMAC_SHA_224 submit_job_hmac_sha_224_avx
+#define FLUSH_JOB_HMAC_SHA_224 flush_job_hmac_sha_224_avx
+#define SUBMIT_JOB_HMAC_SHA_256 submit_job_hmac_sha_256_avx
+#define FLUSH_JOB_HMAC_SHA_256 flush_job_hmac_sha_256_avx
+#define SUBMIT_JOB_HMAC_SHA_384 submit_job_hmac_sha_384_avx
+#define FLUSH_JOB_HMAC_SHA_384 flush_job_hmac_sha_384_avx
+#define SUBMIT_JOB_HMAC_SHA_512 submit_job_hmac_sha_512_avx
+#define FLUSH_JOB_HMAC_SHA_512 flush_job_hmac_sha_512_avx
+#define SUBMIT_JOB_HMAC_MD5 submit_job_hmac_md5_avx
+#define FLUSH_JOB_HMAC_MD5 flush_job_hmac_md5_avx
+
+/* ====================================================================== */
+
+#define SUBMIT_JOB submit_job_avx
+#define FLUSH_JOB flush_job_avx
+#define SUBMIT_JOB_NOCHECK submit_job_nocheck_avx
+#define GET_NEXT_JOB get_next_job_avx
+#define GET_COMPLETED_JOB get_completed_job_avx
+
+/* ====================================================================== */
+
+
+#define SUBMIT_JOB_HASH SUBMIT_JOB_HASH_AVX
+#define FLUSH_JOB_HASH FLUSH_JOB_HASH_AVX
+
+/* ====================================================================== */
+
+#define AES_CFB_128_ONE aes_cfb_128_one_avx
+
+void aes128_cbc_mac_x8(AES_ARGS *args, uint64_t len);
+
+#define AES128_CBC_MAC aes128_cbc_mac_x8
+
+#define FLUSH_JOB_AES_CCM_AUTH flush_job_aes_ccm_auth_avx
+#define SUBMIT_JOB_AES_CCM_AUTH submit_job_aes_ccm_auth_avx
+
+#define FLUSH_JOB_AES_CMAC_AUTH flush_job_aes_cmac_auth_avx
+#define SUBMIT_JOB_AES_CMAC_AUTH submit_job_aes_cmac_auth_avx
+
+/* ====================================================================== */
+
+/*
+ * GCM submit / flush API for AVX arch.
+ * GCM jobs complete synchronously at submit time (there is no out-of-order
+ * manager for GCM), so the flush handlers below simply return NULL.
+ */
+#ifndef NO_GCM
+static JOB_AES_HMAC *
+submit_job_aes_gcm_dec_avx(MB_MGR *state, JOB_AES_HMAC *job)
+{
+ DECLARE_ALIGNED(struct gcm_context_data ctx, 16);
+ (void) state;
+
+ if (16 == job->aes_key_len_in_bytes)
+ AES_GCM_DEC_128(job->aes_dec_key_expanded, &ctx, job->dst,
+ job->src +
+ job->cipher_start_src_offset_in_bytes,
+ job->msg_len_to_cipher_in_bytes,
+ job->iv,
+ job->u.GCM.aad, job->u.GCM.aad_len_in_bytes,
+ job->auth_tag_output,
+ job->auth_tag_output_len_in_bytes);
+ else if (24 == job->aes_key_len_in_bytes)
+ AES_GCM_DEC_192(job->aes_dec_key_expanded, &ctx, job->dst,
+ job->src +
+ job->cipher_start_src_offset_in_bytes,
+ job->msg_len_to_cipher_in_bytes,
+ job->iv,
+ job->u.GCM.aad, job->u.GCM.aad_len_in_bytes,
+ job->auth_tag_output,
+ job->auth_tag_output_len_in_bytes);
+ else /* assume 32 bytes */
+ AES_GCM_DEC_256(job->aes_dec_key_expanded, &ctx, job->dst,
+ job->src +
+ job->cipher_start_src_offset_in_bytes,
+ job->msg_len_to_cipher_in_bytes,
+ job->iv,
+ job->u.GCM.aad, job->u.GCM.aad_len_in_bytes,
+ job->auth_tag_output,
+ job->auth_tag_output_len_in_bytes);
+
+ job->status = STS_COMPLETED;
+ return job;
+}
+
+static JOB_AES_HMAC *
+flush_job_aes_gcm_dec_avx(MB_MGR *state, JOB_AES_HMAC *job)
+{
+ (void) state;
+ (void) job;
+ return NULL;
+}
+
+static JOB_AES_HMAC *
+submit_job_aes_gcm_enc_avx(MB_MGR *state, JOB_AES_HMAC *job)
+{
+ DECLARE_ALIGNED(struct gcm_context_data ctx, 16);
+ (void) state;
+
+ if (16 == job->aes_key_len_in_bytes)
+ AES_GCM_ENC_128(job->aes_enc_key_expanded, &ctx, job->dst,
+ job->src +
+ job->cipher_start_src_offset_in_bytes,
+ job->msg_len_to_cipher_in_bytes, job->iv,
+ job->u.GCM.aad, job->u.GCM.aad_len_in_bytes,
+ job->auth_tag_output,
+ job->auth_tag_output_len_in_bytes);
+ else if (24 == job->aes_key_len_in_bytes)
+ AES_GCM_ENC_192(job->aes_enc_key_expanded, &ctx, job->dst,
+ job->src +
+ job->cipher_start_src_offset_in_bytes,
+ job->msg_len_to_cipher_in_bytes, job->iv,
+ job->u.GCM.aad, job->u.GCM.aad_len_in_bytes,
+ job->auth_tag_output,
+ job->auth_tag_output_len_in_bytes);
+ else /* assume 32 bytes */
+ AES_GCM_ENC_256(job->aes_enc_key_expanded, &ctx, job->dst,
+ job->src +
+ job->cipher_start_src_offset_in_bytes,
+ job->msg_len_to_cipher_in_bytes, job->iv,
+ job->u.GCM.aad, job->u.GCM.aad_len_in_bytes,
+ job->auth_tag_output,
+ job->auth_tag_output_len_in_bytes);
+
+ job->status = STS_COMPLETED;
+ return job;
+}
+
+static JOB_AES_HMAC *
+flush_job_aes_gcm_enc_avx(MB_MGR *state, JOB_AES_HMAC *job)
+{
+ (void) state;
+ (void) job;
+ return NULL;
+}
+#endif /* NO_GCM */
+
+/* ====================================================================== */
+
+IMB_DLL_LOCAL JOB_AES_HMAC *
+submit_job_aes_cntr_avx(JOB_AES_HMAC *job)
+{
+ if (16 == job->aes_key_len_in_bytes)
+ AES_CNTR_128(job->src + job->cipher_start_src_offset_in_bytes,
+ job->iv,
+ job->aes_enc_key_expanded,
+ job->dst,
+ job->msg_len_to_cipher_in_bytes,
+ job->iv_len_in_bytes);
+ else if (24 == job->aes_key_len_in_bytes)
+ AES_CNTR_192(job->src + job->cipher_start_src_offset_in_bytes,
+ job->iv,
+ job->aes_enc_key_expanded,
+ job->dst,
+ job->msg_len_to_cipher_in_bytes,
+ job->iv_len_in_bytes);
+ else /* assume 32 bytes */
+ AES_CNTR_256(job->src + job->cipher_start_src_offset_in_bytes,
+ job->iv,
+ job->aes_enc_key_expanded,
+ job->dst,
+ job->msg_len_to_cipher_in_bytes,
+ job->iv_len_in_bytes);
+
+ job->status |= STS_COMPLETED_AES;
+ return job;
+}
+
+IMB_DLL_LOCAL JOB_AES_HMAC *
+submit_job_aes_cntr_bit_avx(JOB_AES_HMAC *job)
+{
+ if (16 == job->aes_key_len_in_bytes)
+ aes_cntr_bit_128_avx(job->src +
+ job->cipher_start_src_offset_in_bytes,
+ job->iv,
+ job->aes_enc_key_expanded,
+ job->dst,
+ job->msg_len_to_cipher_in_bits,
+ job->iv_len_in_bytes);
+ else if (24 == job->aes_key_len_in_bytes)
+ aes_cntr_bit_192_avx(job->src +
+ job->cipher_start_src_offset_in_bytes,
+ job->iv,
+ job->aes_enc_key_expanded,
+ job->dst,
+ job->msg_len_to_cipher_in_bits,
+ job->iv_len_in_bytes);
+ else /* assume 32 bytes */
+ aes_cntr_bit_256_avx(job->src +
+ job->cipher_start_src_offset_in_bytes,
+ job->iv,
+ job->aes_enc_key_expanded,
+ job->dst,
+ job->msg_len_to_cipher_in_bits,
+ job->iv_len_in_bytes);
+
+ job->status |= STS_COMPLETED_AES;
+ return job;
+}
+
+void
+init_mb_mgr_avx(MB_MGR *state)
+{
+ unsigned int j;
+ uint8_t *p;
+ size_t size;
+
+ state->features = cpu_feature_adjust(state->flags,
+ cpu_feature_detect());
+
+ if (!(state->features & IMB_FEATURE_AESNI)) {
+ init_mb_mgr_sse_no_aesni(state);
+ return;
+ }
+
+ /* Init AES out-of-order fields */
+ memset(state->aes128_ooo.lens, 0xFF,
+ sizeof(state->aes128_ooo.lens));
+ memset(&state->aes128_ooo.lens[0], 0,
+ sizeof(state->aes128_ooo.lens[0]) * 8);
+ memset(state->aes128_ooo.job_in_lane, 0,
+ sizeof(state->aes128_ooo.job_in_lane));
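+	/*
+	 * unused_lanes is a stack of free lane indexes, one nibble per entry
+	 * with 0xF as the sentinel: submit pops a lane from the low nibble
+	 * (and lane, 0xF; shr 4) and a completed lane is pushed back with
+	 * (shl 4; or idx).
+	 */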
+ state->aes128_ooo.unused_lanes = 0xF76543210;
+ state->aes128_ooo.num_lanes_inuse = 0;
+
+ memset(state->aes192_ooo.lens, 0xFF,
+ sizeof(state->aes192_ooo.lens));
+ memset(&state->aes192_ooo.lens[0], 0,
+ sizeof(state->aes192_ooo.lens[0]) * 8);
+ memset(state->aes192_ooo.job_in_lane, 0,
+ sizeof(state->aes192_ooo.job_in_lane));
+ state->aes192_ooo.unused_lanes = 0xF76543210;
+ state->aes192_ooo.num_lanes_inuse = 0;
+
+ memset(&state->aes256_ooo.lens, 0xFF,
+ sizeof(state->aes256_ooo.lens));
+ memset(&state->aes256_ooo.lens[0], 0,
+ sizeof(state->aes256_ooo.lens[0]) * 8);
+ memset(state->aes256_ooo.job_in_lane, 0,
+ sizeof(state->aes256_ooo.job_in_lane));
+ state->aes256_ooo.unused_lanes = 0xF76543210;
+ state->aes256_ooo.num_lanes_inuse = 0;
+
+ /* DOCSIS SEC BPI (AES CBC + AES CFB for partial block)
+ * uses same settings as AES128 CBC.
+ */
+ memset(state->docsis_sec_ooo.lens, 0xFF,
+ sizeof(state->docsis_sec_ooo.lens));
+ memset(&state->docsis_sec_ooo.lens[0], 0,
+ sizeof(state->docsis_sec_ooo.lens[0]) * 8);
+ memset(state->docsis_sec_ooo.job_in_lane, 0,
+ sizeof(state->docsis_sec_ooo.job_in_lane));
+ state->docsis_sec_ooo.unused_lanes = 0xF76543210;
+ state->docsis_sec_ooo.num_lanes_inuse = 0;
+
+
+ /* Init HMAC/SHA1 out-of-order fields */
+ state->hmac_sha_1_ooo.lens[0] = 0;
+ state->hmac_sha_1_ooo.lens[1] = 0;
+ state->hmac_sha_1_ooo.lens[2] = 0;
+ state->hmac_sha_1_ooo.lens[3] = 0;
+ state->hmac_sha_1_ooo.lens[4] = 0xFFFF;
+ state->hmac_sha_1_ooo.lens[5] = 0xFFFF;
+ state->hmac_sha_1_ooo.lens[6] = 0xFFFF;
+ state->hmac_sha_1_ooo.lens[7] = 0xFFFF;
+ state->hmac_sha_1_ooo.unused_lanes = 0xFF03020100;
+ for (j = 0; j < AVX_NUM_SHA1_LANES; j++) {
+ state->hmac_sha_1_ooo.ldata[j].job_in_lane = NULL;
+ state->hmac_sha_1_ooo.ldata[j].extra_block[64] = 0x80;
+ memset(state->hmac_sha_1_ooo.ldata[j].extra_block + 65,
+ 0x00,
+ 64+7);
+ p = state->hmac_sha_1_ooo.ldata[j].outer_block;
+ memset(p + 5*4 + 1,
+ 0x00,
+ 64 - 5*4 - 1 - 2);
+		p[5*4] = 0x80;  /* padding byte after the 20 byte inner digest */
+		p[64-2] = 0x02; /* length = 0x02A0 (64 + 20 bytes = 672 bits) */
+		p[64-1] = 0xA0;
+ }
+ /* Init HMAC/SHA224 out-of-order fields */
+ state->hmac_sha_224_ooo.lens[0] = 0;
+ state->hmac_sha_224_ooo.lens[1] = 0;
+ state->hmac_sha_224_ooo.lens[2] = 0;
+ state->hmac_sha_224_ooo.lens[3] = 0;
+ state->hmac_sha_224_ooo.lens[4] = 0xFFFF;
+ state->hmac_sha_224_ooo.lens[5] = 0xFFFF;
+ state->hmac_sha_224_ooo.lens[6] = 0xFFFF;
+ state->hmac_sha_224_ooo.lens[7] = 0xFFFF;
+ state->hmac_sha_224_ooo.unused_lanes = 0xFF03020100;
+ for (j = 0; j < AVX_NUM_SHA256_LANES; j++) {
+ state->hmac_sha_224_ooo.ldata[j].job_in_lane = NULL;
+
+ p = state->hmac_sha_224_ooo.ldata[j].extra_block;
+ size = sizeof(state->hmac_sha_224_ooo.ldata[j].extra_block);
+ memset (p, 0x00, size);
+ p[64] = 0x80;
+
+ p = state->hmac_sha_224_ooo.ldata[j].outer_block;
+ size = sizeof(state->hmac_sha_224_ooo.ldata[j].outer_block);
+ memset(p, 0x00, size);
+ p[7 * 4] = 0x80; /* digest 7 words long */
+ p[64 - 2] = 0x02; /* length in little endian = 0x02E0 */
+ p[64 - 1] = 0xE0;
+ }
+
+ /* Init HMAC/SHA256 out-of-order fields */
+ state->hmac_sha_256_ooo.lens[0] = 0;
+ state->hmac_sha_256_ooo.lens[1] = 0;
+ state->hmac_sha_256_ooo.lens[2] = 0;
+ state->hmac_sha_256_ooo.lens[3] = 0;
+ state->hmac_sha_256_ooo.lens[4] = 0xFFFF;
+ state->hmac_sha_256_ooo.lens[5] = 0xFFFF;
+ state->hmac_sha_256_ooo.lens[6] = 0xFFFF;
+ state->hmac_sha_256_ooo.lens[7] = 0xFFFF;
+ state->hmac_sha_256_ooo.unused_lanes = 0xFF03020100;
+ for (j = 0; j < AVX_NUM_SHA256_LANES; j++) {
+ state->hmac_sha_256_ooo.ldata[j].job_in_lane = NULL;
+ state->hmac_sha_256_ooo.ldata[j].extra_block[64] = 0x80;
+ memset(state->hmac_sha_256_ooo.ldata[j].extra_block + 65,
+ 0x00,
+ 64+7);
+ p = state->hmac_sha_256_ooo.ldata[j].outer_block;
+ memset(p + 8*4 + 1,
+ 0x00,
+ 64 - 8*4 - 1 - 2);
+ p[8 * 4] = 0x80; /* 8 digest words */
+		p[64 - 2] = 0x03; /* length = 0x0300 (64 + 32 bytes = 768 bits) */
+		p[64 - 1] = 0x00;
+ }
+
+
+ /* Init HMAC/SHA384 out-of-order fields */
+ state->hmac_sha_384_ooo.lens[0] = 0;
+ state->hmac_sha_384_ooo.lens[1] = 0;
+ state->hmac_sha_384_ooo.lens[2] = 0xFFFF;
+ state->hmac_sha_384_ooo.lens[3] = 0xFFFF;
+ state->hmac_sha_384_ooo.lens[4] = 0xFFFF;
+ state->hmac_sha_384_ooo.lens[5] = 0xFFFF;
+ state->hmac_sha_384_ooo.lens[6] = 0xFFFF;
+ state->hmac_sha_384_ooo.lens[7] = 0xFFFF;
+ state->hmac_sha_384_ooo.unused_lanes = 0xFF0100;
+ for (j = 0; j < AVX_NUM_SHA512_LANES; j++) {
+ MB_MGR_HMAC_SHA_512_OOO *ctx = &state->hmac_sha_384_ooo;
+
+ ctx->ldata[j].job_in_lane = NULL;
+ ctx->ldata[j].extra_block[SHA_384_BLOCK_SIZE] = 0x80;
+ memset(ctx->ldata[j].extra_block + (SHA_384_BLOCK_SIZE + 1),
+ 0x00, SHA_384_BLOCK_SIZE + 7);
+
+ p = ctx->ldata[j].outer_block;
+ memset(p + SHA384_DIGEST_SIZE_IN_BYTES + 1, 0x00,
+ /* special end point because this length is constant */
+ SHA_384_BLOCK_SIZE -
+ SHA384_DIGEST_SIZE_IN_BYTES - 1 - 2);
+ /* mark the end */
+ p[SHA384_DIGEST_SIZE_IN_BYTES] = 0x80;
+		/* The hmac outer block length is always the same fixed size:
+		 * a whole message block for the OKey (1024 bits) plus the
+		 * inner digest (384 bits) gives 1408 bits == 0x0580, which is
+		 * the length encoded in the padding here.
+		 * The input message block needs to be converted to big endian
+		 * within the sha implementation before use.
+		 */
+ p[SHA_384_BLOCK_SIZE - 2] = 0x05;
+ p[SHA_384_BLOCK_SIZE - 1] = 0x80;
+ }
+
+ /* Init HMAC/SHA512 out-of-order fields */
+ state->hmac_sha_512_ooo.lens[0] = 0;
+ state->hmac_sha_512_ooo.lens[1] = 0;
+ state->hmac_sha_512_ooo.lens[2] = 0xFFFF;
+ state->hmac_sha_512_ooo.lens[3] = 0xFFFF;
+ state->hmac_sha_512_ooo.lens[4] = 0xFFFF;
+ state->hmac_sha_512_ooo.lens[5] = 0xFFFF;
+ state->hmac_sha_512_ooo.lens[6] = 0xFFFF;
+ state->hmac_sha_512_ooo.lens[7] = 0xFFFF;
+ state->hmac_sha_512_ooo.unused_lanes = 0xFF0100;
+ for (j = 0; j < AVX_NUM_SHA512_LANES; j++) {
+ MB_MGR_HMAC_SHA_512_OOO *ctx = &state->hmac_sha_512_ooo;
+
+ ctx->ldata[j].job_in_lane = NULL;
+ ctx->ldata[j].extra_block[SHA_512_BLOCK_SIZE] = 0x80;
+ memset(ctx->ldata[j].extra_block + (SHA_512_BLOCK_SIZE + 1),
+ 0x00, SHA_512_BLOCK_SIZE + 7);
+ p = ctx->ldata[j].outer_block;
+ memset(p + SHA512_DIGEST_SIZE_IN_BYTES + 1, 0x00,
+ /* special end point because this length is constant */
+ SHA_512_BLOCK_SIZE -
+ SHA512_DIGEST_SIZE_IN_BYTES - 1 - 2);
+ /* mark the end */
+ p[SHA512_DIGEST_SIZE_IN_BYTES] = 0x80;
+		/*
+		 * The hmac outer block length is always the same fixed size:
+		 * a whole message block for the OKey (1024 bits) plus the
+		 * inner digest (512 bits) gives 1536 bits == 0x600, which is
+		 * the length encoded in the padding here.
+		 * The input message block needs to be converted to big endian
+		 * within the sha implementation before use.
+		 */
+ p[SHA_512_BLOCK_SIZE - 2] = 0x06;
+ p[SHA_512_BLOCK_SIZE - 1] = 0x00;
+ }
+
+
+ /* Init HMAC/MD5 out-of-order fields */
+ state->hmac_md5_ooo.lens[0] = 0;
+ state->hmac_md5_ooo.lens[1] = 0;
+ state->hmac_md5_ooo.lens[2] = 0;
+ state->hmac_md5_ooo.lens[3] = 0;
+ state->hmac_md5_ooo.lens[4] = 0;
+ state->hmac_md5_ooo.lens[5] = 0;
+ state->hmac_md5_ooo.lens[6] = 0;
+ state->hmac_md5_ooo.lens[7] = 0;
+ state->hmac_md5_ooo.lens[8] = 0xFFFF;
+ state->hmac_md5_ooo.lens[9] = 0xFFFF;
+ state->hmac_md5_ooo.lens[10] = 0xFFFF;
+ state->hmac_md5_ooo.lens[11] = 0xFFFF;
+ state->hmac_md5_ooo.lens[12] = 0xFFFF;
+ state->hmac_md5_ooo.lens[13] = 0xFFFF;
+ state->hmac_md5_ooo.lens[14] = 0xFFFF;
+ state->hmac_md5_ooo.lens[15] = 0xFFFF;
+ state->hmac_md5_ooo.unused_lanes = 0xF76543210;
+ for (j = 0; j < AVX_NUM_MD5_LANES; j++) {
+ state->hmac_md5_ooo.ldata[j].job_in_lane = NULL;
+
+ p = state->hmac_md5_ooo.ldata[j].extra_block;
+ size = sizeof(state->hmac_md5_ooo.ldata[j].extra_block);
+ memset (p, 0x00, size);
+ p[64] = 0x80;
+
+ p = state->hmac_md5_ooo.ldata[j].outer_block;
+ size = sizeof(state->hmac_md5_ooo.ldata[j].outer_block);
+ memset(p, 0x00, size);
+		p[4 * 4] = 0x80;  /* padding byte after the 16 byte inner digest */
+		p[64 - 7] = 0x02; /* length = 0x0280 (64 + 16 bytes = 640 bits), */
+		p[64 - 8] = 0x80; /* stored little endian */
+ }
+
+ /* Init AES/XCBC OOO fields */
+ state->aes_xcbc_ooo.lens[0] = 0;
+ state->aes_xcbc_ooo.lens[1] = 0;
+ state->aes_xcbc_ooo.lens[2] = 0;
+ state->aes_xcbc_ooo.lens[3] = 0;
+ state->aes_xcbc_ooo.lens[4] = 0;
+ state->aes_xcbc_ooo.lens[5] = 0;
+ state->aes_xcbc_ooo.lens[6] = 0;
+ state->aes_xcbc_ooo.lens[7] = 0;
+ state->aes_xcbc_ooo.unused_lanes = 0xF76543210;
+ for (j = 0; j < 8; j++) {
+ state->aes_xcbc_ooo.ldata[j].job_in_lane = NULL;
+ state->aes_xcbc_ooo.ldata[j].final_block[16] = 0x80;
+ memset(state->aes_xcbc_ooo.ldata[j].final_block + 17, 0x00, 15);
+ }
+
+ /* Init AES-CCM auth out-of-order fields */
+ for (j = 0; j < 8; j++) {
+ state->aes_ccm_ooo.init_done[j] = 0;
+ state->aes_ccm_ooo.lens[j] = 0;
+ state->aes_ccm_ooo.job_in_lane[j] = NULL;
+ }
+ state->aes_ccm_ooo.unused_lanes = 0xF76543210;
+
+ /* Init AES-CMAC auth out-of-order fields */
+ for (j = 0; j < 8; j++) {
+ state->aes_cmac_ooo.init_done[j] = 0;
+ state->aes_cmac_ooo.lens[j] = 0;
+ state->aes_cmac_ooo.job_in_lane[j] = NULL;
+ }
+ state->aes_cmac_ooo.unused_lanes = 0xF76543210;
+
+ /* Init "in order" components */
+ state->next_job = 0;
+ state->earliest_job = -1;
+
+ /* set AVX handlers */
+ state->get_next_job = get_next_job_avx;
+ state->submit_job = submit_job_avx;
+ state->submit_job_nocheck = submit_job_nocheck_avx;
+ state->get_completed_job = get_completed_job_avx;
+ state->flush_job = flush_job_avx;
+ state->queue_size = queue_size_avx;
+ state->keyexp_128 = aes_keyexp_128_avx;
+ state->keyexp_192 = aes_keyexp_192_avx;
+ state->keyexp_256 = aes_keyexp_256_avx;
+ state->cmac_subkey_gen_128 = aes_cmac_subkey_gen_avx;
+ state->xcbc_keyexp = aes_xcbc_expand_key_avx;
+ state->des_key_sched = des_key_schedule;
+ state->sha1_one_block = sha1_one_block_avx;
+ state->sha1 = sha1_avx;
+ state->sha224_one_block = sha224_one_block_avx;
+ state->sha224 = sha224_avx;
+ state->sha256_one_block = sha256_one_block_avx;
+ state->sha256 = sha256_avx;
+ state->sha384_one_block = sha384_one_block_avx;
+ state->sha384 = sha384_avx;
+ state->sha512_one_block = sha512_one_block_avx;
+ state->sha512 = sha512_avx;
+ state->md5_one_block = md5_one_block_avx;
+ state->aes128_cfb_one = aes_cfb_128_one_avx;
+
+ state->eea3_1_buffer = zuc_eea3_1_buffer_avx;
+ state->eea3_4_buffer = zuc_eea3_4_buffer_avx;
+ state->eea3_n_buffer = zuc_eea3_n_buffer_avx;
+ state->eia3_1_buffer = zuc_eia3_1_buffer_avx;
+
+ state->f8_1_buffer = kasumi_f8_1_buffer_avx;
+ state->f8_1_buffer_bit = kasumi_f8_1_buffer_bit_avx;
+ state->f8_2_buffer = kasumi_f8_2_buffer_avx;
+ state->f8_3_buffer = kasumi_f8_3_buffer_avx;
+ state->f8_4_buffer = kasumi_f8_4_buffer_avx;
+ state->f8_n_buffer = kasumi_f8_n_buffer_avx;
+ state->f9_1_buffer = kasumi_f9_1_buffer_avx;
+ state->f9_1_buffer_user = kasumi_f9_1_buffer_user_avx;
+ state->kasumi_init_f8_key_sched = kasumi_init_f8_key_sched_avx;
+ state->kasumi_init_f9_key_sched = kasumi_init_f9_key_sched_avx;
+ state->kasumi_key_sched_size = kasumi_key_sched_size_avx;
+
+ state->snow3g_f8_1_buffer_bit = snow3g_f8_1_buffer_bit_avx;
+ state->snow3g_f8_1_buffer = snow3g_f8_1_buffer_avx;
+ state->snow3g_f8_2_buffer = snow3g_f8_2_buffer_avx;
+ state->snow3g_f8_4_buffer = snow3g_f8_4_buffer_avx;
+ state->snow3g_f8_8_buffer = snow3g_f8_8_buffer_avx;
+ state->snow3g_f8_n_buffer = snow3g_f8_n_buffer_avx;
+ state->snow3g_f8_8_buffer_multikey = snow3g_f8_8_buffer_multikey_avx;
+ state->snow3g_f8_n_buffer_multikey = snow3g_f8_n_buffer_multikey_avx;
+ state->snow3g_f9_1_buffer = snow3g_f9_1_buffer_avx;
+ state->snow3g_init_key_sched = snow3g_init_key_sched_avx;
+ state->snow3g_key_sched_size = snow3g_key_sched_size_avx;
+
+#ifndef NO_GCM
+ state->gcm128_enc = aes_gcm_enc_128_avx_gen2;
+ state->gcm192_enc = aes_gcm_enc_192_avx_gen2;
+ state->gcm256_enc = aes_gcm_enc_256_avx_gen2;
+ state->gcm128_dec = aes_gcm_dec_128_avx_gen2;
+ state->gcm192_dec = aes_gcm_dec_192_avx_gen2;
+ state->gcm256_dec = aes_gcm_dec_256_avx_gen2;
+ state->gcm128_init = aes_gcm_init_128_avx_gen2;
+ state->gcm192_init = aes_gcm_init_192_avx_gen2;
+ state->gcm256_init = aes_gcm_init_256_avx_gen2;
+ state->gcm128_enc_update = aes_gcm_enc_128_update_avx_gen2;
+ state->gcm192_enc_update = aes_gcm_enc_192_update_avx_gen2;
+ state->gcm256_enc_update = aes_gcm_enc_256_update_avx_gen2;
+ state->gcm128_dec_update = aes_gcm_dec_128_update_avx_gen2;
+ state->gcm192_dec_update = aes_gcm_dec_192_update_avx_gen2;
+ state->gcm256_dec_update = aes_gcm_dec_256_update_avx_gen2;
+ state->gcm128_enc_finalize = aes_gcm_enc_128_finalize_avx_gen2;
+ state->gcm192_enc_finalize = aes_gcm_enc_192_finalize_avx_gen2;
+ state->gcm256_enc_finalize = aes_gcm_enc_256_finalize_avx_gen2;
+ state->gcm128_dec_finalize = aes_gcm_dec_128_finalize_avx_gen2;
+ state->gcm192_dec_finalize = aes_gcm_dec_192_finalize_avx_gen2;
+ state->gcm256_dec_finalize = aes_gcm_dec_256_finalize_avx_gen2;
+ state->gcm128_precomp = aes_gcm_precomp_128_avx_gen2;
+ state->gcm192_precomp = aes_gcm_precomp_192_avx_gen2;
+ state->gcm256_precomp = aes_gcm_precomp_256_avx_gen2;
+ state->gcm128_pre = aes_gcm_pre_128_avx_gen2;
+ state->gcm192_pre = aes_gcm_pre_192_avx_gen2;
+ state->gcm256_pre = aes_gcm_pre_256_avx_gen2;
+#endif
+}
+
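+/*
+ * mb_mgr_code.h carries the architecture-independent job manager logic
+ * (submit_job/flush_job/get_next_job etc.); including it here instantiates
+ * that code with the AVX-specific macros defined above.
+ */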
+#include "mb_mgr_code.h"
diff --git a/src/spdk/intel-ipsec-mb/avx/mb_mgr_hmac_flush_avx.asm b/src/spdk/intel-ipsec-mb/avx/mb_mgr_hmac_flush_avx.asm
new file mode 100644
index 000000000..750a630aa
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx/mb_mgr_hmac_flush_avx.asm
@@ -0,0 +1,298 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%include "include/os.asm"
+%include "job_aes_hmac.asm"
+%include "mb_mgr_datastruct.asm"
+%include "include/reg_sizes.asm"
+
+extern sha1_mult_avx
+
+section .data
+default rel
+
+align 16
+byteswap: ;ddq 0x0c0d0e0f08090a0b0405060700010203
+ dq 0x0405060700010203, 0x0c0d0e0f08090a0b
+x80: ;ddq 0x00000000000000000000000000000080
+ dq 0x0000000000000080, 0x0000000000000000
+x00: ;ddq 0x00000000000000000000000000000000
+ dq 0x0000000000000000, 0x0000000000000000
+
+len_masks:
+ ;ddq 0x0000000000000000000000000000FFFF
+ dq 0x000000000000FFFF, 0x0000000000000000
+ ;ddq 0x000000000000000000000000FFFF0000
+ dq 0x00000000FFFF0000, 0x0000000000000000
+ ;ddq 0x00000000000000000000FFFF00000000
+ dq 0x0000FFFF00000000, 0x0000000000000000
+ ;ddq 0x0000000000000000FFFF000000000000
+ dq 0xFFFF000000000000, 0x0000000000000000
+one: dq 1
+two: dq 2
+three: dq 3
+
+section .text
+
+%if 1
+%ifdef LINUX
+%define arg1 rdi
+%define arg2 rsi
+%else
+%define arg1 rcx
+%define arg2 rdx
+%endif
+
+%define state arg1
+%define job arg2
+%define len2 arg2
+
+
+; idx needs to be in rbx, rbp, r12-r15
+%define idx rbp
+
+%define unused_lanes rbx
+%define lane_data rbx
+%define tmp2 rbx
+
+%define job_rax rax
+%define tmp1 rax
+%define size_offset rax
+%define tmp rax
+%define start_offset rax
+
+%define tmp3 arg1
+
+%define extra_blocks arg2
+%define p arg2
+
+%define tmp4 r8
+
+%endif
+
+; This routine clobbers rbx, rbp
+struc STACK
+_gpr_save: resq 2
+_rsp_save: resq 1
+endstruc
+
+%define APPEND(a,b) a %+ b
+
+; JOB* flush_job_hmac_avx(MB_MGR_HMAC_SHA_1_OOO *state)
+; arg 1 : rcx : state
+MKGLOBAL(flush_job_hmac_avx,function,internal)
+flush_job_hmac_avx:
+
+ mov rax, rsp
+ sub rsp, STACK_size
+ and rsp, -16
+
+ mov [rsp + _gpr_save + 8*0], rbx
+ mov [rsp + _gpr_save + 8*1], rbp
+ mov [rsp + _rsp_save], rax ; original SP
+
+ mov unused_lanes, [state + _unused_lanes]
+ bt unused_lanes, 32+7
+ jc return_null
+
+ ; find a lane with a non-null job
+ xor idx, idx
+ cmp qword [state + _ldata + 1 * _HMAC_SHA1_LANE_DATA_size + _job_in_lane], 0
+ cmovne idx, [rel one]
+ cmp qword [state + _ldata + 2 * _HMAC_SHA1_LANE_DATA_size + _job_in_lane], 0
+ cmovne idx, [rel two]
+ cmp qword [state + _ldata + 3 * _HMAC_SHA1_LANE_DATA_size + _job_in_lane], 0
+ cmovne idx, [rel three]
+
+copy_lane_data:
+ ; copy valid lane (idx) to empty lanes
+ vmovdqa xmm0, [state + _lens]
+ mov tmp, [state + _args_data_ptr + PTR_SZ*idx]
+
+%assign I 0
+%rep 4
+ cmp qword [state + _ldata + I * _HMAC_SHA1_LANE_DATA_size + _job_in_lane], 0
+ jne APPEND(skip_,I)
+ mov [state + _args_data_ptr + PTR_SZ*I], tmp
+ vpor xmm0, xmm0, [rel len_masks + 16*I]
+APPEND(skip_,I):
+%assign I (I+1)
+%endrep
+
+ vmovdqa [state + _lens], xmm0
+
+ vphminposuw xmm1, xmm0
+ vpextrw DWORD(len2), xmm1, 0 ; min value
+ vpextrw DWORD(idx), xmm1, 1 ; min index (0...3)
+ cmp len2, 0
+ je len_is_0
+
+	vpshuflw xmm1, xmm1, 0	; broadcast min length to the low 4 words (4 SHA1 lanes)
+ vpsubw xmm0, xmm0, xmm1
+ vmovdqa [state + _lens], xmm0
+
+ ; "state" and "args" are the same address, arg1
+ ; len is arg2
+ call sha1_mult_avx
+ ; state is intact
+
+len_is_0:
+ ; process completed job "idx"
+ imul lane_data, idx, _HMAC_SHA1_LANE_DATA_size
+ lea lane_data, [state + _ldata + lane_data]
+ mov DWORD(extra_blocks), [lane_data + _extra_blocks]
+ cmp extra_blocks, 0
+ jne proc_extra_blocks
+ cmp dword [lane_data + _outer_done], 0
+ jne end_loop
+
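+	; Inner hash done - set up the outer hash: write the byte-swapped
+	; inner digest into outer_block (whose padding bytes were pre-set at
+	; init), point the lane at that single block, and reload the lane
+	; digest with the precomputed key-XOR-opad state (_auth_key_xor_opad).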
+proc_outer:
+ mov dword [lane_data + _outer_done], 1
+ mov DWORD(size_offset), [lane_data + _size_offset]
+ mov qword [lane_data + _extra_block + size_offset], 0
+ mov word [state + _lens + 2*idx], 1
+ lea tmp, [lane_data + _outer_block]
+ mov job, [lane_data + _job_in_lane]
+ mov [state + _args_data_ptr + PTR_SZ*idx], tmp
+
+ ;; idx determines which column
+ ;; read off from consecutive rows
+ vmovd xmm0, [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 0*SHA1_DIGEST_ROW_SIZE]
+ vpinsrd xmm0, xmm0, [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 1*SHA1_DIGEST_ROW_SIZE], 1
+ vpinsrd xmm0, xmm0, [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 2*SHA1_DIGEST_ROW_SIZE], 2
+ vpinsrd xmm0, xmm0, [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 3*SHA1_DIGEST_ROW_SIZE], 3
+ vpshufb xmm0, xmm0, [rel byteswap]
+ mov DWORD(tmp), [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 4*SHA1_DIGEST_ROW_SIZE]
+ bswap DWORD(tmp)
+ vmovdqa [lane_data + _outer_block], xmm0
+ mov [lane_data + _outer_block + 4*4], DWORD(tmp)
+
+ mov tmp, [job + _auth_key_xor_opad]
+ vmovdqu xmm0, [tmp]
+ mov DWORD(tmp), [tmp + 4*4]
+ vmovd [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 0*SHA1_DIGEST_ROW_SIZE], xmm0
+ vpextrd [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 1*SHA1_DIGEST_ROW_SIZE], xmm0, 1
+ vpextrd [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 2*SHA1_DIGEST_ROW_SIZE], xmm0, 2
+ vpextrd [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 3*SHA1_DIGEST_ROW_SIZE], xmm0, 3
+ mov [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 4*SHA1_DIGEST_ROW_SIZE], DWORD(tmp)
+ jmp copy_lane_data
+
+ align 16
+proc_extra_blocks:
+ mov DWORD(start_offset), [lane_data + _start_offset]
+ mov [state + _lens + 2*idx], WORD(extra_blocks)
+ lea tmp, [lane_data + _extra_block + start_offset]
+ mov [state + _args_data_ptr + PTR_SZ*idx], tmp
+ mov dword [lane_data + _extra_blocks], 0
+ jmp copy_lane_data
+
+return_null:
+ xor job_rax, job_rax
+ jmp return
+
+ align 16
+end_loop:
+ mov job_rax, [lane_data + _job_in_lane]
+ mov qword [lane_data + _job_in_lane], 0
+ or dword [job_rax + _status], STS_COMPLETED_HMAC
+ mov unused_lanes, [state + _unused_lanes]
+ shl unused_lanes, 8
+ or unused_lanes, idx
+ mov [state + _unused_lanes], unused_lanes
+
+ mov p, [job_rax + _auth_tag_output]
+
+ ; copy 12 bytes
+ mov DWORD(tmp2), [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 0*SHA1_DIGEST_ROW_SIZE]
+ mov DWORD(tmp4), [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 1*SHA1_DIGEST_ROW_SIZE]
+ bswap DWORD(tmp2)
+ bswap DWORD(tmp4)
+ mov [p + 0*4], DWORD(tmp2)
+ mov [p + 1*4], DWORD(tmp4)
+ mov DWORD(tmp2), [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 2*SHA1_DIGEST_ROW_SIZE]
+ bswap DWORD(tmp2)
+ mov [p + 2*4], DWORD(tmp2)
+
+ cmp qword [job_rax + _auth_tag_output_len_in_bytes], 12
+ je clear_ret
+
+ ;; copy remaining 8 bytes to return 20 byte digest
+ mov DWORD(tmp2), [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 3*SHA1_DIGEST_ROW_SIZE]
+ mov DWORD(tmp4), [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 4*SHA1_DIGEST_ROW_SIZE]
+ bswap DWORD(tmp2)
+ bswap DWORD(tmp4)
+ mov [p + 3*SHA1_DIGEST_WORD_SIZE], DWORD(tmp2)
+ mov [p + 4*SHA1_DIGEST_WORD_SIZE], DWORD(tmp4)
+
+clear_ret:
+
+%ifdef SAFE_DATA
+ vpxor xmm0, xmm0
+
+ ;; Clear digest (20B), outer_block (20B) and extra_block (64B)
+ ;; of returned job and NULL jobs
+%assign I 0
+%rep 4
+ cmp qword [state + _ldata + (I*_HMAC_SHA1_LANE_DATA_size) + _job_in_lane], 0
+ jne APPEND(skip_clear_,I)
+
+ ;; Clear digest
+ mov dword [state + _args_digest + SHA1_DIGEST_WORD_SIZE*I + 0*SHA1_DIGEST_ROW_SIZE], 0
+ mov dword [state + _args_digest + SHA1_DIGEST_WORD_SIZE*I + 1*SHA1_DIGEST_ROW_SIZE], 0
+ mov dword [state + _args_digest + SHA1_DIGEST_WORD_SIZE*I + 2*SHA1_DIGEST_ROW_SIZE], 0
+ mov dword [state + _args_digest + SHA1_DIGEST_WORD_SIZE*I + 3*SHA1_DIGEST_ROW_SIZE], 0
+ mov dword [state + _args_digest + SHA1_DIGEST_WORD_SIZE*I + 4*SHA1_DIGEST_ROW_SIZE], 0
+
+ lea lane_data, [state + _ldata + (I*_HMAC_SHA1_LANE_DATA_size)]
+ ;; Clear first 64 bytes of extra_block
+%assign offset 0
+%rep 4
+ vmovdqa [lane_data + _extra_block + offset], xmm0
+%assign offset (offset + 16)
+%endrep
+
+ ;; Clear first 20 bytes of outer_block
+ vmovdqa [lane_data + _outer_block], xmm0
+ mov dword [lane_data + _outer_block + 16], 0
+
+APPEND(skip_clear_,I):
+%assign I (I+1)
+%endrep
+
+%endif ;; SAFE_DATA
+
+return:
+
+ mov rbx, [rsp + _gpr_save + 8*0]
+ mov rbp, [rsp + _gpr_save + 8*1]
+ mov rsp, [rsp + _rsp_save] ; original SP
+
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/avx/mb_mgr_hmac_md5_flush_avx.asm b/src/spdk/intel-ipsec-mb/avx/mb_mgr_hmac_md5_flush_avx.asm
new file mode 100644
index 000000000..a53ad0843
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx/mb_mgr_hmac_md5_flush_avx.asm
@@ -0,0 +1,321 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%include "include/os.asm"
+%include "job_aes_hmac.asm"
+%include "mb_mgr_datastruct.asm"
+%include "include/reg_sizes.asm"
+
+extern md5_x4x2_avx
+
+section .data
+default rel
+align 16
+dupw: ;ddq 0x01000100010001000100010001000100
+ dq 0x0100010001000100, 0x0100010001000100
+x80: ;ddq 0x00000000000000000000000000000080
+ dq 0x0000000000000080, 0x0000000000000000
+x00: ;ddq 0x00000000000000000000000000000000
+ dq 0x0000000000000000, 0x0000000000000000
+len_masks:
+ ;ddq 0x0000000000000000000000000000FFFF
+ dq 0x000000000000FFFF, 0x0000000000000000
+ ;ddq 0x000000000000000000000000FFFF0000
+ dq 0x00000000FFFF0000, 0x0000000000000000
+ ;ddq 0x00000000000000000000FFFF00000000
+ dq 0x0000FFFF00000000, 0x0000000000000000
+ ;ddq 0x0000000000000000FFFF000000000000
+ dq 0xFFFF000000000000, 0x0000000000000000
+ ;ddq 0x000000000000FFFF0000000000000000
+ dq 0x0000000000000000, 0x000000000000FFFF
+ ;ddq 0x00000000FFFF00000000000000000000
+ dq 0x0000000000000000, 0x00000000FFFF0000
+ ;ddq 0x0000FFFF000000000000000000000000
+ dq 0x0000000000000000, 0x0000FFFF00000000
+ ;ddq 0xFFFF0000000000000000000000000000
+ dq 0x0000000000000000, 0xFFFF000000000000
+one: dq 1
+two: dq 2
+three: dq 3
+four: dq 4
+five: dq 5
+six: dq 6
+seven: dq 7
+
+section .text
+
+%if 1
+%ifdef LINUX
+%define arg1 rdi
+%define arg2 rsi
+%else
+%define arg1 rcx
+%define arg2 rdx
+%endif
+
+%define state arg1
+%define job arg2
+%define len2 arg2
+
+
+; idx needs to be in rbp
+%define idx rbp
+
+; unused_lanes must be in rax-rdx
+%define unused_lanes rbx
+%define lane_data rbx
+%define tmp2 rbx
+
+%define job_rax rax
+%define tmp1 rax
+%define size_offset rax
+%define tmp rax
+%define start_offset rax
+
+%define tmp3 arg1
+
+%define extra_blocks arg2
+%define p arg2
+
+%define tmp4 r8
+%define tmp5 r9
+
+%endif
+
+; This routine and/or the called routine clobbers all GPRs
+struc STACK
+_gpr_save: resq 8
+_rsp_save: resq 1
+endstruc
+
+%define APPEND(a,b) a %+ b
+
+; JOB* flush_job_hmac_md5_avx(MB_MGR_HMAC_MD5_OOO *state)
+; arg 1 : rcx : state
+MKGLOBAL(flush_job_hmac_md5_avx,function,internal)
+flush_job_hmac_md5_avx:
+
+ mov rax, rsp
+ sub rsp, STACK_size
+ and rsp, -16
+
+ mov [rsp + _gpr_save + 8*0], rbx
+ mov [rsp + _gpr_save + 8*1], rbp
+ mov [rsp + _gpr_save + 8*2], r12
+ mov [rsp + _gpr_save + 8*3], r13
+ mov [rsp + _gpr_save + 8*4], r14
+ mov [rsp + _gpr_save + 8*5], r15
+%ifndef LINUX
+ mov [rsp + _gpr_save + 8*6], rsi
+ mov [rsp + _gpr_save + 8*7], rdi
+%endif
+ mov [rsp + _rsp_save], rax ; original SP
+
+ mov unused_lanes, [state + _unused_lanes_md5]
+ bt unused_lanes, 32+3
+ jc return_null
+
+ ; find a lane with a non-null job
+ xor idx, idx
+ cmp qword [state + _ldata_md5 + 1 * _HMAC_SHA1_LANE_DATA_size + _job_in_lane],0
+ cmovne idx, [rel one]
+ cmp qword [state + _ldata_md5 + 2 * _HMAC_SHA1_LANE_DATA_size + _job_in_lane],0
+ cmovne idx, [rel two]
+ cmp qword [state + _ldata_md5 + 3 * _HMAC_SHA1_LANE_DATA_size + _job_in_lane],0
+ cmovne idx, [rel three]
+ cmp qword [state + _ldata_md5 + 4 * _HMAC_SHA1_LANE_DATA_size + _job_in_lane],0
+ cmovne idx, [rel four]
+ cmp qword [state + _ldata_md5 + 5 * _HMAC_SHA1_LANE_DATA_size + _job_in_lane],0
+ cmovne idx, [rel five]
+ cmp qword [state + _ldata_md5 + 6 * _HMAC_SHA1_LANE_DATA_size + _job_in_lane],0
+ cmovne idx, [rel six]
+ cmp qword [state + _ldata_md5 + 7 * _HMAC_SHA1_LANE_DATA_size + _job_in_lane],0
+ cmovne idx, [rel seven]
+
+copy_lane_data:
+ ; copy good lane (idx) to empty lanes
+ vmovdqa xmm0, [state + _lens_md5]
+ mov tmp, [state + _args_data_ptr_md5 + PTR_SZ*idx]
+
+%assign I 0
+%rep 8
+ cmp qword [state + _ldata_md5 + I * _HMAC_SHA1_LANE_DATA_size + _job_in_lane], 0
+ jne APPEND(skip_,I)
+ mov [state + _args_data_ptr_md5 + PTR_SZ*I], tmp
+ vpor xmm0, xmm0, [rel len_masks + 16*I]
+APPEND(skip_,I):
+%assign I (I+1)
+%endrep
+
+ vmovdqa [state + _lens_md5], xmm0
+
+ vphminposuw xmm1, xmm0
+ vpextrw DWORD(len2), xmm1, 0 ; min value
+	vpextrw	DWORD(idx), xmm1, 1	; min index (0...7)
+ cmp len2, 0
+ je len_is_0
+
+ vpshufb xmm1, [rel dupw] ; duplicate words across all lanes
+ vpsubw xmm0, xmm0, xmm1
+ vmovdqa [state + _lens_md5], xmm0
+
+ ; "state" and "args" are the same address, arg1
+ ; len is arg2
+ call md5_x4x2_avx
+ ; state and idx are intact
+
+len_is_0:
+ ; process completed job "idx"
+ imul lane_data, idx, _HMAC_SHA1_LANE_DATA_size
+ lea lane_data, [state + _ldata_md5 + lane_data]
+ mov DWORD(extra_blocks), [lane_data + _extra_blocks]
+ cmp extra_blocks, 0
+ jne proc_extra_blocks
+ cmp dword [lane_data + _outer_done], 0
+ jne end_loop
+
+proc_outer:
+ mov dword [lane_data + _outer_done], 1
+ mov DWORD(size_offset), [lane_data + _size_offset]
+ mov qword [lane_data + _extra_block + size_offset], 0
+ mov word [state + _lens_md5 + 2*idx], 1
+ lea tmp, [lane_data + _outer_block]
+ mov job, [lane_data + _job_in_lane]
+ mov [state + _args_data_ptr_md5 + PTR_SZ*idx], tmp
+
+ vmovd xmm0, [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 0*MD5_DIGEST_ROW_SIZE]
+ vpinsrd xmm0, [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 1*MD5_DIGEST_ROW_SIZE], 1
+ vpinsrd xmm0, [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 2*MD5_DIGEST_ROW_SIZE], 2
+ vpinsrd xmm0, [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 3*MD5_DIGEST_ROW_SIZE], 3
+ vmovdqa [lane_data + _outer_block], xmm0
+
+ mov tmp, [job + _auth_key_xor_opad]
+ vmovdqu xmm0, [tmp]
+ vmovd [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 0*MD5_DIGEST_ROW_SIZE], xmm0
+ vpextrd [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 1*MD5_DIGEST_ROW_SIZE], xmm0, 1
+ vpextrd [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 2*MD5_DIGEST_ROW_SIZE], xmm0, 2
+ vpextrd [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 3*MD5_DIGEST_ROW_SIZE], xmm0, 3
+ jmp copy_lane_data
+
+ align 16
+proc_extra_blocks:
+ mov DWORD(start_offset), [lane_data + _start_offset]
+ mov [state + _lens_md5 + 2*idx], WORD(extra_blocks)
+ lea tmp, [lane_data + _extra_block + start_offset]
+ mov [state + _args_data_ptr_md5 + PTR_SZ*idx], tmp
+ mov dword [lane_data + _extra_blocks], 0
+ jmp copy_lane_data
+
+return_null:
+ xor job_rax, job_rax
+ jmp return
+
+ align 16
+end_loop:
+ mov job_rax, [lane_data + _job_in_lane]
+ mov qword [lane_data + _job_in_lane], 0
+ or dword [job_rax + _status], STS_COMPLETED_HMAC
+ mov unused_lanes, [state + _unused_lanes_md5]
+ shl unused_lanes, 4
+ or unused_lanes, idx
+ mov [state + _unused_lanes_md5], unused_lanes
+
+ mov p, [job_rax + _auth_tag_output]
+
+ ; copy 12 bytes
+ mov DWORD(tmp2), [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 0*MD5_DIGEST_ROW_SIZE]
+ mov DWORD(tmp4), [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 1*MD5_DIGEST_ROW_SIZE]
+ mov DWORD(tmp5), [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 2*MD5_DIGEST_ROW_SIZE]
+	; MD5 digest words are already little endian - no byte swap needed
+ mov [p + 0*4], DWORD(tmp2)
+ mov [p + 1*4], DWORD(tmp4)
+ mov [p + 2*4], DWORD(tmp5)
+
+ cmp DWORD [job_rax + _auth_tag_output_len_in_bytes], 12
+ je clear_ret
+
+	; copy remaining 4 bytes to return 16 byte digest
+ mov DWORD(tmp5), [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 3*MD5_DIGEST_ROW_SIZE]
+ mov [p + 3*4], DWORD(tmp5)
+
+clear_ret:
+
+%ifdef SAFE_DATA
+ vpxor xmm0, xmm0
+
+ ;; Clear digest (16B), outer_block (16B) and extra_block (64B)
+ ;; of returned job and NULL jobs
+%assign I 0
+%rep 8
+ cmp qword [state + _ldata_md5 + (I*_HMAC_SHA1_LANE_DATA_size) + _job_in_lane], 0
+ jne APPEND(skip_clear_,I)
+
+ ;; Clear digest (16 bytes)
+%assign J 0
+%rep 4
+ mov dword [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*I + J*MD5_DIGEST_ROW_SIZE], 0
+%assign J (J+1)
+%endrep
+
+ lea lane_data, [state + _ldata_md5 + (I*_HMAC_SHA1_LANE_DATA_size)]
+ ;; Clear first 64 bytes of extra_block
+%assign offset 0
+%rep 4
+ vmovdqa [lane_data + _extra_block + offset], xmm0
+%assign offset (offset + 16)
+%endrep
+
+ ;; Clear first 16 bytes of outer_block
+ vmovdqa [lane_data + _outer_block], xmm0
+
+APPEND(skip_clear_,I):
+%assign I (I+1)
+%endrep
+
+%endif ;; SAFE_DATA
+
+return:
+
+ mov rbx, [rsp + _gpr_save + 8*0]
+ mov rbp, [rsp + _gpr_save + 8*1]
+ mov r12, [rsp + _gpr_save + 8*2]
+ mov r13, [rsp + _gpr_save + 8*3]
+ mov r14, [rsp + _gpr_save + 8*4]
+ mov r15, [rsp + _gpr_save + 8*5]
+%ifndef LINUX
+ mov rsi, [rsp + _gpr_save + 8*6]
+ mov rdi, [rsp + _gpr_save + 8*7]
+%endif
+ mov rsp, [rsp + _rsp_save] ; original SP
+
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
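
(Illustrative note, not part of the patch.) The len_is_0 / proc_outer / proc_extra_blocks / end_loop flow above is the usual two-pass HMAC job machine: first finish the padded tail of the inner hash, then run one outer block over the opad-initialised state, then emit the tag and free the lane. A minimal C model of that per-lane decision is sketched below, assuming only the extra_blocks and outer_done lane-data fields used by the asm; the enum and struct names are stand-ins introduced here, not library symbols.

#include <stdint.h>

/* Sketch of the per-lane HMAC completion logic; extra_blocks and outer_done
 * mirror the lane-data fields above, the rest is illustrative only. */
enum lane_next { RUN_EXTRA_BLOCKS, RUN_OUTER_BLOCK, JOB_DONE };

struct lane_model {
    uint32_t extra_blocks;   /* padded tail blocks of the inner hash still to run */
    uint32_t outer_done;     /* non-zero once the outer hash has been scheduled */
};

static enum lane_next lane_step(struct lane_model *ld)
{
    if (ld->extra_blocks != 0) {
        /* proc_extra_blocks: point the lane at extra_block + start_offset,
         * set its length to extra_blocks, and clear the counter */
        ld->extra_blocks = 0;
        return RUN_EXTRA_BLOCKS;
    }
    if (!ld->outer_done) {
        /* proc_outer: store the inner digest into outer_block, reload the
         * lane digest from auth_key_xor_opad and schedule one more block */
        ld->outer_done = 1;
        return RUN_OUTER_BLOCK;
    }
    /* end_loop: write the auth tag, mark the job complete, free the lane */
    return JOB_DONE;
}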
diff --git a/src/spdk/intel-ipsec-mb/avx/mb_mgr_hmac_md5_submit_avx.asm b/src/spdk/intel-ipsec-mb/avx/mb_mgr_hmac_md5_submit_avx.asm
new file mode 100644
index 000000000..5e4627dca
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx/mb_mgr_hmac_md5_submit_avx.asm
@@ -0,0 +1,355 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%include "include/os.asm"
+%include "job_aes_hmac.asm"
+%include "mb_mgr_datastruct.asm"
+%include "include/memcpy.asm"
+%include "include/reg_sizes.asm"
+%include "include/const.inc"
+
+extern md5_x4x2_avx
+
+section .data
+default rel
+align 16
+dupw: ;ddq 0x01000100010001000100010001000100
+ dq 0x0100010001000100, 0x0100010001000100
+
+section .text
+
+%if 1
+%ifdef LINUX
+%define arg1 rdi
+%define arg2 rsi
+%define reg3 rcx
+%define reg4 rdx
+%else
+%define arg1 rcx
+%define arg2 rdx
+%define reg3 rdi
+%define reg4 rsi
+%endif
+
+%define state arg1
+%define job arg2
+%define len2 arg2
+
+
+; idx needs to be in rbp
+%define last_len rbp
+%define idx rbp
+
+%define p r11
+%define start_offset r11
+
+%define unused_lanes rbx
+%define tmp4 rbx
+
+%define job_rax rax
+%define len rax
+
+%define size_offset reg3
+%define tmp2 reg3
+
+%define lane reg4
+%define tmp3 reg4
+
+%define extra_blocks r8
+
+%define tmp r9
+%define p2 r9
+
+%define lane_data r10
+
+%endif
+
+; This routine and/or the called routine clobbers all GPRs
+struc STACK
+_gpr_save: resq 8
+_rsp_save: resq 1
+endstruc
+
+; JOB* submit_job_hmac_md5_avx(MB_MGR_HMAC_MD5_OOO *state, JOB_AES_HMAC *job)
+; arg 1 : rcx : state
+; arg 2 : rdx : job
+MKGLOBAL(submit_job_hmac_md5_avx,function,internal)
+submit_job_hmac_md5_avx:
+
+ mov rax, rsp
+ sub rsp, STACK_size
+ and rsp, -16
+
+ mov [rsp + _gpr_save + 8*0], rbx
+ mov [rsp + _gpr_save + 8*1], rbp
+ mov [rsp + _gpr_save + 8*2], r12
+ mov [rsp + _gpr_save + 8*3], r13
+ mov [rsp + _gpr_save + 8*4], r14
+ mov [rsp + _gpr_save + 8*5], r15
+%ifndef LINUX
+ mov [rsp + _gpr_save + 8*6], rsi
+ mov [rsp + _gpr_save + 8*7], rdi
+%endif
+ mov [rsp + _rsp_save], rax ; original SP
+
+ mov unused_lanes, [state + _unused_lanes_md5]
+ mov lane, unused_lanes
+ and lane, 0xF
+ shr unused_lanes, 4
+ imul lane_data, lane, _HMAC_SHA1_LANE_DATA_size
+ lea lane_data, [state + _ldata_md5 + lane_data]
+ mov [state + _unused_lanes_md5], unused_lanes
+ mov len, [job + _msg_len_to_hash_in_bytes]
+ mov tmp, len
+ shr tmp, 6 ; divide by 64, len in terms of blocks
+
+ mov [lane_data + _job_in_lane], job
+ mov dword [lane_data + _outer_done], 0
+
+ ;; insert len into proper lane
+ vmovdqa xmm0, [state + _lens_md5]
+ XVPINSRW xmm0, xmm1, p, lane, tmp, scale_x16
+ vmovdqa [state + _lens_md5], xmm0
+
+ mov last_len, len
+ and last_len, 63
+ lea extra_blocks, [last_len + 9 + 63]
+ shr extra_blocks, 6
+ mov [lane_data + _extra_blocks], DWORD(extra_blocks)
+
+ mov p, [job + _src]
+ add p, [job + _hash_start_src_offset_in_bytes]
+ mov [state + _args_data_ptr_md5 + PTR_SZ*lane], p
+
+ cmp len, 64
+ jb copy_lt64
+
+fast_copy:
+ add p, len
+ vmovdqu xmm0, [p - 64 + 0*16]
+ vmovdqu xmm1, [p - 64 + 1*16]
+ vmovdqu xmm2, [p - 64 + 2*16]
+ vmovdqu xmm3, [p - 64 + 3*16]
+ vmovdqa [lane_data + _extra_block + 0*16], xmm0
+ vmovdqa [lane_data + _extra_block + 1*16], xmm1
+ vmovdqa [lane_data + _extra_block + 2*16], xmm2
+ vmovdqa [lane_data + _extra_block + 3*16], xmm3
+end_fast_copy:
+
+ mov size_offset, extra_blocks
+ shl size_offset, 6
+ sub size_offset, last_len
+ add size_offset, 64-8
+ mov [lane_data + _size_offset], DWORD(size_offset)
+ mov start_offset, 64
+ sub start_offset, last_len
+ mov [lane_data + _start_offset], DWORD(start_offset)
+
+ lea tmp, [8*64 + 8*len]
+; bswap tmp
+ mov [lane_data + _extra_block + size_offset], tmp
+
+ mov tmp, [job + _auth_key_xor_ipad]
+ vmovdqu xmm0, [tmp]
+ vmovd [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*lane + 0*MD5_DIGEST_ROW_SIZE], xmm0
+ vpextrd [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*lane + 1*MD5_DIGEST_ROW_SIZE], xmm0, 1
+ vpextrd [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*lane + 2*MD5_DIGEST_ROW_SIZE], xmm0, 2
+ vpextrd [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*lane + 3*MD5_DIGEST_ROW_SIZE], xmm0, 3
+
+ test len, ~63
+ jnz ge64_bytes
+
+lt64_bytes:
+ vmovdqa xmm0, [state + _lens_md5]
+ XVPINSRW xmm0, xmm1, tmp, lane, extra_blocks, scale_x16
+ vmovdqa [state + _lens_md5], xmm0
+
+ lea tmp, [lane_data + _extra_block + start_offset]
+ mov [state + _args_data_ptr_md5 + PTR_SZ*lane], tmp
+ mov dword [lane_data + _extra_blocks], 0
+
+ge64_bytes:
+ cmp unused_lanes, 0xf
+ jne return_null
+ jmp start_loop
+
+ align 16
+start_loop:
+ ; Find min length
+ vmovdqa xmm0, [state + _lens_md5]
+ vphminposuw xmm1, xmm0
+ vpextrw DWORD(len2), xmm1, 0 ; min value
+ vpextrw DWORD(idx), xmm1, 1 ; min index (0...7)
+ cmp len2, 0
+ je len_is_0
+
+ vpshufb xmm1, xmm1, [rel dupw] ; duplicate words across all lanes
+ vpsubw xmm0, xmm0, xmm1
+ vmovdqa [state + _lens_md5], xmm0
+
+ ; "state" and "args" are the same address, arg1
+ ; len is arg2
+ call md5_x4x2_avx
+ ; state and idx are intact
+
+len_is_0:
+ ; process completed job "idx"
+ imul lane_data, idx, _HMAC_SHA1_LANE_DATA_size
+ lea lane_data, [state + _ldata_md5 + lane_data]
+ mov DWORD(extra_blocks), [lane_data + _extra_blocks]
+ cmp extra_blocks, 0
+ jne proc_extra_blocks
+ cmp dword [lane_data + _outer_done], 0
+ jne end_loop
+
+proc_outer:
+ mov dword [lane_data + _outer_done], 1
+ mov DWORD(size_offset), [lane_data + _size_offset]
+ mov qword [lane_data + _extra_block + size_offset], 0
+
+ vmovdqa xmm0, [state + _lens_md5]
+ XVPINSRW xmm0, xmm1, tmp, idx, 1, scale_x16
+ vmovdqa [state + _lens_md5], xmm0
+
+ lea tmp, [lane_data + _outer_block]
+ mov job, [lane_data + _job_in_lane]
+ mov [state + _args_data_ptr_md5 + PTR_SZ*idx], tmp
+
+ vmovd xmm0, [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 0*MD5_DIGEST_ROW_SIZE]
+ vpinsrd xmm0, [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 1*MD5_DIGEST_ROW_SIZE], 1
+ vpinsrd xmm0, [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 2*MD5_DIGEST_ROW_SIZE], 2
+ vpinsrd xmm0, [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 3*MD5_DIGEST_ROW_SIZE], 3
+; vpshufb xmm0, [byteswap wrt rip]
+ vmovdqa [lane_data + _outer_block], xmm0
+
+ mov tmp, [job + _auth_key_xor_opad]
+ vmovdqu xmm0, [tmp]
+ vmovd [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 0*MD5_DIGEST_ROW_SIZE], xmm0
+ vpextrd [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 1*MD5_DIGEST_ROW_SIZE], xmm0, 1
+ vpextrd [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 2*MD5_DIGEST_ROW_SIZE], xmm0, 2
+ vpextrd [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 3*MD5_DIGEST_ROW_SIZE], xmm0, 3
+ jmp start_loop
+
+ align 16
+proc_extra_blocks:
+ mov DWORD(start_offset), [lane_data + _start_offset]
+
+ vmovdqa xmm0, [state + _lens_md5]
+ XVPINSRW xmm0, xmm1, tmp, idx, extra_blocks, scale_x16
+ vmovdqa [state + _lens_md5], xmm0
+
+ lea tmp, [lane_data + _extra_block + start_offset]
+ mov [state + _args_data_ptr_md5 + PTR_SZ*idx], tmp
+ mov dword [lane_data + _extra_blocks], 0
+ jmp start_loop
+
+ align 16
+
+copy_lt64:
+ ;; less than one message block of data
+ ;; source is the start of the message data
+ ;; destination is extra_block, placed so the copied data ends just before the pre-populated 0x80 byte
+ ;; p2 clobbers unused_lanes, so reload it before exiting
+ lea p2, [lane_data + _extra_block + 64]
+ sub p2, len
+ memcpy_avx_64_1 p2, p, len, tmp4, tmp2, xmm0, xmm1, xmm2, xmm3
+ mov unused_lanes, [state + _unused_lanes_md5]
+ jmp end_fast_copy
+
+return_null:
+ xor job_rax, job_rax
+ jmp return
+
+ align 16
+end_loop:
+ mov job_rax, [lane_data + _job_in_lane]
+ mov unused_lanes, [state + _unused_lanes_md5]
+ mov qword [lane_data + _job_in_lane], 0
+ or dword [job_rax + _status], STS_COMPLETED_HMAC
+ shl unused_lanes, 4
+ or unused_lanes, idx
+ mov [state + _unused_lanes_md5], unused_lanes
+
+ mov p, [job_rax + _auth_tag_output]
+
+ ; copy 12 bytes
+ mov DWORD(tmp), [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 0*MD5_DIGEST_ROW_SIZE]
+ mov DWORD(tmp2), [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 1*MD5_DIGEST_ROW_SIZE]
+ mov DWORD(tmp3), [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 2*MD5_DIGEST_ROW_SIZE]
+ mov [p + 0*4], DWORD(tmp)
+ mov [p + 1*4], DWORD(tmp2)
+ mov [p + 2*4], DWORD(tmp3)
+
+ cmp DWORD [job_rax + _auth_tag_output_len_in_bytes], 12
+ je clear_ret
+
+ ; copy last 4 bytes to complete the 16-byte tag
+ mov DWORD(tmp3), [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 3*MD5_DIGEST_ROW_SIZE]
+ mov [p + 3*4], DWORD(tmp3)
+
+clear_ret:
+
+%ifdef SAFE_DATA
+ ;; Clear digest (16B), outer_block (16B) and extra_block (64B) of returned job
+ mov dword [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 0*MD5_DIGEST_ROW_SIZE], 0
+ mov dword [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 1*MD5_DIGEST_ROW_SIZE], 0
+ mov dword [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 2*MD5_DIGEST_ROW_SIZE], 0
+ mov dword [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 3*MD5_DIGEST_ROW_SIZE], 0
+
+ vpxor xmm0, xmm0
+ imul lane_data, idx, _HMAC_SHA1_LANE_DATA_size
+ lea lane_data, [state + _ldata_md5 + lane_data]
+ ;; Clear first 64 bytes of extra_block
+%assign offset 0
+%rep 4
+ vmovdqa [lane_data + _extra_block + offset], xmm0
+%assign offset (offset + 16)
+%endrep
+
+ ;; Clear first 16 bytes of outer_block
+ vmovdqa [lane_data + _outer_block], xmm0
+%endif
+
+return:
+
+ mov rbx, [rsp + _gpr_save + 8*0]
+ mov rbp, [rsp + _gpr_save + 8*1]
+ mov r12, [rsp + _gpr_save + 8*2]
+ mov r13, [rsp + _gpr_save + 8*3]
+ mov r14, [rsp + _gpr_save + 8*4]
+ mov r15, [rsp + _gpr_save + 8*5]
+%ifndef LINUX
+ mov rsi, [rsp + _gpr_save + 8*6]
+ mov rdi, [rsp + _gpr_save + 8*7]
+%endif
+ mov rsp, [rsp + _rsp_save] ; original SP
+
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
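
(Illustrative note, not part of the patch.) submit_job_hmac_md5_avx manages its free lanes as a packed index list: unused_lanes carries one 4-bit entry per free lane, popping is the `and lane, 0xF` / `shr unused_lanes, 4` pair near the top, and end_loop pushes the finished lane back with `shl`/`or`. When only the 0xf terminator nibble remains every lane holds a job, so the multi-buffer core is run; otherwise the job is merely queued and NULL is returned. A C sketch of that list, assuming a 64-bit list word as in the asm (the SHA-256/512 managers use the same scheme with 8-bit entries):

#include <stdint.h>

/* Pop the next free lane index from the packed list (4-bit entries). */
static unsigned lane_pop(uint64_t *unused_lanes)
{
    unsigned lane = (unsigned)(*unused_lanes & 0xF);   /* and lane, 0xF       */
    *unused_lanes >>= 4;                               /* shr unused_lanes, 4 */
    return lane;
}

/* Push a finished lane back onto the list (end_loop: shl + or). */
static void lane_push(uint64_t *unused_lanes, unsigned idx)
{
    *unused_lanes = (*unused_lanes << 4) | idx;
}

/* All lanes are busy when only the 0xf terminator remains (ge64_bytes check). */
static int all_lanes_busy(uint64_t unused_lanes)
{
    return unused_lanes == 0xF;
}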
diff --git a/src/spdk/intel-ipsec-mb/avx/mb_mgr_hmac_sha_224_flush_avx.asm b/src/spdk/intel-ipsec-mb/avx/mb_mgr_hmac_sha_224_flush_avx.asm
new file mode 100644
index 000000000..416dfb869
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx/mb_mgr_hmac_sha_224_flush_avx.asm
@@ -0,0 +1,31 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%define FUNC flush_job_hmac_sha_224_avx
+%define SHA224
+
+%include "avx/mb_mgr_hmac_sha_256_flush_avx.asm"
diff --git a/src/spdk/intel-ipsec-mb/avx/mb_mgr_hmac_sha_224_submit_avx.asm b/src/spdk/intel-ipsec-mb/avx/mb_mgr_hmac_sha_224_submit_avx.asm
new file mode 100644
index 000000000..ad0721cd7
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx/mb_mgr_hmac_sha_224_submit_avx.asm
@@ -0,0 +1,31 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%define FUNC submit_job_hmac_sha_224_avx
+%define SHA224
+
+%include "avx/mb_mgr_hmac_sha_256_submit_avx.asm"
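
(Illustrative note, not part of the patch.) The two SHA-224 files above are pure wrappers: each defines FUNC plus SHA224 and re-includes the corresponding SHA-256 source, so a single implementation is specialised at assembly time. The C analogue of this pattern, with hypothetical names chosen only to show the idea, would look roughly like:

/* hypothetical single-source specialisation, mirroring %define FUNC + %include */
#define FUNC sha224_digest_size      /* what the wrapper file would define */
#define SHA224

int FUNC(void)
{
#ifdef SHA224
    return 28;   /* SHA-224 digest size in bytes */
#else
    return 32;   /* SHA-256 digest size in bytes */
#endif
}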
diff --git a/src/spdk/intel-ipsec-mb/avx/mb_mgr_hmac_sha_256_flush_avx.asm b/src/spdk/intel-ipsec-mb/avx/mb_mgr_hmac_sha_256_flush_avx.asm
new file mode 100644
index 000000000..0d8b8e50e
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx/mb_mgr_hmac_sha_256_flush_avx.asm
@@ -0,0 +1,356 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%include "include/os.asm"
+%include "job_aes_hmac.asm"
+%include "mb_mgr_datastruct.asm"
+%include "include/reg_sizes.asm"
+
+extern sha_256_mult_avx
+
+section .data
+default rel
+align 16
+byteswap: ;ddq 0x0c0d0e0f08090a0b0405060700010203
+ dq 0x0405060700010203, 0x0c0d0e0f08090a0b
+len_masks:
+ ;ddq 0x0000000000000000000000000000FFFF
+ dq 0x000000000000FFFF, 0x0000000000000000
+ ;ddq 0x000000000000000000000000FFFF0000
+ dq 0x00000000FFFF0000, 0x0000000000000000
+ ;ddq 0x00000000000000000000FFFF00000000
+ dq 0x0000FFFF00000000, 0x0000000000000000
+ ;ddq 0x0000000000000000FFFF000000000000
+ dq 0xFFFF000000000000, 0x0000000000000000
+one: dq 1
+two: dq 2
+three: dq 3
+
+section .text
+
+%ifndef FUNC
+%define FUNC flush_job_hmac_sha_256_avx
+%endif
+
+%if 1
+%ifdef LINUX
+%define arg1 rdi
+%define arg2 rsi
+%else
+%define arg1 rcx
+%define arg2 rdx
+%endif
+
+%define state arg1
+%define job arg2
+%define len2 arg2
+
+
+; idx needs to be in rbx, rbp, r13-r15
+%define idx rbp
+
+%define unused_lanes rbx
+%define lane_data rbx
+%define tmp2 rbx
+
+%define job_rax rax
+%define tmp1 rax
+%define size_offset rax
+%define tmp rax
+%define start_offset rax
+
+%define tmp3 arg1
+
+%define extra_blocks arg2
+%define p arg2
+
+%define tmp4 r8
+
+%define tmp5 r9
+
+%define tmp6 r10
+
+%endif
+
+; This routine clobbers rbx, rbp; called routine also clobbers r12
+struc STACK
+_gpr_save: resq 3
+_rsp_save: resq 1
+endstruc
+
+%define APPEND(a,b) a %+ b
+
+; JOB* FUNC(MB_MGR_HMAC_SHA_256_OOO *state)
+; arg 1 : rcx : state
+MKGLOBAL(FUNC,function,internal)
+FUNC:
+
+ mov rax, rsp
+ sub rsp, STACK_size
+ and rsp, -16
+
+ mov [rsp + _gpr_save + 8*0], rbx
+ mov [rsp + _gpr_save + 8*1], rbp
+ mov [rsp + _gpr_save + 8*2], r12
+ mov [rsp + _rsp_save], rax ; original SP
+
+ mov unused_lanes, [state + _unused_lanes_sha256]
+ bt unused_lanes, 32+7
+ jc return_null
+
+ ; find a lane with a non-null job
+ xor idx, idx
+ cmp qword [state + _ldata_sha256 + 1 * _HMAC_SHA1_LANE_DATA_size + _job_in_lane], 0
+ cmovne idx, [rel one]
+ cmp qword [state + _ldata_sha256 + 2 * _HMAC_SHA1_LANE_DATA_size + _job_in_lane], 0
+ cmovne idx, [rel two]
+ cmp qword [state + _ldata_sha256 + 3 * _HMAC_SHA1_LANE_DATA_size + _job_in_lane], 0
+ cmovne idx, [rel three]
+
+copy_lane_data:
+ ; copy good lane (idx) to empty lanes
+ vmovdqa xmm0, [state + _lens_sha256]
+ mov tmp, [state + _args_data_ptr_sha256 + 8*idx]
+
+%assign I 0
+%rep 4
+ cmp qword [state + _ldata_sha256 + I * _HMAC_SHA1_LANE_DATA_size + _job_in_lane], 0
+ jne APPEND(skip_,I)
+ mov [state + _args_data_ptr_sha256 + 8*I], tmp
+ vpor xmm0, xmm0, [rel len_masks + 16*I]
+APPEND(skip_,I):
+%assign I (I+1)
+%endrep
+
+ vmovdqa [state + _lens_sha256], xmm0
+
+ vphminposuw xmm1, xmm0
+ vpextrw DWORD(len2), xmm1, 0 ; min value
+ vpextrw DWORD(idx), xmm1, 1 ; min index (0...3)
+ cmp len2, 0
+ je len_is_0
+
+ vpshuflw xmm1, xmm1, 0
+ vpsubw xmm0, xmm0, xmm1
+ vmovdqa [state + _lens_sha256], xmm0
+
+ ; "state" and "args" are the same address, arg1
+ ; len is arg2
+ call sha_256_mult_avx
+ ; state and idx are intact
+
+len_is_0:
+ ; process completed job "idx"
+ imul lane_data, idx, _HMAC_SHA1_LANE_DATA_size
+ lea lane_data, [state + _ldata_sha256 + lane_data]
+ mov DWORD(extra_blocks), [lane_data + _extra_blocks]
+ cmp extra_blocks, 0
+ jne proc_extra_blocks
+ cmp dword [lane_data + _outer_done], 0
+ jne end_loop
+
+proc_outer:
+ mov dword [lane_data + _outer_done], 1
+ mov DWORD(size_offset), [lane_data + _size_offset]
+ mov qword [lane_data + _extra_block + size_offset], 0
+ mov word [state + _lens_sha256 + 2*idx], 1
+ lea tmp, [lane_data + _outer_block]
+ mov job, [lane_data + _job_in_lane]
+ mov [state + _args_data_ptr_sha256 + 8*idx], tmp
+
+ vmovd xmm0, [state + _args_digest_sha256 + 4*idx + 0*SHA256_DIGEST_ROW_SIZE]
+ vpinsrd xmm0, xmm0, [state + _args_digest_sha256 + 4*idx + 1*SHA256_DIGEST_ROW_SIZE], 1
+ vpinsrd xmm0, xmm0, [state + _args_digest_sha256 + 4*idx + 2*SHA256_DIGEST_ROW_SIZE], 2
+ vpinsrd xmm0, xmm0, [state + _args_digest_sha256 + 4*idx + 3*SHA256_DIGEST_ROW_SIZE], 3
+ vpshufb xmm0, xmm0, [rel byteswap]
+ vmovd xmm1, [state + _args_digest_sha256 + 4*idx + 4*SHA256_DIGEST_ROW_SIZE]
+ vpinsrd xmm1, xmm1, [state + _args_digest_sha256 + 4*idx + 5*SHA256_DIGEST_ROW_SIZE], 1
+ vpinsrd xmm1, xmm1, [state + _args_digest_sha256 + 4*idx + 6*SHA256_DIGEST_ROW_SIZE], 2
+%ifndef SHA224
+ vpinsrd xmm1, xmm1, [state + _args_digest_sha256 + 4*idx + 7*SHA256_DIGEST_ROW_SIZE], 3
+%endif
+ vpshufb xmm1, xmm1, [rel byteswap]
+
+ vmovdqa [lane_data + _outer_block], xmm0
+ vmovdqa [lane_data + _outer_block + 4*4], xmm1
+%ifdef SHA224
+ mov dword [lane_data + _outer_block + 7*4], 0x80
+%endif
+
+ mov tmp, [job + _auth_key_xor_opad]
+ vmovdqu xmm0, [tmp]
+ vmovdqu xmm1, [tmp + 4*4]
+ vmovd [state + _args_digest_sha256 + 4*idx + 0*SHA256_DIGEST_ROW_SIZE], xmm0
+ vpextrd [state + _args_digest_sha256 + 4*idx + 1*SHA256_DIGEST_ROW_SIZE], xmm0, 1
+ vpextrd [state + _args_digest_sha256 + 4*idx + 2*SHA256_DIGEST_ROW_SIZE], xmm0, 2
+ vpextrd [state + _args_digest_sha256 + 4*idx + 3*SHA256_DIGEST_ROW_SIZE], xmm0, 3
+ vmovd [state + _args_digest_sha256 + 4*idx + 4*SHA256_DIGEST_ROW_SIZE], xmm1
+ vpextrd [state + _args_digest_sha256 + 4*idx + 5*SHA256_DIGEST_ROW_SIZE], xmm1, 1
+ vpextrd [state + _args_digest_sha256 + 4*idx + 6*SHA256_DIGEST_ROW_SIZE], xmm1, 2
+ vpextrd [state + _args_digest_sha256 + 4*idx + 7*SHA256_DIGEST_ROW_SIZE], xmm1, 3
+ jmp copy_lane_data
+
+ align 16
+proc_extra_blocks:
+ mov DWORD(start_offset), [lane_data + _start_offset]
+ mov [state + _lens_sha256 + 2*idx], WORD(extra_blocks)
+ lea tmp, [lane_data + _extra_block + start_offset]
+ mov [state + _args_data_ptr_sha256 + 8*idx], tmp
+ mov dword [lane_data + _extra_blocks], 0
+ jmp copy_lane_data
+
+return_null:
+ xor job_rax, job_rax
+ jmp return
+
+ align 16
+end_loop:
+ mov job_rax, [lane_data + _job_in_lane]
+ mov qword [lane_data + _job_in_lane], 0
+ or dword [job_rax + _status], STS_COMPLETED_HMAC
+ mov unused_lanes, [state + _unused_lanes_sha256]
+ shl unused_lanes, 8
+ or unused_lanes, idx
+ mov [state + _unused_lanes_sha256], unused_lanes
+
+ mov p, [job_rax + _auth_tag_output]
+%ifdef SHA224
+ cmp qword [job_rax + _auth_tag_output_len_in_bytes], 14
+ jne copy_full_digest
+%else
+ cmp qword [job_rax + _auth_tag_output_len_in_bytes], 16
+ jne copy_full_digest
+%endif
+
+ ;; copy 14 bytes for SHA224 / 16 bytes for SHA256
+ mov DWORD(tmp2), [state + _args_digest_sha256 + 4*idx + 0*SHA256_DIGEST_ROW_SIZE]
+ mov DWORD(tmp4), [state + _args_digest_sha256 + 4*idx + 1*SHA256_DIGEST_ROW_SIZE]
+ mov DWORD(tmp6), [state + _args_digest_sha256 + 4*idx + 2*SHA256_DIGEST_ROW_SIZE]
+ mov DWORD(tmp5), [state + _args_digest_sha256 + 4*idx + 3*SHA256_DIGEST_ROW_SIZE]
+ bswap DWORD(tmp2)
+ bswap DWORD(tmp4)
+ bswap DWORD(tmp6)
+ bswap DWORD(tmp5)
+ mov [p + 0*4], DWORD(tmp2)
+ mov [p + 1*4], DWORD(tmp4)
+ mov [p + 2*4], DWORD(tmp6)
+%ifdef SHA224
+ mov [p + 3*4], WORD(tmp5)
+%else
+ mov [p + 3*4], DWORD(tmp5)
+%endif
+ jmp clear_ret
+
+copy_full_digest:
+ ;; copy 28 bytes for SHA224 / 32 bytes for SHA256
+ mov DWORD(tmp2), [state + _args_digest_sha256 + 4*idx + 0*SHA256_DIGEST_ROW_SIZE]
+ mov DWORD(tmp4), [state + _args_digest_sha256 + 4*idx + 1*SHA256_DIGEST_ROW_SIZE]
+ mov DWORD(tmp6), [state + _args_digest_sha256 + 4*idx + 2*SHA256_DIGEST_ROW_SIZE]
+ mov DWORD(tmp5), [state + _args_digest_sha256 + 4*idx + 3*SHA256_DIGEST_ROW_SIZE]
+ bswap DWORD(tmp2)
+ bswap DWORD(tmp4)
+ bswap DWORD(tmp6)
+ bswap DWORD(tmp5)
+ mov [p + 0*4], DWORD(tmp2)
+ mov [p + 1*4], DWORD(tmp4)
+ mov [p + 2*4], DWORD(tmp6)
+ mov [p + 3*4], DWORD(tmp5)
+
+ mov DWORD(tmp2), [state + _args_digest_sha256 + 4*idx + 4*SHA256_DIGEST_ROW_SIZE]
+ mov DWORD(tmp4), [state + _args_digest_sha256 + 4*idx + 5*SHA256_DIGEST_ROW_SIZE]
+ mov DWORD(tmp6), [state + _args_digest_sha256 + 4*idx + 6*SHA256_DIGEST_ROW_SIZE]
+%ifndef SHA224
+ mov DWORD(tmp5), [state + _args_digest_sha256 + 4*idx + 7*SHA256_DIGEST_ROW_SIZE]
+%endif
+ bswap DWORD(tmp2)
+ bswap DWORD(tmp4)
+ bswap DWORD(tmp6)
+%ifndef SHA224
+ bswap DWORD(tmp5)
+%endif
+ mov [p + 4*4], DWORD(tmp2)
+ mov [p + 5*4], DWORD(tmp4)
+ mov [p + 6*4], DWORD(tmp6)
+%ifndef SHA224
+ mov [p + 7*4], DWORD(tmp5)
+%endif
+
+clear_ret:
+
+%ifdef SAFE_DATA
+ vpxor xmm0, xmm0
+
+ ;; Clear digest (28B/32B), outer_block (28B/32B) and extra_block (64B)
+ ;; of returned job and NULL jobs
+%assign I 0
+%rep 4
+ cmp qword [state + _ldata_sha256 + (I*_HMAC_SHA1_LANE_DATA_size) + _job_in_lane], 0
+ jne APPEND(skip_clear_,I)
+
+ ;; Clear digest (28 bytes for SHA-224, 32 bytes for SHA-256)
+%assign J 0
+%rep 7
+ mov dword [state + _args_digest_sha256 + SHA256_DIGEST_WORD_SIZE*I + J*SHA256_DIGEST_ROW_SIZE], 0
+%assign J (J+1)
+%endrep
+%ifndef SHA224
+ mov dword [state + _args_digest_sha256 + SHA256_DIGEST_WORD_SIZE*I + 7*SHA256_DIGEST_ROW_SIZE], 0
+%endif
+
+ lea lane_data, [state + _ldata_sha256 + (I*_HMAC_SHA1_LANE_DATA_size)]
+ ;; Clear first 64 bytes of extra_block
+%assign offset 0
+%rep 4
+ vmovdqa [lane_data + _extra_block + offset], xmm0
+%assign offset (offset + 16)
+%endrep
+
+ ;; Clear first 28 bytes (SHA-224) or 32 bytes (SHA-256) of outer_block
+ vmovdqa [lane_data + _outer_block], xmm0
+%ifdef SHA224
+ mov qword [lane_data + _outer_block + 16], 0
+ mov dword [lane_data + _outer_block + 24], 0
+%else
+ vmovdqa [lane_data + _outer_block + 16], xmm0
+%endif
+
+APPEND(skip_clear_,I):
+%assign I (I+1)
+%endrep
+
+%endif ;; SAFE_DATA
+
+return:
+ mov rbx, [rsp + _gpr_save + 8*0]
+ mov rbp, [rsp + _gpr_save + 8*1]
+ mov r12, [rsp + _gpr_save + 8*2]
+ mov rsp, [rsp + _rsp_save] ; original SP
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
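
(Illustrative note, not part of the patch.) The flush routine above keeps all four SHA-256 lanes schedulable even when some are idle: copy_lane_data points every empty lane at the chosen good lane's data pointer and ORs its length word to 0xFFFF via len_masks, the vphminposuw scan then picks the shortest real job, and that minimum is broadcast and subtracted from every lane before sha_256_mult_avx runs. A scalar C model of that step, with NUM_LANES, lens[], data_ptr[] and job_in_lane[] as stand-ins for the OOO-manager fields:

#include <stddef.h>
#include <stdint.h>

#define NUM_LANES 4                      /* sha_256_mult_avx processes 4 lanes */

struct ooo_model {
    uint16_t    lens[NUM_LANES];         /* remaining blocks per lane */
    const void *data_ptr[NUM_LANES];
    void       *job_in_lane[NUM_LANES];
};

/* Returns the lane whose job completes next, or -1 if every lane is idle. */
static int flush_schedule(struct ooo_model *s)
{
    int good = -1;

    for (int i = 0; i < NUM_LANES; i++)          /* find a lane with a job */
        if (s->job_in_lane[i] != NULL) { good = i; break; }
    if (good < 0)
        return -1;                               /* return_null */

    for (int i = 0; i < NUM_LANES; i++)          /* park the idle lanes */
        if (s->job_in_lane[i] == NULL) {
            s->data_ptr[i] = s->data_ptr[good];  /* copy_lane_data */
            s->lens[i]     = 0xFFFF;             /* vpor with len_masks */
        }

    int idx = 0;                                 /* vphminposuw: min + index */
    for (int i = 1; i < NUM_LANES; i++)
        if (s->lens[i] < s->lens[idx]) idx = i;

    uint16_t min_len = s->lens[idx];
    if (min_len != 0) {
        for (int i = 0; i < NUM_LANES; i++)
            s->lens[i] -= min_len;               /* vpsubw */
        /* sha_256_mult_avx(state, min_len) would be called here */
    }
    return idx;                                  /* len_is_0: finish this job */
}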
diff --git a/src/spdk/intel-ipsec-mb/avx/mb_mgr_hmac_sha_256_submit_avx.asm b/src/spdk/intel-ipsec-mb/avx/mb_mgr_hmac_sha_256_submit_avx.asm
new file mode 100644
index 000000000..738d88b94
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx/mb_mgr_hmac_sha_256_submit_avx.asm
@@ -0,0 +1,428 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%include "include/os.asm"
+%include "job_aes_hmac.asm"
+%include "mb_mgr_datastruct.asm"
+%include "include/reg_sizes.asm"
+%include "include/memcpy.asm"
+%include "include/const.inc"
+
+extern sha_256_mult_avx
+
+section .data
+default rel
+align 16
+byteswap: ;ddq 0x0c0d0e0f08090a0b0405060700010203
+ dq 0x0405060700010203, 0x0c0d0e0f08090a0b
+
+section .text
+
+%ifndef FUNC
+%define FUNC submit_job_hmac_sha_256_avx
+%endif
+
+%if 1
+%ifdef LINUX
+%define arg1 rdi
+%define arg2 rsi
+%define reg3 rcx
+%define reg4 rdx
+%else
+%define arg1 rcx
+%define arg2 rdx
+%define reg3 rdi
+%define reg4 rsi
+%endif
+
+%define state arg1
+%define job arg2
+%define len2 arg2
+
+
+; idx needs to be in rbx, rbp, r13-r15
+%define last_len rbp
+%define idx rbp
+
+%define p r11
+%define start_offset r11
+
+%define unused_lanes rbx
+%define tmp4 rbx
+
+%define job_rax rax
+%define len rax
+
+%define size_offset reg3
+%define tmp2 reg3
+
+%define lane reg4
+%define tmp3 reg4
+
+%define extra_blocks r8
+
+%define tmp r9
+%define p2 r9
+
+%define lane_data r10
+
+%endif
+
+; This routine clobbers rbx, rbp, rsi, rdi; called routine also clobbers r12
+struc STACK
+_gpr_save: resq 5
+_rsp_save: resq 1
+endstruc
+
+; JOB* FUNC(MB_MGR_HMAC_SHA_256_OOO *state, JOB_AES_HMAC *job)
+; arg 1 : rcx : state
+; arg 2 : rdx : job
+MKGLOBAL(FUNC,function,internal)
+FUNC:
+
+ mov rax, rsp
+ sub rsp, STACK_size
+ and rsp, -16
+
+ mov [rsp + _gpr_save + 8*0], rbx
+ mov [rsp + _gpr_save + 8*1], rbp
+ mov [rsp + _gpr_save + 8*2], r12
+%ifndef LINUX
+ mov [rsp + _gpr_save + 8*3], rsi
+ mov [rsp + _gpr_save + 8*4], rdi
+%endif
+ mov [rsp + _rsp_save], rax ; original SP
+
+ mov unused_lanes, [state + _unused_lanes_sha256]
+ movzx lane, BYTE(unused_lanes)
+ shr unused_lanes, 8
+ imul lane_data, lane, _HMAC_SHA1_LANE_DATA_size
+ lea lane_data, [state + _ldata_sha256 + lane_data]
+ mov [state + _unused_lanes_sha256], unused_lanes
+ mov len, [job + _msg_len_to_hash_in_bytes]
+ mov tmp, len
+ shr tmp, 6 ; divide by 64, len in terms of blocks
+
+ mov [lane_data + _job_in_lane], job
+ mov dword [lane_data + _outer_done], 0
+
+ vmovdqa xmm0, [state + _lens_sha256]
+ XVPINSRW xmm0, xmm1, p, lane, tmp, scale_x16
+ vmovdqa [state + _lens_sha256], xmm0
+
+ mov last_len, len
+ and last_len, 63
+ lea extra_blocks, [last_len + 9 + 63]
+ shr extra_blocks, 6
+ mov [lane_data + _extra_blocks], DWORD(extra_blocks)
+
+ mov p, [job + _src]
+ add p, [job + _hash_start_src_offset_in_bytes]
+ mov [state + _args_data_ptr_sha256 + 8*lane], p
+
+ cmp len, 64
+ jb copy_lt64
+
+fast_copy:
+ add p, len
+ vmovdqu xmm0, [p - 64 + 0*16]
+ vmovdqu xmm1, [p - 64 + 1*16]
+ vmovdqu xmm2, [p - 64 + 2*16]
+ vmovdqu xmm3, [p - 64 + 3*16]
+ vmovdqa [lane_data + _extra_block + 0*16], xmm0
+ vmovdqa [lane_data + _extra_block + 1*16], xmm1
+ vmovdqa [lane_data + _extra_block + 2*16], xmm2
+ vmovdqa [lane_data + _extra_block + 3*16], xmm3
+end_fast_copy:
+
+ mov size_offset, extra_blocks
+ shl size_offset, 6
+ sub size_offset, last_len
+ add size_offset, 64-8
+ mov [lane_data + _size_offset], DWORD(size_offset)
+ mov start_offset, 64
+ sub start_offset, last_len
+ mov [lane_data + _start_offset], DWORD(start_offset)
+
+ lea tmp, [8*64 + 8*len]
+ bswap tmp
+ mov [lane_data + _extra_block + size_offset], tmp
+
+ mov tmp, [job + _auth_key_xor_ipad]
+ vmovdqu xmm0, [tmp]
+ vmovdqu xmm1, [tmp + 4*4]
+ vmovd [state + _args_digest_sha256 + 4*lane + 0*SHA256_DIGEST_ROW_SIZE], xmm0
+ vpextrd [state + _args_digest_sha256 + 4*lane + 1*SHA256_DIGEST_ROW_SIZE], xmm0, 1
+ vpextrd [state + _args_digest_sha256 + 4*lane + 2*SHA256_DIGEST_ROW_SIZE], xmm0, 2
+ vpextrd [state + _args_digest_sha256 + 4*lane + 3*SHA256_DIGEST_ROW_SIZE], xmm0, 3
+ vmovd [state + _args_digest_sha256 + 4*lane + 4*SHA256_DIGEST_ROW_SIZE], xmm1
+ vpextrd [state + _args_digest_sha256 + 4*lane + 5*SHA256_DIGEST_ROW_SIZE], xmm1, 1
+ vpextrd [state + _args_digest_sha256 + 4*lane + 6*SHA256_DIGEST_ROW_SIZE], xmm1, 2
+ vpextrd [state + _args_digest_sha256 + 4*lane + 7*SHA256_DIGEST_ROW_SIZE], xmm1, 3
+
+ test len, ~63
+ jnz ge64_bytes
+
+lt64_bytes:
+ vmovdqa xmm0, [state + _lens_sha256]
+ XVPINSRW xmm0, xmm1, tmp, lane, extra_blocks, scale_x16
+ vmovdqa [state + _lens_sha256], xmm0
+
+ lea tmp, [lane_data + _extra_block + start_offset]
+ mov [state + _args_data_ptr_sha256 + 8*lane], tmp
+ mov dword [lane_data + _extra_blocks], 0
+
+ge64_bytes:
+ cmp unused_lanes, 0xff
+ jne return_null
+ jmp start_loop
+
+ align 16
+start_loop:
+ ; Find min length
+ vmovdqa xmm0, [state + _lens_sha256]
+ vphminposuw xmm1, xmm0
+ vpextrw DWORD(len2), xmm1, 0 ; min value
+ vpextrw DWORD(idx), xmm1, 1 ; min index (0...3)
+ cmp len2, 0
+ je len_is_0
+
+ vpshuflw xmm1, xmm1, 0
+ vpsubw xmm0, xmm0, xmm1
+ vmovdqa [state + _lens_sha256], xmm0
+
+ ; "state" and "args" are the same address, arg1
+ ; len is arg2
+ call sha_256_mult_avx
+ ; state and idx are intact
+
+len_is_0:
+ ; process completed job "idx"
+ imul lane_data, idx, _HMAC_SHA1_LANE_DATA_size
+ lea lane_data, [state + _ldata_sha256 + lane_data]
+ mov DWORD(extra_blocks), [lane_data + _extra_blocks]
+ cmp extra_blocks, 0
+ jne proc_extra_blocks
+ cmp dword [lane_data + _outer_done], 0
+ jne end_loop
+
+proc_outer:
+ mov dword [lane_data + _outer_done], 1
+ mov DWORD(size_offset), [lane_data + _size_offset]
+ mov qword [lane_data + _extra_block + size_offset], 0
+
+ vmovdqa xmm0, [state + _lens_sha256]
+ XVPINSRW xmm0, xmm1, tmp, idx, 1, scale_x16
+ vmovdqa [state + _lens_sha256], xmm0
+
+ lea tmp, [lane_data + _outer_block]
+ mov job, [lane_data + _job_in_lane]
+ mov [state + _args_data_ptr_sha256 + 8*idx], tmp
+
+ vmovd xmm0, [state + _args_digest_sha256 + 4*idx + 0*SHA256_DIGEST_ROW_SIZE]
+ vpinsrd xmm0, xmm0, [state + _args_digest_sha256 + 4*idx + 1*SHA256_DIGEST_ROW_SIZE], 1
+ vpinsrd xmm0, xmm0, [state + _args_digest_sha256 + 4*idx + 2*SHA256_DIGEST_ROW_SIZE], 2
+ vpinsrd xmm0, xmm0, [state + _args_digest_sha256 + 4*idx + 3*SHA256_DIGEST_ROW_SIZE], 3
+ vpshufb xmm0, xmm0, [rel byteswap]
+ vmovd xmm1, [state + _args_digest_sha256 + 4*idx + 4*SHA256_DIGEST_ROW_SIZE]
+ vpinsrd xmm1, xmm1, [state + _args_digest_sha256 + 4*idx + 5*SHA256_DIGEST_ROW_SIZE], 1
+ vpinsrd xmm1, xmm1, [state + _args_digest_sha256 + 4*idx + 6*SHA256_DIGEST_ROW_SIZE], 2
+%ifndef SHA224
+ vpinsrd xmm1, xmm1, [state + _args_digest_sha256 + 4*idx + 7*SHA256_DIGEST_ROW_SIZE], 3
+%endif
+ vpshufb xmm1, xmm1, [rel byteswap]
+ vmovdqa [lane_data + _outer_block], xmm0
+ vmovdqa [lane_data + _outer_block + 4*4], xmm1
+%ifdef SHA224
+ mov dword [lane_data + _outer_block + 7*4], 0x80
+%endif
+
+ mov tmp, [job + _auth_key_xor_opad]
+ vmovdqu xmm0, [tmp]
+ vmovdqu xmm1, [tmp + 4*4]
+ vmovd [state + _args_digest_sha256 + 4*idx + 0*SHA256_DIGEST_ROW_SIZE], xmm0
+ vpextrd [state + _args_digest_sha256 + 4*idx + 1*SHA256_DIGEST_ROW_SIZE], xmm0, 1
+ vpextrd [state + _args_digest_sha256 + 4*idx + 2*SHA256_DIGEST_ROW_SIZE], xmm0, 2
+ vpextrd [state + _args_digest_sha256 + 4*idx + 3*SHA256_DIGEST_ROW_SIZE], xmm0, 3
+ vmovd [state + _args_digest_sha256 + 4*idx + 4*SHA256_DIGEST_ROW_SIZE], xmm1
+ vpextrd [state + _args_digest_sha256 + 4*idx + 5*SHA256_DIGEST_ROW_SIZE], xmm1, 1
+ vpextrd [state + _args_digest_sha256 + 4*idx + 6*SHA256_DIGEST_ROW_SIZE], xmm1, 2
+ vpextrd [state + _args_digest_sha256 + 4*idx + 7*SHA256_DIGEST_ROW_SIZE], xmm1, 3
+
+ jmp start_loop
+
+ align 16
+proc_extra_blocks:
+ mov DWORD(start_offset), [lane_data + _start_offset]
+
+ vmovdqa xmm0, [state + _lens_sha256]
+ XVPINSRW xmm0, xmm1, tmp, idx, extra_blocks, scale_x16
+ vmovdqa [state + _lens_sha256], xmm0
+
+ lea tmp, [lane_data + _extra_block + start_offset]
+ mov [state + _args_data_ptr_sha256 + 8*idx], tmp
+ mov dword [lane_data + _extra_blocks], 0
+ jmp start_loop
+
+ align 16
+
+copy_lt64:
+ ;; less than one message block of data
+ ;; source is the start of the message data
+ ;; destination is extra_block, placed so the copied data ends just before the pre-populated 0x80 byte
+ ;; p2 clobbers unused_lanes, so reload it before exiting
+ lea p2, [lane_data + _extra_block + 64]
+ sub p2, len
+ memcpy_avx_64_1 p2, p, len, tmp4, tmp2, xmm0, xmm1, xmm2, xmm3
+ mov unused_lanes, [state + _unused_lanes_sha256]
+ jmp end_fast_copy
+
+return_null:
+ xor job_rax, job_rax
+ jmp return
+
+ align 16
+end_loop:
+ mov job_rax, [lane_data + _job_in_lane]
+ mov unused_lanes, [state + _unused_lanes_sha256]
+ mov qword [lane_data + _job_in_lane], 0
+ or dword [job_rax + _status], STS_COMPLETED_HMAC
+ shl unused_lanes, 8
+ or unused_lanes, idx
+ mov [state + _unused_lanes_sha256], unused_lanes
+
+ mov p, [job_rax + _auth_tag_output]
+
+%ifdef SHA224
+ cmp qword [job_rax + _auth_tag_output_len_in_bytes], 14
+ jne copy_full_digest
+%else
+ cmp qword [job_rax + _auth_tag_output_len_in_bytes], 16
+ jne copy_full_digest
+%endif
+ ; copy 14 bytes for SHA224 / 16 bytes for SHA256
+ mov DWORD(tmp), [state + _args_digest_sha256 + 4*idx + 0*SHA256_DIGEST_ROW_SIZE]
+ mov DWORD(tmp2), [state + _args_digest_sha256 + 4*idx + 1*SHA256_DIGEST_ROW_SIZE]
+ mov DWORD(tmp3), [state + _args_digest_sha256 + 4*idx + 2*SHA256_DIGEST_ROW_SIZE]
+ mov DWORD(tmp4), [state + _args_digest_sha256 + 4*idx + 3*SHA256_DIGEST_ROW_SIZE]
+ bswap DWORD(tmp)
+ bswap DWORD(tmp2)
+ bswap DWORD(tmp3)
+ bswap DWORD(tmp4)
+ mov [p + 0*4], DWORD(tmp)
+ mov [p + 1*4], DWORD(tmp2)
+ mov [p + 2*4], DWORD(tmp3)
+%ifdef SHA224
+ mov [p + 3*4], WORD(tmp4)
+%else
+ mov [p + 3*4], DWORD(tmp4)
+%endif
+ jmp clear_ret
+
+copy_full_digest:
+ ;; copy 28 bytes for SHA224 / 32 bytes for SHA256
+ mov DWORD(tmp), [state + _args_digest_sha256 + 4*idx + 0*SHA256_DIGEST_ROW_SIZE]
+ mov DWORD(tmp2), [state + _args_digest_sha256 + 4*idx + 1*SHA256_DIGEST_ROW_SIZE]
+ mov DWORD(tmp3), [state + _args_digest_sha256 + 4*idx + 2*SHA256_DIGEST_ROW_SIZE]
+ mov DWORD(tmp4), [state + _args_digest_sha256 + 4*idx + 3*SHA256_DIGEST_ROW_SIZE]
+ bswap DWORD(tmp)
+ bswap DWORD(tmp2)
+ bswap DWORD(tmp3)
+ bswap DWORD(tmp4)
+ mov [p + 0*4], DWORD(tmp)
+ mov [p + 1*4], DWORD(tmp2)
+ mov [p + 2*4], DWORD(tmp3)
+ mov [p + 3*4], DWORD(tmp4)
+
+ mov DWORD(tmp), [state + _args_digest_sha256 + 4*idx + 4*SHA256_DIGEST_ROW_SIZE]
+ mov DWORD(tmp2), [state + _args_digest_sha256 + 4*idx + 5*SHA256_DIGEST_ROW_SIZE]
+ mov DWORD(tmp3), [state + _args_digest_sha256 + 4*idx + 6*SHA256_DIGEST_ROW_SIZE]
+%ifndef SHA224
+ mov DWORD(tmp4), [state + _args_digest_sha256 + 4*idx + 7*SHA256_DIGEST_ROW_SIZE]
+%endif
+ bswap DWORD(tmp)
+ bswap DWORD(tmp2)
+ bswap DWORD(tmp3)
+%ifndef SHA224
+ bswap DWORD(tmp4)
+%endif
+ mov [p + 4*4], DWORD(tmp)
+ mov [p + 5*4], DWORD(tmp2)
+ mov [p + 6*4], DWORD(tmp3)
+%ifndef SHA224
+ mov [p + 7*4], DWORD(tmp4)
+%endif
+
+clear_ret:
+
+%ifdef SAFE_DATA
+ ;; Clear digest (28B/32B), outer_block (28B/32B) and extra_block (64B) of returned job
+%assign J 0
+%rep 7
+ mov dword [state + _args_digest_sha256 + SHA256_DIGEST_WORD_SIZE*idx + J*SHA256_DIGEST_ROW_SIZE], 0
+%assign J (J+1)
+%endrep
+%ifndef SHA224
+ mov dword [state + _args_digest_sha256 + SHA256_DIGEST_WORD_SIZE*idx + 7*SHA256_DIGEST_ROW_SIZE], 0
+%endif
+
+ vpxor xmm0, xmm0
+ imul lane_data, idx, _HMAC_SHA1_LANE_DATA_size
+ lea lane_data, [state + _ldata_sha256 + lane_data]
+ ;; Clear first 64 bytes of extra_block
+%assign offset 0
+%rep 4
+ vmovdqa [lane_data + _extra_block + offset], xmm0
+%assign offset (offset + 16)
+%endrep
+
+ ;; Clear first 28 bytes (SHA-224) or 32 bytes (SHA-256) of outer_block
+ vmovdqa [lane_data + _outer_block], xmm0
+%ifdef SHA224
+ mov qword [lane_data + _outer_block + 16], 0
+ mov dword [lane_data + _outer_block + 24], 0
+%else
+ vmovdqa [lane_data + _outer_block + 16], xmm0
+%endif
+%endif ;; SAFE_DATA
+
+return:
+
+ mov rbx, [rsp + _gpr_save + 8*0]
+ mov rbp, [rsp + _gpr_save + 8*1]
+ mov r12, [rsp + _gpr_save + 8*2]
+%ifndef LINUX
+ mov rsi, [rsp + _gpr_save + 8*3]
+ mov rdi, [rsp + _gpr_save + 8*4]
+%endif
+ mov rsp, [rsp + _rsp_save] ; original SP
+
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
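
(Illustrative note, not part of the patch.) The submit path above pre-computes where the inner-hash padding lands inside the lane's extra_block: extra_blocks is the number of trailing blocks needed for the last_len leftover bytes plus the 0x80 byte and the 8-byte length, start_offset is where hashing of extra_block begins, and size_offset is where the big-endian bit count of (ipad block + message) is written. A C sketch of that arithmetic, assuming the 128-byte extra_block with 0x80 pre-populated at offset 64 that the code relies on; __builtin_bswap64 is a GCC/Clang stand-in for the bswap instruction (the MD5 manager skips the swap since MD5 uses a little-endian length):

#include <stdint.h>

struct tail_layout {
    uint32_t extra_blocks;   /* trailing 64-byte blocks hashed from extra_block */
    uint32_t start_offset;   /* offset in extra_block where hashing starts      */
    uint32_t size_offset;    /* offset where the 64-bit length field is stored  */
};

static struct tail_layout hmac_sha256_tail(uint64_t msg_len)
{
    struct tail_layout t;
    uint64_t last_len = msg_len & 63;    /* bytes not covered by full blocks */

    /* last_len data bytes + 0x80 + 8 length bytes, rounded up to blocks */
    t.extra_blocks = (uint32_t)((last_len + 9 + 63) >> 6);
    t.start_offset = (uint32_t)(64 - last_len);
    /* last 8 bytes of the final extra block:
     * start_offset + extra_blocks*64 - 8 == extra_blocks*64 - last_len + 56 */
    t.size_offset  = (uint32_t)(t.extra_blocks * 64 - last_len + 64 - 8);
    return t;
}

/* Value stored at size_offset: bit length of the ipad block plus the message. */
static uint64_t hmac_sha256_bit_count(uint64_t msg_len)
{
    return __builtin_bswap64(8 * (64 + msg_len));
}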
diff --git a/src/spdk/intel-ipsec-mb/avx/mb_mgr_hmac_sha_384_flush_avx.asm b/src/spdk/intel-ipsec-mb/avx/mb_mgr_hmac_sha_384_flush_avx.asm
new file mode 100644
index 000000000..f3491ab27
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx/mb_mgr_hmac_sha_384_flush_avx.asm
@@ -0,0 +1,31 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%define FUNC flush_job_hmac_sha_384_avx
+%define SHA_X_DIGEST_SIZE 384
+
+%include "avx/mb_mgr_hmac_sha_512_flush_avx.asm"
diff --git a/src/spdk/intel-ipsec-mb/avx/mb_mgr_hmac_sha_384_submit_avx.asm b/src/spdk/intel-ipsec-mb/avx/mb_mgr_hmac_sha_384_submit_avx.asm
new file mode 100644
index 000000000..a2fb0f1c6
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx/mb_mgr_hmac_sha_384_submit_avx.asm
@@ -0,0 +1,31 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%define FUNC submit_job_hmac_sha_384_avx
+%define SHA_X_DIGEST_SIZE 384
+
+%include "avx/mb_mgr_hmac_sha_512_submit_avx.asm"
diff --git a/src/spdk/intel-ipsec-mb/avx/mb_mgr_hmac_sha_512_flush_avx.asm b/src/spdk/intel-ipsec-mb/avx/mb_mgr_hmac_sha_512_flush_avx.asm
new file mode 100644
index 000000000..2de170948
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx/mb_mgr_hmac_sha_512_flush_avx.asm
@@ -0,0 +1,339 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%include "include/os.asm"
+%include "job_aes_hmac.asm"
+%include "mb_mgr_datastruct.asm"
+%include "include/reg_sizes.asm"
+
+extern sha512_x2_avx
+
+section .data
+default rel
+align 16
+byteswap: ;ddq 0x08090a0b0c0d0e0f0001020304050607
+ dq 0x0001020304050607, 0x08090a0b0c0d0e0f
+len_masks:
+ ;ddq 0x0000000000000000000000000000FFFF
+ dq 0x000000000000FFFF, 0x0000000000000000
+ ;ddq 0x000000000000000000000000FFFF0000
+ dq 0x00000000FFFF0000, 0x0000000000000000
+one: dq 1
+
+section .text
+
+%ifndef FUNC
+%define FUNC flush_job_hmac_sha_512_avx
+%define SHA_X_DIGEST_SIZE 512
+%endif
+
+%if 1
+%ifdef LINUX
+%define arg1 rdi
+%define arg2 rsi
+%else
+%define arg1 rcx
+%define arg2 rdx
+%endif
+
+%define state arg1
+%define job arg2
+%define len2 arg2
+
+
+; idx needs to be in rbx, rbp, r12-r15
+%define idx rbp
+
+%define unused_lanes rbx
+%define lane_data rbx
+%define tmp2 rbx
+
+%define job_rax rax
+%define tmp1 rax
+%define size_offset rax
+%define tmp rax
+%define start_offset rax
+
+%define tmp3 arg1
+
+%define extra_blocks arg2
+%define p arg2
+
+%define tmp4 r8
+
+%define tmp5 r9
+
+%define tmp6 r10
+
+%endif
+
+; This routine clobbers rbx, rbp
+struc STACK
+_gpr_save: resq 2
+_rsp_save: resq 1
+endstruc
+
+%define APPEND(a,b) a %+ b
+
+; JOB* FUNC(MB_MGR_HMAC_SHA_512_OOO *state)
+; arg 1 : rcx : state
+MKGLOBAL(FUNC,function,internal)
+FUNC:
+
+ mov rax, rsp
+ sub rsp, STACK_size
+ and rsp, -16
+
+ mov [rsp + _gpr_save + 8*0], rbx
+ mov [rsp + _gpr_save + 8*1], rbp
+ mov [rsp + _rsp_save], rax ; original SP
+
+ mov unused_lanes, [state + _unused_lanes_sha512]
+ bt unused_lanes, 16+7
+ jc return_null
+
+ ; find a lane with a non-null job
+ xor idx, idx
+ cmp qword [state + _ldata_sha512 + 1 * _SHA512_LANE_DATA_size + _job_in_lane_sha512], 0
+ cmovne idx, [rel one]
+copy_lane_data:
+ ; copy good lane (idx) to empty lanes
+ vmovdqa xmm0, [state + _lens_sha512]
+ mov tmp, [state + _args_sha512 + _data_ptr_sha512 + PTR_SZ*idx]
+
+%assign I 0
+%rep 2
+ cmp qword [state + _ldata_sha512 + I * _SHA512_LANE_DATA_size + _job_in_lane_sha512], 0
+ jne APPEND(skip_,I)
+ mov [state + _args_sha512 + _data_ptr_sha512 + PTR_SZ*I], tmp
+ vpor xmm0, xmm0, [rel len_masks + 16*I]
+APPEND(skip_,I):
+%assign I (I+1)
+%endrep
+
+ vmovdqa [state + _lens_sha512], xmm0
+
+ vphminposuw xmm1, xmm0
+ vpextrw DWORD(len2), xmm1, 0 ; min value
+ vpextrw DWORD(idx), xmm1, 1 ; min index (0...1)
+ cmp len2, 0
+ je len_is_0
+
+ vpshuflw xmm1, xmm1, 0xA0
+ vpsubw xmm0, xmm0, xmm1
+ vmovdqa [state + _lens_sha512], xmm0
+
+ ; "state" and "args" are the same address, arg1
+ ; len is arg2
+ call sha512_x2_avx
+ ; state and idx are intact
+
+len_is_0:
+ ; process completed job "idx"
+ imul lane_data, idx, _SHA512_LANE_DATA_size
+ lea lane_data, [state + _ldata_sha512 + lane_data]
+ mov DWORD(extra_blocks), [lane_data + _extra_blocks_sha512]
+ cmp extra_blocks, 0
+ jne proc_extra_blocks
+ cmp dword [lane_data + _outer_done_sha512], 0
+ jne end_loop
+
+proc_outer:
+ mov dword [lane_data + _outer_done_sha512], 1
+ mov DWORD(size_offset), [lane_data + _size_offset_sha512]
+ mov qword [lane_data + _extra_block_sha512 + size_offset], 0
+ mov word [state + _lens_sha512 + 2*idx], 1
+ lea tmp, [lane_data + _outer_block_sha512]
+ mov job, [lane_data + _job_in_lane_sha512]
+ mov [state + _args_data_ptr_sha512 + PTR_SZ*idx], tmp
+
+ ; move digest into data location
+ %assign I 0
+ %rep (SHA_X_DIGEST_SIZE / (8*16))
+ vmovq xmm0, [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 2*I*SHA512_DIGEST_ROW_SIZE]
+ vpinsrq xmm0, [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + (2*I + 1)*SHA512_DIGEST_ROW_SIZE], 1
+ vpshufb xmm0, [rel byteswap]
+ vmovdqa [lane_data + _outer_block_sha512 + I * 16], xmm0
+ %assign I (I+1)
+ %endrep
+
+ ; move the opad key into digest
+ mov tmp, [job + _auth_key_xor_opad]
+
+ %assign I 0
+ %rep 4
+ vmovdqu xmm0, [tmp + I * 16]
+ vmovq [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 2*I*SHA512_DIGEST_ROW_SIZE], xmm0
+ vpextrq [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + (2*I + 1)*SHA512_DIGEST_ROW_SIZE], xmm0, 1
+ %assign I (I+1)
+ %endrep
+
+ jmp copy_lane_data
+
+ align 16
+proc_extra_blocks:
+ mov DWORD(start_offset), [lane_data + _start_offset_sha512]
+ mov [state + _lens_sha512 + 2*idx], WORD(extra_blocks)
+ lea tmp, [lane_data + _extra_block_sha512 + start_offset]
+ mov [state + _args_data_ptr_sha512 + PTR_SZ*idx], tmp
+ mov dword [lane_data + _extra_blocks_sha512], 0
+ jmp copy_lane_data
+
+return_null:
+ xor job_rax, job_rax
+ jmp return
+
+ align 16
+end_loop:
+ mov job_rax, [lane_data + _job_in_lane_sha512]
+ mov qword [lane_data + _job_in_lane_sha512], 0
+ or dword [job_rax + _status], STS_COMPLETED_HMAC
+ mov unused_lanes, [state + _unused_lanes_sha512]
+ shl unused_lanes, 8
+ or unused_lanes, idx
+ mov [state + _unused_lanes_sha512], unused_lanes
+
+ mov p, [job_rax + _auth_tag_output]
+
+%if (SHA_X_DIGEST_SIZE != 384)
+ cmp qword [job_rax + _auth_tag_output_len_in_bytes], 32
+ jne copy_full_digest
+%else
+ cmp qword [job_rax + _auth_tag_output_len_in_bytes], 24
+ jne copy_full_digest
+%endif
+
+ ;; copy 32 bytes for SHA512 / 24 bytes for SHA384
+ mov QWORD(tmp2), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 0*SHA512_DIGEST_ROW_SIZE]
+ mov QWORD(tmp4), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 1*SHA512_DIGEST_ROW_SIZE]
+ mov QWORD(tmp6), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 2*SHA512_DIGEST_ROW_SIZE]
+%if (SHA_X_DIGEST_SIZE != 384)
+ mov QWORD(tmp5), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 3*SHA512_DIGEST_ROW_SIZE]
+%endif
+ bswap QWORD(tmp2)
+ bswap QWORD(tmp4)
+ bswap QWORD(tmp6)
+%if (SHA_X_DIGEST_SIZE != 384)
+ bswap QWORD(tmp5)
+%endif
+ mov [p + 0*8], QWORD(tmp2)
+ mov [p + 1*8], QWORD(tmp4)
+ mov [p + 2*8], QWORD(tmp6)
+%if (SHA_X_DIGEST_SIZE != 384)
+ mov [p + 3*8], QWORD(tmp5)
+%endif
+ jmp clear_ret
+
+copy_full_digest:
+ ;; copy 64 bytes for SHA512 / 48 bytes for SHA384
+ mov QWORD(tmp2), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 0*SHA512_DIGEST_ROW_SIZE]
+ mov QWORD(tmp4), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 1*SHA512_DIGEST_ROW_SIZE]
+ mov QWORD(tmp6), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 2*SHA512_DIGEST_ROW_SIZE]
+ mov QWORD(tmp5), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 3*SHA512_DIGEST_ROW_SIZE]
+ bswap QWORD(tmp2)
+ bswap QWORD(tmp4)
+ bswap QWORD(tmp6)
+ bswap QWORD(tmp5)
+ mov [p + 0*8], QWORD(tmp2)
+ mov [p + 1*8], QWORD(tmp4)
+ mov [p + 2*8], QWORD(tmp6)
+ mov [p + 3*8], QWORD(tmp5)
+
+ mov QWORD(tmp2), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 4*SHA512_DIGEST_ROW_SIZE]
+ mov QWORD(tmp4), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 5*SHA512_DIGEST_ROW_SIZE]
+%if (SHA_X_DIGEST_SIZE != 384)
+ mov QWORD(tmp6), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 6*SHA512_DIGEST_ROW_SIZE]
+ mov QWORD(tmp5), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 7*SHA512_DIGEST_ROW_SIZE]
+%endif
+ bswap QWORD(tmp2)
+ bswap QWORD(tmp4)
+%if (SHA_X_DIGEST_SIZE != 384)
+ bswap QWORD(tmp6)
+ bswap QWORD(tmp5)
+%endif
+ mov [p + 4*8], QWORD(tmp2)
+ mov [p + 5*8], QWORD(tmp4)
+%if (SHA_X_DIGEST_SIZE != 384)
+ mov [p + 6*8], QWORD(tmp6)
+ mov [p + 7*8], QWORD(tmp5)
+%endif
+
+clear_ret:
+
+%ifdef SAFE_DATA
+ vpxor xmm0, xmm0
+
+ ;; Clear digest (48B/64B), outer_block (48B/64B) and extra_block (128B) of returned job
+%assign I 0
+%rep 2
+ cmp qword [state + _ldata_sha512 + (I*_SHA512_LANE_DATA_size) + _job_in_lane_sha512], 0
+ jne APPEND(skip_clear_,I)
+
+ ;; Clear digest (48 bytes for SHA-384, 64 bytes for SHA-512)
+%assign J 0
+%rep 6
+ mov qword [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*I + J*SHA512_DIGEST_ROW_SIZE], 0
+%assign J (J+1)
+%endrep
+%if (SHA_X_DIGEST_SIZE != 384)
+ mov qword [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*I + 6*SHA512_DIGEST_ROW_SIZE], 0
+ mov qword [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*I + 7*SHA512_DIGEST_ROW_SIZE], 0
+%endif
+
+ lea lane_data, [state + _ldata_sha512 + (I*_SHA512_LANE_DATA_size)]
+ ;; Clear first 128 bytes of extra_block
+%assign offset 0
+%rep 8
+ vmovdqa [lane_data + _extra_block + offset], xmm0
+%assign offset (offset + 16)
+%endrep
+
+ ;; Clear first 48 bytes (SHA-384) or 64 bytes (SHA-512) of outer_block
+ vmovdqa [lane_data + _outer_block], xmm0
+ vmovdqa [lane_data + _outer_block + 16], xmm0
+ vmovdqa [lane_data + _outer_block + 32], xmm0
+%if (SHA_X_DIGEST_SIZE != 384)
+ vmovdqa [lane_data + _outer_block + 48], xmm0
+%endif
+
+APPEND(skip_clear_,I):
+%assign I (I+1)
+%endrep
+
+%endif ;; SAFE_DATA
+
+return:
+
+ mov rbx, [rsp + _gpr_save + 8*0]
+ mov rbp, [rsp + _gpr_save + 8*1]
+ mov rsp, [rsp + _rsp_save] ; original SP
+
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
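
(Illustrative note, not part of the patch.) end_loop and copy_full_digest above emit the tag by reading the host-endian digest words out of the lane state, byte-swapping each one, and storing either the truncated tag (24 bytes for SHA-384, 32 for SHA-512) or the full digest, chosen by auth_tag_output_len_in_bytes. A C sketch of that output step under the same assumptions; the function name and signature are illustrative, not library API:

#include <stdint.h>
#include <string.h>

static void store_sha512_family_tag(uint8_t *out, uint64_t out_len,
                                    const uint64_t digest[8], int is_sha384)
{
    uint64_t full  = is_sha384 ? 48 : 64;              /* digest size in bytes */
    uint64_t bytes = (out_len == full / 2) ? full / 2 : full;

    for (uint64_t i = 0; i < bytes / 8; i++) {
        uint64_t w = __builtin_bswap64(digest[i]);     /* bswap QWORD(...) */
        memcpy(out + 8 * i, &w, sizeof(w));
    }
}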
diff --git a/src/spdk/intel-ipsec-mb/avx/mb_mgr_hmac_sha_512_submit_avx.asm b/src/spdk/intel-ipsec-mb/avx/mb_mgr_hmac_sha_512_submit_avx.asm
new file mode 100644
index 000000000..b37884d0f
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx/mb_mgr_hmac_sha_512_submit_avx.asm
@@ -0,0 +1,416 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%include "include/os.asm"
+%include "job_aes_hmac.asm"
+%include "mb_mgr_datastruct.asm"
+%include "include/reg_sizes.asm"
+%include "include/memcpy.asm"
+%include "include/const.inc"
+
+extern sha512_x2_avx
+
+section .data
+default rel
+align 16
+byteswap: ;ddq 0x08090a0b0c0d0e0f0001020304050607
+ dq 0x0001020304050607, 0x08090a0b0c0d0e0f
+
+section .text
+
+%ifndef FUNC
+%define FUNC submit_job_hmac_sha_512_avx
+%define SHA_X_DIGEST_SIZE 512
+%endif
+
+%if 1
+%ifdef LINUX
+%define arg1 rdi
+%define arg2 rsi
+%define reg3 rcx
+%define reg4 rdx
+%else
+%define arg1 rcx
+%define arg2 rdx
+%define reg3 rdi
+%define reg4 rsi
+%endif
+
+%define state arg1
+%define job arg2
+%define len2 arg2
+
+
+; idx needs to be in rbx, rbp, r12-r15
+%define last_len rbp
+%define idx rbp
+
+%define p r11
+%define start_offset r11
+
+%define unused_lanes rbx
+%define tmp4 rbx
+
+%define job_rax rax
+%define len rax
+
+%define size_offset reg3
+%define tmp2 reg3
+
+%define lane reg4
+%define tmp3 reg4
+
+%define extra_blocks r8
+
+%define tmp r9
+%define p2 r9
+
+%define lane_data r10
+
+%endif
+
+; This routine clobbers rbx, rbp, rsi, rdi
+struc STACK
+_gpr_save: resq 4
+_rsp_save: resq 1
+endstruc
+
+; JOB* FUNC(MB_MGR_HMAC_sha_512_OOO *state, JOB_AES_HMAC *job)
+; arg 1 : rcx : state
+; arg 2 : rdx : job
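+;
+; Submit queues the job into a free SHA-512 lane and only runs the
+; multi-buffer core when it has to: after taking a lane, if the
+; remaining unused_lanes value is not the all-busy sentinel (0xff)
+; the routine returns NULL so the caller can submit more jobs; once
+; both lanes are occupied it hashes until the shortest job finishes
+; and returns that completed job.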
+MKGLOBAL(FUNC,function,internal)
+FUNC:
+
+ mov rax, rsp
+ sub rsp, STACK_size
+ and rsp, -16
+
+ mov [rsp + _gpr_save + 8*0], rbx
+ mov [rsp + _gpr_save + 8*1], rbp
+%ifndef LINUX
+ mov [rsp + _gpr_save + 8*2], rsi
+ mov [rsp + _gpr_save + 8*3], rdi
+%endif
+ mov [rsp + _rsp_save], rax ; original SP
+
+ mov unused_lanes, [state + _unused_lanes_sha512]
+ movzx lane, BYTE(unused_lanes)
+ shr unused_lanes, 8
+ imul lane_data, lane, _SHA512_LANE_DATA_size
+ lea lane_data, [state + _ldata_sha512 + lane_data]
+ mov [state + _unused_lanes_sha512], unused_lanes
+ mov len, [job + _msg_len_to_hash_in_bytes]
+ mov tmp, len
+ shr tmp, 7 ; divide by 128, len in terms of blocks
+
+ mov [lane_data + _job_in_lane_sha512], job
+ mov dword [lane_data + _outer_done_sha512], 0
+
+ vmovdqa xmm0, [state + _lens_sha512]
+ XVPINSRW xmm0, xmm1, p, lane, tmp, scale_x16
+ vmovdqa [state + _lens_sha512], xmm0
+
+ mov last_len, len
+ and last_len, 127
+ lea extra_blocks, [last_len + 17 + 127]
+ shr extra_blocks, 7
+ mov [lane_data + _extra_blocks_sha512], DWORD(extra_blocks)
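+	;; extra_blocks = ceil((last_len + 17) / 128): the message tail plus
+	;; 1 byte of 0x80 padding and the 16-byte (128-bit) length field,
+	;; rounded up to whole 128-byte blocks. E.g. last_len = 100 gives
+	;; (100 + 17 + 127) >> 7 = 1 extra block; last_len = 120 gives 2.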
+
+ mov p, [job + _src]
+ add p, [job + _hash_start_src_offset_in_bytes]
+ mov [state + _args_data_ptr_sha512 + PTR_SZ*lane], p
+
+ cmp len, 128
+ jb copy_lt128
+
+fast_copy:
+ add p, len
+%assign I 0
+%rep 2
+ vmovdqu xmm0, [p - 128 + I*4*16 + 0*16]
+ vmovdqu xmm1, [p - 128 + I*4*16 + 1*16]
+ vmovdqu xmm2, [p - 128 + I*4*16 + 2*16]
+ vmovdqu xmm3, [p - 128 + I*4*16 + 3*16]
+ vmovdqa [lane_data + _extra_block_sha512 + I*4*16 + 0*16], xmm0
+ vmovdqa [lane_data + _extra_block_sha512 + I*4*16 + 1*16], xmm1
+ vmovdqa [lane_data + _extra_block_sha512 + I*4*16 + 2*16], xmm2
+ vmovdqa [lane_data + _extra_block_sha512 + I*4*16 + 3*16], xmm3
+%assign I (I+1)
+%endrep
+
+end_fast_copy:
+
+ mov size_offset, extra_blocks
+ shl size_offset, 7
+ sub size_offset, last_len
+ add size_offset, 128-8
+ mov [lane_data + _size_offset_sha512], DWORD(size_offset)
+ mov start_offset, 128
+ sub start_offset, last_len
+ mov [lane_data + _start_offset_sha512], DWORD(start_offset)
+
+ lea tmp, [8*128 + 8*len]
+ bswap tmp
+ mov [lane_data + _extra_block_sha512 + size_offset], tmp
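+	;; Length field: the digest covers the 128-byte ipad block plus len
+	;; bytes of data, so the bit count is (128 + len) * 8, stored big
+	;; endian in the last 8 bytes of the final extra block (size_offset).
+	;; These are the low 64 bits of SHA-512's 128-bit length field; the
+	;; upper 64 bits are assumed to stay zero from the pre-initialised
+	;; padding.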
+
+ mov tmp, [job + _auth_key_xor_ipad]
+
+%assign I 0
+%rep 4
+ vmovdqu xmm0, [tmp + I * 2 * 8]
+ vmovq [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*lane + (2*I)*SHA512_DIGEST_ROW_SIZE], xmm0
+ vpextrq [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*lane + (2*I + 1)*SHA512_DIGEST_ROW_SIZE], xmm0, 1
+%assign I (I+1)
+%endrep
+
+ test len, ~127
+ jnz ge128_bytes
+
+lt128_bytes:
+ vmovdqa xmm0, [state + _lens_sha512]
+ XVPINSRW xmm0, xmm1, tmp, lane, extra_blocks, scale_x16
+ vmovdqa [state + _lens_sha512], xmm0
+
+ lea tmp, [lane_data + _extra_block_sha512 + start_offset]
+	mov	[state + _args_data_ptr_sha512 + PTR_SZ*lane], tmp	;; point the lane at the extra block
+ mov dword [lane_data + _extra_blocks_sha512], 0
+
+ge128_bytes:
+ cmp unused_lanes, 0xff
+ jne return_null
+ jmp start_loop
+
+ align 16
+start_loop:
+ ; Find min length
+ vmovdqa xmm0, [state + _lens_sha512]
+ vphminposuw xmm1, xmm0
+ vpextrw DWORD(len2), xmm1, 0 ; min value
+ vpextrw DWORD(idx), xmm1, 1 ; min index (0...1)
+ cmp len2, 0
+ je len_is_0
+
+ vpshuflw xmm1, xmm1, 0xA0
+ vpsubw xmm0, xmm0, xmm1
+ vmovdqa [state + _lens_sha512], xmm0
+
+ ; "state" and "args" are the same address, arg1
+ ; len is arg2
+ call sha512_x2_avx
+ ; state and idx are intact
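+	;; Scheduling note: _lens_sha512 holds a 16-bit per-lane count of
+	;; 128-byte blocks left to hash. vphminposuw returns the smallest
+	;; count and its lane; that minimum is subtracted from every lane and
+	;; sha512_x2_avx hashes that many blocks on both lanes in parallel,
+	;; so the lane that reached zero (idx) is the job completed next.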
+
+len_is_0:
+ ; process completed job "idx"
+ imul lane_data, idx, _SHA512_LANE_DATA_size
+ lea lane_data, [state + _ldata_sha512 + lane_data]
+ mov DWORD(extra_blocks), [lane_data + _extra_blocks_sha512]
+ cmp extra_blocks, 0
+ jne proc_extra_blocks
+ cmp dword [lane_data + _outer_done_sha512], 0
+ jne end_loop
+
+proc_outer:
+ mov dword [lane_data + _outer_done_sha512], 1
+ mov DWORD(size_offset), [lane_data + _size_offset_sha512]
+ mov qword [lane_data + _extra_block_sha512 + size_offset], 0
+
+ vmovdqa xmm0, [state + _lens_sha512]
+ XVPINSRW xmm0, xmm1, tmp, idx, 1, scale_x16
+ vmovdqa [state + _lens_sha512], xmm0
+
+ lea tmp, [lane_data + _outer_block_sha512]
+ mov job, [lane_data + _job_in_lane_sha512]
+ mov [state + _args_data_ptr_sha512 + PTR_SZ*idx], tmp
+
+%assign I 0
+%rep (SHA_X_DIGEST_SIZE / (8 * 16))
+ vmovq xmm0, [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 2*I*SHA512_DIGEST_ROW_SIZE]
+ vpinsrq xmm0, [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + (2*I + 1)*SHA512_DIGEST_ROW_SIZE], 1
+ vpshufb xmm0, [rel byteswap]
+ vmovdqa [lane_data + _outer_block_sha512 + I * 16], xmm0
+%assign I (I+1)
+%endrep
+
+ mov tmp, [job + _auth_key_xor_opad]
+%assign I 0
+%rep 4
+ vmovdqu xmm0, [tmp + I * 16]
+ vmovq [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 2*I*SHA512_DIGEST_ROW_SIZE], xmm0
+ vpextrq [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + (2*I + 1)*SHA512_DIGEST_ROW_SIZE], xmm0, 1
+%assign I (I+1)
+%endrep
+
+ jmp start_loop
+
+ align 16
+proc_extra_blocks:
+ mov DWORD(start_offset), [lane_data + _start_offset_sha512]
+
+ vmovdqa xmm0, [state + _lens_sha512]
+ XVPINSRW xmm0, xmm1, tmp, idx, extra_blocks, scale_x16
+ vmovdqa [state + _lens_sha512], xmm0
+
+ lea tmp, [lane_data + _extra_block_sha512 + start_offset]
+ mov [state + _args_data_ptr_sha512 + PTR_SZ*idx], tmp ;; idx is index of shortest length message
+ mov dword [lane_data + _extra_blocks_sha512], 0
+ jmp start_loop
+
+ align 16
+copy_lt128:
+	;; less than one message block of data
+	;; copy the data into the extra block so that it ends right where the
+	;; pre-populated 0x80 padding byte starts (i.e. backwards by len from offset 128)
+ lea p2, [lane_data + _extra_block + 128]
+ sub p2, len
+ memcpy_avx_128_1 p2, p, len, tmp4, tmp2, xmm0, xmm1, xmm2, xmm3
+ mov unused_lanes, [state + _unused_lanes_sha512]
+ jmp end_fast_copy
+
+return_null:
+ xor job_rax, job_rax
+ jmp return
+
+ align 16
+end_loop:
+ mov job_rax, [lane_data + _job_in_lane_sha512]
+ mov unused_lanes, [state + _unused_lanes_sha512]
+ mov qword [lane_data + _job_in_lane_sha512], 0
+ or dword [job_rax + _status], STS_COMPLETED_HMAC
+ shl unused_lanes, 8
+ or unused_lanes, idx
+ mov [state + _unused_lanes_sha512], unused_lanes
+
+ mov p, [job_rax + _auth_tag_output]
+
+%if (SHA_X_DIGEST_SIZE != 384)
+ cmp qword [job_rax + _auth_tag_output_len_in_bytes], 32
+ jne copy_full_digest
+%else
+ cmp qword [job_rax + _auth_tag_output_len_in_bytes], 24
+ jne copy_full_digest
+%endif
+	;; copy 32 bytes for SHA-512 / 24 bytes for SHA-384
+ mov QWORD(tmp), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 0*SHA512_DIGEST_ROW_SIZE]
+ mov QWORD(tmp2), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 1*SHA512_DIGEST_ROW_SIZE]
+ mov QWORD(tmp3), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 2*SHA512_DIGEST_ROW_SIZE]
+%if (SHA_X_DIGEST_SIZE != 384)
+ mov QWORD(tmp4), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 3*SHA512_DIGEST_ROW_SIZE]
+%endif
+ bswap QWORD(tmp)
+ bswap QWORD(tmp2)
+ bswap QWORD(tmp3)
+%if (SHA_X_DIGEST_SIZE != 384)
+ bswap QWORD(tmp4)
+%endif
+ mov [p + 0*8], QWORD(tmp)
+ mov [p + 1*8], QWORD(tmp2)
+ mov [p + 2*8], QWORD(tmp3)
+%if (SHA_X_DIGEST_SIZE != 384)
+ mov [p + 3*8], QWORD(tmp4)
+%endif
+ jmp clear_ret
+
+copy_full_digest:
+	;; copy 64 bytes for SHA-512 / 48 bytes for SHA-384
+ mov QWORD(tmp), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 0*SHA512_DIGEST_ROW_SIZE]
+ mov QWORD(tmp2), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 1*SHA512_DIGEST_ROW_SIZE]
+ mov QWORD(tmp3), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 2*SHA512_DIGEST_ROW_SIZE]
+ mov QWORD(tmp4), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 3*SHA512_DIGEST_ROW_SIZE]
+ bswap QWORD(tmp)
+ bswap QWORD(tmp2)
+ bswap QWORD(tmp3)
+ bswap QWORD(tmp4)
+ mov [p + 0*8], QWORD(tmp)
+ mov [p + 1*8], QWORD(tmp2)
+ mov [p + 2*8], QWORD(tmp3)
+ mov [p + 3*8], QWORD(tmp4)
+
+ mov QWORD(tmp), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 4*SHA512_DIGEST_ROW_SIZE]
+ mov QWORD(tmp2), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 5*SHA512_DIGEST_ROW_SIZE]
+%if (SHA_X_DIGEST_SIZE != 384)
+ mov QWORD(tmp3), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 6*SHA512_DIGEST_ROW_SIZE]
+ mov QWORD(tmp4), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 7*SHA512_DIGEST_ROW_SIZE]
+%endif
+ bswap QWORD(tmp)
+ bswap QWORD(tmp2)
+%if (SHA_X_DIGEST_SIZE != 384)
+ bswap QWORD(tmp3)
+ bswap QWORD(tmp4)
+%endif
+ mov [p + 4*8], QWORD(tmp)
+ mov [p + 5*8], QWORD(tmp2)
+%if (SHA_X_DIGEST_SIZE != 384)
+ mov [p + 6*8], QWORD(tmp3)
+ mov [p + 7*8], QWORD(tmp4)
+%endif
+
+clear_ret:
+
+%ifdef SAFE_DATA
+ ;; Clear digest (48B/64B), outer_block (48B/64B) and extra_block (128B) of returned job
+%assign J 0
+%rep 6
+ mov qword [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + J*SHA512_DIGEST_ROW_SIZE], 0
+%assign J (J+1)
+%endrep
+%if (SHA_X_DIGEST_SIZE != 384)
+	mov	qword [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 6*SHA512_DIGEST_ROW_SIZE], 0
+	mov	qword [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 7*SHA512_DIGEST_ROW_SIZE], 0
+%endif
+
+ vpxor xmm0, xmm0
+ imul lane_data, idx, _SHA512_LANE_DATA_size
+ lea lane_data, [state + _ldata_sha512 + lane_data]
+ ;; Clear first 128 bytes of extra_block
+%assign offset 0
+%rep 8
+ vmovdqa [lane_data + _extra_block + offset], xmm0
+%assign offset (offset + 16)
+%endrep
+
+ ;; Clear first 48 bytes (SHA-384) or 64 bytes (SHA-512) of outer_block
+ vmovdqa [lane_data + _outer_block], xmm0
+ vmovdqa [lane_data + _outer_block + 16], xmm0
+ vmovdqa [lane_data + _outer_block + 32], xmm0
+%if (SHA_X_DIGEST_SIZE != 384)
+ vmovdqa [lane_data + _outer_block + 48], xmm0
+%endif
+%endif ;; SAFE_DATA
+
+return:
+ mov rbx, [rsp + _gpr_save + 8*0]
+ mov rbp, [rsp + _gpr_save + 8*1]
+%ifndef LINUX
+ mov rsi, [rsp + _gpr_save + 8*2]
+ mov rdi, [rsp + _gpr_save + 8*3]
+%endif
+ mov rsp, [rsp + _rsp_save] ; original SP
+
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/avx/mb_mgr_hmac_submit_avx.asm b/src/spdk/intel-ipsec-mb/avx/mb_mgr_hmac_submit_avx.asm
new file mode 100644
index 000000000..418f0bc43
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx/mb_mgr_hmac_submit_avx.asm
@@ -0,0 +1,358 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%include "include/os.asm"
+%include "job_aes_hmac.asm"
+%include "mb_mgr_datastruct.asm"
+%include "include/reg_sizes.asm"
+%include "include/memcpy.asm"
+%include "include/const.inc"
+
+extern sha1_mult_avx
+
+section .data
+default rel
+align 16
+byteswap: ;ddq 0x0c0d0e0f08090a0b0405060700010203
+ dq 0x0405060700010203, 0x0c0d0e0f08090a0b
+
+section .text
+
+%if 1
+%ifdef LINUX
+%define arg1 rdi
+%define arg2 rsi
+%define reg3 rcx
+%define reg4 rdx
+%else
+%define arg1 rcx
+%define arg2 rdx
+%define reg3 rdi
+%define reg4 rsi
+%endif
+
+%define state arg1
+%define job arg2
+%define len2 arg2
+
+
+; idx needs to be in rbx, rbp, r12-r15
+%define last_len rbp
+%define idx rbp
+
+%define p r11
+%define start_offset r11
+
+%define unused_lanes rbx
+%define tmp4 rbx
+
+%define job_rax rax
+%define len rax
+
+%define size_offset reg3
+%define tmp2 reg3
+
+%define lane reg4
+%define tmp3 reg4
+
+%define extra_blocks r8
+
+%define tmp r9
+%define p2 r9
+
+%define lane_data r10
+
+%endif
+
+; This routine clobbers rdi, rsi, rbx, rbp
+struc STACK
+_gpr_save: resq 4
+_rsp_save: resq 1
+endstruc
+
+; JOB* submit_job_hmac_avx(MB_MGR_HMAC_SHA_1_OOO *state, JOB_AES_HMAC *job)
+; arg 1 : rcx : state
+; arg 2 : rdx : job
+MKGLOBAL(submit_job_hmac_avx,function,internal)
+submit_job_hmac_avx:
+
+ mov rax, rsp
+ sub rsp, STACK_size
+ and rsp, -16
+
+ mov [rsp + _gpr_save + 8*0], rbx
+ mov [rsp + _gpr_save + 8*1], rbp
+%ifndef LINUX
+ mov [rsp + _gpr_save + 8*2], rsi
+ mov [rsp + _gpr_save + 8*3], rdi
+%endif
+ mov [rsp + _rsp_save], rax ; original SP
+
+ mov unused_lanes, [state + _unused_lanes]
+ movzx lane, BYTE(unused_lanes)
+ shr unused_lanes, 8
+ imul lane_data, lane, _HMAC_SHA1_LANE_DATA_size
+ lea lane_data, [state + _ldata + lane_data]
+ mov [state + _unused_lanes], unused_lanes
+ mov len, [job + _msg_len_to_hash_in_bytes]
+ mov tmp, len
+ shr tmp, 6 ; divide by 64, len in terms of blocks
+
+ mov [lane_data + _job_in_lane], job
+ mov dword [lane_data + _outer_done], 0
+
+ vmovdqa xmm0, [state + _lens]
+ XVPINSRW xmm0, xmm1, p, lane, tmp, scale_x16
+ vmovdqa [state + _lens], xmm0
+
+ mov last_len, len
+ and last_len, 63
+ lea extra_blocks, [last_len + 9 + 63]
+ shr extra_blocks, 6
+ mov [lane_data + _extra_blocks], DWORD(extra_blocks)
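+	;; Same padding arithmetic as the SHA-512 manager but with 64-byte
+	;; blocks and an 8-byte length field: extra_blocks =
+	;; ceil((last_len + 9) / 64), and the bit count stored later is
+	;; (64 + len) * 8 to account for the ipad block.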
+
+ mov p, [job + _src]
+ add p, [job + _hash_start_src_offset_in_bytes]
+ mov [state + _args_data_ptr + PTR_SZ*lane], p
+ cmp len, 64
+ jb copy_lt64
+
+fast_copy:
+ add p, len
+ vmovdqu xmm0, [p - 64 + 0*16]
+ vmovdqu xmm1, [p - 64 + 1*16]
+ vmovdqu xmm2, [p - 64 + 2*16]
+ vmovdqu xmm3, [p - 64 + 3*16]
+ vmovdqa [lane_data + _extra_block + 0*16], xmm0
+ vmovdqa [lane_data + _extra_block + 1*16], xmm1
+ vmovdqa [lane_data + _extra_block + 2*16], xmm2
+ vmovdqa [lane_data + _extra_block + 3*16], xmm3
+end_fast_copy:
+
+ mov size_offset, extra_blocks
+ shl size_offset, 6
+ sub size_offset, last_len
+ add size_offset, 64-8
+ mov [lane_data + _size_offset], DWORD(size_offset)
+ mov start_offset, 64
+ sub start_offset, last_len
+ mov [lane_data + _start_offset], DWORD(start_offset)
+
+ lea tmp, [8*64 + 8*len]
+ bswap tmp
+ mov [lane_data + _extra_block + size_offset], tmp
+
+ mov tmp, [job + _auth_key_xor_ipad]
+ vmovdqu xmm0, [tmp]
+ mov DWORD(tmp), [tmp + 4*4]
+ vmovd [state + _args_digest + SHA1_DIGEST_WORD_SIZE*lane + 0*SHA1_DIGEST_ROW_SIZE], xmm0
+ vpextrd [state + _args_digest + SHA1_DIGEST_WORD_SIZE*lane + 1*SHA1_DIGEST_ROW_SIZE], xmm0, 1
+ vpextrd [state + _args_digest + SHA1_DIGEST_WORD_SIZE*lane + 2*SHA1_DIGEST_ROW_SIZE], xmm0, 2
+ vpextrd [state + _args_digest + SHA1_DIGEST_WORD_SIZE*lane + 3*SHA1_DIGEST_ROW_SIZE], xmm0, 3
+ mov [state + _args_digest + SHA1_DIGEST_WORD_SIZE*lane + 4*SHA1_DIGEST_ROW_SIZE], DWORD(tmp)
+
+ test len, ~63
+ jnz ge64_bytes
+
+lt64_bytes:
+ vmovdqa xmm0, [state + _lens]
+ XVPINSRW xmm0, xmm1, tmp, lane, extra_blocks, scale_x16
+ vmovdqa [state + _lens], xmm0
+
+ lea tmp, [lane_data + _extra_block + start_offset]
+ mov [state + _args_data_ptr + PTR_SZ*lane], tmp
+ mov dword [lane_data + _extra_blocks], 0
+
+ge64_bytes:
+ cmp unused_lanes, 0xff
+ jne return_null
+ jmp start_loop
+
+ align 16
+start_loop:
+ ; Find min length
+ vmovdqa xmm0, [state + _lens]
+ vphminposuw xmm1, xmm0
+ vpextrw DWORD(len2), xmm1, 0 ; min value
+ vpextrw DWORD(idx), xmm1, 1 ; min index (0...3)
+ cmp len2, 0
+ je len_is_0
+
+ vpshuflw xmm1, xmm1, 0
+ vpsubw xmm0, xmm0, xmm1
+ vmovdqa [state + _lens], xmm0
+
+ ; "state" and "args" are the same address, arg1
+ ; len is arg2
+ call sha1_mult_avx
+ ; state and idx are intact
+
+len_is_0:
+ ; process completed job "idx"
+ imul lane_data, idx, _HMAC_SHA1_LANE_DATA_size
+ lea lane_data, [state + _ldata + lane_data]
+ mov DWORD(extra_blocks), [lane_data + _extra_blocks]
+ cmp extra_blocks, 0
+ jne proc_extra_blocks
+ cmp dword [lane_data + _outer_done], 0
+ jne end_loop
+
+proc_outer:
+ mov dword [lane_data + _outer_done], 1
+ mov DWORD(size_offset), [lane_data + _size_offset]
+ mov qword [lane_data + _extra_block + size_offset], 0
+
+ vmovdqa xmm0, [state + _lens]
+ XVPINSRW xmm0, xmm1, tmp, idx, 1, scale_x16
+ vmovdqa [state + _lens], xmm0
+
+ lea tmp, [lane_data + _outer_block]
+ mov job, [lane_data + _job_in_lane]
+ mov [state + _args_data_ptr + PTR_SZ*idx], tmp
+
+ vmovd xmm0, [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 0*SHA1_DIGEST_ROW_SIZE]
+ vpinsrd xmm0, xmm0, [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 1*SHA1_DIGEST_ROW_SIZE], 1
+ vpinsrd xmm0, xmm0, [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 2*SHA1_DIGEST_ROW_SIZE], 2
+ vpinsrd xmm0, xmm0, [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 3*SHA1_DIGEST_ROW_SIZE], 3
+ vpshufb xmm0, xmm0, [rel byteswap]
+ mov DWORD(tmp), [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 4*SHA1_DIGEST_ROW_SIZE]
+ bswap DWORD(tmp)
+ vmovdqa [lane_data + _outer_block], xmm0
+ mov [lane_data + _outer_block + 4*SHA1_DIGEST_WORD_SIZE], DWORD(tmp)
+
+ mov tmp, [job + _auth_key_xor_opad]
+ vmovdqu xmm0, [tmp]
+ mov DWORD(tmp), [tmp + 4*4]
+ vmovd [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 0*SHA1_DIGEST_ROW_SIZE], xmm0
+ vpextrd [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 1*SHA1_DIGEST_ROW_SIZE], xmm0, 1
+ vpextrd [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 2*SHA1_DIGEST_ROW_SIZE], xmm0, 2
+ vpextrd [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 3*SHA1_DIGEST_ROW_SIZE], xmm0, 3
+ mov [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 4*SHA1_DIGEST_ROW_SIZE], DWORD(tmp)
+ jmp start_loop
+
+ align 16
+proc_extra_blocks:
+ mov DWORD(start_offset), [lane_data + _start_offset]
+
+ vmovdqa xmm0, [state + _lens]
+ XVPINSRW xmm0, xmm1, tmp, idx, extra_blocks, scale_x16
+ vmovdqa [state + _lens], xmm0
+
+ lea tmp, [lane_data + _extra_block + start_offset]
+ mov [state + _args_data_ptr + PTR_SZ*idx], tmp
+ mov dword [lane_data + _extra_blocks], 0
+ jmp start_loop
+
+ align 16
+copy_lt64:
+	;; less than one message block of data
+	;; copy the data into the extra block so that it ends right where the
+	;; pre-populated 0x80 padding byte starts (i.e. backwards by len from offset 64)
+ lea p2, [lane_data + _extra_block + 64]
+ sub p2, len
+ memcpy_avx_64_1 p2, p, len, tmp4, tmp2, xmm0, xmm1, xmm2, xmm3
+ mov unused_lanes, [state + _unused_lanes]
+ jmp end_fast_copy
+
+return_null:
+ xor job_rax, job_rax
+ jmp return
+
+ align 16
+end_loop:
+ mov job_rax, [lane_data + _job_in_lane]
+ mov unused_lanes, [state + _unused_lanes]
+ mov qword [lane_data + _job_in_lane], 0
+ or dword [job_rax + _status], STS_COMPLETED_HMAC
+ shl unused_lanes, 8
+ or unused_lanes, idx
+ mov [state + _unused_lanes], unused_lanes
+
+ mov p, [job_rax + _auth_tag_output]
+
+ ; copy 12 bytes
+ mov DWORD(tmp), [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 0*SHA1_DIGEST_ROW_SIZE]
+ mov DWORD(tmp2), [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 1*SHA1_DIGEST_ROW_SIZE]
+ mov DWORD(tmp3), [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 2*SHA1_DIGEST_ROW_SIZE]
+ bswap DWORD(tmp)
+ bswap DWORD(tmp2)
+ bswap DWORD(tmp3)
+ mov [p + 0*SHA1_DIGEST_WORD_SIZE], DWORD(tmp)
+ mov [p + 1*SHA1_DIGEST_WORD_SIZE], DWORD(tmp2)
+ mov [p + 2*SHA1_DIGEST_WORD_SIZE], DWORD(tmp3)
+
+ cmp qword [job_rax + _auth_tag_output_len_in_bytes], 12
+ je clear_ret
+
+ ;; copy remaining 8 bytes to return 20 byte digest
+ mov DWORD(tmp), [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 3*SHA1_DIGEST_ROW_SIZE]
+ mov DWORD(tmp2), [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 4*SHA1_DIGEST_ROW_SIZE]
+ bswap DWORD(tmp)
+ bswap DWORD(tmp2)
+ mov [p + 3*SHA1_DIGEST_WORD_SIZE], DWORD(tmp)
+ mov [p + 4*SHA1_DIGEST_WORD_SIZE], DWORD(tmp2)
+
+clear_ret:
+
+%ifdef SAFE_DATA
+ ;; Clear digest (20B), outer_block (20B) and extra_block (64B) of returned job
+ mov dword [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 0*SHA1_DIGEST_ROW_SIZE], 0
+ mov dword [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 1*SHA1_DIGEST_ROW_SIZE], 0
+ mov dword [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 2*SHA1_DIGEST_ROW_SIZE], 0
+ mov dword [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 3*SHA1_DIGEST_ROW_SIZE], 0
+ mov dword [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 4*SHA1_DIGEST_ROW_SIZE], 0
+
+ vpxor xmm0, xmm0
+ imul lane_data, idx, _HMAC_SHA1_LANE_DATA_size
+ lea lane_data, [state + _ldata + lane_data]
+ ;; Clear first 64 bytes of extra_block
+%assign offset 0
+%rep 4
+ vmovdqa [lane_data + _extra_block + offset], xmm0
+%assign offset (offset + 16)
+%endrep
+
+ ;; Clear first 20 bytes of outer_block
+ vmovdqa [lane_data + _outer_block], xmm0
+ mov dword [lane_data + _outer_block + 16], 0
+%endif
+
+return:
+
+ mov rbx, [rsp + _gpr_save + 8*0]
+ mov rbp, [rsp + _gpr_save + 8*1]
+%ifndef LINUX
+ mov rsi, [rsp + _gpr_save + 8*2]
+ mov rdi, [rsp + _gpr_save + 8*3]
+%endif
+ mov rsp, [rsp + _rsp_save] ; original SP
+
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/avx/md5_x4x2_avx.asm b/src/spdk/intel-ipsec-mb/avx/md5_x4x2_avx.asm
new file mode 100644
index 000000000..1aa2c2600
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx/md5_x4x2_avx.asm
@@ -0,0 +1,716 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+;; code to compute 8-lane ("octal") MD5 using AVX
+
+;; Stack must be aligned to 16 bytes before call
+;; Windows clobbers: rax rbx rdx rsi rdi r8 r9 r10 r11 r12 r13 r14 r15
+;; Windows preserves: rcx rbp
+;;
+;; Linux clobbers: rax rbx rcx rdx rsi r8 r9 r10 r11 r12 r13 r14 r15
+;; Linux preserves: rdi rbp
+;;
+;; clobbers xmm0-15
+
+%include "include/os.asm"
+%include "mb_mgr_datastruct.asm"
+
+extern MD5_TABLE
+
+section .data
+default rel
+align 64
+ONES:
+ dd 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff
+
+section .text
+
+%ifdef LINUX
+;; Linux Registers
+%define arg1 rdi
+%define arg2 rsi
+%define mem1 rcx
+%define mem2 rdx
+%else
+%define arg1 rcx
+%define arg2 rdx
+%define mem1 rdi
+%define mem2 rsi
+%endif
+
+;; rbp is not clobbered
+
+%define state arg1
+%define num_blks arg2
+
+%define inp0 r8
+%define inp1 r9
+%define inp2 r10
+%define inp3 r11
+%define inp4 r12
+%define inp5 r13
+%define inp6 r14
+%define inp7 r15
+
+%define TBL rax
+%define IDX rbx
+
+%define A xmm0
+%define B xmm1
+%define C xmm2
+%define D xmm3
+%define E xmm4 ; tmp
+%define F xmm5 ; tmp
+
+%define A2 xmm6
+%define B2 xmm7
+%define C2 xmm8
+%define D2 xmm9
+
+
+%define FUN E
+%define TMP F
+%define FUN2 xmm10
+%define TMP2 xmm11
+
+%define T0 xmm10
+%define T1 xmm11
+%define T2 xmm12
+%define T3 xmm13
+%define T4 xmm14
+%define T5 xmm15
+
+; Stack Layout
+;
+; 470 DD2
+; 460 CC2
+; 450 BB2
+; 440 AA2
+; 430 DD
+; 420 CC
+; 410 BB
+; 400 AA
+;
+; 3F0 data2[15] for lanes 7...4 \
+; ... \
+; 300 data2[0] for lanes 7...4 \
+; 2F0 data2[15] for lanes 3...0 > mem block 2
+; ... /
+; 210 data2[1] for lanes 3...0 /
+; 200 data2[0] for lanes 3...0 /
+;
+; 1F0 data1[15] for lanes 7...4 \
+; ... \
+; 100 data1[0] for lanes 7...4 \
+; F0 data1[15] for lanes 3...0 > mem block 1
+; ... /
+; 10 data1[1] for lanes 3...0 /
+; 0 data1[0] for lanes 3...0 /
+
+; stack size must be an odd multiple of 8 bytes so that rsp (8 mod 16 after the call) ends up 16-byte aligned
+struc STACK
+_DATA: reso 2*2*16 ; 2 blocks * 2 sets of lanes * 16 regs
+_DIGEST: reso 8 ; stores AA-DD, AA2-DD2
+ resb 8 ; for alignment
+endstruc
+%define STACK_SIZE STACK_size
+
+%define AA rsp + _DIGEST + 16*0
+%define BB rsp + _DIGEST + 16*1
+%define CC rsp + _DIGEST + 16*2
+%define DD rsp + _DIGEST + 16*3
+%define AA2 rsp + _DIGEST + 16*4
+%define BB2 rsp + _DIGEST + 16*5
+%define CC2 rsp + _DIGEST + 16*6
+%define DD2 rsp + _DIGEST + 16*7
+
+;;
+;; MD5 left rotations (number of bits)
+;;
+rot11 equ 7
+rot12 equ 12
+rot13 equ 17
+rot14 equ 22
+rot21 equ 5
+rot22 equ 9
+rot23 equ 14
+rot24 equ 20
+rot31 equ 4
+rot32 equ 11
+rot33 equ 16
+rot34 equ 23
+rot41 equ 6
+rot42 equ 10
+rot43 equ 15
+rot44 equ 21
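+;; (the per-step left-rotate amounts s[i] from RFC 1321: 7,12,17,22 /
+;;  5,9,14,20 / 4,11,16,23 / 6,10,15,21 for rounds 1-4 respectively)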
+
+; transpose r0, r1, r2, r3, t0, t1
+; "transpose" data in {r0..r3} using temps {t0..t1}
+; Input looks like: {r0 r1 r2 r3}
+; r0 = {a3 a2 a1 a0}
+; r1 = {b3 b2 b1 b0}
+; r2 = {c3 c2 c1 c0}
+; r3 = {d3 d2 d1 d0}
+;
+; output looks like: {t0 r1 r0 r3}
+; t0 = {d0 c0 b0 a0}
+; r1 = {d1 c1 b1 a1}
+; r0 = {d2 c2 b2 a2}
+; r3 = {d3 c3 b3 a3}
+;
+%macro TRANSPOSE 6
+%define %%r0 %1
+%define %%r1 %2
+%define %%r2 %3
+%define %%r3 %4
+%define %%t0 %5
+%define %%t1 %6
+ vshufps %%t0, %%r0, %%r1, 0x44 ; t0 = {b1 b0 a1 a0}
+ vshufps %%r0, %%r0, %%r1, 0xEE ; r0 = {b3 b2 a3 a2}
+
+ vshufps %%t1, %%r2, %%r3, 0x44 ; t1 = {d1 d0 c1 c0}
+ vshufps %%r2, %%r2, %%r3, 0xEE ; r2 = {d3 d2 c3 c2}
+
+ vshufps %%r1, %%t0, %%t1, 0xDD ; r1 = {d1 c1 b1 a1}
+ vshufps %%r3, %%r0, %%r2, 0xDD ; r3 = {d3 c3 b3 a3}
+
+ vshufps %%r0, %%r0, %%r2, 0x88 ; r0 = {d2 c2 b2 a2}
+ vshufps %%t0, %%t0, %%t1, 0x88 ; t0 = {d0 c0 b0 a0}
+%endmacro
+
+;;
+;; Magic functions defined in RFC 1321
+;;
+; macro MAGIC_F F,X,Y,Z ;; F = ((Z) ^ ((X) & ((Y) ^ (Z))))
+%macro MAGIC_F 4
+%define %%F %1
+%define %%X %2
+%define %%Y %3
+%define %%Z %4
+ vpxor %%F,%%Z, %%Y
+ vpand %%F,%%F,%%X
+ vpxor %%F,%%F,%%Z
+%endmacro
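+; Note: this is the standard MD5 F function (X & Y) | (~X & Z) rewritten
+; as Z ^ (X & (Y ^ Z)): when X = 1 the result is Z ^ (Y ^ Z) = Y, and when
+; X = 0 it is Z, so the two forms agree while avoiding a separate NOT.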
+
+; macro MAGIC_G F,X,Y,Z ;; F = F((Z),(X),(Y))
+%macro MAGIC_G 4
+%define %%F %1
+%define %%X %2
+%define %%Y %3
+%define %%Z %4
+ MAGIC_F %%F,%%Z,%%X,%%Y
+%endmacro
+
+; macro MAGIC_H F,X,Y,Z ;; F = ((X) ^ (Y) ^ (Z))
+%macro MAGIC_H 4
+%define %%F %1
+%define %%X %2
+%define %%Y %3
+%define %%Z %4
+ vpxor %%F,%%Z, %%Y
+ vpxor %%F,%%F, %%X
+%endmacro
+
+; macro MAGIC_I F,X,Y,Z ;; F = ((Y) ^ ((X) | ~(Z)))
+%macro MAGIC_I 4
+%define %%F %1
+%define %%X %2
+%define %%Y %3
+%define %%Z %4
+ vpxor %%F,%%Z,[rel ONES] ; pnot %%F
+ vpor %%F,%%F,%%X
+ vpxor %%F,%%F,%%Y
+%endmacro
+
+; PROLD reg, imm, tmp
+%macro PROLD 3
+%define %%reg %1
+%define %%imm %2
+%define %%tmp %3
+ vpsrld %%tmp, %%reg, (32-%%imm)
+ vpslld %%reg, %%reg, %%imm
+ vpor %%reg, %%reg, %%tmp
+%endmacro
+
+;;
+;; single MD5 step
+;;
+;; A = B +ROL32((A +MAGIC(B,C,D) +data +const), nrot)
+;;
+; macro MD5_STEP1 MAGIC_FUN, A,B,C,D, A2,B2,C2,D2, FUN, TMP, data, MD5const, nrot
+%macro MD5_STEP1 14
+%define %%MAGIC_FUN %1
+%define %%A %2
+%define %%B %3
+%define %%C %4
+%define %%D %5
+%define %%A2 %6
+%define %%B2 %7
+%define %%C2 %8
+%define %%D2 %9
+%define %%FUN %10
+%define %%TMP %11
+%define %%data %12
+%define %%MD5const %13
+%define %%nrot %14
+
+ vpaddd %%A, %%A, %%MD5const
+ vpaddd %%A2, %%A2, %%MD5const
+ vpaddd %%A, %%A, [%%data]
+ vpaddd %%A2, %%A2, [%%data + 16*16]
+ %%MAGIC_FUN %%FUN, %%B,%%C,%%D
+ vpaddd %%A, %%A, %%FUN
+ %%MAGIC_FUN %%FUN, %%B2,%%C2,%%D2
+ vpaddd %%A2, %%A2, %%FUN
+ PROLD %%A,%%nrot, %%TMP
+ PROLD %%A2,%%nrot, %%TMP
+ vpaddd %%A, %%A, %%B
+ vpaddd %%A2, %%A2, %%B2
+%endmacro
+
+;;
+;; single MD5 step
+;;
+;; A = B +ROL32((A +MAGIC(B,C,D) +data +const), nrot)
+;;
+; macro MD5_STEP MAGIC_FUN, A,B,C,D, A2,B2,C2,D2, FUN, TMP, FUN2, TMP2, data,
+; MD5const, nrot
+%macro MD5_STEP 16
+%define %%MAGIC_FUN %1
+%define %%A %2
+%define %%B %3
+%define %%C %4
+%define %%D %5
+%define %%A2 %6
+%define %%B2 %7
+%define %%C2 %8
+%define %%D2 %9
+%define %%FUN %10
+%define %%TMP %11
+%define %%FUN2 %12
+%define %%TMP2 %13
+%define %%data %14
+%define %%MD5const %15
+%define %%nrot %16
+
+ vmovdqa %%TMP,[%%data]
+ vmovdqa %%TMP2,[%%data + 16*16]
+ vpaddd %%A, %%A, %%MD5const
+ vpaddd %%A2, %%A2, %%MD5const
+ vpaddd %%A, %%A, %%TMP
+ vpaddd %%A2, %%A2, %%TMP2
+ %%MAGIC_FUN %%FUN, %%B,%%C,%%D
+ %%MAGIC_FUN %%FUN2, %%B2,%%C2,%%D2
+ vpaddd %%A, %%A, %%FUN
+ vpaddd %%A2, %%A2, %%FUN2
+ PROLD %%A,%%nrot, %%TMP
+ PROLD %%A2,%%nrot, %%TMP2
+ vpaddd %%A, %%A, %%B
+ vpaddd %%A2, %%A2, %%B2
+%endmacro
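+; The two step macros differ only in temporaries: MD5_STEP1 shares FUN/TMP
+; between both lane sets and adds its data operand straight from memory,
+; because in the main loop T0-T5 (xmm10-xmm15, which alias FUN2/TMP2) are
+; busy transposing the next block. MD5_STEP loads the data into TMP/TMP2
+; and uses the independent FUN2/TMP2 pair; it is used in the final-block
+; pass where those registers are free.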
+
+; void md5_x4x2_avx(MD5_ARGS *args, UINT64 num_blks)
+; arg 1 : pointer to MD5_ARGS structure
+; arg 2 : number of blocks (>=1)
+;
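+; Each call hashes eight independent lanes: digests for lanes 0-3 live in
+; A/B/C/D and lanes 4-7 in A2/B2/C2/D2, data pointers come from
+; _data_ptr_md5 in MD5_ARGS, and num_blks 64-byte blocks are consumed per
+; lane, after which the digests and updated data pointers are written back.
+;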
+align 32
+MKGLOBAL(md5_x4x2_avx,function,internal)
+md5_x4x2_avx:
+
+ sub rsp, STACK_SIZE
+
+	;; each transposed digest row holds 8 lanes: lanes 0-3 are kept in A-D and lanes 4-7 in A2-D2
+ ;; Initialize digests
+ vmovdqa A,[state + 0*16 + 0*MD5_DIGEST_ROW_SIZE]
+ vmovdqa B,[state + 0*16 + 1*MD5_DIGEST_ROW_SIZE]
+ vmovdqa C,[state + 0*16 + 2*MD5_DIGEST_ROW_SIZE]
+ vmovdqa D,[state + 0*16 + 3*MD5_DIGEST_ROW_SIZE]
+
+ vmovdqa A2,[state + 1*16 + 0*MD5_DIGEST_ROW_SIZE]
+ vmovdqa B2,[state + 1*16 + 1*MD5_DIGEST_ROW_SIZE]
+ vmovdqa C2,[state + 1*16 + 2*MD5_DIGEST_ROW_SIZE]
+ vmovdqa D2,[state + 1*16 + 3*MD5_DIGEST_ROW_SIZE]
+
+ lea TBL, [rel MD5_TABLE]
+
+ ;; load input pointers
+ mov inp0,[state+_data_ptr_md5 +0*PTR_SZ]
+ mov inp1,[state+_data_ptr_md5 +1*PTR_SZ]
+ mov inp2,[state+_data_ptr_md5 +2*PTR_SZ]
+ mov inp3,[state+_data_ptr_md5 +3*PTR_SZ]
+ mov inp4,[state+_data_ptr_md5 +4*PTR_SZ]
+ mov inp5,[state+_data_ptr_md5 +5*PTR_SZ]
+ mov inp6,[state+_data_ptr_md5 +6*PTR_SZ]
+ mov inp7,[state+_data_ptr_md5 +7*PTR_SZ]
+ xor IDX, IDX
+
+ ; Make ping-pong pointers to the two memory blocks
+ mov mem1, rsp
+ lea mem2, [rsp + 16*16*2]
+
+;; Load first block of data and save back to stack
+%assign I 0
+%rep 4
+ vmovdqu T2,[inp0+IDX+I*16]
+ vmovdqu T1,[inp1+IDX+I*16]
+ vmovdqu T4,[inp2+IDX+I*16]
+ vmovdqu T3,[inp3+IDX+I*16]
+ TRANSPOSE T2, T1, T4, T3, T0, T5
+ vmovdqa [mem1+(I*4+0)*16],T0
+ vmovdqa [mem1+(I*4+1)*16],T1
+ vmovdqa [mem1+(I*4+2)*16],T2
+ vmovdqa [mem1+(I*4+3)*16],T3
+
+ vmovdqu T2,[inp4+IDX+I*16]
+ vmovdqu T1,[inp5+IDX+I*16]
+ vmovdqu T4,[inp6+IDX+I*16]
+ vmovdqu T3,[inp7+IDX+I*16]
+ TRANSPOSE T2, T1, T4, T3, T0, T5
+ vmovdqa [mem1+(I*4+0)*16 + 16*16],T0
+ vmovdqa [mem1+(I*4+1)*16 + 16*16],T1
+ vmovdqa [mem1+(I*4+2)*16 + 16*16],T2
+ vmovdqa [mem1+(I*4+3)*16 + 16*16],T3
+%assign I (I+1)
+%endrep
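+	;; After the two TRANSPOSE passes per 16-byte chunk, the stack holds
+	;; the message in lane-interleaved form: dword w (w = 0..15) of lanes
+	;; 0-3 is at mem1 + w*16 and of lanes 4-7 at mem1 + w*16 + 16*16,
+	;; which is the [%%data] / [%%data + 16*16] pair consumed by the step
+	;; macros.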
+
+lloop:
+ ; save old digests
+ vmovdqa [AA], A
+ vmovdqa [BB], B
+ vmovdqa [CC], C
+ vmovdqa [DD], D
+ ; save old digests
+ vmovdqa [AA2], A2
+ vmovdqa [BB2], B2
+ vmovdqa [CC2], C2
+ vmovdqa [DD2], D2
+
+ add IDX, 4*16
+ sub num_blks, 1
+ je lastblock
+
+ MD5_STEP1 MAGIC_F, A,B,C,D, A2,B2,C2,D2, FUN,TMP, mem1 + 0*16, [TBL+ 0*16], rot11
+ MD5_STEP1 MAGIC_F, D,A,B,C, D2,A2,B2,C2, FUN,TMP, mem1 + 1*16, [TBL+ 1*16], rot12
+ MD5_STEP1 MAGIC_F, C,D,A,B, C2,D2,A2,B2, FUN,TMP, mem1 + 2*16, [TBL+ 2*16], rot13
+ MD5_STEP1 MAGIC_F, B,C,D,A, B2,C2,D2,A2, FUN,TMP, mem1 + 3*16, [TBL+ 3*16], rot14
+ MD5_STEP1 MAGIC_F, A,B,C,D, A2,B2,C2,D2, FUN,TMP, mem1 + 4*16, [TBL+ 4*16], rot11
+ MD5_STEP1 MAGIC_F, D,A,B,C, D2,A2,B2,C2, FUN,TMP, mem1 + 5*16, [TBL+ 5*16], rot12
+ MD5_STEP1 MAGIC_F, C,D,A,B, C2,D2,A2,B2, FUN,TMP, mem1 + 6*16, [TBL+ 6*16], rot13
+ MD5_STEP1 MAGIC_F, B,C,D,A, B2,C2,D2,A2, FUN,TMP, mem1 + 7*16, [TBL+ 7*16], rot14
+
+%assign I 0
+ vmovdqu T2,[inp0+IDX+I*16]
+ vmovdqu T1,[inp1+IDX+I*16]
+ vmovdqu T4,[inp2+IDX+I*16]
+ vmovdqu T3,[inp3+IDX+I*16]
+ TRANSPOSE T2, T1, T4, T3, T0, T5
+ vmovdqa [mem2+(I*4+0)*16],T0
+ vmovdqa [mem2+(I*4+1)*16],T1
+ vmovdqa [mem2+(I*4+2)*16],T2
+ vmovdqa [mem2+(I*4+3)*16],T3
+
+ MD5_STEP1 MAGIC_F, A,B,C,D, A2,B2,C2,D2, FUN,TMP, mem1 + 8*16, [TBL+ 8*16], rot11
+ MD5_STEP1 MAGIC_F, D,A,B,C, D2,A2,B2,C2, FUN,TMP, mem1 + 9*16, [TBL+ 9*16], rot12
+ MD5_STEP1 MAGIC_F, C,D,A,B, C2,D2,A2,B2, FUN,TMP, mem1 +10*16, [TBL+10*16], rot13
+ MD5_STEP1 MAGIC_F, B,C,D,A, B2,C2,D2,A2, FUN,TMP, mem1 +11*16, [TBL+11*16], rot14
+ MD5_STEP1 MAGIC_F, A,B,C,D, A2,B2,C2,D2, FUN,TMP, mem1 +12*16, [TBL+12*16], rot11
+ MD5_STEP1 MAGIC_F, D,A,B,C, D2,A2,B2,C2, FUN,TMP, mem1 +13*16, [TBL+13*16], rot12
+ MD5_STEP1 MAGIC_F, C,D,A,B, C2,D2,A2,B2, FUN,TMP, mem1 +14*16, [TBL+14*16], rot13
+ MD5_STEP1 MAGIC_F, B,C,D,A, B2,C2,D2,A2, FUN,TMP, mem1 +15*16, [TBL+15*16], rot14
+
+
+ vmovdqu T2,[inp4+IDX+I*16]
+ vmovdqu T1,[inp5+IDX+I*16]
+ vmovdqu T4,[inp6+IDX+I*16]
+ vmovdqu T3,[inp7+IDX+I*16]
+ TRANSPOSE T2, T1, T4, T3, T0, T5
+ vmovdqa [mem2+(I*4+0)*16 + 16*16],T0
+ vmovdqa [mem2+(I*4+1)*16 + 16*16],T1
+ vmovdqa [mem2+(I*4+2)*16 + 16*16],T2
+ vmovdqa [mem2+(I*4+3)*16 + 16*16],T3
+%assign I (I+1)
+
+ MD5_STEP1 MAGIC_G, A,B,C,D, A2,B2,C2,D2, FUN,TMP, mem1 + 1*16, [TBL+16*16], rot21
+ MD5_STEP1 MAGIC_G, D,A,B,C, D2,A2,B2,C2, FUN,TMP, mem1 + 6*16, [TBL+17*16], rot22
+ MD5_STEP1 MAGIC_G, C,D,A,B, C2,D2,A2,B2, FUN,TMP, mem1 +11*16, [TBL+18*16], rot23
+ MD5_STEP1 MAGIC_G, B,C,D,A, B2,C2,D2,A2, FUN,TMP, mem1 + 0*16, [TBL+19*16], rot24
+ MD5_STEP1 MAGIC_G, A,B,C,D, A2,B2,C2,D2, FUN,TMP, mem1 + 5*16, [TBL+20*16], rot21
+ MD5_STEP1 MAGIC_G, D,A,B,C, D2,A2,B2,C2, FUN,TMP, mem1 +10*16, [TBL+21*16], rot22
+ MD5_STEP1 MAGIC_G, C,D,A,B, C2,D2,A2,B2, FUN,TMP, mem1 +15*16, [TBL+22*16], rot23
+ MD5_STEP1 MAGIC_G, B,C,D,A, B2,C2,D2,A2, FUN,TMP, mem1 + 4*16, [TBL+23*16], rot24
+
+ vmovdqu T2,[inp0+IDX+I*16]
+ vmovdqu T1,[inp1+IDX+I*16]
+ vmovdqu T4,[inp2+IDX+I*16]
+ vmovdqu T3,[inp3+IDX+I*16]
+ TRANSPOSE T2, T1, T4, T3, T0, T5
+ vmovdqa [mem2+(I*4+0)*16],T0
+ vmovdqa [mem2+(I*4+1)*16],T1
+ vmovdqa [mem2+(I*4+2)*16],T2
+ vmovdqa [mem2+(I*4+3)*16],T3
+
+ MD5_STEP1 MAGIC_G, A,B,C,D, A2,B2,C2,D2, FUN,TMP, mem1 + 9*16, [TBL+24*16], rot21
+ MD5_STEP1 MAGIC_G, D,A,B,C, D2,A2,B2,C2, FUN,TMP, mem1 +14*16, [TBL+25*16], rot22
+ MD5_STEP1 MAGIC_G, C,D,A,B, C2,D2,A2,B2, FUN,TMP, mem1 + 3*16, [TBL+26*16], rot23
+ MD5_STEP1 MAGIC_G, B,C,D,A, B2,C2,D2,A2, FUN,TMP, mem1 + 8*16, [TBL+27*16], rot24
+ MD5_STEP1 MAGIC_G, A,B,C,D, A2,B2,C2,D2, FUN,TMP, mem1 +13*16, [TBL+28*16], rot21
+ MD5_STEP1 MAGIC_G, D,A,B,C, D2,A2,B2,C2, FUN,TMP, mem1 + 2*16, [TBL+29*16], rot22
+ MD5_STEP1 MAGIC_G, C,D,A,B, C2,D2,A2,B2, FUN,TMP, mem1 + 7*16, [TBL+30*16], rot23
+ MD5_STEP1 MAGIC_G, B,C,D,A, B2,C2,D2,A2, FUN,TMP, mem1 +12*16, [TBL+31*16], rot24
+
+ vmovdqu T2,[inp4+IDX+I*16]
+ vmovdqu T1,[inp5+IDX+I*16]
+ vmovdqu T4,[inp6+IDX+I*16]
+ vmovdqu T3,[inp7+IDX+I*16]
+ TRANSPOSE T2, T1, T4, T3, T0, T5
+ vmovdqa [mem2+(I*4+0)*16 + 16*16],T0
+ vmovdqa [mem2+(I*4+1)*16 + 16*16],T1
+ vmovdqa [mem2+(I*4+2)*16 + 16*16],T2
+ vmovdqa [mem2+(I*4+3)*16 + 16*16],T3
+%assign I (I+1)
+
+ MD5_STEP1 MAGIC_H, A,B,C,D, A2,B2,C2,D2, FUN,TMP, mem1 + 5*16, [TBL+32*16], rot31
+ MD5_STEP1 MAGIC_H, D,A,B,C, D2,A2,B2,C2, FUN,TMP, mem1 + 8*16, [TBL+33*16], rot32
+ MD5_STEP1 MAGIC_H, C,D,A,B, C2,D2,A2,B2, FUN,TMP, mem1 +11*16, [TBL+34*16], rot33
+ MD5_STEP1 MAGIC_H, B,C,D,A, B2,C2,D2,A2, FUN,TMP, mem1 +14*16, [TBL+35*16], rot34
+ MD5_STEP1 MAGIC_H, A,B,C,D, A2,B2,C2,D2, FUN,TMP, mem1 + 1*16, [TBL+36*16], rot31
+ MD5_STEP1 MAGIC_H, D,A,B,C, D2,A2,B2,C2, FUN,TMP, mem1 + 4*16, [TBL+37*16], rot32
+ MD5_STEP1 MAGIC_H, C,D,A,B, C2,D2,A2,B2, FUN,TMP, mem1 + 7*16, [TBL+38*16], rot33
+ MD5_STEP1 MAGIC_H, B,C,D,A, B2,C2,D2,A2, FUN,TMP, mem1 +10*16, [TBL+39*16], rot34
+
+ vmovdqu T2,[inp0+IDX+I*16]
+ vmovdqu T1,[inp1+IDX+I*16]
+ vmovdqu T4,[inp2+IDX+I*16]
+ vmovdqu T3,[inp3+IDX+I*16]
+ TRANSPOSE T2, T1, T4, T3, T0, T5
+ vmovdqa [mem2+(I*4+0)*16],T0
+ vmovdqa [mem2+(I*4+1)*16],T1
+ vmovdqa [mem2+(I*4+2)*16],T2
+ vmovdqa [mem2+(I*4+3)*16],T3
+
+ MD5_STEP1 MAGIC_H, A,B,C,D, A2,B2,C2,D2, FUN,TMP, mem1 +13*16, [TBL+40*16], rot31
+ MD5_STEP1 MAGIC_H, D,A,B,C, D2,A2,B2,C2, FUN,TMP, mem1 + 0*16, [TBL+41*16], rot32
+ MD5_STEP1 MAGIC_H, C,D,A,B, C2,D2,A2,B2, FUN,TMP, mem1 + 3*16, [TBL+42*16], rot33
+ MD5_STEP1 MAGIC_H, B,C,D,A, B2,C2,D2,A2, FUN,TMP, mem1 + 6*16, [TBL+43*16], rot34
+ MD5_STEP1 MAGIC_H, A,B,C,D, A2,B2,C2,D2, FUN,TMP, mem1 + 9*16, [TBL+44*16], rot31
+ MD5_STEP1 MAGIC_H, D,A,B,C, D2,A2,B2,C2, FUN,TMP, mem1 +12*16, [TBL+45*16], rot32
+ MD5_STEP1 MAGIC_H, C,D,A,B, C2,D2,A2,B2, FUN,TMP, mem1 +15*16, [TBL+46*16], rot33
+ MD5_STEP1 MAGIC_H, B,C,D,A, B2,C2,D2,A2, FUN,TMP, mem1 + 2*16, [TBL+47*16], rot34
+
+ vmovdqu T2,[inp4+IDX+I*16]
+ vmovdqu T1,[inp5+IDX+I*16]
+ vmovdqu T4,[inp6+IDX+I*16]
+ vmovdqu T3,[inp7+IDX+I*16]
+ TRANSPOSE T2, T1, T4, T3, T0, T5
+ vmovdqa [mem2+(I*4+0)*16 + 16*16],T0
+ vmovdqa [mem2+(I*4+1)*16 + 16*16],T1
+ vmovdqa [mem2+(I*4+2)*16 + 16*16],T2
+ vmovdqa [mem2+(I*4+3)*16 + 16*16],T3
+%assign I (I+1)
+
+ MD5_STEP1 MAGIC_I, A,B,C,D, A2,B2,C2,D2, FUN,TMP, mem1 + 0*16, [TBL+48*16], rot41
+ MD5_STEP1 MAGIC_I, D,A,B,C, D2,A2,B2,C2, FUN,TMP, mem1 + 7*16, [TBL+49*16], rot42
+ MD5_STEP1 MAGIC_I, C,D,A,B, C2,D2,A2,B2, FUN,TMP, mem1 +14*16, [TBL+50*16], rot43
+ MD5_STEP1 MAGIC_I, B,C,D,A, B2,C2,D2,A2, FUN,TMP, mem1 + 5*16, [TBL+51*16], rot44
+ MD5_STEP1 MAGIC_I, A,B,C,D, A2,B2,C2,D2, FUN,TMP, mem1 +12*16, [TBL+52*16], rot41
+ MD5_STEP1 MAGIC_I, D,A,B,C, D2,A2,B2,C2, FUN,TMP, mem1 + 3*16, [TBL+53*16], rot42
+ MD5_STEP1 MAGIC_I, C,D,A,B, C2,D2,A2,B2, FUN,TMP, mem1 +10*16, [TBL+54*16], rot43
+ MD5_STEP1 MAGIC_I, B,C,D,A, B2,C2,D2,A2, FUN,TMP, mem1 + 1*16, [TBL+55*16], rot44
+
+ vmovdqu T2,[inp0+IDX+I*16]
+ vmovdqu T1,[inp1+IDX+I*16]
+ vmovdqu T4,[inp2+IDX+I*16]
+ vmovdqu T3,[inp3+IDX+I*16]
+ TRANSPOSE T2, T1, T4, T3, T0, T5
+ vmovdqa [mem2+(I*4+0)*16],T0
+ vmovdqa [mem2+(I*4+1)*16],T1
+ vmovdqa [mem2+(I*4+2)*16],T2
+ vmovdqa [mem2+(I*4+3)*16],T3
+
+ MD5_STEP1 MAGIC_I, A,B,C,D, A2,B2,C2,D2, FUN,TMP, mem1 + 8*16, [TBL+56*16], rot41
+ MD5_STEP1 MAGIC_I, D,A,B,C, D2,A2,B2,C2, FUN,TMP, mem1 +15*16, [TBL+57*16], rot42
+ MD5_STEP1 MAGIC_I, C,D,A,B, C2,D2,A2,B2, FUN,TMP, mem1 + 6*16, [TBL+58*16], rot43
+ MD5_STEP1 MAGIC_I, B,C,D,A, B2,C2,D2,A2, FUN,TMP, mem1 +13*16, [TBL+59*16], rot44
+ MD5_STEP1 MAGIC_I, A,B,C,D, A2,B2,C2,D2, FUN,TMP, mem1 + 4*16, [TBL+60*16], rot41
+ MD5_STEP1 MAGIC_I, D,A,B,C, D2,A2,B2,C2, FUN,TMP, mem1 +11*16, [TBL+61*16], rot42
+ MD5_STEP1 MAGIC_I, C,D,A,B, C2,D2,A2,B2, FUN,TMP, mem1 + 2*16, [TBL+62*16], rot43
+ MD5_STEP1 MAGIC_I, B,C,D,A, B2,C2,D2,A2, FUN,TMP, mem1 + 9*16, [TBL+63*16], rot44
+
+ vmovdqu T2,[inp4+IDX+I*16]
+ vmovdqu T1,[inp5+IDX+I*16]
+ vmovdqu T4,[inp6+IDX+I*16]
+ vmovdqu T3,[inp7+IDX+I*16]
+ TRANSPOSE T2, T1, T4, T3, T0, T5
+ vmovdqa [mem2+(I*4+0)*16 + 16*16],T0
+ vmovdqa [mem2+(I*4+1)*16 + 16*16],T1
+ vmovdqa [mem2+(I*4+2)*16 + 16*16],T2
+ vmovdqa [mem2+(I*4+3)*16 + 16*16],T3
+%assign I (I+1)
+
+
+ vpaddd A,A,[AA]
+ vpaddd B,B,[BB]
+ vpaddd C,C,[CC]
+ vpaddd D,D,[DD]
+
+ vpaddd A2,A2,[AA2]
+ vpaddd B2,B2,[BB2]
+ vpaddd C2,C2,[CC2]
+ vpaddd D2,D2,[DD2]
+
+ ; swap mem1 and mem2
+ xchg mem1, mem2
+
+ jmp lloop
+
+lastblock:
+
+ MD5_STEP MAGIC_F, A,B,C,D, A2,B2,C2,D2, FUN,TMP, FUN2,TMP2, mem1 + 0*16, [TBL+ 0*16], rot11
+ MD5_STEP MAGIC_F, D,A,B,C, D2,A2,B2,C2, FUN,TMP, FUN2,TMP2, mem1 + 1*16, [TBL+ 1*16], rot12
+ MD5_STEP MAGIC_F, C,D,A,B, C2,D2,A2,B2, FUN,TMP, FUN2,TMP2, mem1 + 2*16, [TBL+ 2*16], rot13
+ MD5_STEP MAGIC_F, B,C,D,A, B2,C2,D2,A2, FUN,TMP, FUN2,TMP2, mem1 + 3*16, [TBL+ 3*16], rot14
+ MD5_STEP MAGIC_F, A,B,C,D, A2,B2,C2,D2, FUN,TMP, FUN2,TMP2, mem1 + 4*16, [TBL+ 4*16], rot11
+ MD5_STEP MAGIC_F, D,A,B,C, D2,A2,B2,C2, FUN,TMP, FUN2,TMP2, mem1 + 5*16, [TBL+ 5*16], rot12
+ MD5_STEP MAGIC_F, C,D,A,B, C2,D2,A2,B2, FUN,TMP, FUN2,TMP2, mem1 + 6*16, [TBL+ 6*16], rot13
+ MD5_STEP MAGIC_F, B,C,D,A, B2,C2,D2,A2, FUN,TMP, FUN2,TMP2, mem1 + 7*16, [TBL+ 7*16], rot14
+ MD5_STEP MAGIC_F, A,B,C,D, A2,B2,C2,D2, FUN,TMP, FUN2,TMP2, mem1 + 8*16, [TBL+ 8*16], rot11
+ MD5_STEP MAGIC_F, D,A,B,C, D2,A2,B2,C2, FUN,TMP, FUN2,TMP2, mem1 + 9*16, [TBL+ 9*16], rot12
+ MD5_STEP MAGIC_F, C,D,A,B, C2,D2,A2,B2, FUN,TMP, FUN2,TMP2, mem1 +10*16, [TBL+10*16], rot13
+ MD5_STEP MAGIC_F, B,C,D,A, B2,C2,D2,A2, FUN,TMP, FUN2,TMP2, mem1 +11*16, [TBL+11*16], rot14
+ MD5_STEP MAGIC_F, A,B,C,D, A2,B2,C2,D2, FUN,TMP, FUN2,TMP2, mem1 +12*16, [TBL+12*16], rot11
+ MD5_STEP MAGIC_F, D,A,B,C, D2,A2,B2,C2, FUN,TMP, FUN2,TMP2, mem1 +13*16, [TBL+13*16], rot12
+ MD5_STEP MAGIC_F, C,D,A,B, C2,D2,A2,B2, FUN,TMP, FUN2,TMP2, mem1 +14*16, [TBL+14*16], rot13
+ MD5_STEP MAGIC_F, B,C,D,A, B2,C2,D2,A2, FUN,TMP, FUN2,TMP2, mem1 +15*16, [TBL+15*16], rot14
+
+ MD5_STEP MAGIC_G, A,B,C,D, A2,B2,C2,D2, FUN,TMP, FUN2,TMP2, mem1 + 1*16, [TBL+16*16], rot21
+ MD5_STEP MAGIC_G, D,A,B,C, D2,A2,B2,C2, FUN,TMP, FUN2,TMP2, mem1 + 6*16, [TBL+17*16], rot22
+ MD5_STEP MAGIC_G, C,D,A,B, C2,D2,A2,B2, FUN,TMP, FUN2,TMP2, mem1 +11*16, [TBL+18*16], rot23
+ MD5_STEP MAGIC_G, B,C,D,A, B2,C2,D2,A2, FUN,TMP, FUN2,TMP2, mem1 + 0*16, [TBL+19*16], rot24
+ MD5_STEP MAGIC_G, A,B,C,D, A2,B2,C2,D2, FUN,TMP, FUN2,TMP2, mem1 + 5*16, [TBL+20*16], rot21
+ MD5_STEP MAGIC_G, D,A,B,C, D2,A2,B2,C2, FUN,TMP, FUN2,TMP2, mem1 +10*16, [TBL+21*16], rot22
+ MD5_STEP MAGIC_G, C,D,A,B, C2,D2,A2,B2, FUN,TMP, FUN2,TMP2, mem1 +15*16, [TBL+22*16], rot23
+ MD5_STEP MAGIC_G, B,C,D,A, B2,C2,D2,A2, FUN,TMP, FUN2,TMP2, mem1 + 4*16, [TBL+23*16], rot24
+ MD5_STEP MAGIC_G, A,B,C,D, A2,B2,C2,D2, FUN,TMP, FUN2,TMP2, mem1 + 9*16, [TBL+24*16], rot21
+ MD5_STEP MAGIC_G, D,A,B,C, D2,A2,B2,C2, FUN,TMP, FUN2,TMP2, mem1 +14*16, [TBL+25*16], rot22
+ MD5_STEP MAGIC_G, C,D,A,B, C2,D2,A2,B2, FUN,TMP, FUN2,TMP2, mem1 + 3*16, [TBL+26*16], rot23
+ MD5_STEP MAGIC_G, B,C,D,A, B2,C2,D2,A2, FUN,TMP, FUN2,TMP2, mem1 + 8*16, [TBL+27*16], rot24
+ MD5_STEP MAGIC_G, A,B,C,D, A2,B2,C2,D2, FUN,TMP, FUN2,TMP2, mem1 +13*16, [TBL+28*16], rot21
+ MD5_STEP MAGIC_G, D,A,B,C, D2,A2,B2,C2, FUN,TMP, FUN2,TMP2, mem1 + 2*16, [TBL+29*16], rot22
+ MD5_STEP MAGIC_G, C,D,A,B, C2,D2,A2,B2, FUN,TMP, FUN2,TMP2, mem1 + 7*16, [TBL+30*16], rot23
+ MD5_STEP MAGIC_G, B,C,D,A, B2,C2,D2,A2, FUN,TMP, FUN2,TMP2, mem1 +12*16, [TBL+31*16], rot24
+
+ MD5_STEP MAGIC_H, A,B,C,D, A2,B2,C2,D2, FUN,TMP, FUN2,TMP2, mem1 + 5*16, [TBL+32*16], rot31
+ MD5_STEP MAGIC_H, D,A,B,C, D2,A2,B2,C2, FUN,TMP, FUN2,TMP2, mem1 + 8*16, [TBL+33*16], rot32
+ MD5_STEP MAGIC_H, C,D,A,B, C2,D2,A2,B2, FUN,TMP, FUN2,TMP2, mem1 +11*16, [TBL+34*16], rot33
+ MD5_STEP MAGIC_H, B,C,D,A, B2,C2,D2,A2, FUN,TMP, FUN2,TMP2, mem1 +14*16, [TBL+35*16], rot34
+ MD5_STEP MAGIC_H, A,B,C,D, A2,B2,C2,D2, FUN,TMP, FUN2,TMP2, mem1 + 1*16, [TBL+36*16], rot31
+ MD5_STEP MAGIC_H, D,A,B,C, D2,A2,B2,C2, FUN,TMP, FUN2,TMP2, mem1 + 4*16, [TBL+37*16], rot32
+ MD5_STEP MAGIC_H, C,D,A,B, C2,D2,A2,B2, FUN,TMP, FUN2,TMP2, mem1 + 7*16, [TBL+38*16], rot33
+ MD5_STEP MAGIC_H, B,C,D,A, B2,C2,D2,A2, FUN,TMP, FUN2,TMP2, mem1 +10*16, [TBL+39*16], rot34
+ MD5_STEP MAGIC_H, A,B,C,D, A2,B2,C2,D2, FUN,TMP, FUN2,TMP2, mem1 +13*16, [TBL+40*16], rot31
+ MD5_STEP MAGIC_H, D,A,B,C, D2,A2,B2,C2, FUN,TMP, FUN2,TMP2, mem1 + 0*16, [TBL+41*16], rot32
+ MD5_STEP MAGIC_H, C,D,A,B, C2,D2,A2,B2, FUN,TMP, FUN2,TMP2, mem1 + 3*16, [TBL+42*16], rot33
+ MD5_STEP MAGIC_H, B,C,D,A, B2,C2,D2,A2, FUN,TMP, FUN2,TMP2, mem1 + 6*16, [TBL+43*16], rot34
+ MD5_STEP MAGIC_H, A,B,C,D, A2,B2,C2,D2, FUN,TMP, FUN2,TMP2, mem1 + 9*16, [TBL+44*16], rot31
+ MD5_STEP MAGIC_H, D,A,B,C, D2,A2,B2,C2, FUN,TMP, FUN2,TMP2, mem1 +12*16, [TBL+45*16], rot32
+ MD5_STEP MAGIC_H, C,D,A,B, C2,D2,A2,B2, FUN,TMP, FUN2,TMP2, mem1 +15*16, [TBL+46*16], rot33
+ MD5_STEP MAGIC_H, B,C,D,A, B2,C2,D2,A2, FUN,TMP, FUN2,TMP2, mem1 + 2*16, [TBL+47*16], rot34
+
+ MD5_STEP MAGIC_I, A,B,C,D, A2,B2,C2,D2, FUN,TMP, FUN2,TMP2, mem1 + 0*16, [TBL+48*16], rot41
+ MD5_STEP MAGIC_I, D,A,B,C, D2,A2,B2,C2, FUN,TMP, FUN2,TMP2, mem1 + 7*16, [TBL+49*16], rot42
+ MD5_STEP MAGIC_I, C,D,A,B, C2,D2,A2,B2, FUN,TMP, FUN2,TMP2, mem1 +14*16, [TBL+50*16], rot43
+ MD5_STEP MAGIC_I, B,C,D,A, B2,C2,D2,A2, FUN,TMP, FUN2,TMP2, mem1 + 5*16, [TBL+51*16], rot44
+ MD5_STEP MAGIC_I, A,B,C,D, A2,B2,C2,D2, FUN,TMP, FUN2,TMP2, mem1 +12*16, [TBL+52*16], rot41
+ MD5_STEP MAGIC_I, D,A,B,C, D2,A2,B2,C2, FUN,TMP, FUN2,TMP2, mem1 + 3*16, [TBL+53*16], rot42
+ MD5_STEP MAGIC_I, C,D,A,B, C2,D2,A2,B2, FUN,TMP, FUN2,TMP2, mem1 +10*16, [TBL+54*16], rot43
+ MD5_STEP MAGIC_I, B,C,D,A, B2,C2,D2,A2, FUN,TMP, FUN2,TMP2, mem1 + 1*16, [TBL+55*16], rot44
+ MD5_STEP MAGIC_I, A,B,C,D, A2,B2,C2,D2, FUN,TMP, FUN2,TMP2, mem1 + 8*16, [TBL+56*16], rot41
+ MD5_STEP MAGIC_I, D,A,B,C, D2,A2,B2,C2, FUN,TMP, FUN2,TMP2, mem1 +15*16, [TBL+57*16], rot42
+ MD5_STEP MAGIC_I, C,D,A,B, C2,D2,A2,B2, FUN,TMP, FUN2,TMP2, mem1 + 6*16, [TBL+58*16], rot43
+ MD5_STEP MAGIC_I, B,C,D,A, B2,C2,D2,A2, FUN,TMP, FUN2,TMP2, mem1 +13*16, [TBL+59*16], rot44
+ MD5_STEP MAGIC_I, A,B,C,D, A2,B2,C2,D2, FUN,TMP, FUN2,TMP2, mem1 + 4*16, [TBL+60*16], rot41
+ MD5_STEP MAGIC_I, D,A,B,C, D2,A2,B2,C2, FUN,TMP, FUN2,TMP2, mem1 +11*16, [TBL+61*16], rot42
+ MD5_STEP MAGIC_I, C,D,A,B, C2,D2,A2,B2, FUN,TMP, FUN2,TMP2, mem1 + 2*16, [TBL+62*16], rot43
+ MD5_STEP MAGIC_I, B,C,D,A, B2,C2,D2,A2, FUN,TMP, FUN2,TMP2, mem1 + 9*16, [TBL+63*16], rot44
+
+ vpaddd A,A,[AA]
+ vpaddd B,B,[BB]
+ vpaddd C,C,[CC]
+ vpaddd D,D,[DD]
+
+ vpaddd A2,A2,[AA2]
+ vpaddd B2,B2,[BB2]
+ vpaddd C2,C2,[CC2]
+ vpaddd D2,D2,[DD2]
+
+ ; write out digests
+ vmovdqu [state + 0*16 + 0*MD5_DIGEST_ROW_SIZE ], A
+ vmovdqu [state + 0*16 + 1*MD5_DIGEST_ROW_SIZE ], B
+ vmovdqu [state + 0*16 + 2*MD5_DIGEST_ROW_SIZE ], C
+ vmovdqu [state + 0*16 + 3*MD5_DIGEST_ROW_SIZE ], D
+ vmovdqu [state + 1*16 + 0*MD5_DIGEST_ROW_SIZE], A2
+ vmovdqu [state + 1*16 + 1*MD5_DIGEST_ROW_SIZE], B2
+ vmovdqu [state + 1*16 + 2*MD5_DIGEST_ROW_SIZE], C2
+ vmovdqu [state + 1*16 + 3*MD5_DIGEST_ROW_SIZE], D2
+
+ ;; update input pointers
+ add inp0, IDX
+ add inp1, IDX
+ add inp2, IDX
+ add inp3, IDX
+ add inp4, IDX
+ add inp5, IDX
+ add inp6, IDX
+ add inp7, IDX
+ mov [state +_data_ptr_md5 + 0*PTR_SZ], inp0
+ mov [state +_data_ptr_md5 + 1*PTR_SZ], inp1
+ mov [state +_data_ptr_md5 + 2*PTR_SZ], inp2
+ mov [state +_data_ptr_md5 + 3*PTR_SZ], inp3
+ mov [state +_data_ptr_md5 + 4*PTR_SZ], inp4
+ mov [state +_data_ptr_md5 + 5*PTR_SZ], inp5
+ mov [state +_data_ptr_md5 + 6*PTR_SZ], inp6
+ mov [state +_data_ptr_md5 + 7*PTR_SZ], inp7
+
+ ;; Clear stack frame (72*16 bytes)
+%ifdef SAFE_DATA
+ vpxor xmm0, xmm0
+%assign i 0
+%rep (2*2*16+8)
+ vmovdqa [rsp + i*16], xmm0
+%assign i (i+1)
+%endrep
+%endif
+
+ ;;;;;;;;;;;;;;;;
+ ;; Postamble
+ add rsp, STACK_SIZE
+
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/avx/pon_avx.asm b/src/spdk/intel-ipsec-mb/avx/pon_avx.asm
new file mode 100644
index 000000000..8510dc4a3
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx/pon_avx.asm
@@ -0,0 +1,1170 @@
+;;
+;; Copyright (c) 2019, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%use smartalign
+
+%include "job_aes_hmac.asm"
+%include "include/os.asm"
+%include "include/memcpy.asm"
+
+;;; This is implementation of stitched algorithms: AES128-CTR + CRC32 + BIP
+;;; This combination is required by PON/xPON/gPON standard.
+;;; Note: BIP is running XOR of double words
+;;; Order of operations:
+;;; - encrypt: HEC update (XGEM header), CRC32 (Ethernet FCS), AES-CTR and BIP
+;;; - decrypt: BIP, AES-CTR and CRC32 (Ethernet FCS)
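+;;; BIP (bit interleaved parity) is the running XOR of the protected data
+;;; taken as 32-bit words; here it is accumulated four dwords at a time in
+;;; an XMM register (xbip) as each 16-byte block is processed.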
+
+extern byteswap_const
+extern ddq_add_1
+
+section .data
+default rel
+
+;;; Precomputed constants for CRC32 (Ethernet FCS)
+;;; Details of the CRC algorithm and 4 byte buffer of
+;;; {0x01, 0x02, 0x03, 0x04}:
+;;; Result Poly Init RefIn RefOut XorOut
+;;; 0xB63CFBCD 0x04C11DB7 0xFFFFFFFF true true 0xFFFFFFFF
+align 16
+rk1:
+ dq 0x00000000ccaa009e, 0x00000001751997d0
+
+align 16
+rk5:
+ dq 0x00000000ccaa009e, 0x0000000163cd6124
+
+align 16
+rk7:
+ dq 0x00000001f7011640, 0x00000001db710640
+
+align 16
+pshufb_shf_table:
+ ;; use these values for shift registers with the pshufb instruction
+ dq 0x8786858483828100, 0x8f8e8d8c8b8a8988
+ dq 0x0706050403020100, 0x000e0d0c0b0a0908
+
+align 16
+init_crc_value:
+ dq 0x00000000FFFFFFFF, 0x0000000000000000
+
+align 16
+mask:
+ dq 0xFFFFFFFFFFFFFFFF, 0x0000000000000000
+
+align 16
+mask2:
+ dq 0xFFFFFFFF00000000, 0xFFFFFFFFFFFFFFFF
+align 16
+mask3:
+ dq 0x8080808080808080, 0x8080808080808080
+
+align 16
+mask_out_top_bytes:
+ dq 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF
+ dq 0x0000000000000000, 0x0000000000000000
+
+align 16
+ddq_add_1_1:
+ dq 0x1, 0x1
+
+;; Precomputed constants for HEC calculation (XGEM header)
+;; POLY 0x53900000:
+;; k1 = 0xf9800000
+;; k2 = 0xa0900000
+;; k3 = 0x7cc00000
+;; q = 0x46b927ec
+;; p_res = 0x53900000
+
+align 16
+k3_q:
+ dq 0x7cc00000, 0x46b927ec
+
+align 16
+p_res:
+ dq 0x53900000, 0
+
+align 16
+mask_out_top_64bits:
+ dq 0xffffffff_ffffffff, 0
+
+section .text
+
+%define NUM_AES_ROUNDS 10
+
+%define xcounter xmm0
+%define xbip xmm1
+%define xcrc xmm2
+%define xcrckey xmm3
+%define xtmp1 xmm4
+%define xtmp2 xmm5
+%define xtmp3 xmm6
+%define xtmp4 xmm7
+%define xtmp5 xmm8
+%define xtmp6 xmm9
+%define xtmp7 xmm10
+%define xtmp8 xmm11
+%define xtmp9 xmm12
+%define xtmp10 xmm13
+%define xtmp11 xmm14
+
+%ifdef LINUX
+%define arg1 rdi
+%define arg2 rsi
+%define arg3 rdx
+%define arg4 rcx
+%define tmp_1 r8
+%define tmp_2 r9
+%define tmp_3 r10
+%define tmp_4 r11
+%define tmp_5 r12
+%define tmp_6 r13
+%define tmp_7 r14
+%else
+%define arg1 rcx
+%define arg2 rdx
+%define arg3 r8
+%define arg4 r9
+%define tmp_1 r10
+%define tmp_2 r11
+%define tmp_3 rax
+%define tmp_4 r12
+%define tmp_5 r13
+%define tmp_6 r14
+%define tmp_7 r15
+%endif
+
+%define job arg1
+
+%define p_in arg2
+%define p_keys arg3
+%define p_out arg4
+
+%define num_bytes tmp_1 ; bytes to cipher
+%define tmp tmp_2
+%define ctr_check tmp_3 ; counter block overflow check
+%define bytes_to_crc tmp_4 ; number of bytes to crc ( < num_bytes)
+
+%define ethernet_fcs tmp_6 ; not used together with tmp3
+%define tmp2 tmp_5
+%define tmp3 tmp_6
+
+%define write_back_crc tmp_7
+%define decrypt_not_done tmp_7
+
+;;; ============================================================================
+;;; Does all AES encryption rounds
+%macro AES_ENC_ROUNDS 3
+%define %%KP %1 ; [in] pointer to expanded keys
+%define %%N_ROUNDS %2 ; [in] max rounds (128bit: 10, 12, 14)
+%define %%BLOCK %3 ; [in/out] XMM with encrypted block
+
+%assign round 0
+ vpxor %%BLOCK, %%BLOCK, [%%KP + (round * 16)]
+
+%rep (%%N_ROUNDS - 1)
+%assign round (round + 1)
+ vaesenc %%BLOCK, %%BLOCK, [%%KP + (round * 16)]
+%endrep
+
+%assign round (round + 1)
+ vaesenclast %%BLOCK, %%BLOCK, [%%KP + (round * 16)]
+
+%endmacro
+
+;;; ============================================================================
+;;; Does all AES encryption rounds on 4 blocks
+%macro AES_ENC_ROUNDS_4 7
+%define %%KP %1 ; [in] pointer to expanded keys
+%define %%N_ROUNDS %2 ; [in] max rounds (128bit: 10, 12, 14)
+%define %%BLOCK1 %3 ; [in/out] XMM with encrypted block
+%define %%BLOCK2 %4 ; [in/out] XMM with encrypted block
+%define %%BLOCK3 %5 ; [in/out] XMM with encrypted block
+%define %%BLOCK4 %6 ; [in/out] XMM with encrypted block
+%define %%XT1 %7 ; [clobbered] temporary XMM register
+
+%assign round 0
+ vmovdqa %%XT1, [%%KP + (round * 16)]
+ vpxor %%BLOCK1, %%BLOCK1, %%XT1
+ vpxor %%BLOCK2, %%BLOCK2, %%XT1
+ vpxor %%BLOCK3, %%BLOCK3, %%XT1
+ vpxor %%BLOCK4, %%BLOCK4, %%XT1
+
+%rep (%%N_ROUNDS - 1)
+%assign round (round + 1)
+ vmovdqa %%XT1, [%%KP + (round * 16)]
+ vaesenc %%BLOCK1, %%BLOCK1, %%XT1
+ vaesenc %%BLOCK2, %%BLOCK2, %%XT1
+ vaesenc %%BLOCK3, %%BLOCK3, %%XT1
+ vaesenc %%BLOCK4, %%BLOCK4, %%XT1
+%endrep
+
+%assign round (round + 1)
+ vmovdqa %%XT1, [%%KP + (round * 16)]
+ vaesenclast %%BLOCK1, %%BLOCK1, %%XT1
+ vaesenclast %%BLOCK2, %%BLOCK2, %%XT1
+ vaesenclast %%BLOCK3, %%BLOCK3, %%XT1
+ vaesenclast %%BLOCK4, %%BLOCK4, %%XT1
+%endmacro
+
+;;; ============================================================================
+;;; CRC multiply before XOR against data block
+%macro CRC_CLMUL 3
+%define %%XCRC_IN_OUT %1 ; [in/out] XMM with CRC (can be anything if "no_crc" below)
+%define %%XCRC_MUL %2 ; [in] XMM with CRC constant (can be anything if "no_crc" below)
+%define %%XTMP %3 ; [clobbered] temporary XMM
+
+ vpclmulqdq %%XTMP, %%XCRC_IN_OUT, %%XCRC_MUL, 0x01
+ vpclmulqdq %%XCRC_IN_OUT, %%XCRC_IN_OUT, %%XCRC_MUL, 0x10
+ vpxor %%XCRC_IN_OUT, %%XCRC_IN_OUT, %%XTMP
+%endmacro
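+
+;; A short note on the constants (based on how the macro is invoked below):
+;; %%XCRC_MUL is loaded with the rk1/rk2 folding pair ("vmovdqa xcrckey,
+;; [rel rk1]").  Immediate 0x01 multiplies the upper 64 bits of the CRC
+;; accumulator by the low constant, 0x10 multiplies the lower 64 bits by the
+;; high constant, and the XOR of both products folds the 128-bit state
+;; forward so it can absorb the next 16-byte data block.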
+
+;;; ============================================================================
+;;; PON stitched algorithm round on a single AES block (16 bytes):
+;;; AES-CTR (optional, depending on %%CIPH)
+;;; - prepares counter block
+;;; - encrypts counter block
+;;; - loads text
+;;; - xor's text against encrypted blocks
+;;; - stores cipher text
+;;; BIP
+;;; - BIP update on 4 x 32-bits
+;;; CRC32
+;;; - CRC32 calculation
+;;; Note: via selection of no_crc, no_bip, no_load, no_store different macro
+;;; behaviour can be achieved to match needs of the overall algorithm.
+%macro DO_PON 15
+%define %%KP %1 ; [in] GP, pointer to expanded keys
+%define %%N_ROUNDS %2 ; [in] number of AES rounds (10, 12 or 14)
+%define %%CTR %3 ; [in/out] XMM with counter block
+%define %%INP %4 ; [in/out] GP with input text pointer or "no_load"
+%define %%OUTP %5 ; [in/out] GP with output text pointer or "no_store"
+%define %%XBIP_IN_OUT %6 ; [in/out] XMM with BIP value or "no_bip"
+%define %%XCRC_IN_OUT %7 ; [in/out] XMM with CRC (can be anything if "no_crc" below)
+%define %%XCRC_MUL %8 ; [in] XMM with CRC constant (can be anything if "no_crc" below)
+%define %%TXMM0 %9 ; [clobbered|out] XMM temporary or data out (no_store)
+%define %%TXMM1 %10 ; [clobbered|in] XMM temporary or data in (no_load)
+%define %%TXMM2 %11 ; [clobbered] XMM temporary
+%define %%CRC_TYPE %12 ; [in] "first_crc" or "next_crc" or "no_crc"
+%define %%DIR %13 ; [in] "ENC" or "DEC"
+%define %%CIPH %14 ; [in] "CTR" or "NO_CTR"
+%define %%CTR_CHECK %15 ; [in/out] GP with 64bit counter (to identify overflow)
+
+%ifidn %%CIPH, CTR
+ ;; prepare counter blocks for encryption
+ vpshufb %%TXMM0, %%CTR, [rel byteswap_const]
+ ;; perform 1 increment on whole 128 bits
+ add %%CTR_CHECK, 1
+ jc %%_ctr_overflow
+ vpaddq %%CTR, %%CTR, [rel ddq_add_1]
+ jmp %%_ctr_overflow_done
+%%_ctr_overflow:
+ vpaddq %%CTR, %%CTR, [rel ddq_add_1_1]
+%%_ctr_overflow_done:
+%endif
+
+ ;; CRC calculation
+%ifidn %%CRC_TYPE, next_crc
+ ;; The CRC_CLMUL macro could be used here but its final xor would
+ ;; stall the cipher xor's, so only the CLMULs are done here and the
+ ;; xor is deferred until after the cipher.
+ vpclmulqdq %%TXMM2, %%XCRC_IN_OUT, %%XCRC_MUL, 0x01
+ vpclmulqdq %%XCRC_IN_OUT, %%XCRC_IN_OUT, %%XCRC_MUL, 0x10
+%endif
+
+%ifnidn %%INP, no_load
+ vmovdqu %%TXMM1, [%%INP]
+%endif
+
+%ifidn %%CIPH, CTR
+ ;; AES rounds
+ AES_ENC_ROUNDS %%KP, %%N_ROUNDS, %%TXMM0
+
+ ;; xor plaintext/ciphertext against encrypted counter blocks
+ vpxor %%TXMM0, %%TXMM0, %%TXMM1
+%else ;; CIPH = NO_CTR
+ ;; register copy is needed as no_load/no_store options need it
+ vmovdqa %%TXMM0, %%TXMM1
+%endif ;; CIPH = CTR
+
+%ifnidn %%CRC_TYPE, no_crc
+%ifidn %%CRC_TYPE, next_crc
+ ;; Finish split CRC_MUL() operation
+ vpxor %%XCRC_IN_OUT, %%XCRC_IN_OUT, %%TXMM2
+%endif
+%ifidn %%CIPH, CTR
+ ;; CRC calculation for ENCRYPTION/DECRYPTION
+ ;; - always XOR against plaintext block
+%ifidn %%DIR, ENC
+ vpxor %%XCRC_IN_OUT, %%XCRC_IN_OUT, %%TXMM1
+%else
+ vpxor %%XCRC_IN_OUT, %%XCRC_IN_OUT, %%TXMM0
+%endif ; DECRYPT
+%else ;; CIPH = NO_CTR
+ ;; CRC calculation for NO CIPHER option
+ vpxor %%XCRC_IN_OUT, %%XCRC_IN_OUT, %%TXMM1
+%endif ;; CIPH = CTR
+%endif ;; CRC_TYPE != NO_CRC
+
+ ;; store the result in the output buffer
+%ifnidn %%OUTP, no_store
+%ifidn %%CIPH, CTR
+ vmovdqu [%%OUTP], %%TXMM0
+%else ;; CIPH = NO_CTR
+ vmovdqu [%%OUTP], %%TXMM1
+%endif ;; CIPH = CTR
+%endif
+
+ ;; update BIP value - always use cipher text for BIP
+%ifnidn %%XBIP_IN_OUT, no_bip
+%ifidn %%CIPH, CTR
+%ifidn %%DIR, ENC
+ vpxor %%XBIP_IN_OUT, %%XBIP_IN_OUT, %%TXMM0
+%else
+ vpxor %%XBIP_IN_OUT, %%XBIP_IN_OUT, %%TXMM1
+%endif ; DECRYPT
+%else ;; CIPH = NO_CTR
+ vpxor %%XBIP_IN_OUT, %%XBIP_IN_OUT, %%TXMM1
+%endif ;; CIPH = CTR
+%endif ;; !NO_BIP
+
+ ;; increment in/out pointers
+%ifnidn %%INP, no_load
+ add %%INP, 16
+%endif
+%ifnidn %%OUTP, no_store
+ add %%OUTP, 16
+%endif
+%endmacro ; DO_PON
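+
+;; Usage note: the pointer/XMM "options" are passed as literal tokens rather
+;; than registers.  For example, CIPHER_BIP_REST below ciphers a final
+;; partial block entirely in registers with:
+;;      DO_PON %%PTR_KEYS, NUM_AES_ROUNDS, %%XCTR_IN_OUT, no_load, no_store, no_bip, \
+;;             no_crc, no_crc, %%XMMT1, %%XMMT2, %%XMMT3, no_crc, %%DIR, %%CIPH, %%CTR_CHECK
+;; where %%TXMM1 carries the data in and %%TXMM0 returns the data out.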
+
+;;; ============================================================================
+;;; PON stitched algorithm round on four AES blocks (4 x 16 bytes):
+;;; AES-CTR (optional, depending on %%CIPH)
+;;; - prepares counter block
+;;; - encrypts counter block
+;;; - loads text
+;;; - xor's text against encrypted blocks
+;;; - stores cipher text
+;;; BIP
+;;; - BIP update on 4 x 32-bits
+;;; CRC32
+;;; - CRC32 calculation
+;;; Note: via selection of first_crc/next_crc and CTR/NO_CTR different macro
+;;; behaviour can be achieved to match needs of the overall algorithm.
+%macro DO_PON_4 23
+%define %%KP %1 ; [in] GP, pointer to expanded keys
+%define %%N_ROUNDS %2 ; [in] number of AES rounds (10, 12 or 14)
+%define %%CTR %3 ; [in/out] XMM with counter block
+%define %%INP %4 ; [in/out] GP with input text pointer or "no_load"
+%define %%OUTP %5 ; [in/out] GP with output text pointer or "no_store"
+%define %%XBIP_IN_OUT %6 ; [in/out] XMM with BIP value or "no_bip"
+%define %%XCRC_IN_OUT %7 ; [in/out] XMM with CRC (can be anything if "no_crc" below)
+%define %%XCRC_MUL %8 ; [in] XMM with CRC constant (can be anything if "no_crc" below)
+%define %%T0 %9 ; [clobbered] XMM temporary
+%define %%T1 %10 ; [clobbered] XMM temporary
+%define %%T2 %11 ; [clobbered] XMM temporary
+%define %%T3 %12 ; [clobbered] XMM temporary
+%define %%T4 %13 ; [clobbered] XMM temporary
+%define %%T5 %14 ; [clobbered] XMM temporary
+%define %%T6 %15 ; [clobbered] XMM temporary
+%define %%T7 %16 ; [clobbered] XMM temporary
+%define %%T8 %17 ; [clobbered] XMM temporary
+%define %%T9 %18 ; [clobbered] XMM temporary
+%define %%T10 %19 ; [clobbered] XMM temporary
+%define %%CRC_TYPE %20 ; [in] "first_crc" or "next_crc" or "no_crc"
+%define %%DIR %21 ; [in] "ENC" or "DEC"
+%define %%CIPH %22 ; [in] "CTR" or "NO_CTR"
+%define %%CTR_CHECK %23 ; [in/out] GP with 64bit counter (to identify overflow)
+
+%define %%CTR1 %%T3
+%define %%CTR2 %%T4
+%define %%CTR3 %%T5
+%define %%CTR4 %%T6
+
+%define %%TXT1 %%T7
+%define %%TXT2 %%T8
+%define %%TXT3 %%T9
+%define %%TXT4 %%T10
+
+%ifidn %%CIPH, CTR
+ ;; prepare counter blocks for encryption
+ vmovdqa %%T0, [rel ddq_add_1]
+ vmovdqa %%T2, [rel byteswap_const]
+
+ ;; CTR1: copy saved CTR value as CTR1
+ vmovdqa %%CTR1, %%CTR
+
+ cmp %%CTR_CHECK, 0xffff_ffff_ffff_ffff - 4
+ ja %%_ctr_will_overflow
+
+ ;; case in which 64-bit counter will not overflow
+ vpaddq %%CTR2, %%CTR1, %%T0
+ vpaddq %%CTR3, %%CTR2, %%T0
+ vpaddq %%CTR4, %%CTR3, %%T0
+ vpaddq %%CTR, %%CTR4, %%T0
+ vpshufb %%CTR1, %%CTR1, %%T2
+ vpshufb %%CTR2, %%CTR2, %%T2
+ vpshufb %%CTR3, %%CTR3, %%T2
+ vpshufb %%CTR4, %%CTR4, %%T2
+ add %%CTR_CHECK, 4
+ jmp %%_ctr_update_done
+
+%%_ctr_will_overflow:
+ vmovdqa %%T1, [rel ddq_add_1_1]
+ ;; CTR2: perform 1 increment on whole 128 bits
+ add %%CTR_CHECK, 1
+ jc %%_ctr2_overflow
+ vpaddq %%CTR2, %%CTR1, %%T0
+ jmp %%_ctr2_overflow_done
+%%_ctr2_overflow:
+ vpaddq %%CTR2, %%CTR1, %%T1
+%%_ctr2_overflow_done:
+ vpshufb %%CTR1, %%CTR1, %%T2
+
+ ;; CTR3: perform 1 increment on whole 128 bits
+ add %%CTR_CHECK, 1
+ jc %%_ctr3_overflow
+ vpaddq %%CTR3, %%CTR2, %%T0
+ jmp %%_ctr3_overflow_done
+%%_ctr3_overflow:
+ vpaddq %%CTR3, %%CTR2, %%T1
+%%_ctr3_overflow_done:
+ vpshufb %%CTR2, %%CTR2, %%T2
+
+ ;; CTR4: perform 1 increment on whole 128 bits
+ add %%CTR_CHECK, 1
+ jc %%_ctr4_overflow
+ vpaddq %%CTR4, %%CTR3, %%T0
+ jmp %%_ctr4_overflow_done
+%%_ctr4_overflow:
+ vpaddq %%CTR4, %%CTR3, %%T1
+%%_ctr4_overflow_done:
+ vpshufb %%CTR3, %%CTR3, %%T2
+
+ ;; CTR: perform 1 increment on whole 128 bits (for the next iteration)
+ add %%CTR_CHECK, 1
+ jc %%_ctr_overflow
+ vpaddq %%CTR, %%CTR4, %%T0
+ jmp %%_ctr_overflow_done
+%%_ctr_overflow:
+ vpaddq %%CTR, %%CTR4, %%T1
+%%_ctr_overflow_done:
+ vpshufb %%CTR4, %%CTR4, %%T2
+%%_ctr_update_done:
+%endif
+
+%ifidn %%CRC_TYPE, next_crc
+ ;; The CRC_CLMUL macro could be used here but its final xor would
+ ;; stall the cipher xor's, so only the CLMULs are done here and the
+ ;; xor is deferred until after the cipher.
+ vpclmulqdq %%T2, %%XCRC_IN_OUT, %%XCRC_MUL, 0x01
+ vpclmulqdq %%XCRC_IN_OUT, %%XCRC_IN_OUT, %%XCRC_MUL, 0x10
+%endif
+
+ ;; load plaintext/ciphertext
+ vmovdqu %%TXT1, [%%INP]
+ vmovdqu %%TXT2, [%%INP + 16]
+ vmovdqu %%TXT3, [%%INP + 32]
+ vmovdqu %%TXT4, [%%INP + 48]
+
+%ifidn %%CIPH, CTR
+ AES_ENC_ROUNDS_4 %%KP, %%N_ROUNDS, %%CTR1, %%CTR2, %%CTR3, %%CTR4, %%T0
+
+ ;; xor plaintext/ciphertext against encrypted counter blocks
+ vpxor %%CTR1, %%CTR1, %%TXT1
+ vpxor %%CTR2, %%CTR2, %%TXT2
+ vpxor %%CTR3, %%CTR3, %%TXT3
+ vpxor %%CTR4, %%CTR4, %%TXT4
+%endif ;; CIPH = CTR
+
+%ifidn %%CRC_TYPE, next_crc
+ ;; Finish split CRC_MUL() operation
+ vpxor %%XCRC_IN_OUT, %%XCRC_IN_OUT, %%T2
+%endif
+%ifidn %%CIPH, CTR
+%ifidn %%DIR, ENC
+ ;; CRC calculation for ENCRYPTION (blocks 1 & 2)
+ ;; - XOR CRC against plaintext block
+ vpxor %%XCRC_IN_OUT, %%XCRC_IN_OUT, %%TXT1
+
+ CRC_CLMUL %%XCRC_IN_OUT, %%XCRC_MUL, %%T2
+ vpxor %%XCRC_IN_OUT, %%XCRC_IN_OUT, %%TXT2
+
+ CRC_CLMUL %%XCRC_IN_OUT, %%XCRC_MUL, %%T2
+%else
+ ;; CRC calculation for DECRYPTION (blocks 1 & 2)
+ ;; - XOR CRC against plaintext block
+ vpxor %%XCRC_IN_OUT, %%XCRC_IN_OUT, %%CTR1
+
+ CRC_CLMUL %%XCRC_IN_OUT, %%XCRC_MUL, %%T2
+ vpxor %%XCRC_IN_OUT, %%XCRC_IN_OUT, %%CTR2
+
+ CRC_CLMUL %%XCRC_IN_OUT, %%XCRC_MUL, %%T2
+%endif ; DECRYPT
+%else ;; CIPH = NO_CTR
+ ;; CRC calculation for NO CIPHER option (blocks 1 & 2)
+ ;; - XOR CRC against plaintext block
+ vpxor %%XCRC_IN_OUT, %%XCRC_IN_OUT, %%TXT1
+
+ CRC_CLMUL %%XCRC_IN_OUT, %%XCRC_MUL, %%T2
+ vpxor %%XCRC_IN_OUT, %%XCRC_IN_OUT, %%TXT2
+
+ CRC_CLMUL %%XCRC_IN_OUT, %%XCRC_MUL, %%T2
+%endif ;; CIPH = CTR
+
+ ;; store ciphertext/plaintext
+%ifidn %%CIPH, CTR
+ vmovdqu [%%OUTP], %%CTR1
+ vmovdqu [%%OUTP + 16], %%CTR2
+ vmovdqu [%%OUTP + 32], %%CTR3
+ vmovdqu [%%OUTP + 48], %%CTR4
+%else ;; CIPH = NO_CTR
+ vmovdqu [%%OUTP], %%TXT1
+ vmovdqu [%%OUTP + 16], %%TXT2
+ vmovdqu [%%OUTP + 32], %%TXT3
+ vmovdqu [%%OUTP + 48], %%TXT4
+%endif ;; CIPH = CTR
+
+ ;; update BIP value
+%ifidn %%CIPH, CTR
+ ;; - always use ciphertext for BIP
+%ifidn %%DIR, ENC
+ vpxor %%T0, %%CTR1, %%CTR2
+ vpxor %%T1, %%CTR3, %%CTR4
+%else
+ vpxor %%T0, %%TXT1, %%TXT2
+ vpxor %%T1, %%TXT3, %%TXT4
+%endif ; DECRYPT
+%else ;; CIPH = NO_CTR
+ vpxor %%T0, %%TXT1, %%TXT2
+ vpxor %%T1, %%TXT3, %%TXT4
+%endif ;; CIPH = CTR
+ vpxor %%XBIP_IN_OUT, %%XBIP_IN_OUT, %%T0
+ vpxor %%XBIP_IN_OUT, %%XBIP_IN_OUT, %%T1
+
+ ;; increment in/out pointers
+ add %%INP, 64
+ add %%OUTP, 64
+
+%ifidn %%CIPH, CTR
+%ifidn %%DIR, ENC
+ ;; CRC calculation for ENCRYPTION (blocks 3 & 4)
+ ;; - XOR CRC against plaintext block
+ vpxor %%XCRC_IN_OUT, %%XCRC_IN_OUT, %%TXT3
+
+ CRC_CLMUL %%XCRC_IN_OUT, %%XCRC_MUL, %%T2
+ vpxor %%XCRC_IN_OUT, %%XCRC_IN_OUT, %%TXT4
+%else
+ ;; CRC calculation for DECRYPTION (blocks 3 & 4)
+ ;; - XOR CRC against plaintext block
+ vpxor %%XCRC_IN_OUT, %%XCRC_IN_OUT, %%CTR3
+
+ CRC_CLMUL %%XCRC_IN_OUT, %%XCRC_MUL, %%T2
+ vpxor %%XCRC_IN_OUT, %%XCRC_IN_OUT, %%CTR4
+%endif ; DECRYPT
+%else ;; CIPH = NO_CTR
+ ;; CRC calculation for NO CIPHER option (blocks 3 & 4)
+ ;; - XOR CRC against plaintext block
+ vpxor %%XCRC_IN_OUT, %%XCRC_IN_OUT, %%TXT3
+
+ CRC_CLMUL %%XCRC_IN_OUT, %%XCRC_MUL, %%T2
+ vpxor %%XCRC_IN_OUT, %%XCRC_IN_OUT, %%TXT4
+%endif ;; CIPH = CTR
+
+%endmacro ; DO_PON_4
+
+;;; ============================================================================
+;;; CIPHER and BIP specified number of bytes
+%macro CIPHER_BIP_REST 14
+%define %%NUM_BYTES %1 ; [in/clobbered] number of bytes to cipher
+%define %%DIR %2 ; [in] "ENC" or "DEC"
+%define %%CIPH %3 ; [in] "CTR" or "NO_CTR"
+%define %%PTR_IN %4 ; [in/clobbered] GPR pointer to input buffer
+%define %%PTR_OUT %5 ; [in/clobbered] GPR pointer to output buffer
+%define %%PTR_KEYS %6 ; [in] GPR pointer to expanded keys
+%define %%XBIP_IN_OUT %7 ; [in/out] XMM 128-bit BIP state
+%define %%XCTR_IN_OUT %8 ; [in/out] XMM 128-bit AES counter block
+%define %%XMMT1 %9 ; [clobbered] temporary XMM
+%define %%XMMT2 %10 ; [clobbered] temporary XMM
+%define %%XMMT3 %11 ; [clobbered] temporary XMM
+%define %%CTR_CHECK %12 ; [in/out] GP with 64bit counter (to identify overflow)
+%define %%GPT1 %13 ; [clobbered] temporary GP
+%define %%GPT2 %14 ; [clobbered] temporary GP
+
+ align 16
+%%_cipher_last_blocks:
+ cmp %%NUM_BYTES, 16
+ jb %%_partial_block_left
+
+ DO_PON %%PTR_KEYS, NUM_AES_ROUNDS, %%XCTR_IN_OUT, %%PTR_IN, %%PTR_OUT, %%XBIP_IN_OUT, \
+ no_crc, no_crc, %%XMMT1, %%XMMT2, %%XMMT3, no_crc, %%DIR, %%CIPH, %%CTR_CHECK
+ sub %%NUM_BYTES, 16
+ jz %%_bip_done
+ jmp %%_cipher_last_blocks
+
+%%_partial_block_left:
+ simd_load_avx_15_1 %%XMMT2, %%PTR_IN, %%NUM_BYTES
+
+ ;; DO_PON() neither loads nor stores the data in this case:
+ ;; XMMT2 = data in
+ ;; XMMT1 = data out
+ DO_PON %%PTR_KEYS, NUM_AES_ROUNDS, %%XCTR_IN_OUT, no_load, no_store, no_bip, \
+ no_crc, no_crc, %%XMMT1, %%XMMT2, %%XMMT3, no_crc, %%DIR, %%CIPH, %%CTR_CHECK
+
+ ;; bip update for partial block (mask out bytes outside the message)
+ lea %%GPT1, [rel mask_out_top_bytes + 16]
+ sub %%GPT1, %%NUM_BYTES
+ vmovdqu %%XMMT3, [%%GPT1]
+ ;; put masked cipher text into XMMT2 for BIP update
+%ifidn %%DIR, ENC
+ vpand %%XMMT2, %%XMMT1, %%XMMT3
+%else
+ vpand %%XMMT2, %%XMMT2, %%XMMT3
+%endif
+ vpxor %%XBIP_IN_OUT, %%XMMT2
+
+ ;; store partial bytes in the output buffer
+ simd_store_avx_15 %%PTR_OUT, %%XMMT1, %%NUM_BYTES, %%GPT1, %%GPT2
+
+%%_bip_done:
+%endmacro ; CIPHER_BIP_REST
+
+;; =============================================================================
+;; Barrett reduction from 128-bits to 32-bits modulo Ethernet FCS polynomial
+
+%macro CRC32_REDUCE_128_TO_32 5
+%define %%CRC %1 ; [out] GP to store 32-bit Ethernet FCS value
+%define %%XCRC %2 ; [in/clobbered] XMM with CRC
+%define %%XT1 %3 ; [clobbered] temporary xmm register
+%define %%XT2 %4 ; [clobbered] temporary xmm register
+%define %%XT3 %5 ; [clobbered] temporary xmm register
+
+%define %%XCRCKEY %%XT3
+
+ ;; compute crc of a 128-bit value
+ vmovdqa %%XCRCKEY, [rel rk5]
+
+ ;; 64b fold
+ vpclmulqdq %%XT1, %%XCRC, %%XCRCKEY, 0x00
+ vpsrldq %%XCRC, %%XCRC, 8
+ vpxor %%XCRC, %%XCRC, %%XT1
+
+ ;; 32b fold
+ vpslldq %%XT1, %%XCRC, 4
+ vpclmulqdq %%XT1, %%XT1, %%XCRCKEY, 0x10
+ vpxor %%XCRC, %%XCRC, %%XT1
+
+%%_crc_barrett:
+ ;; Barrett reduction
+ vpand %%XCRC, [rel mask2]
+ vmovdqa %%XT1, %%XCRC
+ vmovdqa %%XT2, %%XCRC
+ vmovdqa %%XCRCKEY, [rel rk7]
+
+ vpclmulqdq %%XCRC, %%XCRCKEY, 0x00
+ vpxor %%XCRC, %%XT2
+ vpand %%XCRC, [rel mask]
+ vmovdqa %%XT2, %%XCRC
+ vpclmulqdq %%XCRC, %%XCRCKEY, 0x10
+ vpxor %%XCRC, %%XT2
+ vpxor %%XCRC, %%XT1
+ vpextrd DWORD(%%CRC), %%XCRC, 2 ; 32-bit CRC value
+ not DWORD(%%CRC)
+%endmacro
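+
+;; Note: the trailing NOT applies the final bit inversion required by the
+;; Ethernet FCS definition (CRC-32 uses an all-ones initial value and a
+;; complemented result); the init_crc_value constant loaded by the main
+;; macro supplies the initial-value side of that convention.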
+
+;; =============================================================================
+;; Barrett reduction from 128-bits to 32-bits modulo 0x53900000 polynomial
+
+%macro HEC_REDUCE_128_TO_32 4
+%define %%XMM_IN_OUT %1 ; [in/out] xmm register with data in and out
+%define %%XT1 %2 ; [clobbered] temporary xmm register
+%define %%XT2 %3 ; [clobbered] temporary xmm register
+%define %%XT3 %4 ; [clobbered] temporary xmm register
+
+%define %%K3_Q %%XT1
+%define %%P_RES %%XT2
+%define %%XTMP %%XT3
+
+ ;; 128 to 64 bit reduction
+ vmovdqa %%K3_Q, [k3_q]
+ vmovdqa %%P_RES, [p_res]
+
+ vpclmulqdq %%XTMP, %%XMM_IN_OUT, %%K3_Q, 0x01 ; K3
+ vpxor %%XTMP, %%XTMP, %%XMM_IN_OUT
+
+ vpclmulqdq %%XTMP, %%XTMP, %%K3_Q, 0x01 ; K3
+ vpxor %%XMM_IN_OUT, %%XTMP, %%XMM_IN_OUT
+
+ vpand %%XMM_IN_OUT, [rel mask_out_top_64bits]
+
+ ;; 64 to 32 bit reduction
+ vpsrldq %%XTMP, %%XMM_IN_OUT, 4
+ vpclmulqdq %%XTMP, %%XTMP, %%K3_Q, 0x10 ; Q
+ vpxor %%XTMP, %%XTMP, %%XMM_IN_OUT
+ vpsrldq %%XTMP, %%XTMP, 4
+
+ vpclmulqdq %%XTMP, %%XTMP, %%P_RES, 0x00 ; P
+ vpxor %%XMM_IN_OUT, %%XTMP, %%XMM_IN_OUT
+%endmacro
+
+;; =============================================================================
+;; Barrett reduction from 64-bits to 32-bits modulo 0x53900000 polynomial
+
+%macro HEC_REDUCE_64_TO_32 4
+%define %%XMM_IN_OUT %1 ; [in/out] xmm register with data in and out
+%define %%XT1 %2 ; [clobbered] temporary xmm register
+%define %%XT2 %3 ; [clobbered] temporary xmm register
+%define %%XT3 %4 ; [clobbered] temporary xmm register
+
+%define %%K3_Q %%XT1
+%define %%P_RES %%XT2
+%define %%XTMP %%XT3
+
+ vmovdqa %%K3_Q, [k3_q]
+ vmovdqa %%P_RES, [p_res]
+
+ ;; 64 to 32 bit reduction
+ vpsrldq %%XTMP, %%XMM_IN_OUT, 4
+ vpclmulqdq %%XTMP, %%XTMP, %%K3_Q, 0x10 ; Q
+ vpxor %%XTMP, %%XTMP, %%XMM_IN_OUT
+ vpsrldq %%XTMP, %%XTMP, 4
+
+ vpclmulqdq %%XTMP, %%XTMP, %%P_RES, 0x00 ; P
+ vpxor %%XMM_IN_OUT, %%XTMP, %%XMM_IN_OUT
+%endmacro
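+
+;; Both HEC_REDUCE macros end with the same Barrett pair: multiply the top
+;; bits of the remainder by the precomputed quotient q (imm 0x10 selects the
+;; high qword of k3_q), multiply that estimate by the polynomial in p_res,
+;; and XOR it back into XMM_IN_OUT; the HEC_COMPUTE_* macros below then read
+;; the reduced value out of the low dword with vmovd.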
+
+;; =============================================================================
+;; HEC compute and header update for 32-bit XGEM headers
+%macro HEC_COMPUTE_32 6
+%define %%HEC_IN_OUT %1 ; [in/out] GP register with HEC in LE format
+%define %%GT1 %2 ; [clobbered] temporary GP register
+%define %%XT1 %3 ; [clobbered] temporary xmm register
+%define %%XT2 %4 ; [clobbered] temporary xmm register
+%define %%XT3 %5 ; [clobbered] temporary xmm register
+%define %%XT4 %6 ; [clobbered] temporary xmm register
+
+ mov DWORD(%%GT1), DWORD(%%HEC_IN_OUT)
+ ;; shift out 13 bits of HEC value for CRC computation
+ shr DWORD(%%GT1), 13
+
+ ;; mask out current HEC value to merge with an updated HEC at the end
+ and DWORD(%%HEC_IN_OUT), 0xffff_e000
+
+ ;; prepare the message for CRC computation
+ vmovd %%XT1, DWORD(%%GT1)
+ vpslldq %%XT1, 4 ; shift left by 32-bits
+
+ HEC_REDUCE_64_TO_32 %%XT1, %%XT2, %%XT3, %%XT4
+
+ ;; extract 32-bit value
+ ;; - a 20-bit right shift would normally be done, but bit 0 is reserved for the parity bit (hence 20 - 1)
+ vmovd DWORD(%%GT1), %%XT1
+ shr DWORD(%%GT1), (20 - 1)
+
+ ;; merge header bytes with updated 12-bit CRC value and
+ ;; compute parity
+ or DWORD(%%GT1), DWORD(%%HEC_IN_OUT)
+ popcnt DWORD(%%HEC_IN_OUT), DWORD(%%GT1)
+ and DWORD(%%HEC_IN_OUT), 1
+ or DWORD(%%HEC_IN_OUT), DWORD(%%GT1)
+%endmacro
+
+;; =============================================================================
+;; HEC compute and header update for 64-bit XGEM headers
+%macro HEC_COMPUTE_64 6
+%define %%HEC_IN_OUT %1 ; [in/out] GP register with HEC in LE format
+%define %%GT1 %2 ; [clobbered] temporary GP register
+%define %%XT1 %3 ; [clobbered] temporary xmm register
+%define %%XT2 %4 ; [clobbered] temporary xmm register
+%define %%XT3 %5 ; [clobbered] temporary xmm register
+%define %%XT4 %6 ; [clobbered] temporary xmm register
+
+ mov %%GT1, %%HEC_IN_OUT
+ ;; shift out 13 bits of HEC value for CRC computation
+ shr %%GT1, 13
+
+ ;; mask out current HEC value to merge with an updated HEC at the end
+ and %%HEC_IN_OUT, 0xffff_ffff_ffff_e000
+
+ ;; prepare the message for CRC computation
+ vmovq %%XT1, %%GT1
+ vpslldq %%XT1, 4 ; shift left by 32-bits
+
+ HEC_REDUCE_128_TO_32 %%XT1, %%XT2, %%XT3, %%XT4
+
+ ;; extract 32-bit value
+ ;; - a 20-bit right shift would normally be done, but bit 0 is reserved for the parity bit (hence 20 - 1)
+ vmovd DWORD(%%GT1), %%XT1
+ shr DWORD(%%GT1), (20 - 1)
+
+ ;; merge header bytes with updated 12-bit CRC value and
+ ;; compute parity
+ or %%GT1, %%HEC_IN_OUT
+ popcnt %%HEC_IN_OUT, %%GT1
+ and %%HEC_IN_OUT, 1
+ or %%HEC_IN_OUT, %%GT1
+%endmacro
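+
+;; How the last three instructions work (same scheme in HEC_COMPUTE_32):
+;; shifting by (20 - 1) instead of 20 leaves bit 0 of the merged value free
+;; for the parity bit, POPCNT counts the set bits of header + CRC, AND 1
+;; reduces the count to even parity, and the final OR folds that parity bit
+;; into bit 0 of the updated XGEM header.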
+
+;;; ============================================================================
+;;; PON stitched algorithm of AES128-CTR, CRC and BIP
+;;; - this is master macro that implements encrypt/decrypt API
+;;; - calls other macros and directly uses registers
+;;; defined at the top of the file
+%macro AES128_CTR_PON 2
+%define %%DIR %1 ; [in] direction "ENC" or "DEC"
+%define %%CIPH %2 ; [in] cipher "CTR" or "NO_CTR"
+
+ push r12
+ push r13
+ push r14
+%ifndef LINUX
+ push r15
+%endif
+
+%ifidn %%DIR, ENC
+ ;; by default write back CRC for encryption
+ mov DWORD(write_back_crc), 1
+%else
+ ;; mark decryption as finished
+ mov DWORD(decrypt_not_done), 1
+%endif
+ ;; START BIP (and update HEC if encrypt direction)
+ ;; - load XGEM header (8 bytes) for BIP (not part of encrypted payload)
+ ;; - convert it into LE
+ ;; - update HEC field in the header
+ ;; - convert it into BE
+ ;; - store back the header (with updated HEC)
+ ;; - start BIP
+ ;; (free to use tmp_1, tmp2 and tmp_3 at this stage)
+ mov tmp_2, [job + _src]
+ add tmp_2, [job + _hash_start_src_offset_in_bytes]
+ mov tmp_3, [tmp_2]
+%ifidn %%DIR, ENC
+ bswap tmp_3 ; go to LE
+ HEC_COMPUTE_64 tmp_3, tmp_1, xtmp1, xtmp2, xtmp3, xtmp4
+ mov bytes_to_crc, tmp_3
+ shr bytes_to_crc, (48 + 2) ; PLI = MSB 14 bits
+ bswap tmp_3 ; go back to BE
+ mov [tmp_2], tmp_3
+ vmovq xbip, tmp_3
+%else
+ vmovq xbip, tmp_3
+ mov bytes_to_crc, tmp_3
+ bswap bytes_to_crc ; go to LE
+ shr bytes_to_crc, (48 + 2) ; PLI = MSB 14 bits
+%endif
+ cmp bytes_to_crc, 4
+ ja %%_crc_not_zero
+ ;; XGEM payload shorter or equal to 4 bytes
+%ifidn %%DIR, ENC
+ ;; On encryption, do not write Ethernet FCS back into the message
+ xor DWORD(write_back_crc), DWORD(write_back_crc)
+%else
+ ;; Mark decryption as not finished
+ ;; - Ethernet FCS is not computed
+ ;; - decrypt + BIP to be done at the end
+ xor DWORD(decrypt_not_done), DWORD(decrypt_not_done)
+%endif
+ mov DWORD(bytes_to_crc), 4 ; it will be zero after the next line (avoid jmp)
+%%_crc_not_zero:
+ sub bytes_to_crc, 4 ; subtract size of the CRC itself
+
+%ifidn %%CIPH, CTR
+ ;; - read 16 bytes of IV
+ ;; - convert to little endian format
+ ;; - save least significant 8 bytes in GP register for overflow check
+ mov tmp, [job + _iv]
+ vmovdqu xcounter, [tmp]
+ vpshufb xcounter, [rel byteswap_const]
+ vmovq ctr_check, xcounter
+%endif
+
+ ;; get input buffer (after XGEM header)
+ mov p_in, [job + _src]
+ add p_in, [job + _cipher_start_src_offset_in_bytes]
+
+ ;; get output buffer
+ mov p_out, [job + _dst]
+
+%ifidn %%CIPH, CTR
+ ;; get key pointers
+ mov p_keys, [job + _aes_enc_key_expanded]
+%endif
+
+ ;; initial CRC value
+ vmovdqa xcrc, [rel init_crc_value]
+
+ ;; load CRC constants
+ vmovdqa xcrckey, [rel rk1] ; rk1 and rk2 in xcrckey
+
+ ;; get number of bytes to cipher
+%ifidn %%CIPH, CTR
+ mov num_bytes, [job + _msg_len_to_cipher_in_bytes]
+%else
+ ;; Message length to cipher is 0
+ ;; - length is obtained from message length to hash (BIP) minus XGEM header size
+ mov num_bytes, [job + _msg_len_to_hash_in_bytes]
+ sub num_bytes, 8
+%endif
+ or bytes_to_crc, bytes_to_crc
+ jz %%_crc_done
+
+ cmp bytes_to_crc, 32
+ jae %%_at_least_32_bytes
+
+%ifidn %%DIR, DEC
+ ;; decrypt the buffer first
+ mov tmp, num_bytes
+ CIPHER_BIP_REST tmp, %%DIR, %%CIPH, p_in, p_out, p_keys, xbip, \
+ xcounter, xtmp1, xtmp2, xtmp3, ctr_check, tmp2, tmp3
+
+ ;; correct in/out pointers - go back to start of the buffers
+ mov tmp, num_bytes
+ and tmp, -16 ; partial block handler doesn't increment pointers
+ sub p_in, tmp
+ sub p_out, tmp
+%endif ; DECRYPTION
+
+ ;; less than 32 bytes
+ cmp bytes_to_crc, 16
+ je %%_exact_16_left
+ jl %%_less_than_16_left
+ ;; load the plaintext
+%ifidn %%DIR, ENC
+ vmovdqu xtmp1, [p_in]
+%else
+ vmovdqu xtmp1, [p_out]
+%endif
+ vpxor xcrc, xtmp1 ; xor the initial crc value
+ jmp %%_crc_two_xmms
+
+%%_exact_16_left:
+%ifidn %%DIR, ENC
+ vmovdqu xtmp1, [p_in]
+%else
+ vmovdqu xtmp1, [p_out]
+%endif
+ vpxor xcrc, xtmp1 ; xor the initial crc value
+ jmp %%_128_done
+
+%%_less_than_16_left:
+%ifidn %%DIR, ENC
+ simd_load_avx_15_1 xtmp1, p_in, bytes_to_crc
+%else
+ simd_load_avx_15_1 xtmp1, p_out, bytes_to_crc
+%endif
+ vpxor xcrc, xtmp1 ; xor the initial crc value
+
+ lea tmp, [rel pshufb_shf_table]
+ vmovdqu xtmp1, [tmp + bytes_to_crc]
+ vpshufb xcrc, xtmp1
+ jmp %%_128_done
+
+%%_at_least_32_bytes:
+ cmp bytes_to_crc, 64
+ jb %%_crc_below_64_bytes
+
+ DO_PON_4 p_keys, NUM_AES_ROUNDS, xcounter, p_in, p_out, xbip, \
+ xcrc, xcrckey, xtmp1, xtmp2, xtmp3, xtmp4, xtmp5, xtmp6, \
+ xtmp7, xtmp8, xtmp9, xtmp10, xtmp11, first_crc, %%DIR, \
+ %%CIPH, ctr_check
+ sub num_bytes, 64
+ sub bytes_to_crc, 64
+%ifidn %%DIR, ENC
+ jz %%_128_done
+%endif
+
+ align 16
+%%_main_loop_64:
+ cmp bytes_to_crc, 64
+ jb %%_main_loop
+
+ DO_PON_4 p_keys, NUM_AES_ROUNDS, xcounter, p_in, p_out, xbip, \
+ xcrc, xcrckey, xtmp1, xtmp2, xtmp3, xtmp4, xtmp5, xtmp6, \
+ xtmp7, xtmp8, xtmp9, xtmp10, xtmp11, next_crc, %%DIR, \
+ %%CIPH, ctr_check
+ sub num_bytes, 64
+ sub bytes_to_crc, 64
+%ifidn %%DIR, ENC
+ jz %%_128_done
+%endif
+ jmp %%_main_loop_64
+
+%%_crc_below_64_bytes:
+ DO_PON p_keys, NUM_AES_ROUNDS, xcounter, p_in, p_out, xbip, \
+ xcrc, xcrckey, xtmp1, xtmp2, xtmp3, first_crc, %%DIR, \
+ %%CIPH, ctr_check
+ sub num_bytes, 16
+ sub bytes_to_crc, 16
+
+ align 16
+%%_main_loop:
+ cmp bytes_to_crc, 16
+ jb %%_exit_loop
+ DO_PON p_keys, NUM_AES_ROUNDS, xcounter, p_in, p_out, xbip, \
+ xcrc, xcrckey, xtmp1, xtmp2, xtmp3, next_crc, %%DIR, \
+ %%CIPH, ctr_check
+ sub num_bytes, 16
+ sub bytes_to_crc, 16
+%ifidn %%DIR, ENC
+ jz %%_128_done
+%endif
+ jmp %%_main_loop
+
+%%_exit_loop:
+
+%ifidn %%DIR, DEC
+ ;; decrypt rest of the message including CRC and optional padding
+ mov tmp, num_bytes
+
+ CIPHER_BIP_REST tmp, %%DIR, %%CIPH, p_in, p_out, p_keys, xbip, \
+ xcounter, xtmp1, xtmp2, xtmp3, ctr_check, tmp2, tmp3
+
+ mov tmp, num_bytes ; correct in/out pointers - to point before cipher & BIP
+ and tmp, -16 ; partial block handler doesn't increment pointers
+ sub p_in, tmp
+ sub p_out, tmp
+
+ or bytes_to_crc, bytes_to_crc
+ jz %%_128_done
+%endif ; DECRYPTION
+
+ ;; Partial bytes left - complete CRC calculation
+%%_crc_two_xmms:
+ lea tmp, [rel pshufb_shf_table]
+ vmovdqu xtmp2, [tmp + bytes_to_crc]
+ ;; @note: in case of in-place operation (default) this load
+ ;; creates a store-to-load forwarding problem.
+ ;; However, there is no easy way to address it at the moment.
+%ifidn %%DIR, ENC
+ vmovdqu xtmp1, [p_in - 16 + bytes_to_crc] ; xtmp1 = data for CRC
+%else
+ vmovdqu xtmp1, [p_out - 16 + bytes_to_crc] ; xtmp1 = data for CRC
+%endif
+ vmovdqa xtmp3, xcrc
+ vpshufb xcrc, xtmp2 ; top num_bytes with LSB xcrc
+ vpxor xtmp2, [rel mask3]
+ vpshufb xtmp3, xtmp2 ; bottom (16 - num_bytes) with MSB xcrc
+
+ ;; data bytes_to_crc (top) blended with MSB bytes of CRC (bottom)
+ vpblendvb xtmp3, xtmp1, xtmp2
+
+ ;; final CRC calculation
+ vpclmulqdq xtmp1, xcrc, xcrckey, 0x01
+ vpclmulqdq xcrc, xcrc, xcrckey, 0x10
+ vpxor xcrc, xtmp3
+ vpxor xcrc, xtmp1
+
+%%_128_done:
+ CRC32_REDUCE_128_TO_32 ethernet_fcs, xcrc, xtmp1, xtmp2, xcrckey
+
+%%_crc_done:
+ ;; @todo - store-to-load problem in ENC case (to be fixed later)
+ ;; - store CRC in input buffer and authentication tag output
+ ;; - encrypt remaining bytes
+%ifidn %%DIR, ENC
+ or DWORD(write_back_crc), DWORD(write_back_crc)
+ jz %%_skip_crc_write_back
+ mov [p_in + bytes_to_crc], DWORD(ethernet_fcs)
+%%_skip_crc_write_back:
+%endif
+ mov tmp, [job + _auth_tag_output]
+ mov [tmp + 4], DWORD(ethernet_fcs)
+
+ or num_bytes, num_bytes
+ jz %%_do_not_cipher_the_rest
+
+ ;; encrypt rest of the message
+ ;; - partial bytes including CRC and optional padding
+ ;; decrypt rest of the message
+ ;; - this may only happen when XGEM payload is short and padding is added
+%ifidn %%DIR, DEC
+ or DWORD(decrypt_not_done), DWORD(decrypt_not_done)
+ jnz %%_do_not_cipher_the_rest
+%endif
+ CIPHER_BIP_REST num_bytes, %%DIR, %%CIPH, p_in, p_out, p_keys, xbip, \
+ xcounter, xtmp1, xtmp2, xtmp3, ctr_check, tmp2, tmp3
+
+%%_do_not_cipher_the_rest:
+
+ ;; finalize BIP
+ vpsrldq xtmp1, xbip, 4
+ vpsrldq xtmp2, xbip, 8
+ vpsrldq xtmp3, xbip, 12
+ vpxor xtmp1, xtmp1, xtmp2
+ vpxor xbip, xbip, xtmp3
+ vpxor xbip, xbip, xtmp1
+ vmovd [tmp], xbip ; tmp already holds _auth_tag_output
+
+ ;; set job status
+ or dword [job + _status], STS_COMPLETED
+
+ ;; return job
+ mov rax, job
+
+%ifndef LINUX
+ pop r15
+%endif
+ pop r14
+ pop r13
+ pop r12
+%endmacro ; AES128_CTR_PON
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;;; submit_job_pon_enc_avx(JOB_AES_HMAC *job)
+align 64
+MKGLOBAL(submit_job_pon_enc_avx,function,internal)
+submit_job_pon_enc_avx:
+ AES128_CTR_PON ENC, CTR
+ ret
+
+;;; submit_job_pon_dec_avx(JOB_AES_HMAC *job)
+align 64
+MKGLOBAL(submit_job_pon_dec_avx,function,internal)
+submit_job_pon_dec_avx:
+ AES128_CTR_PON DEC, CTR
+ ret
+
+;;; submit_job_pon_enc_no_ctr_avx(JOB_AES_HMAC *job)
+align 64
+MKGLOBAL(submit_job_pon_enc_no_ctr_avx,function,internal)
+submit_job_pon_enc_no_ctr_avx:
+ AES128_CTR_PON ENC, NO_CTR
+ ret
+
+;;; submit_job_pon_dec_no_ctr_avx(JOB_AES_HMAC *job)
+align 64
+MKGLOBAL(submit_job_pon_dec_no_ctr_avx,function,internal)
+submit_job_pon_dec_no_ctr_avx:
+ AES128_CTR_PON DEC, NO_CTR
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/avx/sha1_mult_avx.asm b/src/spdk/intel-ipsec-mb/avx/sha1_mult_avx.asm
new file mode 100644
index 000000000..b850a227b
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx/sha1_mult_avx.asm
@@ -0,0 +1,434 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%include "include/os.asm"
+%include "mb_mgr_datastruct.asm"
+
+section .data
+default rel
+
+align 16
+PSHUFFLE_BYTE_FLIP_MASK: ;ddq 0x0c0d0e0f08090a0b0405060700010203
+ dq 0x0405060700010203, 0x0c0d0e0f08090a0b
+K00_19: ;ddq 0x5A8279995A8279995A8279995A827999
+ dq 0x5A8279995A827999, 0x5A8279995A827999
+K20_39: ;ddq 0x6ED9EBA16ED9EBA16ED9EBA16ED9EBA1
+ dq 0x6ED9EBA16ED9EBA1, 0x6ED9EBA16ED9EBA1
+K40_59: ;ddq 0x8F1BBCDC8F1BBCDC8F1BBCDC8F1BBCDC
+ dq 0x8F1BBCDC8F1BBCDC, 0x8F1BBCDC8F1BBCDC
+K60_79: ;ddq 0xCA62C1D6CA62C1D6CA62C1D6CA62C1D6
+ dq 0xCA62C1D6CA62C1D6, 0xCA62C1D6CA62C1D6
+
+section .text
+
+;; code to compute quad SHA1 using AVX
+;; derived from ...\sha1_multiple\sha1_quad4.asm
+;; variation of sha1_mult2.asm : clobbers all xmm regs, rcx left intact
+;; rbx, rsi, rdi, rbp, r12-r15 left intact
+;; This version is not safe to call from C/C++
+
+;; Stack must be aligned to 16 bytes before call
+;; Windows clobbers: rax rdx r8 r9 r10 r11
+;; Windows preserves: rbx rcx rsi rdi rbp r12 r13 r14 r15
+;;
+;; Linux clobbers: rax rsi r8 r9 r10 r11
+;; Linux preserves: rbx rcx rdx rdi rbp r12 r13 r14 r15
+;;
+;; clobbers xmm0-15
+
+; transpose r0, r1, r2, r3, t0, t1
+; "transpose" data in {r0..r3} using temps {t0..t3}
+; Input looks like: {r0 r1 r2 r3}
+; r0 = {a3 a2 a1 a0}
+; r1 = {b3 b2 b1 b0}
+; r2 = {c3 c2 c1 c0}
+; r3 = {d3 d2 d1 d0}
+;
+; output looks like: {t0 r1 r0 r3}
+; t0 = {d0 c0 b0 a0}
+; r1 = {d1 c1 b1 a1}
+; r0 = {d2 c2 b2 a2}
+; r3 = {d3 c3 b3 a3}
+;
+%macro TRANSPOSE 6
+%define %%r0 %1
+%define %%r1 %2
+%define %%r2 %3
+%define %%r3 %4
+%define %%t0 %5
+%define %%t1 %6
+ vshufps %%t0, %%r0, %%r1, 0x44 ; t0 = {b1 b0 a1 a0}
+ vshufps %%r0, %%r0, %%r1, 0xEE ; r0 = {b3 b2 a3 a2}
+
+ vshufps %%t1, %%r2, %%r3, 0x44 ; t1 = {d1 d0 c1 c0}
+ vshufps %%r2, %%r2, %%r3, 0xEE ; r2 = {d3 d2 c3 c2}
+
+ vshufps %%r1, %%t0, %%t1, 0xDD ; r1 = {d1 c1 b1 a1}
+
+ vshufps %%r3, %%r0, %%r2, 0xDD ; r3 = {d3 c3 b3 a3}
+
+ vshufps %%r0, %%r0, %%r2, 0x88 ; r0 = {d2 c2 b2 a2}
+ vshufps %%t0, %%t0, %%t1, 0x88 ; t0 = {d0 c0 b0 a0}
+%endmacro
+;;
+;; Magic functions defined in FIPS 180-1
+;;
+; macro MAGIC_F0 F,B,C,D,T ;; F = (D ^ (B & (C ^ D)))
+%macro MAGIC_F0 5
+%define %%regF %1
+%define %%regB %2
+%define %%regC %3
+%define %%regD %4
+%define %%regT %5
+ vpxor %%regF, %%regC,%%regD
+ vpand %%regF, %%regF,%%regB
+ vpxor %%regF, %%regF,%%regD
+%endmacro
+
+; macro MAGIC_F1 F,B,C,D,T ;; F = (B ^ C ^ D)
+%macro MAGIC_F1 5
+%define %%regF %1
+%define %%regB %2
+%define %%regC %3
+%define %%regD %4
+%define %%regT %5
+ vpxor %%regF,%%regD,%%regC
+ vpxor %%regF,%%regF,%%regB
+%endmacro
+
+; macro MAGIC_F2 F,B,C,D,T ;; F = ((B & C) | (B & D) | (C & D))
+%macro MAGIC_F2 5
+%define %%regF %1
+%define %%regB %2
+%define %%regC %3
+%define %%regD %4
+%define %%regT %5
+ vpor %%regF,%%regB,%%regC
+ vpand %%regT,%%regB,%%regC
+ vpand %%regF,%%regF,%%regD
+ vpor %%regF,%%regF,%%regT
+%endmacro
+
+; macro MAGIC_F3 F,B,C,D,T ;; F = (B ^ C ^ D)
+%macro MAGIC_F3 5
+%define %%regF %1
+%define %%regB %2
+%define %%regC %3
+%define %%regD %4
+%define %%regT %5
+ MAGIC_F1 %%regF,%%regB,%%regC,%%regD,%%regT
+%endmacro
+
+; PROLD reg, imm, tmp
+%macro PROLD 3
+%define %%reg %1
+%define %%imm %2
+%define %%tmp %3
+ vpsrld %%tmp, %%reg, (32-(%%imm))
+ vpslld %%reg, %%reg, %%imm
+ vpor %%reg, %%reg, %%tmp
+%endmacro
+
+; non-destructive
+; PROLD_nd reg, imm, tmp, src
+%macro PROLD_nd 4
+%define %%reg %1
+%define %%imm %2
+%define %%tmp %3
+%define %%src %4
+ vpsrld %%tmp, %%src, (32-(%%imm))
+ vpslld %%reg, %%src, %%imm
+ vpor %%reg, %%reg, %%tmp
+%endmacro
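+
+;; Both PROLD variants implement a 32-bit rotate-left by %%imm with a
+;; shift-left / shift-right / OR sequence (AVX has no packed rotate
+;; instruction); PROLD_nd additionally leaves the source register intact.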
+
+%macro SHA1_STEP_00_15 10
+%define %%regA %1
+%define %%regB %2
+%define %%regC %3
+%define %%regD %4
+%define %%regE %5
+%define %%regT %6
+%define %%regF %7
+%define %%memW %8
+%define %%immCNT %9
+%define %%MAGIC %10
+ vpaddd %%regE, %%regE,%%immCNT
+ vpaddd %%regE, %%regE,[rsp + (%%memW * 16)]
+ PROLD_nd %%regT,5, %%regF,%%regA
+ vpaddd %%regE, %%regE,%%regT
+ %%MAGIC %%regF,%%regB,%%regC,%%regD,%%regT ;; FUN = MAGIC_Fi(B,C,D)
+ PROLD %%regB,30, %%regT
+ vpaddd %%regE, %%regE,%%regF
+%endmacro
+
+%macro SHA1_STEP_16_79 10
+%define %%regA %1
+%define %%regB %2
+%define %%regC %3
+%define %%regD %4
+%define %%regE %5
+%define %%regT %6
+%define %%regF %7
+%define %%memW %8
+%define %%immCNT %9
+%define %%MAGIC %10
+ vpaddd %%regE, %%regE,%%immCNT
+
+ vmovdqa W14, [rsp + ((%%memW - 14) & 15) * 16]
+ vpxor W16, W16, W14
+ vpxor W16, W16, [rsp + ((%%memW - 8) & 15) * 16]
+ vpxor W16, W16, [rsp + ((%%memW - 3) & 15) * 16]
+
+ vpsrld %%regF, W16, (32-1)
+ vpslld W16, W16, 1
+ vpor %%regF, %%regF, W16
+ ROTATE_W
+
+ vmovdqa [rsp + ((%%memW - 0) & 15) * 16],%%regF
+ vpaddd %%regE, %%regE,%%regF
+
+ PROLD_nd %%regT,5, %%regF, %%regA
+ vpaddd %%regE, %%regE,%%regT
+ %%MAGIC %%regF,%%regB,%%regC,%%regD,%%regT ;; FUN = MAGIC_Fi(B,C,D)
+ PROLD %%regB,30, %%regT
+ vpaddd %%regE,%%regE,%%regF
+%endmacro
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; FRAMESZ must be an odd multiple of 8
+%define FRAMESZ 16*16 + 8
+
+%define VMOVPS vmovdqu
+
+%ifdef LINUX
+%define arg1 rdi
+%define arg2 rsi
+%else
+%define arg1 rcx
+%define arg2 rdx
+%endif
+
+%define inp0 r8
+%define inp1 r9
+%define inp2 r10
+%define inp3 r11
+
+%define IDX rax
+
+%define A xmm0
+%define B xmm1
+%define C xmm2
+%define D xmm3
+%define E xmm4
+%define F xmm5 ; tmp
+%define G xmm6 ; tmp
+
+%define TMP G
+%define FUN F
+%define K xmm7
+
+%define AA xmm8
+%define BB xmm9
+%define CC xmm10
+%define DD xmm11
+%define EE xmm12
+
+%define T0 xmm6
+%define T1 xmm7
+%define T2 xmm8
+%define T3 xmm9
+%define T4 xmm10
+%define T5 xmm11
+
+%define W14 xmm13
+%define W15 xmm14
+%define W16 xmm15
+
+%macro ROTATE_ARGS 0
+%xdefine TMP_ E
+%xdefine E D
+%xdefine D C
+%xdefine C B
+%xdefine B A
+%xdefine A TMP_
+%endm
+
+%macro ROTATE_W 0
+%xdefine TMP_ W16
+%xdefine W16 W15
+%xdefine W15 W14
+%xdefine W14 TMP_
+%endm
+
+align 32
+
+; XMM registers are clobbered. Saving/restoring must be done at a higher level
+
+; void sha1_mult_avx(SHA1_ARGS *args, UINT32 size_in_blocks);
+; arg 1 : rcx (Windows) / rdi (Linux) : pointer to args
+; arg 2 : rdx (Windows) / rsi (Linux) : size (in blocks) ;; assumed to be >= 1
+MKGLOBAL(sha1_mult_avx,function,internal)
+sha1_mult_avx:
+
+ sub rsp, FRAMESZ
+
+ ;; Initialize digests
+ vmovdqa A, [arg1 + 0*SHA1_DIGEST_ROW_SIZE]
+ vmovdqa B, [arg1 + 1*SHA1_DIGEST_ROW_SIZE]
+ vmovdqa C, [arg1 + 2*SHA1_DIGEST_ROW_SIZE]
+ vmovdqa D, [arg1 + 3*SHA1_DIGEST_ROW_SIZE]
+ vmovdqa E, [arg1 + 4*SHA1_DIGEST_ROW_SIZE]
+
+ ;; transpose input onto stack
+ mov inp0,[arg1 + _data_ptr_sha1 + 0*PTR_SZ]
+ mov inp1,[arg1 + _data_ptr_sha1 + 1*PTR_SZ]
+ mov inp2,[arg1 + _data_ptr_sha1 + 2*PTR_SZ]
+ mov inp3,[arg1 + _data_ptr_sha1 + 3*PTR_SZ]
+
+ xor IDX, IDX
+lloop:
+ vmovdqa F, [rel PSHUFFLE_BYTE_FLIP_MASK]
+%assign I 0
+%rep 4
+ VMOVPS T2,[inp0+IDX]
+ VMOVPS T1,[inp1+IDX]
+ VMOVPS T4,[inp2+IDX]
+ VMOVPS T3,[inp3+IDX]
+ TRANSPOSE T2, T1, T4, T3, T0, T5
+ vpshufb T0, T0, F
+ vmovdqa [rsp+(I*4+0)*16],T0
+ vpshufb T1, T1, F
+ vmovdqa [rsp+(I*4+1)*16],T1
+ vpshufb T2, T2, F
+ vmovdqa [rsp+(I*4+2)*16],T2
+ vpshufb T3, T3, F
+ vmovdqa [rsp+(I*4+3)*16],T3
+ add IDX, 4*4
+%assign I (I+1)
+%endrep
+
+ ; save old digests
+ vmovdqa AA, A
+ vmovdqa BB, B
+ vmovdqa CC, C
+ vmovdqa DD, D
+ vmovdqa EE, E
+
+;;
+;; perform 0-79 steps
+;;
+ vmovdqa K, [rel K00_19]
+;; do rounds 0...15
+%assign I 0
+%rep 16
+ SHA1_STEP_00_15 A,B,C,D,E, TMP,FUN, I, K, MAGIC_F0
+ ROTATE_ARGS
+%assign I (I+1)
+%endrep
+
+;; do rounds 16...19
+ vmovdqa W16, [rsp + ((16 - 16) & 15) * 16]
+ vmovdqa W15, [rsp + ((16 - 15) & 15) * 16]
+%rep 4
+ SHA1_STEP_16_79 A,B,C,D,E, TMP,FUN, I, K, MAGIC_F0
+ ROTATE_ARGS
+%assign I (I+1)
+%endrep
+
+;; do rounds 20...39
+ vmovdqa K, [rel K20_39]
+%rep 20
+ SHA1_STEP_16_79 A,B,C,D,E, TMP,FUN, I, K, MAGIC_F1
+ ROTATE_ARGS
+%assign I (I+1)
+%endrep
+
+;; do rounds 40...59
+ vmovdqa K, [rel K40_59]
+%rep 20
+ SHA1_STEP_16_79 A,B,C,D,E, TMP,FUN, I, K, MAGIC_F2
+ ROTATE_ARGS
+%assign I (I+1)
+%endrep
+
+;; do rounds 60...79
+ vmovdqa K, [rel K60_79]
+%rep 20
+ SHA1_STEP_16_79 A,B,C,D,E, TMP,FUN, I, K, MAGIC_F3
+ ROTATE_ARGS
+%assign I (I+1)
+%endrep
+
+ vpaddd A,A,AA
+ vpaddd B,B,BB
+ vpaddd C,C,CC
+ vpaddd D,D,DD
+ vpaddd E,E,EE
+
+ sub arg2, 1
+ jne lloop
+
+ ; write out digests
+ vmovdqa [arg1 + 0*SHA1_DIGEST_ROW_SIZE], A
+ vmovdqa [arg1 + 1*SHA1_DIGEST_ROW_SIZE], B
+ vmovdqa [arg1 + 2*SHA1_DIGEST_ROW_SIZE], C
+ vmovdqa [arg1 + 3*SHA1_DIGEST_ROW_SIZE], D
+ vmovdqa [arg1 + 4*SHA1_DIGEST_ROW_SIZE], E
+
+ ; update input pointers
+ add inp0, IDX
+ mov [arg1 + _data_ptr_sha1 + 0*PTR_SZ], inp0
+ add inp1, IDX
+ mov [arg1 + _data_ptr_sha1 + 1*PTR_SZ], inp1
+ add inp2, IDX
+ mov [arg1 + _data_ptr_sha1 + 2*PTR_SZ], inp2
+ add inp3, IDX
+ mov [arg1 + _data_ptr_sha1 + 3*PTR_SZ], inp3
+
+ ;;;;;;;;;;;;;;;;
+ ;; Postamble
+
+ ;; Clear all stack containing part of message
+%ifdef SAFE_DATA
+ vpxor xmm0, xmm0
+%assign i 0
+%rep 16
+ vmovdqa [rsp + i*16], xmm0
+%assign i (i+1)
+%endrep
+%endif
+
+ add rsp, FRAMESZ
+
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/avx/sha1_one_block_avx.asm b/src/spdk/intel-ipsec-mb/avx/sha1_one_block_avx.asm
new file mode 100644
index 000000000..090285e54
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx/sha1_one_block_avx.asm
@@ -0,0 +1,501 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+; SHA1 code, hybrid, rolled, interleaved
+; Uses AVX instructions
+%include "include/os.asm"
+
+section .data
+default rel
+align 16
+PSHUFFLE_BYTE_FLIP_MASK: ;ddq 0x0c0d0e0f08090a0b0405060700010203
+ dq 0x0405060700010203, 0x0c0d0e0f08090a0b
+K00_19: ;ddq 0x5A8279995A8279995A8279995A827999
+ dq 0x5A8279995A827999, 0x5A8279995A827999
+K20_39: ;ddq 0x6ED9EBA16ED9EBA16ED9EBA16ED9EBA1
+ dq 0x6ED9EBA16ED9EBA1, 0x6ED9EBA16ED9EBA1
+K40_59: ;ddq 0x8F1BBCDC8F1BBCDC8F1BBCDC8F1BBCDC
+ dq 0x8F1BBCDC8F1BBCDC, 0x8F1BBCDC8F1BBCDC
+K60_79: ;ddq 0xCA62C1D6CA62C1D6CA62C1D6CA62C1D6
+ dq 0xCA62C1D6CA62C1D6, 0xCA62C1D6CA62C1D6
+
+section .text
+
+%define VMOVDQ vmovdqu ;; assume buffers not aligned
+
+%ifdef LINUX
+%define INP rdi ; 1st arg
+%define CTX rsi ; 2nd arg
+%define REG3 edx
+%define REG4 ecx
+%else
+%define INP rcx ; 1st arg
+%define CTX rdx ; 2nd arg
+%define REG3 edi
+%define REG4 esi
+%endif
+
+%define FRAMESZ 3*16 + 1*8
+%define _RSP FRAMESZ-1*8 + rsp
+
+%define a eax
+%define b ebx
+%define c REG3
+%define d REG4
+%define e r8d
+%define T1 r9d
+%define f r10d
+%define RND r11d
+%define g r12d
+%define h r13d
+
+%define XTMP0 xmm0
+%define XTMP1 xmm1
+%define XK xmm2
+
+%xdefine X0 xmm3
+%xdefine X1 xmm4
+%xdefine X2 xmm5
+%xdefine X3 xmm6
+%xdefine X4 xmm7
+
+%define XFER xmm8
+
+%define SZ 4
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; Define Macros
+
+%macro rotate_Xs 0
+%xdefine X_ X0
+%xdefine X0 X1
+%xdefine X1 X2
+%xdefine X2 X3
+%xdefine X3 X4
+%xdefine X4 X_
+%endmacro
+
+%macro ROTATE_ARGS 0
+%xdefine TMP_ h
+%xdefine h g
+%xdefine g f
+%xdefine f e
+%xdefine e d
+%xdefine d c
+%xdefine c b
+%xdefine b a
+%xdefine a TMP_
+%endm
+
+
+;; Magic functions defined in FIPS 180-1
+;;
+; macro MAGIC_F0 F,B,C,D,T ;; F = (D ^ (B & (C ^ D)))
+%macro MAGIC_F0 5
+%define %%regF %1
+%define %%regB %2
+%define %%regC %3
+%define %%regD %4
+%define %%regT %5
+ mov %%regF,%%regC
+ xor %%regF,%%regD
+ and %%regF,%%regB
+ xor %%regF,%%regD
+%endmacro
+
+; macro MAGIC_F1 F,B,C,D,T ;; F = (B ^ C ^ D)
+%macro MAGIC_F1 5
+%define %%regF %1
+%define %%regB %2
+%define %%regC %3
+%define %%regD %4
+%define %%regT %5
+ mov %%regF,%%regD
+ xor %%regF,%%regC
+ xor %%regF,%%regB
+%endmacro
+
+; macro MAGIC_F2 F,B,C,D,T ;; F = ((B & C) | (B & D) | (C & D))
+%macro MAGIC_F2 5
+%define %%regF %1
+%define %%regB %2
+%define %%regC %3
+%define %%regD %4
+%define %%regT %5
+ mov %%regF,%%regB
+ mov %%regT,%%regB
+ or %%regF,%%regC
+ and %%regT,%%regC
+ and %%regF,%%regD
+ or %%regF,%%regT
+%endmacro
+
+; macro MAGIC_F3 F,B,C,D,T ;; F = (B ^ C ^ D)
+%macro MAGIC_F3 5
+%define %%regF %1
+%define %%regB %2
+%define %%regC %3
+%define %%regD %4
+%define %%regT %5
+ MAGIC_F1 %%regF,%%regB,%%regC,%%regD,%%regT
+%endmacro
+
+;; input is T1
+%macro ROUND 1
+%define %%MAGIC %1
+ add e,T1
+ mov T1,a
+ rol T1,5
+ add e,T1
+ %%MAGIC h,b,c,d,T1 ;; FUN = MAGIC_Fi(B,C,D)
+ rol b,30
+ add h,e
+ROTATE_ARGS
+%endmacro
+
+%macro do_4i 1
+ vpaddd XFER, XK, X0
+ vpextrd T1, XFER, 0
+ ;ROUND %1
+ add e,T1
+ ;SCHEDULE_4
+ vpalignr XTMP0, X1, X0, 8 ; XTMP0 = W[-14]
+ mov T1,a
+ rol T1,5
+ vpxor XTMP1, X2, X0 ; XTMP1 = W[-8] ^ W[-16]
+ add e,T1
+ vpxor XTMP0, XTMP0, XTMP1 ; XTMP0 = W[-8] ^ W[-14] ^ W[-16]
+ %1 h,b,c,d,T1 ;; FUN = MAGIC_Fi(B,C,D)
+
+ ;; Finish low half
+ rol b,30
+ vpsrldq X4, X3, 4 ; X4 = W[-3] {xxBA}
+ add h,e
+ROTATE_ARGS
+ vpextrd T1, XFER, 1
+ ;ROUND %1
+ add e,T1
+ vpxor X4, X4, XTMP0
+ mov T1,a
+ rol T1,5
+ ;; rotate X4 left 1
+ vpsrld XTMP1, X4, (32-1)
+ add e,T1
+ vpslld X4, X4, 1
+ %1 h,b,c,d,T1 ;; FUN = MAGIC_Fi(B,C,D)
+ vpxor X4, X4, XTMP1 ; X4 = W[0] {xxBA}
+ rol b,30
+ add h,e
+ROTATE_ARGS
+ vpextrd T1, XFER, 2
+ ;ROUND %1
+ add e,T1
+ mov T1,a
+
+ ;; Finish high half
+ vpalignr XTMP1, X4, X3, 4 ; XTMP1 = w[-3] {DCxx}
+ rol T1,5
+ add e,T1
+ vpxor XTMP0, XTMP0, XTMP1
+ %1 h,b,c,d,T1 ;; FUN = MAGIC_Fi(B,C,D)
+ ;; rotate XTMP0 left 1
+ vpsrld XTMP1, XTMP0, (32-1)
+ rol b,30
+ add h,e
+ROTATE_ARGS
+ vpextrd T1, XFER, 3
+ ;ROUND %1
+ add e,T1
+ mov T1,a
+ vpslld XTMP0, XTMP0, 1
+ rol T1,5
+ add e,T1
+ vpxor XTMP0, XTMP0, XTMP1 ; XTMP0 = W[0] {DCxx}
+ %1 h,b,c,d,T1 ;; FUN = MAGIC_Fi(B,C,D)
+ ;; COMBINE HALVES
+ vshufps X4, X4, XTMP0, 11100100b ; X4 = X[0] {DCBA}
+ rol b,30
+ add h,e
+
+ rotate_Xs
+ROTATE_ARGS
+%endmacro
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; void sha1_block_avx(void *input_data, UINT32 digest[5])
+;; arg 1 : (in) pointer to input data
+;; arg 2 : (in/out) pointer to read/write digest
+MKGLOBAL(sha1_block_avx,function,internal)
+align 32
+sha1_block_avx:
+ push rbx
+ push rsi
+ push rdi
+ push r12
+ push r13
+
+ vmovdqa XTMP0, [rel PSHUFFLE_BYTE_FLIP_MASK]
+
+%ifndef LINUX
+ mov rax,rsp ; copy rsp
+ sub rsp,FRAMESZ
+ and rsp,-16 ; align stack frame
+ mov [_RSP],rax ; save copy of rsp
+ vmovdqa [rsp + 0 * 16], xmm6
+ vmovdqa [rsp + 1 * 16], xmm7
+ vmovdqa [rsp + 2 * 16], xmm8
+%endif
+
+ VMOVDQ X0, [INP + 0*16]
+ VMOVDQ X1, [INP + 1*16]
+
+ ;; load next message block
+ VMOVDQ X2, [INP + 2*16]
+ VMOVDQ X3, [INP + 3*16]
+
+ ;; set up a-f based on h0-h4
+ ;; byte swap first 16 dwords
+ mov a, [SZ*0 + CTX]
+ vpshufb X0, XTMP0
+ mov b, [SZ*1 + CTX]
+ vpshufb X1, XTMP0
+ mov c, [SZ*2 + CTX]
+ vpshufb X2, XTMP0
+ mov d, [SZ*3 + CTX]
+ vpshufb X3, XTMP0
+ mov e, [SZ*4 + CTX]
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;; do rounds 00-19
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ vmovdqa XK, [rel K00_19]
+ mov RND, 3
+ ROTATE_ARGS
+ ROTATE_ARGS
+ ROTATE_ARGS
+ ROTATE_ARGS
+ rotate_Xs
+ rotate_Xs
+ rotate_Xs
+ rotate_Xs
+ jmp loop1_5
+align 16
+loop1:
+
+ do_4i MAGIC_F0
+
+loop1_5:
+ do_4i MAGIC_F0
+
+ rotate_Xs
+ rotate_Xs
+ rotate_Xs
+ rotate_Xs
+ vmovdqa X0, X2
+ vmovdqa X2, X4
+ vmovdqa X4, X1
+ vmovdqa X1, X3
+
+ sub RND, 1
+ jne loop1
+
+ rotate_Xs
+ rotate_Xs
+ rotate_Xs
+ rotate_Xs
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;; end rounds 00-19
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;; do rounds 20-39
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ vmovdqa XK, [rel K20_39]
+ mov RND, 3
+ ROTATE_ARGS
+ ROTATE_ARGS
+ ROTATE_ARGS
+ ROTATE_ARGS
+ rotate_Xs
+ rotate_Xs
+ rotate_Xs
+ rotate_Xs
+ jmp loop2_5
+align 16
+loop2:
+
+ do_4i MAGIC_F1
+
+loop2_5:
+ do_4i MAGIC_F1
+
+ rotate_Xs
+ rotate_Xs
+ rotate_Xs
+ rotate_Xs
+ vmovdqa X0, X2
+ vmovdqa X2, X4
+ vmovdqa X4, X1
+ vmovdqa X1, X3
+
+ sub RND, 1
+ jne loop2
+
+ rotate_Xs
+ rotate_Xs
+ rotate_Xs
+ rotate_Xs
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;; end rounds 20-39
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;; do rounds 40-59
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ vmovdqa XK, [rel K40_59]
+ mov RND, 3
+ ROTATE_ARGS
+ ROTATE_ARGS
+ ROTATE_ARGS
+ ROTATE_ARGS
+ rotate_Xs
+ rotate_Xs
+ rotate_Xs
+ rotate_Xs
+ jmp loop3_5
+align 16
+loop3:
+
+ do_4i MAGIC_F2
+
+loop3_5:
+ do_4i MAGIC_F2
+
+ rotate_Xs
+ rotate_Xs
+ rotate_Xs
+ rotate_Xs
+ vmovdqa X0, X2
+ vmovdqa X2, X4
+ vmovdqa X4, X1
+ vmovdqa X1, X3
+
+ sub RND, 1
+ jne loop3
+
+ rotate_Xs
+ rotate_Xs
+ rotate_Xs
+ rotate_Xs
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;; end rounds 40-59
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;; do rounds 60-79
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ vmovdqa XK, [rel K60_79]
+
+ do_4i MAGIC_F3
+
+ vpaddd XFER, XK, X0
+ vpextrd T1, XFER, 0
+ ROUND MAGIC_F3
+ vpextrd T1, XFER, 1
+ ROUND MAGIC_F3
+ vpextrd T1, XFER, 2
+ ROUND MAGIC_F3
+ vpextrd T1, XFER, 3
+ ROUND MAGIC_F3
+
+ vpaddd XFER, XK, X1
+ vpextrd T1, XFER, 0
+ ROUND MAGIC_F3
+ vpextrd T1, XFER, 1
+ ROUND MAGIC_F3
+ vpextrd T1, XFER, 2
+ ROUND MAGIC_F3
+ vpextrd T1, XFER, 3
+ ROUND MAGIC_F3
+
+ vpaddd XFER, XK, X2
+ vpextrd T1, XFER, 0
+ ROUND MAGIC_F3
+ vpextrd T1, XFER, 1
+ ROUND MAGIC_F3
+ vpextrd T1, XFER, 2
+ ROUND MAGIC_F3
+ vpextrd T1, XFER, 3
+ ROUND MAGIC_F3
+
+ vpaddd XFER, XK, X3
+ vpextrd T1, XFER, 0
+ ROUND MAGIC_F3
+ vpextrd T1, XFER, 1
+ ROUND MAGIC_F3
+ vpextrd T1, XFER, 2
+ ROUND MAGIC_F3
+ vpextrd T1, XFER, 3
+ ROUND MAGIC_F3
+
+ ;; update result digest h0-h4
+ add [SZ*0 + CTX], a
+ add [SZ*1 + CTX], b
+ add [SZ*2 + CTX], c
+ add [SZ*3 + CTX], d
+ add [SZ*4 + CTX], e
+
+%ifndef LINUX
+ vmovdqa xmm8, [rsp + 2 * 16]
+ vmovdqa xmm7, [rsp + 1 * 16]
+ vmovdqa xmm6, [rsp + 0 * 16]
+
+%ifdef SAFE_DATA
+ ;; Clear potential sensitive data stored in stack
+ vpxor xmm0, xmm0
+ vmovdqa [rsp + 0 * 16], xmm0
+ vmovdqa [rsp + 1 * 16], xmm0
+ vmovdqa [rsp + 2 * 16], xmm0
+%endif
+
+ mov rsp,[_RSP]
+%endif ;; LINUX
+
+ pop r13
+ pop r12
+ pop rdi
+ pop rsi
+ pop rbx
+
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/avx/sha224_one_block_avx.asm b/src/spdk/intel-ipsec-mb/avx/sha224_one_block_avx.asm
new file mode 100644
index 000000000..57d997dd3
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx/sha224_one_block_avx.asm
@@ -0,0 +1,33 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+; This code schedules 1 block at a time, with 4 lanes per block
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+%define FUNC sha224_block_avx
+
+%include "avx/sha256_one_block_avx.asm"
diff --git a/src/spdk/intel-ipsec-mb/avx/sha256_one_block_avx.asm b/src/spdk/intel-ipsec-mb/avx/sha256_one_block_avx.asm
new file mode 100644
index 000000000..9c96f036b
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx/sha256_one_block_avx.asm
@@ -0,0 +1,553 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+; This code schedules 1 block at a time, with 4 lanes per block
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+%include "include/os.asm"
+
+section .data
+default rel
+align 64
+K256:
+ dd 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
+ dd 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
+ dd 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
+ dd 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
+ dd 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
+ dd 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
+ dd 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
+ dd 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
+ dd 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
+ dd 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
+ dd 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
+ dd 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
+ dd 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
+ dd 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
+ dd 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
+ dd 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
+
+PSHUFFLE_BYTE_FLIP_MASK: ;ddq 0x0c0d0e0f08090a0b0405060700010203
+ dq 0x0405060700010203, 0x0c0d0e0f08090a0b
+
+; shuffle xBxA -> 00BA
+_SHUF_00BA: ;ddq 0xFFFFFFFFFFFFFFFF0b0a090803020100
+ dq 0x0b0a090803020100, 0xFFFFFFFFFFFFFFFF
+
+; shuffle xDxC -> DC00
+_SHUF_DC00: ;ddq 0x0b0a090803020100FFFFFFFFFFFFFFFF
+ dq 0xFFFFFFFFFFFFFFFF, 0x0b0a090803020100
+
+section .text
+
+%define VMOVDQ vmovdqu ;; assume buffers not aligned
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; Define Macros
+
+%macro MY_ROR 2
+ shld %1,%1,(32-(%2))
+%endm
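+
+;; SHLD of a register with itself by (32 - n) rotates the value left by
+;; (32 - n), i.e. right by n, so despite its shift-based spelling this macro
+;; behaves as the 32-bit rotate-right used by the SHA-256 round functions.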
+
+; COPY_XMM_AND_BSWAP xmm, [mem], byte_flip_mask
+; Load xmm with mem and byte swap each dword
+%macro COPY_XMM_AND_BSWAP 3
+ VMOVDQ %1, %2
+ vpshufb %1, %1, %3
+%endmacro
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+%define X0 xmm4
+%define X1 xmm5
+%define X2 xmm6
+%define X3 xmm7
+
+%define XTMP0 xmm0
+%define XTMP1 xmm1
+%define XTMP2 xmm2
+%define XTMP3 xmm3
+%define XTMP4 xmm8
+%define XFER xmm9
+%define XTMP5 xmm11
+
+%define SHUF_00BA xmm10 ; shuffle xBxA -> 00BA
+%define SHUF_DC00 xmm12 ; shuffle xDxC -> DC00
+%define BYTE_FLIP_MASK xmm13
+
+%ifdef LINUX
+%define CTX rsi ; 2nd arg
+%define INP rdi ; 1st arg
+
+%define SRND rdi ; clobbers INP
+%define c ecx
+%define d r8d
+%define e edx
+%else
+%define CTX rdx ; 2nd arg
+%define INP rcx ; 1st arg
+
+%define SRND rcx ; clobbers INP
+%define c edi
+%define d esi
+%define e r8d
+
+%endif
+%define TBL rbp
+%define a eax
+%define b ebx
+
+%define f r9d
+%define g r10d
+%define h r11d
+
+%define y0 r13d
+%define y1 r14d
+%define y2 r15d
+
+
+struc STACK
+%ifndef LINUX
+_XMM_SAVE: reso 7
+%endif
+_XFER: reso 1
+endstruc
+
+%ifndef FUNC
+%define FUNC sha256_block_avx
+%endif
+
+; rotate_Xs
+; Rotate values of symbols X0...X3
+%macro rotate_Xs 0
+%xdefine X_ X0
+%xdefine X0 X1
+%xdefine X1 X2
+%xdefine X2 X3
+%xdefine X3 X_
+%endm
+
+; ROTATE_ARGS
+; Rotate values of symbols a...h
+%macro ROTATE_ARGS 0
+%xdefine TMP_ h
+%xdefine h g
+%xdefine g f
+%xdefine f e
+%xdefine e d
+%xdefine d c
+%xdefine c b
+%xdefine b a
+%xdefine a TMP_
+%endm
+
+%macro FOUR_ROUNDS_AND_SCHED 0
+ ;; compute s0 four at a time and s1 two at a time
+ ;; compute W[-16] + W[-7] 4 at a time
+ ;vmovdqa XTMP0, X3
+ mov y0, e ; y0 = e
+ MY_ROR y0, (25-11) ; y0 = e >> (25-11)
+ mov y1, a ; y1 = a
+ vpalignr XTMP0, X3, X2, 4 ; XTMP0 = W[-7]
+ MY_ROR y1, (22-13) ; y1 = a >> (22-13)
+ xor y0, e ; y0 = e ^ (e >> (25-11))
+ mov y2, f ; y2 = f
+ MY_ROR y0, (11-6) ; y0 = (e >> (11-6)) ^ (e >> (25-6))
+ ;vmovdqa XTMP1, X1
+	xor	y1, a		; y1 = a ^ (a >> (22-13))
+ xor y2, g ; y2 = f^g
+ vpaddd XTMP0, XTMP0, X0 ; XTMP0 = W[-7] + W[-16]
+ xor y0, e ; y0 = e ^ (e >> (11-6)) ^ (e >> (25-6))
+ and y2, e ; y2 = (f^g)&e
+ MY_ROR y1, (13-2) ; y1 = (a >> (13-2)) ^ (a >> (22-2))
+ ;; compute s0
+ vpalignr XTMP1, X1, X0, 4 ; XTMP1 = W[-15]
+ xor y1, a ; y1 = a ^ (a >> (13-2)) ^ (a >> (22-2))
+	MY_ROR	y0, 6		; y0 = S1 = (e>>6) ^ (e>>11) ^ (e>>25)
+ xor y2, g ; y2 = CH = ((f^g)&e)^g
+
+ MY_ROR y1, 2 ; y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22)
+ add y2, y0 ; y2 = S1 + CH
+ add y2, [rsp + _XFER + 0*4] ; y2 = k + w + S1 + CH
+
+ mov y0, a ; y0 = a
+ add h, y2 ; h = h + S1 + CH + k + w
+ mov y2, a ; y2 = a
+
+ vpsrld XTMP2, XTMP1, 7
+
+ or y0, c ; y0 = a|c
+ add d, h ; d = d + h + S1 + CH + k + w
+ and y2, c ; y2 = a&c
+
+ vpslld XTMP3, XTMP1, (32-7)
+
+ and y0, b ; y0 = (a|c)&b
+ add h, y1 ; h = h + S1 + CH + k + w + S0
+
+ vpor XTMP3, XTMP3, XTMP2 ; XTMP1 = W[-15] MY_ROR 7
+
+	or	y0, y2		; y0 = MAJ = ((a|c)&b)|(a&c)
+ add h, y0 ; h = h + S1 + CH + k + w + S0 + MAJ
+
+ROTATE_ARGS
+
+ mov y0, e ; y0 = e
+ mov y1, a ; y1 = a
+
+
+ MY_ROR y0, (25-11) ; y0 = e >> (25-11)
+ xor y0, e ; y0 = e ^ (e >> (25-11))
+ mov y2, f ; y2 = f
+ MY_ROR y1, (22-13) ; y1 = a >> (22-13)
+
+ vpsrld XTMP2, XTMP1,18
+
+	xor	y1, a		; y1 = a ^ (a >> (22-13))
+ MY_ROR y0, (11-6) ; y0 = (e >> (11-6)) ^ (e >> (25-6))
+ xor y2, g ; y2 = f^g
+
+ vpsrld XTMP4, XTMP1, 3 ; XTMP4 = W[-15] >> 3
+
+ MY_ROR y1, (13-2) ; y1 = (a >> (13-2)) ^ (a >> (22-2))
+ xor y0, e ; y0 = e ^ (e >> (11-6)) ^ (e >> (25-6))
+ and y2, e ; y2 = (f^g)&e
+	MY_ROR	y0, 6		; y0 = S1 = (e>>6) ^ (e>>11) ^ (e>>25)
+
+ vpslld XTMP1, XTMP1, (32-18)
+
+ xor y1, a ; y1 = a ^ (a >> (13-2)) ^ (a >> (22-2))
+ xor y2, g ; y2 = CH = ((f^g)&e)^g
+
+ vpxor XTMP3, XTMP3, XTMP1
+
+ add y2, y0 ; y2 = S1 + CH
+ add y2, [rsp + _XFER + 1*4] ; y2 = k + w + S1 + CH
+ MY_ROR y1, 2 ; y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22)
+
+ vpxor XTMP3, XTMP3, XTMP2 ; XTMP1 = W[-15] MY_ROR 7 ^ W[-15] MY_ROR 18
+
+ mov y0, a ; y0 = a
+ add h, y2 ; h = h + S1 + CH + k + w
+ mov y2, a ; y2 = a
+
+ vpxor XTMP1, XTMP3, XTMP4 ; XTMP1 = s0
+
+ or y0, c ; y0 = a|c
+ add d, h ; d = d + h + S1 + CH + k + w
+ and y2, c ; y2 = a&c
+ ;; compute low s1
+ vpshufd XTMP2, X3, 11111010b ; XTMP2 = W[-2] {BBAA}
+ and y0, b ; y0 = (a|c)&b
+ add h, y1 ; h = h + S1 + CH + k + w + S0
+ vpaddd XTMP0, XTMP0, XTMP1 ; XTMP0 = W[-16] + W[-7] + s0
+	or	y0, y2		; y0 = MAJ = ((a|c)&b)|(a&c)
+ add h, y0 ; h = h + S1 + CH + k + w + S0 + MAJ
+
+ROTATE_ARGS
+ ;vmovdqa XTMP3, XTMP2 ; XTMP3 = W[-2] {BBAA}
+
+ mov y0, e ; y0 = e
+ mov y1, a ; y1 = a
+ MY_ROR y0, (25-11) ; y0 = e >> (25-11)
+
+ ;vmovdqa XTMP4, XTMP2 ; XTMP4 = W[-2] {BBAA}
+
+ xor y0, e ; y0 = e ^ (e >> (25-11))
+ MY_ROR y1, (22-13) ; y1 = a >> (22-13)
+ mov y2, f ; y2 = f
+	xor	y1, a		; y1 = a ^ (a >> (22-13))
+ MY_ROR y0, (11-6) ; y0 = (e >> (11-6)) ^ (e >> (25-6))
+
+ vpsrld XTMP4, XTMP2, 10 ; XTMP4 = W[-2] >> 10 {BBAA}
+
+ xor y2, g ; y2 = f^g
+
+ vpsrlq XTMP3, XTMP2, 19 ; XTMP3 = W[-2] MY_ROR 19 {xBxA}
+
+ xor y0, e ; y0 = e ^ (e >> (11-6)) ^ (e >> (25-6))
+ and y2, e ; y2 = (f^g)&e
+
+ vpsrlq XTMP2, XTMP2, 17 ; XTMP2 = W[-2] MY_ROR 17 {xBxA}
+
+ MY_ROR y1, (13-2) ; y1 = (a >> (13-2)) ^ (a >> (22-2))
+ xor y1, a ; y1 = a ^ (a >> (13-2)) ^ (a >> (22-2))
+ xor y2, g ; y2 = CH = ((f^g)&e)^g
+	MY_ROR	y0, 6		; y0 = S1 = (e>>6) ^ (e>>11) ^ (e>>25)
+ vpxor XTMP2, XTMP2, XTMP3
+ add y2, y0 ; y2 = S1 + CH
+ MY_ROR y1, 2 ; y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22)
+ add y2, [rsp + _XFER + 2*4] ; y2 = k + w + S1 + CH
+ vpxor XTMP4, XTMP4, XTMP2 ; XTMP4 = s1 {xBxA}
+ mov y0, a ; y0 = a
+ add h, y2 ; h = h + S1 + CH + k + w
+ mov y2, a ; y2 = a
+ vpshufb XTMP4, XTMP4, SHUF_00BA ; XTMP4 = s1 {00BA}
+ or y0, c ; y0 = a|c
+ add d, h ; d = d + h + S1 + CH + k + w
+ and y2, c ; y2 = a&c
+ vpaddd XTMP0, XTMP0, XTMP4 ; XTMP0 = {..., ..., W[1], W[0]}
+ and y0, b ; y0 = (a|c)&b
+ add h, y1 ; h = h + S1 + CH + k + w + S0
+ ;; compute high s1
+ vpshufd XTMP2, XTMP0, 01010000b ; XTMP2 = W[-2] {DDCC}
+	or	y0, y2		; y0 = MAJ = ((a|c)&b)|(a&c)
+ add h, y0 ; h = h + S1 + CH + k + w + S0 + MAJ
+
+ROTATE_ARGS
+ ;vmovdqa XTMP3, XTMP2 ; XTMP3 = W[-2] {DDCC}
+ mov y0, e ; y0 = e
+ MY_ROR y0, (25-11) ; y0 = e >> (25-11)
+ mov y1, a ; y1 = a
+ ;vmovdqa XTMP5, XTMP2 ; XTMP5 = W[-2] {DDCC}
+ MY_ROR y1, (22-13) ; y1 = a >> (22-13)
+ xor y0, e ; y0 = e ^ (e >> (25-11))
+ mov y2, f ; y2 = f
+ MY_ROR y0, (11-6) ; y0 = (e >> (11-6)) ^ (e >> (25-6))
+
+ vpsrld XTMP5, XTMP2, 10 ; XTMP5 = W[-2] >> 10 {DDCC}
+
+	xor	y1, a		; y1 = a ^ (a >> (22-13))
+ xor y2, g ; y2 = f^g
+
+ vpsrlq XTMP3, XTMP2, 19 ; XTMP3 = W[-2] MY_ROR 19 {xDxC}
+
+ xor y0, e ; y0 = e ^ (e >> (11-6)) ^ (e >> (25-6))
+ and y2, e ; y2 = (f^g)&e
+ MY_ROR y1, (13-2) ; y1 = (a >> (13-2)) ^ (a >> (22-2))
+
+ vpsrlq XTMP2, XTMP2, 17 ; XTMP2 = W[-2] MY_ROR 17 {xDxC}
+
+ xor y1, a ; y1 = a ^ (a >> (13-2)) ^ (a >> (22-2))
+	MY_ROR	y0, 6		; y0 = S1 = (e>>6) ^ (e>>11) ^ (e>>25)
+ xor y2, g ; y2 = CH = ((f^g)&e)^g
+
+ vpxor XTMP2, XTMP2, XTMP3
+
+ MY_ROR y1, 2 ; y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22)
+ add y2, y0 ; y2 = S1 + CH
+ add y2, [rsp + _XFER + 3*4] ; y2 = k + w + S1 + CH
+ vpxor XTMP5, XTMP5, XTMP2 ; XTMP5 = s1 {xDxC}
+ mov y0, a ; y0 = a
+ add h, y2 ; h = h + S1 + CH + k + w
+ mov y2, a ; y2 = a
+ vpshufb XTMP5, XTMP5, SHUF_DC00 ; XTMP5 = s1 {DC00}
+ or y0, c ; y0 = a|c
+ add d, h ; d = d + h + S1 + CH + k + w
+ and y2, c ; y2 = a&c
+ vpaddd X0, XTMP5, XTMP0 ; X0 = {W[3], W[2], W[1], W[0]}
+ and y0, b ; y0 = (a|c)&b
+ add h, y1 ; h = h + S1 + CH + k + w + S0
+	or	y0, y2		; y0 = MAJ = ((a|c)&b)|(a&c)
+ add h, y0 ; h = h + S1 + CH + k + w + S0 + MAJ
+
+ROTATE_ARGS
+rotate_Xs
+%endm
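+
+;; For reference, the message-schedule step that FOUR_ROUNDS_AND_SCHED computes
+;; four words at a time, written out in scalar C (illustrative sketch only;
+;; ror32 denotes a plain 32-bit rotate right, the scalar counterpart of MY_ROR):
+;;
+;;     /* sigma0(x) = ror(x,7)  ^ ror(x,18) ^ (x >> 3)
+;;        sigma1(x) = ror(x,17) ^ ror(x,19) ^ (x >> 10)
+;;        W[t] = W[t-16] + sigma0(W[t-15]) + W[t-7] + sigma1(W[t-2])        */
+;;     uint32_t s0  = ror32(w_15, 7)  ^ ror32(w_15, 18) ^ (w_15 >> 3);
+;;     uint32_t s1  = ror32(w_2, 17)  ^ ror32(w_2, 19)  ^ (w_2 >> 10);
+;;     uint32_t w_t = w_16 + s0 + w_7 + s1;   /* w_16 = W[t-16], w_7 = W[t-7], ... */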
+
+;; input is [rsp + _XFER + %1 * 4]
+%macro DO_ROUND 1
+ mov y0, e ; y0 = e
+ MY_ROR y0, (25-11) ; y0 = e >> (25-11)
+ mov y1, a ; y1 = a
+ xor y0, e ; y0 = e ^ (e >> (25-11))
+ MY_ROR y1, (22-13) ; y1 = a >> (22-13)
+ mov y2, f ; y2 = f
+	xor	y1, a		; y1 = a ^ (a >> (22-13))
+ MY_ROR y0, (11-6) ; y0 = (e >> (11-6)) ^ (e >> (25-6))
+ xor y2, g ; y2 = f^g
+ xor y0, e ; y0 = e ^ (e >> (11-6)) ^ (e >> (25-6))
+ MY_ROR y1, (13-2) ; y1 = (a >> (13-2)) ^ (a >> (22-2))
+ and y2, e ; y2 = (f^g)&e
+ xor y1, a ; y1 = a ^ (a >> (13-2)) ^ (a >> (22-2))
+	MY_ROR	y0, 6		; y0 = S1 = (e>>6) ^ (e>>11) ^ (e>>25)
+ xor y2, g ; y2 = CH = ((f^g)&e)^g
+ add y2, y0 ; y2 = S1 + CH
+ MY_ROR y1, 2 ; y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22)
+ add y2, [rsp + _XFER + %1 * 4] ; y2 = k + w + S1 + CH
+ mov y0, a ; y0 = a
+ add h, y2 ; h = h + S1 + CH + k + w
+ mov y2, a ; y2 = a
+ or y0, c ; y0 = a|c
+ add d, h ; d = d + h + S1 + CH + k + w
+ and y2, c ; y2 = a&c
+ and y0, b ; y0 = (a|c)&b
+ add h, y1 ; h = h + S1 + CH + k + w + S0
+	or	y0, y2		; y0 = MAJ = ((a|c)&b)|(a&c)
+ add h, y0 ; h = h + S1 + CH + k + w + S0 + MAJ
+ ROTATE_ARGS
+%endm
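+
+;; For reference, one SHA-256 round as DO_ROUND computes it, in scalar C
+;; (illustrative sketch only).  Note that MAJ is formed as ((a|c)&b)|(a&c),
+;; which is equivalent to the textbook (a&b)^(a&c)^(b&c):
+;;
+;;     uint32_t S1  = ror32(e, 6) ^ ror32(e, 11) ^ ror32(e, 25);
+;;     uint32_t CH  = ((f ^ g) & e) ^ g;
+;;     uint32_t S0  = ror32(a, 2) ^ ror32(a, 13) ^ ror32(a, 22);
+;;     uint32_t MAJ = ((a | c) & b) | (a & c);
+;;     uint32_t t1  = h + S1 + CH + k + w;   /* k = round constant, w = schedule word */
+;;     h = g; g = f; f = e; e = d + t1;
+;;     d = c; c = b; b = a; a = t1 + S0 + MAJ;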
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; void FUNC(void *input_data, UINT32 digest[8], UINT64 num_blks)
+;; arg 1 : pointer to input data
+;; arg 2 : pointer to digest
+section .text
+MKGLOBAL(FUNC,function,internal)
+align 32
+FUNC:
+ push rbx
+%ifndef LINUX
+ push rsi
+ push rdi
+%endif
+ push rbp
+ push r13
+ push r14
+ push r15
+
+ sub rsp,STACK_size
+%ifndef LINUX
+ vmovdqa [rsp + _XMM_SAVE + 0*16],xmm6
+ vmovdqa [rsp + _XMM_SAVE + 1*16],xmm7
+ vmovdqa [rsp + _XMM_SAVE + 2*16],xmm8
+ vmovdqa [rsp + _XMM_SAVE + 3*16],xmm9
+ vmovdqa [rsp + _XMM_SAVE + 4*16],xmm10
+ vmovdqa [rsp + _XMM_SAVE + 5*16],xmm11
+ vmovdqa [rsp + _XMM_SAVE + 6*16],xmm12
+ vmovdqa [rsp + _XMM_SAVE + 7*16],xmm13
+%endif
+
+ ;; load initial digest
+ mov a, [4*0 + CTX]
+ mov b, [4*1 + CTX]
+ mov c, [4*2 + CTX]
+ mov d, [4*3 + CTX]
+ mov e, [4*4 + CTX]
+ mov f, [4*5 + CTX]
+ mov g, [4*6 + CTX]
+ mov h, [4*7 + CTX]
+
+ vmovdqa BYTE_FLIP_MASK, [rel PSHUFFLE_BYTE_FLIP_MASK]
+ vmovdqa SHUF_00BA, [rel _SHUF_00BA]
+ vmovdqa SHUF_DC00, [rel _SHUF_DC00]
+
+ lea TBL,[rel K256]
+
+ ;; byte swap first 16 dwords
+ COPY_XMM_AND_BSWAP X0, [INP + 0*16], BYTE_FLIP_MASK
+ COPY_XMM_AND_BSWAP X1, [INP + 1*16], BYTE_FLIP_MASK
+ COPY_XMM_AND_BSWAP X2, [INP + 2*16], BYTE_FLIP_MASK
+ COPY_XMM_AND_BSWAP X3, [INP + 3*16], BYTE_FLIP_MASK
+
+ ;; schedule 48 input dwords, by doing 3 rounds of 16 each
+ mov SRND, 3
+align 16
+loop1:
+ vpaddd XFER, X0, [TBL + 0*16]
+ vmovdqa [rsp + _XFER], XFER
+ FOUR_ROUNDS_AND_SCHED
+
+ vpaddd XFER, X0, [TBL + 1*16]
+ vmovdqa [rsp + _XFER], XFER
+ FOUR_ROUNDS_AND_SCHED
+
+ vpaddd XFER, X0, [TBL + 2*16]
+ vmovdqa [rsp + _XFER], XFER
+ FOUR_ROUNDS_AND_SCHED
+
+ vpaddd XFER, X0, [TBL + 3*16]
+ vmovdqa [rsp + _XFER], XFER
+ add TBL, 4*16
+ FOUR_ROUNDS_AND_SCHED
+
+ sub SRND, 1
+ jne loop1
+
+ mov SRND, 2
+loop2:
+ vpaddd XFER, X0, [TBL + 0*16]
+ vmovdqa [rsp + _XFER], XFER
+ DO_ROUND 0
+ DO_ROUND 1
+ DO_ROUND 2
+ DO_ROUND 3
+
+ vpaddd XFER, X1, [TBL + 1*16]
+ vmovdqa [rsp + _XFER], XFER
+ add TBL, 2*16
+ DO_ROUND 0
+ DO_ROUND 1
+ DO_ROUND 2
+ DO_ROUND 3
+
+ vmovdqa X0, X2
+ vmovdqa X1, X3
+
+ sub SRND, 1
+ jne loop2
+
+ add [4*0 + CTX], a
+ add [4*1 + CTX], b
+ add [4*2 + CTX], c
+ add [4*3 + CTX], d
+ add [4*4 + CTX], e
+ add [4*5 + CTX], f
+ add [4*6 + CTX], g
+ add [4*7 + CTX], h
+
+done_hash:
+%ifndef LINUX
+ vmovdqa xmm6,[rsp + _XMM_SAVE + 0*16]
+ vmovdqa xmm7,[rsp + _XMM_SAVE + 1*16]
+ vmovdqa xmm8,[rsp + _XMM_SAVE + 2*16]
+ vmovdqa xmm9,[rsp + _XMM_SAVE + 3*16]
+ vmovdqa xmm10,[rsp + _XMM_SAVE + 4*16]
+ vmovdqa xmm11,[rsp + _XMM_SAVE + 5*16]
+ vmovdqa xmm12,[rsp + _XMM_SAVE + 6*16]
+ vmovdqa xmm13,[rsp + _XMM_SAVE + 7*16]
+%ifdef SAFE_DATA
+ ;; Clear potential sensitive data stored in stack
+ vpxor xmm0, xmm0
+ vmovdqa [rsp + _XMM_SAVE + 0 * 16], xmm0
+ vmovdqa [rsp + _XMM_SAVE + 1 * 16], xmm0
+ vmovdqa [rsp + _XMM_SAVE + 2 * 16], xmm0
+ vmovdqa [rsp + _XMM_SAVE + 3 * 16], xmm0
+ vmovdqa [rsp + _XMM_SAVE + 4 * 16], xmm0
+ vmovdqa [rsp + _XMM_SAVE + 5 * 16], xmm0
+ vmovdqa [rsp + _XMM_SAVE + 6 * 16], xmm0
+ vmovdqa [rsp + _XMM_SAVE + 7 * 16], xmm0
+%endif
+%endif ;; LINUX
+
+ add rsp, STACK_size
+
+ pop r15
+ pop r14
+ pop r13
+ pop rbp
+%ifndef LINUX
+ pop rdi
+ pop rsi
+%endif
+ pop rbx
+
+ ret
+
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/avx/sha384_one_block_avx.asm b/src/spdk/intel-ipsec-mb/avx/sha384_one_block_avx.asm
new file mode 100644
index 000000000..dddc5df28
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx/sha384_one_block_avx.asm
@@ -0,0 +1,33 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+; This code schedules 1 block at a time, with 4 lanes per block
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+%define FUNC sha384_block_avx
+
+%include "avx/sha512_one_block_avx.asm"
diff --git a/src/spdk/intel-ipsec-mb/avx/sha512_one_block_avx.asm b/src/spdk/intel-ipsec-mb/avx/sha512_one_block_avx.asm
new file mode 100644
index 000000000..040518e76
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx/sha512_one_block_avx.asm
@@ -0,0 +1,473 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+; This code schedules 1 block at a time, with 4 lanes per block
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+%include "include/os.asm"
+
+%define VMOVDQ vmovdqu ;; assume buffers not aligned
+
+%ifndef FUNC
+%define FUNC sha512_block_avx
+%endif
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; Define Macros
+
+%macro MY_ROR 2
+shld %1,%1,(64-(%2))
+%endm
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+; COPY_XMM_AND_BSWAP xmm, [mem], byte_flip_mask
+; Load xmm with mem and byte swap each qword
+%macro COPY_XMM_AND_BSWAP 3
+ VMOVDQ %1, %2
+ vpshufb %1, %3
+%endmacro
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+%define X0 xmm4
+%define X1 xmm5
+%define X2 xmm6
+%define X3 xmm7
+%define X4 xmm8
+%define X5 xmm9
+%define X6 xmm10
+%define X7 xmm11
+
+%define XTMP0 xmm0
+%define XTMP1 xmm1
+%define XTMP2 xmm2
+%define XTMP3 xmm3
+%define XFER xmm13
+
+%define BYTE_FLIP_MASK xmm12
+
+%ifdef LINUX
+%define CTX rsi ; 2nd arg
+%define INP rdi ; 1st arg
+
+%define SRND rdi ; clobbers INP
+%define c rcx
+%define d r8
+%define e rdx
+%else
+%define CTX rdx ; 2nd arg
+%define INP rcx ; 1st arg
+
+%define SRND rcx ; clobbers INP
+%define c rdi
+%define d rsi
+%define e r8
+
+%endif
+%define TBL rbp
+%define a rax
+%define b rbx
+
+%define f r9
+%define g r10
+%define h r11
+
+%define y0 r13
+%define y1 r14
+%define y2 r15
+
+struc STACK
+%ifndef LINUX
+_XMM_SAVE: reso 8
+%endif
+_XFER: reso 1
+endstruc
+
+
+; rotate_Xs
+; Rotate values of symbols X0...X7
+%macro rotate_Xs 0
+%xdefine X_ X0
+%xdefine X0 X1
+%xdefine X1 X2
+%xdefine X2 X3
+%xdefine X3 X4
+%xdefine X4 X5
+%xdefine X5 X6
+%xdefine X6 X7
+%xdefine X7 X_
+%endm
+
+; ROTATE_ARGS
+; Rotate values of symbols a...h
+%macro ROTATE_ARGS 0
+%xdefine TMP_ h
+%xdefine h g
+%xdefine g f
+%xdefine f e
+%xdefine e d
+%xdefine d c
+%xdefine c b
+%xdefine b a
+%xdefine a TMP_
+%endm
+
+%macro TWO_ROUNDS_AND_SCHED 0
+
+ vpalignr XTMP0, X5, X4, 8 ; XTMP0 = W[-7]
+	;; compute s0 two at a time and s1 two at a time
+	;; compute W[-16] + W[-7] 2 at a time
+ mov y0, e ; y0 = e
+ mov y1, a ; y1 = a
+ MY_ROR y0, (41-18) ; y0 = e >> (41-18)
+ vpaddq XTMP0, XTMP0, X0 ; XTMP0 = W[-7] + W[-16]
+ xor y0, e ; y0 = e ^ (e >> (41-18))
+ mov y2, f ; y2 = f
+ MY_ROR y1, (39-34) ; y1 = a >> (39-34)
+ ;; compute s0
+ vpalignr XTMP1, X1, X0, 8 ; XTMP1 = W[-15]
+	xor	y1, a		; y1 = a ^ (a >> (39-34))
+ MY_ROR y0, (18-14) ; y0 = (e >> (18-14)) ^ (e >> (41-14))
+ vpsllq XTMP2, XTMP1, (64-1)
+ xor y2, g ; y2 = f^g
+ MY_ROR y1, (34-28) ; y1 = (a >> (34-28)) ^ (a >> (39-28))
+ vpsrlq XTMP3, XTMP1, 1
+ xor y0, e ; y0 = e ^ (e >> (18-14)) ^ (e >> (41-14))
+ and y2, e ; y2 = (f^g)&e
+	MY_ROR	y0, 14		; y0 = S1 = (e>>14) ^ (e>>18) ^ (e>>41)
+ vpor XTMP2, XTMP2, XTMP3 ; XTMP2 = W[-15] ror 1
+ xor y1, a ; y1 = a ^ (a >> (34-28)) ^ (a >> (39-28))
+ xor y2, g ; y2 = CH = ((f^g)&e)^g
+ add y2, y0 ; y2 = S1 + CH
+ vpsrlq XTMP3, XTMP1, 8
+ add y2, [rsp + _XFER + 0*8] ; y2 = k + w + S1 + CH
+ MY_ROR y1, 28 ; y1 = S0 = (a>>28) ^ (a>>34) ^ (a>>39)
+ mov y0, a ; y0 = a
+ vpsllq X0, XTMP1, (64-8)
+ add h, y2 ; h = h + S1 + CH + k + w
+ mov y2, a ; y2 = a
+ or y0, c ; y0 = a|c
+ vpor X0, X0, XTMP3
+ add d, h ; d = d + t1
+ and y2, c ; y2 = a&c
+ and y0, b ; y0 = (a|c)&b
+ vpsrlq XTMP1, XTMP1, 7 ; X0 = W[-15] >> 7
+ add h, y1 ; h = t1 + S0
+	or	y0, y2		; y0 = MAJ = ((a|c)&b)|(a&c)
+ vpxor XTMP1, XTMP1, XTMP2 ; XTMP1 = W[-15] ror 1 ^ W[-15] ror 8
+ add h, y0 ; h = t1 + S0 + MAJ
+ vpxor XTMP1, XTMP1, X0 ; XTMP1 = s0
+
+
+ROTATE_ARGS
+ ;; compute s1
+ vpaddq XTMP0, XTMP0, XTMP1 ; XTMP0 = W[-16] + W[-7] + s0
+ mov y0, e ; y0 = e
+ mov y1, a ; y1 = a
+ MY_ROR y0, (41-18) ; y0 = e >> (41-18)
+ vpsllq XTMP3, X7, (64-19)
+ xor y0, e ; y0 = e ^ (e >> (41-18))
+ mov y2, f ; y2 = f
+ MY_ROR y1, (39-34) ; y1 = a >> (39-34)
+ vpsrlq X0, X7, 19
+	xor	y1, a		; y1 = a ^ (a >> (39-34))
+ MY_ROR y0, (18-14) ; y0 = (e >> (18-14)) ^ (e >> (41-14))
+ vpor XTMP3, XTMP3, X0 ; XTMP3 = W[-2] ror 19
+ xor y2, g ; y2 = f^g
+ MY_ROR y1, (34-28) ; y1 = (a >> (34-28)) ^ (a >> (39-28))
+ vpsllq XTMP2, X7, (64-61)
+ xor y0, e ; y0 = e ^ (e >> (18-14)) ^ (e >> (41-14))
+ and y2, e ; y2 = (f^g)&e
+	MY_ROR	y0, 14		; y0 = S1 = (e>>14) ^ (e>>18) ^ (e>>41)
+ vpsrlq XTMP1, X7, 61
+ xor y1, a ; y1 = a ^ (a >> (34-28)) ^ (a >> (39-28))
+ xor y2, g ; y2 = CH = ((f^g)&e)^g
+ add y2, y0 ; y2 = S1 + CH
+ vpor XTMP2, XTMP2, XTMP1 ; XTMP2 = W[-2] ror 61
+ add y2, [rsp + _XFER + 1*8] ; y2 = k + w + S1 + CH
+ MY_ROR y1, 28 ; y1 = S0 = (a>>28) ^ (a>>34) ^ (a>>39)
+ mov y0, a ; y0 = a
+ vpsrlq XTMP1, X7, 6 ; XTMP1 = W[-2] >> 6
+ add h, y2 ; h = h + S1 + CH + k + w
+ mov y2, a ; y2 = a
+ or y0, c ; y0 = a|c
+ vpxor XTMP1, XTMP1, XTMP2
+ add d, h ; d = d + t1
+ and y2, c ; y2 = a&c
+ and y0, b ; y0 = (a|c)&b
+ vpxor X0, XTMP3, XTMP1 ; X0 = s1
+ add h, y1 ; h = t1 + S0
+	or	y0, y2		; y0 = MAJ = ((a|c)&b)|(a&c)
+ add h, y0 ; h = t1 + S0 + MAJ
+ vpaddq X0, X0, XTMP0 ; X0 = {W[1], W[0]}
+
+ROTATE_ARGS
+rotate_Xs
+%endm
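+
+;; For reference, the SHA-512 schedule step that TWO_ROUNDS_AND_SCHED computes
+;; two words at a time, in scalar C (illustrative sketch only; ror64 denotes a
+;; plain 64-bit rotate right, the scalar counterpart of MY_ROR above):
+;;
+;;     /* sigma0(x) = ror(x,1)  ^ ror(x,8)  ^ (x >> 7)
+;;        sigma1(x) = ror(x,19) ^ ror(x,61) ^ (x >> 6)
+;;        W[t] = W[t-16] + sigma0(W[t-15]) + W[t-7] + sigma1(W[t-2])        */
+;;     uint64_t s0  = ror64(w_15, 1)  ^ ror64(w_15, 8)  ^ (w_15 >> 7);
+;;     uint64_t s1  = ror64(w_2, 19)  ^ ror64(w_2, 61)  ^ (w_2 >> 6);
+;;     uint64_t w_t = w_16 + s0 + w_7 + s1;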
+
+;; input is [rsp + _XFER + %1 * 8]
+%macro DO_ROUND 1
+ mov y0, e ; y0 = e
+ MY_ROR y0, (41-18) ; y0 = e >> (41-18)
+ mov y1, a ; y1 = a
+ xor y0, e ; y0 = e ^ (e >> (41-18))
+ MY_ROR y1, (39-34) ; y1 = a >> (39-34)
+ mov y2, f ; y2 = f
+	xor	y1, a		; y1 = a ^ (a >> (39-34))
+ MY_ROR y0, (18-14) ; y0 = (e >> (18-14)) ^ (e >> (41-14))
+ xor y2, g ; y2 = f^g
+	xor	y0, e		; y0 = e ^ (e >> (18-14)) ^ (e >> (41-14))
+ MY_ROR y1, (34-28) ; y1 = (a >> (34-28)) ^ (a >> (39-28))
+ and y2, e ; y2 = (f^g)&e
+ xor y1, a ; y1 = a ^ (a >> (34-28)) ^ (a >> (39-28))
+	MY_ROR	y0, 14		; y0 = S1 = (e>>14) ^ (e>>18) ^ (e>>41)
+ xor y2, g ; y2 = CH = ((f^g)&e)^g
+ add y2, y0 ; y2 = S1 + CH
+ MY_ROR y1, 28 ; y1 = S0 = (a>>28) ^ (a>>34) ^ (a>>39)
+ add y2, [rsp + _XFER + %1*8] ; y2 = k + w + S1 + CH
+ mov y0, a ; y0 = a
+ add h, y2 ; h = h + S1 + CH + k + w
+ mov y2, a ; y2 = a
+ or y0, c ; y0 = a|c
+ add d, h ; d = d + t1
+ and y2, c ; y2 = a&c
+ and y0, b ; y0 = (a|c)&b
+ add h, y1 ; h = t1 + S0
+	or	y0, y2		; y0 = MAJ = ((a|c)&b)|(a&c)
+ add h, y0 ; h = t1 + S0 + MAJ
+ ROTATE_ARGS
+%endm
+
+section .data
+default rel
+align 64
+K512:
+ dq 0x428a2f98d728ae22,0x7137449123ef65cd
+ dq 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
+ dq 0x3956c25bf348b538,0x59f111f1b605d019
+ dq 0x923f82a4af194f9b,0xab1c5ed5da6d8118
+ dq 0xd807aa98a3030242,0x12835b0145706fbe
+ dq 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
+ dq 0x72be5d74f27b896f,0x80deb1fe3b1696b1
+ dq 0x9bdc06a725c71235,0xc19bf174cf692694
+ dq 0xe49b69c19ef14ad2,0xefbe4786384f25e3
+ dq 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
+ dq 0x2de92c6f592b0275,0x4a7484aa6ea6e483
+ dq 0x5cb0a9dcbd41fbd4,0x76f988da831153b5
+ dq 0x983e5152ee66dfab,0xa831c66d2db43210
+ dq 0xb00327c898fb213f,0xbf597fc7beef0ee4
+ dq 0xc6e00bf33da88fc2,0xd5a79147930aa725
+ dq 0x06ca6351e003826f,0x142929670a0e6e70
+ dq 0x27b70a8546d22ffc,0x2e1b21385c26c926
+ dq 0x4d2c6dfc5ac42aed,0x53380d139d95b3df
+ dq 0x650a73548baf63de,0x766a0abb3c77b2a8
+ dq 0x81c2c92e47edaee6,0x92722c851482353b
+ dq 0xa2bfe8a14cf10364,0xa81a664bbc423001
+ dq 0xc24b8b70d0f89791,0xc76c51a30654be30
+ dq 0xd192e819d6ef5218,0xd69906245565a910
+ dq 0xf40e35855771202a,0x106aa07032bbd1b8
+ dq 0x19a4c116b8d2d0c8,0x1e376c085141ab53
+ dq 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
+ dq 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
+ dq 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
+ dq 0x748f82ee5defb2fc,0x78a5636f43172f60
+ dq 0x84c87814a1f0ab72,0x8cc702081a6439ec
+ dq 0x90befffa23631e28,0xa4506cebde82bde9
+ dq 0xbef9a3f7b2c67915,0xc67178f2e372532b
+ dq 0xca273eceea26619c,0xd186b8c721c0c207
+ dq 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
+ dq 0x06f067aa72176fba,0x0a637dc5a2c898a6
+ dq 0x113f9804bef90dae,0x1b710b35131c471b
+ dq 0x28db77f523047d84,0x32caab7b40c72493
+ dq 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
+ dq 0x4cc5d4becb3e42b6,0x597f299cfc657e2a
+ dq 0x5fcb6fab3ad6faec,0x6c44198c4a475817
+
+align 16
+PSHUFFLE_BYTE_FLIP_MASK: ;ddq 0x08090a0b0c0d0e0f0001020304050607
+ dq 0x0001020304050607, 0x08090a0b0c0d0e0f
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; void FUNC(void *input_data, UINT64 digest[8])
+;; arg 1 : pointer to input data
+;; arg 2 : pointer to digest
+section .text
+MKGLOBAL(FUNC,function,internal)
+align 32
+FUNC:
+ push rbx
+%ifndef LINUX
+ push rsi
+ push rdi
+%endif
+ push rbp
+ push r13
+ push r14
+ push r15
+
+ sub rsp,STACK_size
+%ifndef LINUX
+ vmovdqa [rsp + _XMM_SAVE + 0*16],xmm6
+ vmovdqa [rsp + _XMM_SAVE + 1*16],xmm7
+ vmovdqa [rsp + _XMM_SAVE + 2*16],xmm8
+ vmovdqa [rsp + _XMM_SAVE + 3*16],xmm9
+ vmovdqa [rsp + _XMM_SAVE + 4*16],xmm10
+ vmovdqa [rsp + _XMM_SAVE + 5*16],xmm11
+ vmovdqa [rsp + _XMM_SAVE + 6*16],xmm12
+ vmovdqa [rsp + _XMM_SAVE + 7*16],xmm13
+%endif
+
+ ;; load initial digest
+ mov a, [8*0 + CTX]
+ mov b, [8*1 + CTX]
+ mov c, [8*2 + CTX]
+ mov d, [8*3 + CTX]
+ mov e, [8*4 + CTX]
+ mov f, [8*5 + CTX]
+ mov g, [8*6 + CTX]
+ mov h, [8*7 + CTX]
+
+ vmovdqa BYTE_FLIP_MASK, [rel PSHUFFLE_BYTE_FLIP_MASK]
+
+ lea TBL,[rel K512]
+
+ ;; byte swap first 16 qwords
+ COPY_XMM_AND_BSWAP X0, [INP + 0*16], BYTE_FLIP_MASK
+ COPY_XMM_AND_BSWAP X1, [INP + 1*16], BYTE_FLIP_MASK
+ COPY_XMM_AND_BSWAP X2, [INP + 2*16], BYTE_FLIP_MASK
+ COPY_XMM_AND_BSWAP X3, [INP + 3*16], BYTE_FLIP_MASK
+ COPY_XMM_AND_BSWAP X4, [INP + 4*16], BYTE_FLIP_MASK
+ COPY_XMM_AND_BSWAP X5, [INP + 5*16], BYTE_FLIP_MASK
+ COPY_XMM_AND_BSWAP X6, [INP + 6*16], BYTE_FLIP_MASK
+ COPY_XMM_AND_BSWAP X7, [INP + 7*16], BYTE_FLIP_MASK
+
+ ;; schedule 64 input qwords, by doing 4 iterations of 16 rounds
+ mov SRND, 4
+align 16
+loop1:
+
+%assign i 0
+%rep 7
+ vpaddq XFER, X0, [TBL + i*16]
+ vmovdqa [rsp + _XFER], XFER
+ TWO_ROUNDS_AND_SCHED
+%assign i (i+1)
+%endrep
+
+ vpaddq XFER, X0, [TBL + 7*16]
+ vmovdqa [rsp + _XFER], XFER
+ add TBL, 8*16
+ TWO_ROUNDS_AND_SCHED
+
+ sub SRND, 1
+ jne loop1
+
+ mov SRND, 2
+ jmp loop2a
+loop2:
+ vmovdqa X0, X4
+ vmovdqa X1, X5
+ vmovdqa X2, X6
+ vmovdqa X3, X7
+
+loop2a:
+ vpaddq X0, X0, [TBL + 0*16]
+ vmovdqa [rsp + _XFER], X0
+ DO_ROUND 0
+ DO_ROUND 1
+
+ vpaddq X1, X1, [TBL + 1*16]
+ vmovdqa [rsp + _XFER], X1
+ DO_ROUND 0
+ DO_ROUND 1
+
+ vpaddq X2, X2, [TBL + 2*16]
+ vmovdqa [rsp + _XFER], X2
+ DO_ROUND 0
+ DO_ROUND 1
+
+ vpaddq X3, X3, [TBL + 3*16]
+ vmovdqa [rsp + _XFER], X3
+ add TBL, 4*16
+ DO_ROUND 0
+ DO_ROUND 1
+
+ sub SRND, 1
+ jne loop2
+
+ add [8*0 + CTX], a
+ add [8*1 + CTX], b
+ add [8*2 + CTX], c
+ add [8*3 + CTX], d
+ add [8*4 + CTX], e
+ add [8*5 + CTX], f
+ add [8*6 + CTX], g
+ add [8*7 + CTX], h
+
+done_hash:
+%ifndef LINUX
+ vmovdqa xmm6,[rsp + _XMM_SAVE + 0*16]
+ vmovdqa xmm7,[rsp + _XMM_SAVE + 1*16]
+ vmovdqa xmm8,[rsp + _XMM_SAVE + 2*16]
+ vmovdqa xmm9,[rsp + _XMM_SAVE + 3*16]
+ vmovdqa xmm10,[rsp + _XMM_SAVE + 4*16]
+ vmovdqa xmm11,[rsp + _XMM_SAVE + 5*16]
+ vmovdqa xmm12,[rsp + _XMM_SAVE + 6*16]
+ vmovdqa xmm13,[rsp + _XMM_SAVE + 7*16]
+%ifdef SAFE_DATA
+ ;; Clear potential sensitive data stored in stack
+ vpxor xmm0, xmm0
+ vmovdqa [rsp + _XMM_SAVE + 0 * 16], xmm0
+ vmovdqa [rsp + _XMM_SAVE + 1 * 16], xmm0
+ vmovdqa [rsp + _XMM_SAVE + 2 * 16], xmm0
+ vmovdqa [rsp + _XMM_SAVE + 3 * 16], xmm0
+ vmovdqa [rsp + _XMM_SAVE + 4 * 16], xmm0
+ vmovdqa [rsp + _XMM_SAVE + 5 * 16], xmm0
+ vmovdqa [rsp + _XMM_SAVE + 6 * 16], xmm0
+ vmovdqa [rsp + _XMM_SAVE + 7 * 16], xmm0
+%endif
+%endif ;; LINUX
+
+ add rsp, STACK_size
+
+ pop r15
+ pop r14
+ pop r13
+ pop rbp
+%ifndef LINUX
+ pop rdi
+ pop rsi
+%endif
+ pop rbx
+
+ ret
+
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/avx/sha512_x2_avx.asm b/src/spdk/intel-ipsec-mb/avx/sha512_x2_avx.asm
new file mode 100644
index 000000000..d7d712e2c
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx/sha512_x2_avx.asm
@@ -0,0 +1,381 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+;; code to compute SHA512 by-2 using AVX
+;; outer calling routine takes care of save and restore of XMM registers
+;; Logic designed/laid out by JDG
+
+;; Function clobbers: rax, rcx, rdx, rbx, rsi, rdi, r9-r15; ymm0-15
+;; Stack must be aligned to 16 bytes before call
+;; Windows clobbers: rax rdx r8 r9 r10 r11
+;; Windows preserves: rbx rcx rsi rdi rbp r12 r13 r14 r15
+;;
+;; Linux clobbers: rax rsi r8 r9 r10 r11
+;; Linux preserves: rbx rcx rdx rdi rbp r12 r13 r14 r15
+;;
+;; clobbers xmm0-15
+
+%include "include/os.asm"
+%include "mb_mgr_datastruct.asm"
+extern K512_2
+
+section .data
+default rel
+
+align 32
+; one from sha512_rorx
+; this does the big endian to little endian conversion
+; over a quad word
+PSHUFFLE_BYTE_FLIP_MASK: ;ddq 0x08090a0b0c0d0e0f0001020304050607
+ dq 0x0001020304050607, 0x08090a0b0c0d0e0f
+ ;ddq 0x18191a1b1c1d1e1f1011121314151617
+ dq 0x1011121314151617, 0x18191a1b1c1d1e1f
+
+section .text
+
+%ifdef LINUX ; Linux definitions
+%define arg1 rdi
+%define arg2 rsi
+%else ; Windows definitions
+%define arg1 rcx
+%define arg2 rdx
+%endif
+
+; Common definitions
+%define STATE arg1
+%define INP_SIZE arg2
+
+%define IDX rax
+%define ROUND r8
+%define TBL r11
+
+%define inp0 r9
+%define inp1 r10
+
+%define a xmm0
+%define b xmm1
+%define c xmm2
+%define d xmm3
+%define e xmm4
+%define f xmm5
+%define g xmm6
+%define h xmm7
+
+%define a0 xmm8
+%define a1 xmm9
+%define a2 xmm10
+
+%define TT0 xmm14
+%define TT1 xmm13
+%define TT2 xmm12
+%define TT3 xmm11
+%define TT4 xmm10
+%define TT5 xmm9
+
+%define T1 xmm14
+%define TMP xmm15
+
+
+
+%define SZ2 2*SHA512_DIGEST_WORD_SIZE ; Size of one vector register
+%define ROUNDS 80*SZ2
+
+; Define stack usage
+
+struc STACK
+_DATA: resb SZ2 * 16
+_DIGEST: resb SZ2 * NUM_SHA512_DIGEST_WORDS
+ resb 8 ; for alignment, must be odd multiple of 8
+endstruc
+
+%define VMOVPD vmovupd
+
+; transpose r0, r1, t0
+; Input looks like {r0 r1}
+; r0 = {a1 a0}
+; r1 = {b1 b0}
+;
+; output looks like
+; r0 = {b0, a0}
+; t0 = {b1, a1}
+
+%macro TRANSPOSE 3
+%define %%r0 %1
+%define %%r1 %2
+%define %%t0 %3
+ vshufpd %%t0, %%r0, %%r1, 11b ; t0 = b1 a1
+ vshufpd %%r0, %%r0, %%r1, 00b ; r0 = b0 a0
+%endm
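+
+;; For reference, the lane movement TRANSPOSE performs, spelled out in scalar C
+;; (illustrative sketch only; "lo"/"hi" stand for the low/high 64-bit lanes):
+;;
+;;     /* in:  r0 = {a1, a0}, r1 = {b1, b0}      (hi, lo)
+;;        out: r0 = {b0, a0}, t0 = {b1, a1}                                 */
+;;     t0.lo = r0.hi;  t0.hi = r1.hi;            /* vshufpd ..., 11b */
+;;     r0.hi = r1.lo;  /* r0.lo unchanged */     /* vshufpd ..., 00b */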
+
+
+%macro ROTATE_ARGS 0
+%xdefine TMP_ h
+%xdefine h g
+%xdefine g f
+%xdefine f e
+%xdefine e d
+%xdefine d c
+%xdefine c b
+%xdefine b a
+%xdefine a TMP_
+%endm
+
+; PRORQ reg, imm, tmp
+; packed rotate right on quadwords
+; does a rotate by doing two shifts and an or
+%macro PRORQ 3
+%define %%reg %1
+%define %%imm %2
+%define %%tmp %3
+ vpsllq %%tmp, %%reg, (64-(%%imm))
+ vpsrlq %%reg, %%reg, %%imm
+ vpor %%reg, %%reg, %%tmp
+%endmacro
+
+; non-destructive
+; PRORQ_nd reg, imm, tmp, src
+%macro PRORQ_nd 4
+%define %%reg %1
+%define %%imm %2
+%define %%tmp %3
+%define %%src %4
+ vpsllq %%tmp, %%src, (64-(%%imm))
+ vpsrlq %%reg, %%src, %%imm
+ vpor %%reg, %%reg, %%tmp
+%endmacro
+
+; PRORQ dst/src, amt
+%macro PRORQ 2
+ PRORQ %1, %2, TMP
+%endmacro
+
+; PRORQ_nd dst, src, amt
+%macro PRORQ_nd 3
+ PRORQ_nd %1, %3, TMP, %2
+%endmacro
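+
+;; For reference, the shift/shift/or rotate that PRORQ implements, written with
+;; SSE2 intrinsics (illustrative sketch only; the helper name is hypothetical):
+;;
+;;     #include <emmintrin.h>
+;;
+;;     static inline __m128i prorq(__m128i v, int n)  /* rotate each qword right by n */
+;;     {
+;;         return _mm_or_si128(_mm_srli_epi64(v, n),
+;;                             _mm_slli_epi64(v, 64 - n));
+;;     }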
+
+
+
+;; arguments passed implicitly in preprocessor symbols i, a...h
+%macro ROUND_00_15 2
+%define %%T1 %1
+%define %%i %2
+ PRORQ_nd a0, e, (18-14) ; sig1: a0 = (e >> 4)
+
+ vpxor a2, f, g ; ch: a2 = f^g
+ vpand a2, a2, e ; ch: a2 = (f^g)&e
+ vpxor a2, a2, g ; a2 = ch
+
+ PRORQ_nd a1, e, 41 ; sig1: a1 = (e >> 41)
+ vmovdqa [SZ2*(%%i&0xf) + rsp + _DATA],%%T1
+ vpaddq %%T1,%%T1,[TBL + ROUND] ; T1 = W + K
+	vpxor	a0, a0, e	; sig1: a0 = e ^ (e >> 4)
+ PRORQ a0, 14 ; sig1: a0 = (e >> 14) ^ (e >> 18)
+ vpaddq h, h, a2 ; h = h + ch
+ PRORQ_nd a2, a, (34-28) ; sig0: a2 = (a >> 6)
+ vpaddq h, h, %%T1 ; h = h + ch + W + K
+ vpxor a0, a0, a1 ; a0 = sigma1
+ vmovdqa %%T1, a ; maj: T1 = a
+ PRORQ_nd a1, a, 39 ; sig0: a1 = (a >> 39)
+ vpxor %%T1, %%T1, c ; maj: T1 = a^c
+ add ROUND, SZ2 ; ROUND++
+ vpand %%T1, %%T1, b ; maj: T1 = (a^c)&b
+ vpaddq h, h, a0
+
+ vpaddq d, d, h
+
+	vpxor	a2, a2, a	; sig0: a2 = a ^ (a >> 6)
+ PRORQ a2, 28 ; sig0: a2 = (a >> 28) ^ (a >> 34)
+ vpxor a2, a2, a1 ; a2 = sig0
+ vpand a1, a, c ; maj: a1 = a&c
+ vpor a1, a1, %%T1 ; a1 = maj
+ vpaddq h, h, a1 ; h = h + ch + W + K + maj
+ vpaddq h, h, a2 ; h = h + ch + W + K + maj + sigma0
+ ROTATE_ARGS
+
+%endm
+
+
+;; arguments passed implicitly in preprocessor symbols i, a...h
+%macro ROUND_16_XX 2
+%define %%T1 %1
+%define %%i %2
+ vmovdqa %%T1, [SZ2*((%%i-15)&0xf) + rsp + _DATA]
+ vmovdqa a1, [SZ2*((%%i-2)&0xf) + rsp + _DATA]
+ vmovdqa a0, %%T1
+ PRORQ %%T1, 8-1
+ vmovdqa a2, a1
+ PRORQ a1, 61-19
+ vpxor %%T1, %%T1, a0
+ PRORQ %%T1, 1
+ vpxor a1, a1, a2
+ PRORQ a1, 19
+ vpsrlq a0, a0, 7
+ vpxor %%T1, %%T1, a0
+ vpsrlq a2, a2, 6
+ vpxor a1, a1, a2
+ vpaddq %%T1, %%T1, [SZ2*((%%i-16)&0xf) + rsp + _DATA]
+ vpaddq a1, a1, [SZ2*((%%i-7)&0xf) + rsp + _DATA]
+ vpaddq %%T1, %%T1, a1
+
+ ROUND_00_15 %%T1, %%i
+
+%endm
+
+
+
+;; SHA512_ARGS:
+;; UINT128 digest[8]; // transposed digests
+;; UINT8 *data_ptr[2];
+;;
+
+;; void sha512_x2_avx(SHA512_ARGS *args, UINT64 msg_size_in_blocks)
+;; arg 1 : STATE : pointer args
+;; arg 2 : INP_SIZE : size of data in blocks (assumed >= 1)
+;;
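+;; For reference, what "transposed digests" means here (sketch only; the actual
+;; offsets come from SHA512_DIGEST_ROW_SIZE and _data_ptr_sha512 in
+;; mb_mgr_datastruct.asm):
+;;
+;;     UINT128 digest[8];    /* row i: low 64 bits  = lane 0 digest word i,
+;;                                     high 64 bits = lane 1 digest word i   */
+;;     UINT8  *data_ptr[2];  /* per-lane pointer to the next message block   */
+;;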
+MKGLOBAL(sha512_x2_avx,function,internal)
+align 32
+sha512_x2_avx:
+ ; general registers preserved in outer calling routine
+ ; outer calling routine saves all the XMM registers
+
+ sub rsp, STACK_size
+
+ ;; Load the pre-transposed incoming digest.
+ vmovdqa a,[STATE + 0 * SHA512_DIGEST_ROW_SIZE]
+ vmovdqa b,[STATE + 1 * SHA512_DIGEST_ROW_SIZE]
+ vmovdqa c,[STATE + 2 * SHA512_DIGEST_ROW_SIZE]
+ vmovdqa d,[STATE + 3 * SHA512_DIGEST_ROW_SIZE]
+ vmovdqa e,[STATE + 4 * SHA512_DIGEST_ROW_SIZE]
+ vmovdqa f,[STATE + 5 * SHA512_DIGEST_ROW_SIZE]
+ vmovdqa g,[STATE + 6 * SHA512_DIGEST_ROW_SIZE]
+ vmovdqa h,[STATE + 7 * SHA512_DIGEST_ROW_SIZE]
+
+ lea TBL,[rel K512_2]
+
+ ;; load the address of each of the 2 message lanes
+ ;; getting ready to transpose input onto stack
+ mov inp0,[STATE + _data_ptr_sha512 +0*PTR_SZ]
+ mov inp1,[STATE + _data_ptr_sha512 +1*PTR_SZ]
+
+ xor IDX, IDX
+lloop:
+
+ xor ROUND, ROUND
+
+ ;; save old digest
+ vmovdqa [rsp + _DIGEST + 0*SZ2], a
+ vmovdqa [rsp + _DIGEST + 1*SZ2], b
+ vmovdqa [rsp + _DIGEST + 2*SZ2], c
+ vmovdqa [rsp + _DIGEST + 3*SZ2], d
+ vmovdqa [rsp + _DIGEST + 4*SZ2], e
+ vmovdqa [rsp + _DIGEST + 5*SZ2], f
+ vmovdqa [rsp + _DIGEST + 6*SZ2], g
+ vmovdqa [rsp + _DIGEST + 7*SZ2], h
+
+%assign i 0
+%rep 8
+ ;; load up the shuffler for little-endian to big-endian format
+ vmovdqa TMP, [rel PSHUFFLE_BYTE_FLIP_MASK]
+ VMOVPD TT0,[inp0+IDX+i*16] ;; double precision is 64 bits
+ VMOVPD TT2,[inp1+IDX+i*16]
+
+ TRANSPOSE TT0, TT2, TT1
+ vpshufb TT0, TT0, TMP
+ vpshufb TT1, TT1, TMP
+
+ ROUND_00_15 TT0,(i*2+0)
+ ROUND_00_15 TT1,(i*2+1)
+%assign i (i+1)
+%endrep
+
+;; Increment IDX by message block size == 8 (loop) * 16 (XMM width in bytes)
+ add IDX, 8 * 16
+
+%assign i (i*4)
+
+ jmp Lrounds_16_xx
+align 16
+Lrounds_16_xx:
+%rep 16
+ ROUND_16_XX T1, i
+%assign i (i+1)
+%endrep
+
+ cmp ROUND,ROUNDS
+ jb Lrounds_16_xx
+
+ ;; add old digest
+ vpaddq a, a, [rsp + _DIGEST + 0*SZ2]
+ vpaddq b, b, [rsp + _DIGEST + 1*SZ2]
+ vpaddq c, c, [rsp + _DIGEST + 2*SZ2]
+ vpaddq d, d, [rsp + _DIGEST + 3*SZ2]
+ vpaddq e, e, [rsp + _DIGEST + 4*SZ2]
+ vpaddq f, f, [rsp + _DIGEST + 5*SZ2]
+ vpaddq g, g, [rsp + _DIGEST + 6*SZ2]
+ vpaddq h, h, [rsp + _DIGEST + 7*SZ2]
+
+ sub INP_SIZE, 1 ;; consumed one message block
+ jne lloop
+
+ ; write back to memory (state object) the transposed digest
+ vmovdqa [STATE+0*SHA512_DIGEST_ROW_SIZE],a
+ vmovdqa [STATE+1*SHA512_DIGEST_ROW_SIZE],b
+ vmovdqa [STATE+2*SHA512_DIGEST_ROW_SIZE],c
+ vmovdqa [STATE+3*SHA512_DIGEST_ROW_SIZE],d
+ vmovdqa [STATE+4*SHA512_DIGEST_ROW_SIZE],e
+ vmovdqa [STATE+5*SHA512_DIGEST_ROW_SIZE],f
+ vmovdqa [STATE+6*SHA512_DIGEST_ROW_SIZE],g
+ vmovdqa [STATE+7*SHA512_DIGEST_ROW_SIZE],h
+
+ ; update input pointers
+ add inp0, IDX
+ mov [STATE + _data_ptr_sha512 + 0*PTR_SZ], inp0
+ add inp1, IDX
+ mov [STATE + _data_ptr_sha512 + 1*PTR_SZ], inp1
+
+ ;;;;;;;;;;;;;;;;
+ ;; Postamble
+
+ ;; Clear stack frame ((16 + 8)*16 bytes)
+%ifdef SAFE_DATA
+ vpxor xmm0, xmm0
+%assign i 0
+%rep (16+NUM_SHA512_DIGEST_WORDS)
+ vmovdqa [rsp + i*SZ2], xmm0
+%assign i (i+1)
+%endrep
+%endif
+
+ add rsp, STACK_size
+
+ ; outer calling routine restores XMM and other GP registers
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/avx/sha_256_mult_avx.asm b/src/spdk/intel-ipsec-mb/avx/sha_256_mult_avx.asm
new file mode 100644
index 000000000..c1895a3f5
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx/sha_256_mult_avx.asm
@@ -0,0 +1,391 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+;; code to compute quad SHA256 using AVX
+;; outer calling routine takes care of save and restore of XMM registers
+;; Logic designed/laid out by JDG
+
+;; Stack must be aligned to 16 bytes before call
+;; Windows clobbers: rax rbx rdx r8 r9 r10 r11 r12
+;; Windows preserves: rcx rsi rdi rbp r13 r14 r15
+;;
+;; Linux clobbers: rax rbx rsi r8 r9 r10 r11 r12
+;; Linux preserves: rcx rdx rdi rbp r13 r14 r15
+;;
+;; clobbers xmm0-15
+
+%include "include/os.asm"
+%include "mb_mgr_datastruct.asm"
+
+extern K256_4
+
+%ifdef LINUX
+ %define arg1 rdi
+ %define arg2 rsi
+%else
+ ; Windows definitions
+ %define arg1 rcx
+ %define arg2 rdx
+%endif
+
+; Common definitions
+%define STATE arg1
+%define INP_SIZE arg2
+
+%define IDX rax
+%define ROUND rbx
+%define TBL r12
+
+%define inp0 r8
+%define inp1 r9
+%define inp2 r10
+%define inp3 r11
+
+%define a xmm0
+%define b xmm1
+%define c xmm2
+%define d xmm3
+%define e xmm4
+%define f xmm5
+%define g xmm6
+%define h xmm7
+
+%define a0 xmm8
+%define a1 xmm9
+%define a2 xmm10
+
+%define TT0 xmm14
+%define TT1 xmm13
+%define TT2 xmm12
+%define TT3 xmm11
+%define TT4 xmm10
+%define TT5 xmm9
+
+%define T1 xmm14
+%define TMP xmm15
+
+%define SZ4 4*SHA256_DIGEST_WORD_SIZE ; Size of one vector register
+%define ROUNDS 64*SZ4
+
+; Define stack usage
+struc STACK
+_DATA: resb SZ4 * 16
+_DIGEST: resb SZ4 * NUM_SHA256_DIGEST_WORDS
+ resb 8 ; for alignment, must be odd multiple of 8
+endstruc
+
+%define VMOVPS vmovups
+
+; transpose r0, r1, r2, r3, t0, t1
+; "transpose" data in {r0..r3} using temps {t0..t3}
+; Input looks like: {r0 r1 r2 r3}
+; r0 = {a3 a2 a1 a0}
+; r1 = {b3 b2 b1 b0}
+; r2 = {c3 c2 c1 c0}
+; r3 = {d3 d2 d1 d0}
+;
+; output looks like: {t0 r1 r0 r3}
+; t0 = {d0 c0 b0 a0}
+; r1 = {d1 c1 b1 a1}
+; r0 = {d2 c2 b2 a2}
+; r3 = {d3 c3 b3 a3}
+;
+%macro TRANSPOSE 6
+%define %%r0 %1
+%define %%r1 %2
+%define %%r2 %3
+%define %%r3 %4
+%define %%t0 %5
+%define %%t1 %6
+ vshufps %%t0, %%r0, %%r1, 0x44 ; t0 = {b1 b0 a1 a0}
+ vshufps %%r0, %%r0, %%r1, 0xEE ; r0 = {b3 b2 a3 a2}
+
+ vshufps %%t1, %%r2, %%r3, 0x44 ; t1 = {d1 d0 c1 c0}
+ vshufps %%r2, %%r2, %%r3, 0xEE ; r2 = {d3 d2 c3 c2}
+
+ vshufps %%r1, %%t0, %%t1, 0xDD ; r1 = {d1 c1 b1 a1}
+
+ vshufps %%r3, %%r0, %%r2, 0xDD ; r3 = {d3 c3 b3 a3}
+
+ vshufps %%r0, %%r0, %%r2, 0x88 ; r0 = {d2 c2 b2 a2}
+ vshufps %%t0, %%t0, %%t1, 0x88 ; t0 = {d0 c0 b0 a0}
+%endmacro
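+
+;; For reference, the net effect of the 4x4 dword transpose in plain C
+;; (illustrative sketch only): word w of every lane is gathered into one row so
+;; a single vector op performs the same round step for all 4 lanes.
+;;
+;;     /* in[lane][word] -> out[word][lane] */
+;;     for (int w = 0; w < 4; w++)
+;;         for (int lane = 0; lane < 4; lane++)
+;;             out[w][lane] = in[lane][w];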
+
+
+
+%macro ROTATE_ARGS 0
+%xdefine TMP_ h
+%xdefine h g
+%xdefine g f
+%xdefine f e
+%xdefine e d
+%xdefine d c
+%xdefine c b
+%xdefine b a
+%xdefine a TMP_
+%endm
+
+; PRORD reg, imm, tmp
+%macro PRORD 3
+%define %%reg %1
+%define %%imm %2
+%define %%tmp %3
+ vpslld %%tmp, %%reg, (32-(%%imm))
+ vpsrld %%reg, %%reg, %%imm
+ vpor %%reg, %%reg, %%tmp
+%endmacro
+
+; non-destructive
+; PRORD_nd reg, imm, tmp, src
+%macro PRORD_nd 4
+%define %%reg %1
+%define %%imm %2
+%define %%tmp %3
+%define %%src %4
+ ;vmovdqa %%tmp, %%reg
+ vpslld %%tmp, %%src, (32-(%%imm))
+ vpsrld %%reg, %%src, %%imm
+ vpor %%reg, %%reg, %%tmp
+%endmacro
+
+; PRORD dst/src, amt
+%macro PRORD 2
+ PRORD %1, %2, TMP
+%endmacro
+
+; PRORD_nd dst, src, amt
+%macro PRORD_nd 3
+ PRORD_nd %1, %3, TMP, %2
+%endmacro
+
+;; arguments passed implicitly in preprocessor symbols i, a...h
+%macro ROUND_00_15 2
+%define %%T1 %1
+%define %%i %2
+ PRORD_nd a0, e, (11-6) ; sig1: a0 = (e >> 5)
+
+ vpxor a2, f, g ; ch: a2 = f^g
+ vpand a2, a2, e ; ch: a2 = (f^g)&e
+ vpxor a2, a2, g ; a2 = ch
+
+ PRORD_nd a1, e, 25 ; sig1: a1 = (e >> 25)
+ vmovdqa [SZ4*(%%i&0xf) + rsp + _DATA], %%T1
+ vpaddd %%T1, %%T1, [TBL + ROUND] ; T1 = W + K
+ vpxor a0, a0, e ; sig1: a0 = e ^ (e >> 5)
+ PRORD a0, 6 ; sig1: a0 = (e >> 6) ^ (e >> 11)
+ vpaddd h, h, a2 ; h = h + ch
+ PRORD_nd a2, a, (13-2) ; sig0: a2 = (a >> 11)
+ vpaddd h, h, %%T1 ; h = h + ch + W + K
+ vpxor a0, a0, a1 ; a0 = sigma1
+ PRORD_nd a1, a, 22 ; sig0: a1 = (a >> 22)
+ vpxor %%T1, a, c ; maj: T1 = a^c
+ add ROUND, SZ4 ; ROUND++
+ vpand %%T1, %%T1, b ; maj: T1 = (a^c)&b
+ vpaddd h, h, a0
+
+ vpaddd d, d, h
+
+ vpxor a2, a2, a ; sig0: a2 = a ^ (a >> 11)
+ PRORD a2, 2 ; sig0: a2 = (a >> 2) ^ (a >> 13)
+ vpxor a2, a2, a1 ; a2 = sig0
+ vpand a1, a, c ; maj: a1 = a&c
+ vpor a1, a1, %%T1 ; a1 = maj
+ vpaddd h, h, a1 ; h = h + ch + W + K + maj
+ vpaddd h, h, a2 ; h = h + ch + W + K + maj + sigma0
+
+ ROTATE_ARGS
+%endm
+
+
+;; arguments passed implicitly in preprocessor symbols i, a...h
+%macro ROUND_16_XX 2
+%define %%T1 %1
+%define %%i %2
+ vmovdqa %%T1, [SZ4*((%%i-15)&0xf) + rsp + _DATA]
+ vmovdqa a1, [SZ4*((%%i-2)&0xf) + rsp + _DATA]
+ vmovdqa a0, %%T1
+ PRORD %%T1, 18-7
+ vmovdqa a2, a1
+ PRORD a1, 19-17
+ vpxor %%T1, %%T1, a0
+ PRORD %%T1, 7
+ vpxor a1, a1, a2
+ PRORD a1, 17
+ vpsrld a0, a0, 3
+ vpxor %%T1, %%T1, a0
+ vpsrld a2, a2, 10
+ vpxor a1, a1, a2
+ vpaddd %%T1, %%T1, [SZ4*((%%i-16)&0xf) + rsp + _DATA]
+ vpaddd a1, a1, [SZ4*((%%i-7)&0xf) + rsp + _DATA]
+ vpaddd %%T1, %%T1, a1
+
+ ROUND_00_15 %%T1, %%i
+%endm
+
+section .data
+default rel
+align 16
+PSHUFFLE_BYTE_FLIP_MASK: ;ddq 0x0c0d0e0f08090a0b0405060700010203
+ dq 0x0405060700010203, 0x0c0d0e0f08090a0b
+
+section .text
+
+;; SHA256_ARGS:
+;; UINT128 digest[8]; // transposed digests
+;; UINT8 *data_ptr[4];
+;;
+
+;; void sha_256_mult_avx(SHA256_ARGS *args, UINT64 num_blocks);
+;; arg 1 : STATE : pointer args
+;; arg 2 : INP_SIZE : size of data in blocks (assumed >= 1)
+;;
+MKGLOBAL(sha_256_mult_avx,function,internal)
+align 16
+sha_256_mult_avx:
+ ; general registers preserved in outer calling routine
+ ; outer calling routine saves all the XMM registers
+ sub rsp, STACK_size
+
+ ;; Load the pre-transposed incoming digest.
+ vmovdqa a,[STATE+0*SHA256_DIGEST_ROW_SIZE]
+ vmovdqa b,[STATE+1*SHA256_DIGEST_ROW_SIZE]
+ vmovdqa c,[STATE+2*SHA256_DIGEST_ROW_SIZE]
+ vmovdqa d,[STATE+3*SHA256_DIGEST_ROW_SIZE]
+ vmovdqa e,[STATE+4*SHA256_DIGEST_ROW_SIZE]
+ vmovdqa f,[STATE+5*SHA256_DIGEST_ROW_SIZE]
+ vmovdqa g,[STATE+6*SHA256_DIGEST_ROW_SIZE]
+ vmovdqa h,[STATE+7*SHA256_DIGEST_ROW_SIZE]
+
+ lea TBL,[rel K256_4]
+
+ ;; load the address of each of the 4 message lanes
+ ;; getting ready to transpose input onto stack
+ mov inp0,[STATE + _data_ptr_sha256 + 0*PTR_SZ]
+ mov inp1,[STATE + _data_ptr_sha256 + 1*PTR_SZ]
+ mov inp2,[STATE + _data_ptr_sha256 + 2*PTR_SZ]
+ mov inp3,[STATE + _data_ptr_sha256 + 3*PTR_SZ]
+
+ xor IDX, IDX
+lloop:
+ xor ROUND, ROUND
+
+ ;; save old digest
+ vmovdqa [rsp + _DIGEST + 0*SZ4], a
+ vmovdqa [rsp + _DIGEST + 1*SZ4], b
+ vmovdqa [rsp + _DIGEST + 2*SZ4], c
+ vmovdqa [rsp + _DIGEST + 3*SZ4], d
+ vmovdqa [rsp + _DIGEST + 4*SZ4], e
+ vmovdqa [rsp + _DIGEST + 5*SZ4], f
+ vmovdqa [rsp + _DIGEST + 6*SZ4], g
+ vmovdqa [rsp + _DIGEST + 7*SZ4], h
+
+%assign i 0
+%rep 4
+ vmovdqa TMP, [rel PSHUFFLE_BYTE_FLIP_MASK]
+ VMOVPS TT2,[inp0+IDX+i*16]
+ VMOVPS TT1,[inp1+IDX+i*16]
+ VMOVPS TT4,[inp2+IDX+i*16]
+ VMOVPS TT3,[inp3+IDX+i*16]
+ TRANSPOSE TT2, TT1, TT4, TT3, TT0, TT5
+ vpshufb TT0, TT0, TMP
+ vpshufb TT1, TT1, TMP
+ vpshufb TT2, TT2, TMP
+ vpshufb TT3, TT3, TMP
+ ROUND_00_15 TT0,(i*4+0)
+ ROUND_00_15 TT1,(i*4+1)
+ ROUND_00_15 TT2,(i*4+2)
+ ROUND_00_15 TT3,(i*4+3)
+%assign i (i+1)
+%endrep
+ add IDX, 4*4*4
+
+%assign i (i*4)
+
+ jmp Lrounds_16_xx
+align 16
+Lrounds_16_xx:
+%rep 16
+ ROUND_16_XX T1, i
+%assign i (i+1)
+%endrep
+
+ cmp ROUND,ROUNDS
+ jb Lrounds_16_xx
+
+ ;; add old digest
+ vpaddd a, a, [rsp + _DIGEST + 0*SZ4]
+ vpaddd b, b, [rsp + _DIGEST + 1*SZ4]
+ vpaddd c, c, [rsp + _DIGEST + 2*SZ4]
+ vpaddd d, d, [rsp + _DIGEST + 3*SZ4]
+ vpaddd e, e, [rsp + _DIGEST + 4*SZ4]
+ vpaddd f, f, [rsp + _DIGEST + 5*SZ4]
+ vpaddd g, g, [rsp + _DIGEST + 6*SZ4]
+ vpaddd h, h, [rsp + _DIGEST + 7*SZ4]
+
+ sub INP_SIZE, 1 ;; unit is blocks
+ jne lloop
+
+ ; write back to memory (state object) the transposed digest
+ vmovdqa [STATE+0*SHA256_DIGEST_ROW_SIZE],a
+ vmovdqa [STATE+1*SHA256_DIGEST_ROW_SIZE],b
+ vmovdqa [STATE+2*SHA256_DIGEST_ROW_SIZE],c
+ vmovdqa [STATE+3*SHA256_DIGEST_ROW_SIZE],d
+ vmovdqa [STATE+4*SHA256_DIGEST_ROW_SIZE],e
+ vmovdqa [STATE+5*SHA256_DIGEST_ROW_SIZE],f
+ vmovdqa [STATE+6*SHA256_DIGEST_ROW_SIZE],g
+ vmovdqa [STATE+7*SHA256_DIGEST_ROW_SIZE],h
+
+ ; update input pointers
+ add inp0, IDX
+ mov [STATE + _data_ptr_sha256 + 0*8], inp0
+ add inp1, IDX
+ mov [STATE + _data_ptr_sha256 + 1*8], inp1
+ add inp2, IDX
+ mov [STATE + _data_ptr_sha256 + 2*8], inp2
+ add inp3, IDX
+ mov [STATE + _data_ptr_sha256 + 3*8], inp3
+
+ ;;;;;;;;;;;;;;;;
+ ;; Postamble
+
+%ifdef SAFE_DATA
+ ;; Clear stack frame ((16 + 8)*16 bytes)
+ vpxor xmm0, xmm0
+%assign i 0
+%rep (16+NUM_SHA256_DIGEST_WORDS)
+ vmovdqa [rsp + i*SZ4], xmm0
+%assign i (i+1)
+%endrep
+%endif
+
+ add rsp, STACK_size
+ ; outer calling routine restores XMM and other GP registers
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/avx/snow3g_avx.c b/src/spdk/intel-ipsec-mb/avx/snow3g_avx.c
new file mode 100644
index 000000000..8c6995fb8
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx/snow3g_avx.c
@@ -0,0 +1,42 @@
+/*******************************************************************************
+ Copyright (c) 2019, Intel Corporation
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*******************************************************************************/
+
+#define AVX
+#define SNOW3G_F8_1_BUFFER_BIT snow3g_f8_1_buffer_bit_avx
+#define SNOW3G_F8_1_BUFFER snow3g_f8_1_buffer_avx
+#define SNOW3G_F8_2_BUFFER snow3g_f8_2_buffer_avx
+#define SNOW3G_F8_4_BUFFER snow3g_f8_4_buffer_avx
+#define SNOW3G_F8_8_BUFFER snow3g_f8_8_buffer_avx
+#define SNOW3G_F8_N_BUFFER snow3g_f8_n_buffer_avx
+#define SNOW3G_F8_8_BUFFER_MULTIKEY snow3g_f8_8_buffer_multikey_avx
+#define SNOW3G_F8_N_BUFFER_MULTIKEY snow3g_f8_n_buffer_multikey_avx
+#define SNOW3G_F9_1_BUFFER snow3g_f9_1_buffer_avx
+#define SNOW3G_INIT_KEY_SCHED snow3g_init_key_sched_avx
+#define SNOW3G_KEY_SCHED_SIZE snow3g_key_sched_size_avx
+#define CLEAR_SCRATCH_SIMD_REGS clear_scratch_xmms_avx
+
+#include "include/snow3g_common.h"
diff --git a/src/spdk/intel-ipsec-mb/avx/zuc_avx.asm b/src/spdk/intel-ipsec-mb/avx/zuc_avx.asm
new file mode 100755
index 000000000..e7c6bad8a
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx/zuc_avx.asm
@@ -0,0 +1,1146 @@
+;;
+;; Copyright (c) 2009-2019, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%include "include/os.asm"
+%include "include/reg_sizes.asm"
+
+extern lookup_8bit_avx
+
+section .data
+default rel
+align 64
+S0:
+db 0x3e,0x72,0x5b,0x47,0xca,0xe0,0x00,0x33,0x04,0xd1,0x54,0x98,0x09,0xb9,0x6d,0xcb
+db 0x7b,0x1b,0xf9,0x32,0xaf,0x9d,0x6a,0xa5,0xb8,0x2d,0xfc,0x1d,0x08,0x53,0x03,0x90
+db 0x4d,0x4e,0x84,0x99,0xe4,0xce,0xd9,0x91,0xdd,0xb6,0x85,0x48,0x8b,0x29,0x6e,0xac
+db 0xcd,0xc1,0xf8,0x1e,0x73,0x43,0x69,0xc6,0xb5,0xbd,0xfd,0x39,0x63,0x20,0xd4,0x38
+db 0x76,0x7d,0xb2,0xa7,0xcf,0xed,0x57,0xc5,0xf3,0x2c,0xbb,0x14,0x21,0x06,0x55,0x9b
+db 0xe3,0xef,0x5e,0x31,0x4f,0x7f,0x5a,0xa4,0x0d,0x82,0x51,0x49,0x5f,0xba,0x58,0x1c
+db 0x4a,0x16,0xd5,0x17,0xa8,0x92,0x24,0x1f,0x8c,0xff,0xd8,0xae,0x2e,0x01,0xd3,0xad
+db 0x3b,0x4b,0xda,0x46,0xeb,0xc9,0xde,0x9a,0x8f,0x87,0xd7,0x3a,0x80,0x6f,0x2f,0xc8
+db 0xb1,0xb4,0x37,0xf7,0x0a,0x22,0x13,0x28,0x7c,0xcc,0x3c,0x89,0xc7,0xc3,0x96,0x56
+db 0x07,0xbf,0x7e,0xf0,0x0b,0x2b,0x97,0x52,0x35,0x41,0x79,0x61,0xa6,0x4c,0x10,0xfe
+db 0xbc,0x26,0x95,0x88,0x8a,0xb0,0xa3,0xfb,0xc0,0x18,0x94,0xf2,0xe1,0xe5,0xe9,0x5d
+db 0xd0,0xdc,0x11,0x66,0x64,0x5c,0xec,0x59,0x42,0x75,0x12,0xf5,0x74,0x9c,0xaa,0x23
+db 0x0e,0x86,0xab,0xbe,0x2a,0x02,0xe7,0x67,0xe6,0x44,0xa2,0x6c,0xc2,0x93,0x9f,0xf1
+db 0xf6,0xfa,0x36,0xd2,0x50,0x68,0x9e,0x62,0x71,0x15,0x3d,0xd6,0x40,0xc4,0xe2,0x0f
+db 0x8e,0x83,0x77,0x6b,0x25,0x05,0x3f,0x0c,0x30,0xea,0x70,0xb7,0xa1,0xe8,0xa9,0x65
+db 0x8d,0x27,0x1a,0xdb,0x81,0xb3,0xa0,0xf4,0x45,0x7a,0x19,0xdf,0xee,0x78,0x34,0x60
+
+S1:
+db 0x55,0xc2,0x63,0x71,0x3b,0xc8,0x47,0x86,0x9f,0x3c,0xda,0x5b,0x29,0xaa,0xfd,0x77
+db 0x8c,0xc5,0x94,0x0c,0xa6,0x1a,0x13,0x00,0xe3,0xa8,0x16,0x72,0x40,0xf9,0xf8,0x42
+db 0x44,0x26,0x68,0x96,0x81,0xd9,0x45,0x3e,0x10,0x76,0xc6,0xa7,0x8b,0x39,0x43,0xe1
+db 0x3a,0xb5,0x56,0x2a,0xc0,0x6d,0xb3,0x05,0x22,0x66,0xbf,0xdc,0x0b,0xfa,0x62,0x48
+db 0xdd,0x20,0x11,0x06,0x36,0xc9,0xc1,0xcf,0xf6,0x27,0x52,0xbb,0x69,0xf5,0xd4,0x87
+db 0x7f,0x84,0x4c,0xd2,0x9c,0x57,0xa4,0xbc,0x4f,0x9a,0xdf,0xfe,0xd6,0x8d,0x7a,0xeb
+db 0x2b,0x53,0xd8,0x5c,0xa1,0x14,0x17,0xfb,0x23,0xd5,0x7d,0x30,0x67,0x73,0x08,0x09
+db 0xee,0xb7,0x70,0x3f,0x61,0xb2,0x19,0x8e,0x4e,0xe5,0x4b,0x93,0x8f,0x5d,0xdb,0xa9
+db 0xad,0xf1,0xae,0x2e,0xcb,0x0d,0xfc,0xf4,0x2d,0x46,0x6e,0x1d,0x97,0xe8,0xd1,0xe9
+db 0x4d,0x37,0xa5,0x75,0x5e,0x83,0x9e,0xab,0x82,0x9d,0xb9,0x1c,0xe0,0xcd,0x49,0x89
+db 0x01,0xb6,0xbd,0x58,0x24,0xa2,0x5f,0x38,0x78,0x99,0x15,0x90,0x50,0xb8,0x95,0xe4
+db 0xd0,0x91,0xc7,0xce,0xed,0x0f,0xb4,0x6f,0xa0,0xcc,0xf0,0x02,0x4a,0x79,0xc3,0xde
+db 0xa3,0xef,0xea,0x51,0xe6,0x6b,0x18,0xec,0x1b,0x2c,0x80,0xf7,0x74,0xe7,0xff,0x21
+db 0x5a,0x6a,0x54,0x1e,0x41,0x31,0x92,0x35,0xc4,0x33,0x07,0x0a,0xba,0x7e,0x0e,0x34
+db 0x88,0xb1,0x98,0x7c,0xf3,0x3d,0x60,0x6c,0x7b,0xca,0xd3,0x1f,0x32,0x65,0x04,0x28
+db 0x64,0xbe,0x85,0x9b,0x2f,0x59,0x8a,0xd7,0xb0,0x25,0xac,0xaf,0x12,0x03,0xe2,0xf2
+
+EK_d:
+dw 0x44D7, 0x26BC, 0x626B, 0x135E, 0x5789, 0x35E2, 0x7135, 0x09AF
+dw 0x4D78, 0x2F13, 0x6BC4, 0x1AF1, 0x5E26, 0x3C4D, 0x789A, 0x47AC
+
+mask31:
+dd 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF
+
+align 16
+bit_reverse_table_l:
+db 0x00, 0x08, 0x04, 0x0c, 0x02, 0x0a, 0x06, 0x0e, 0x01, 0x09, 0x05, 0x0d, 0x03, 0x0b, 0x07, 0x0f
+
+align 16
+bit_reverse_table_h:
+db 0x00, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, 0x10, 0x90, 0x50, 0xd0, 0x30, 0xb0, 0x70, 0xf0
+
+align 16
+bit_reverse_and_table:
+db 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f
+
+align 16
+data_mask_64bits:
+dd 0xffffffff, 0xffffffff, 0x00000000, 0x00000000
+
+bit_mask_table:
+db 0x00, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe
+
+
+section .text
+align 64
+
+%define OFFSET_FR1 (16*4)
+%define OFFSET_FR2 (17*4)
+%define OFFSET_BRC_X0 (18*4)
+%define OFFSET_BRC_X1 (19*4)
+%define OFFSET_BRC_X2 (20*4)
+%define OFFSET_BRC_X3 (21*4)
+
+%define MASK31 xmm12
+
+%define OFS_R1 (16*(4*4))
+%define OFS_R2 (OFS_R1 + (4*4))
+%define OFS_X0 (OFS_R2 + (4*4))
+%define OFS_X1 (OFS_X0 + (4*4))
+%define OFS_X2 (OFS_X1 + (4*4))
+%define OFS_X3 (OFS_X2 + (4*4))
+
+%ifidn __OUTPUT_FORMAT__, win64
+ %define XMM_STORAGE 16*10
+%else
+ %define XMM_STORAGE 0
+%endif
+
+%define VARIABLE_OFFSET XMM_STORAGE
+
+%macro FUNC_SAVE 0
+ push r12
+ push r13
+ push r14
+ push r15
+%ifidn __OUTPUT_FORMAT__, win64
+ push rdi
+ push rsi
+%endif
+ mov r14, rsp
+
+ sub rsp, VARIABLE_OFFSET
+ and rsp, ~63
+
+%ifidn __OUTPUT_FORMAT__, win64
+ ; xmm6:xmm15 need to be maintained for Windows
+ vmovdqu [rsp + 0*16],xmm6
+ vmovdqu [rsp + 1*16],xmm7
+ vmovdqu [rsp + 2*16],xmm8
+ vmovdqu [rsp + 3*16],xmm9
+ vmovdqu [rsp + 4*16],xmm10
+ vmovdqu [rsp + 5*16],xmm11
+ vmovdqu [rsp + 6*16],xmm12
+ vmovdqu [rsp + 7*16],xmm13
+ vmovdqu [rsp + 8*16],xmm14
+ vmovdqu [rsp + 9*16],xmm15
+%endif
+%endmacro
+
+
+%macro FUNC_RESTORE 0
+
+%ifidn __OUTPUT_FORMAT__, win64
+ vmovdqu xmm15, [rsp + 9*16]
+ vmovdqu xmm14, [rsp + 8*16]
+ vmovdqu xmm13, [rsp + 7*16]
+ vmovdqu xmm12, [rsp + 6*16]
+ vmovdqu xmm11, [rsp + 5*16]
+ vmovdqu xmm10, [rsp + 4*16]
+ vmovdqu xmm9, [rsp + 3*16]
+ vmovdqu xmm8, [rsp + 2*16]
+ vmovdqu xmm7, [rsp + 1*16]
+ vmovdqu xmm6, [rsp + 0*16]
+%endif
+ mov rsp, r14
+%ifidn __OUTPUT_FORMAT__, win64
+ pop rsi
+ pop rdi
+%endif
+ pop r15
+ pop r14
+ pop r13
+ pop r12
+%endmacro
+
+
+;;
+;; make_u31()
+;;
+%macro make_u31 4
+
+%define %%Rt %1
+%define %%Ke %2
+%define %%Ek %3
+%define %%Iv %4
+ xor %%Rt, %%Rt
+ shrd %%Rt, %%Iv, 8
+ shrd %%Rt, %%Ek, 15
+ shrd %%Rt, %%Ke, 9
+%endmacro
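+
+;; For reference only: a scalar C sketch of the same packing (assuming the
+;; standard ZUC MAKEU31 layout and <stdint.h> types); the three SHRD
+;; instructions above assemble the identical 31-bit word:
+;;
+;;     uint32_t make_u31(uint8_t k, uint16_t d, uint8_t iv)
+;;     {
+;;             /* key byte | 15-bit D constant | IV byte */
+;;             return ((uint32_t)k << 23) | ((uint32_t)(d & 0x7FFF) << 8) | iv;
+;;     }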
+
+
+;
+; bits_reorg4()
+;
+; params
+; %1 - round number
+; rax - LFSR pointer
+; uses
+;
+; return
+;
+%macro bits_reorg4 1
+ ;
+ ; xmm15 = LFSR_S15
+ ; xmm14 = LFSR_S14
+ ; xmm11 = LFSR_S11
+ ; xmm9 = LFSR_S9
+ ; xmm7 = LFSR_S7
+ ; xmm5 = LFSR_S5
+ ; xmm2 = LFSR_S2
+ ; xmm0 = LFSR_S0
+ ;
+ vmovdqa xmm15, [rax + ((15 + %1) % 16)*16]
+ vmovdqa xmm14, [rax + ((14 + %1) % 16)*16]
+ vmovdqa xmm11, [rax + ((11 + %1) % 16)*16]
+ vmovdqa xmm9, [rax + (( 9 + %1) % 16)*16]
+ vmovdqa xmm7, [rax + (( 7 + %1) % 16)*16]
+ vmovdqa xmm5, [rax + (( 5 + %1) % 16)*16]
+ vmovdqa xmm2, [rax + (( 2 + %1) % 16)*16]
+ vmovdqa xmm0, [rax + (( 0 + %1) % 16)*16]
+
+ vpxor xmm1, xmm1
+ vpslld xmm15, 1
+ vpblendw xmm3, xmm14, xmm1, 0xAA
+ vpblendw xmm15, xmm3, xmm15, 0xAA
+
+ vmovdqa [rax + OFS_X0], xmm15 ; BRC_X0
+ vpslld xmm11, 16
+ vpsrld xmm9, 15
+ vpor xmm11, xmm9
+ vmovdqa [rax + OFS_X1], xmm11 ; BRC_X1
+ vpslld xmm7, 16
+ vpsrld xmm5, 15
+ vpor xmm7, xmm5
+ vmovdqa [rax + OFS_X2], xmm7 ; BRC_X2
+ vpslld xmm2, 16
+ vpsrld xmm0, 15
+ vpor xmm2, xmm0
+ vmovdqa [rax + OFS_X3], xmm2 ; BRC_X3
+%endmacro
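+
+;; For reference only: the equivalent scalar bit re-organization from the ZUC
+;; specification (a sketch; s[] holds the 16 31-bit LFSR words):
+;;
+;;     X0 = ((s[15] & 0x7FFF8000u) << 1) | (s[14] & 0xFFFFu);
+;;     X1 = ((s[11] & 0xFFFFu) << 16) | (s[9] >> 15);
+;;     X2 = ((s[7]  & 0xFFFFu) << 16) | (s[5] >> 15);
+;;     X3 = ((s[2]  & 0xFFFFu) << 16) | (s[0] >> 15);
+;;
+;; The macro computes the four values for 4 packets at once, one lane per
+;; 32-bit element of each XMM register.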
+
+%macro lookup_single_sbox 2
+%define %%table %1 ; [in] Pointer to table to look up
+%define %%idx_val %2 ; [in/out] Index to look up and returned value (rcx, rdx, r8, r9)
+
+%ifdef SAFE_LOOKUP
+ ;; Save all registers used in lookup_8bit (xmm0-5, r9,r10)
+ ;; and registers for param passing and return (4 regs, OS dependent)
+ ;; (6*16 + 6*8 = 144 bytes)
+ sub rsp, 144
+
+ vmovdqu [rsp], xmm0
+ vmovdqu [rsp + 16], xmm1
+ vmovdqu [rsp + 32], xmm2
+ vmovdqu [rsp + 48], xmm3
+ vmovdqu [rsp + 64], xmm4
+ vmovdqu [rsp + 80], xmm5
+ mov [rsp + 96], r9
+ mov [rsp + 104], r10
+
+%ifdef LINUX
+ mov [rsp + 112], rdi
+ mov [rsp + 120], rsi
+ mov [rsp + 128], rdx
+ mov rdi, %%table
+ mov rsi, %%idx_val
+ mov rdx, 256
+%else
+%ifnidni %%idx_val, rcx
+ mov [rsp + 112], rcx
+%endif
+%ifnidni %%idx_val, rdx
+ mov [rsp + 120], rdx
+%endif
+%ifnidni %%idx_val, r8
+ mov [rsp + 128], r8
+%endif
+
+ mov rdx, %%idx_val
+ mov rcx, %%table
+ mov r8, 256
+%endif
+ mov [rsp + 136], rax
+
+ call lookup_8bit_avx
+
+ ;; Restore all registers
+ vmovdqu xmm0, [rsp]
+ vmovdqu xmm1, [rsp + 16]
+ vmovdqu xmm2, [rsp + 32]
+ vmovdqu xmm3, [rsp + 48]
+ vmovdqu xmm4, [rsp + 64]
+ vmovdqu xmm5, [rsp + 80]
+ mov r9, [rsp + 96]
+ mov r10, [rsp + 104]
+
+%ifdef LINUX
+ mov rdi, [rsp + 112]
+ mov rsi, [rsp + 120]
+ mov rdx, [rsp + 128]
+%else
+%ifnidni %%idx_val, rcx
+ mov rcx, [rsp + 112]
+%endif
+%ifnidni %%idx_val, rdx
+ mov rdx, [rsp + 120]
+%endif
+%ifnidni %%idx_val, r8
+ mov r8, [rsp + 128]
+%endif
+%endif
+
+ ;; Move returned value from lookup function, before restoring rax
+ mov DWORD(%%idx_val), eax
+ mov rax, [rsp + 136]
+
+ add rsp, 144
+
+%else ;; SAFE_LOOKUP
+
+ movzx DWORD(%%idx_val), BYTE [%%table + %%idx_val]
+
+%endif ;; SAFE_LOOKUP
+%endmacro
+
+;
+; sbox_lkup()
+;
+; params
+; %1 R1/R2 table offset
+; %2 R1/R2 entry offset
+; %3 xmm reg name
+; uses
+; rcx,rdx,r8,r9,r10,rsi
+; return
+;
+%macro sbox_lkup 3
+ vpextrb rcx, %3, (0 + (%2 * 4))
+ lookup_single_sbox rsi, rcx
+ vpextrb rdx, %3, (1 + (%2 * 4))
+ lookup_single_sbox rdi, rdx
+
+ xor r10, r10
+ vpextrb r8, %3, (2 + (%2 * 4))
+ lookup_single_sbox rsi, r8
+ vpextrb r9, %3, (3 + (%2 * 4))
+ lookup_single_sbox rdi, r9
+
+ shrd r10d, ecx, 8
+ shrd r10d, edx, 8
+ shrd r10d, r8d, 8
+ shrd r10d, r9d, 8
+ mov [rax + %1 + (%2 * 4)], r10d
+%endmacro
+
+
+;
+; rot_mod32()
+;
+; uses xmm7
+;
+%macro rot_mod32 3
+ vpslld %1, %2, %3
+ vpsrld xmm7, %2, (32 - %3)
+
+ vpor %1, xmm7
+%endmacro
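+
+;; For reference only: the scalar equivalent is a plain 32-bit rotate left
+;; (a sketch, assuming 0 < n < 32):
+;;
+;;     uint32_t rot32(uint32_t x, unsigned n) { return (x << n) | (x >> (32 - n)); }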
+
+
+;
+; nonlin_fun4()
+;
+; params
+; %1 == 1, then calculate W
+; uses
+;
+; return
+; xmm0 = W value, updates F_R1[] / F_R2[]
+;
+%macro nonlin_fun4 1
+
+%if (%1 == 1)
+ vmovdqa xmm0, [rax + OFS_X0]
+ vpxor xmm0, [rax + OFS_R1]
+ vpaddd xmm0, [rax + OFS_R2] ; W = (BRC_X0 ^ F_R1) + F_R2
+%endif
+ ;
+ vmovdqa xmm1, [rax + OFS_R1]
+ vmovdqa xmm2, [rax + OFS_R2]
+ vpaddd xmm1, [rax + OFS_X1] ; W1 = F_R1 + BRC_X1
+ vpxor xmm2, [rax + OFS_X2] ; W2 = F_R2 ^ BRC_X2
+ ;
+
+ vpslld xmm3, xmm1, 16
+ vpsrld xmm4, xmm1, 16
+ vpslld xmm5, xmm2, 16
+ vpsrld xmm6, xmm2, 16
+ vpor xmm1, xmm3, xmm6
+ vpor xmm2, xmm4, xmm5
+
+ ;
+ rot_mod32 xmm3, xmm1, 2
+ rot_mod32 xmm4, xmm1, 10
+ rot_mod32 xmm5, xmm1, 18
+ rot_mod32 xmm6, xmm1, 24
+ vpxor xmm1, xmm3
+ vpxor xmm1, xmm4
+ vpxor xmm1, xmm5
+ vpxor xmm1, xmm6 ; XMM1 = U = L1(P)
+
+ sbox_lkup OFS_R1, 0, xmm1 ; F_R1[0]
+ sbox_lkup OFS_R1, 1, xmm1 ; F_R1[1]
+ sbox_lkup OFS_R1, 2, xmm1 ; F_R1[2]
+ sbox_lkup OFS_R1, 3, xmm1 ; F_R1[3]
+ ;
+ rot_mod32 xmm3, xmm2, 8
+ rot_mod32 xmm4, xmm2, 14
+ rot_mod32 xmm5, xmm2, 22
+ rot_mod32 xmm6, xmm2, 30
+ vpxor xmm2, xmm3
+ vpxor xmm2, xmm4
+ vpxor xmm2, xmm5
+ vpxor xmm2, xmm6 ; XMM2 = V = L2(Q)
+ ;
+
+ sbox_lkup OFS_R2, 0, xmm2 ; F_R2[0]
+ sbox_lkup OFS_R2, 1, xmm2 ; F_R2[1]
+ sbox_lkup OFS_R2, 2, xmm2 ; F_R2[2]
+ sbox_lkup OFS_R2, 3, xmm2 ; F_R2[3]
+%endmacro
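+
+;; For reference only: a scalar C sketch of the ZUC nonlinear function F that
+;; this macro vectorizes across 4 packets (rot32() as sketched above, S() being
+;; the byte-wise S0/S1 sbox lookup performed by sbox_lkup()):
+;;
+;;     W  = (X0 ^ R1) + R2;                  /* only when a W result is wanted */
+;;     W1 = R1 + X1;
+;;     W2 = R2 ^ X2;
+;;     U  = (W1 << 16) | (W2 >> 16);
+;;     V  = (W2 << 16) | (W1 >> 16);
+;;     R1 = S(U ^ rot32(U, 2) ^ rot32(U, 10) ^ rot32(U, 18) ^ rot32(U, 24)); /* L1 */
+;;     R2 = S(V ^ rot32(V, 8) ^ rot32(V, 14) ^ rot32(V, 22) ^ rot32(V, 30)); /* L2 */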
+
+
+;
+; store_kstr4()
+;
+; params
+;
+; uses
+; xmm0 as input
+; return
+;
+%macro store_kstr4 0
+ vpxor xmm0, [rax + OFS_X3]
+ vpextrd r15d, xmm0, 3
+ pop r9 ; *pKeyStr4
+ vpextrd r14d, xmm0, 2
+ pop r8 ; *pKeyStr3
+ vpextrd r13d, xmm0, 1
+ pop rdx ; *pKeyStr2
+ vpextrd r12d, xmm0, 0
+ pop rcx ; *pKeyStr1
+ mov [r9], r15d
+ mov [r8], r14d
+ mov [rdx], r13d
+ mov [rcx], r12d
+ add rcx, 4
+ add rdx, 4
+ add r8, 4
+ add r9, 4
+ push rcx
+ push rdx
+ push r8
+ push r9
+%endmacro
+
+
+;
+; add_mod31()
+; add two 32-bit args and reduce mod (2^31-1)
+; params
+; %1 - arg1/res
+; %2 - arg2
+; uses
+; xmm2
+; return
+; %1
+%macro add_mod31 2
+ vpaddd %1, %2
+ vpsrld xmm2, %1, 31
+ vpand %1, MASK31
+ vpaddd %1, xmm2
+%endmacro
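+
+;; For reference only, a scalar C sketch of the same reduction (a, b < 2^31):
+;;
+;;     uint32_t add_mod31(uint32_t a, uint32_t b)
+;;     {
+;;             uint32_t s = a + b;
+;;             return (s & 0x7FFFFFFFu) + (s >> 31);   /* fold carry back in */
+;;     }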
+
+
+;
+; rot_mod31()
+; rotate (mult by pow of 2) 32-bit arg and reduce mod (2^31-1)
+; params
+; %1 - arg
+; %2 - # of bits
+; uses
+; xmm2
+; return
+; %1
+%macro rot_mod31 2
+
+ vpslld xmm2, %1, %2
+ vpsrld %1, %1, (31 - %2)
+
+ vpor %1, xmm2
+ vpand %1, MASK31
+%endmacro
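+
+;; For reference only, a scalar C sketch (0 < n < 31):
+;;
+;;     uint32_t rot_mod31(uint32_t x, unsigned n)
+;;     {
+;;             return ((x << n) | (x >> (31 - n))) & 0x7FFFFFFFu;
+;;     }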
+
+
+;
+; lfsr_updt4()
+;
+; params
+; %1 - round number
+; uses
+; xmm0 as input (ZERO or W)
+; return
+;
+%macro lfsr_updt4 1
+ ;
+ ; xmm1 = LFSR_S0
+ ; xmm4 = LFSR_S4
+ ; xmm10 = LFSR_S10
+ ; xmm13 = LFSR_S13
+ ; xmm15 = LFSR_S15
+ ;
+ vpxor xmm3, xmm3
+ vmovdqa xmm1, [rax + (( 0 + %1) % 16)*16]
+ vmovdqa xmm4, [rax + (( 4 + %1) % 16)*16]
+ vmovdqa xmm10, [rax + ((10 + %1) % 16)*16]
+ vmovdqa xmm13, [rax + ((13 + %1) % 16)*16]
+ vmovdqa xmm15, [rax + ((15 + %1) % 16)*16]
+
+ ; Calculate LFSR feedback
+ add_mod31 xmm0, xmm1
+ rot_mod31 xmm1, 8
+ add_mod31 xmm0, xmm1
+ rot_mod31 xmm4, 20
+ add_mod31 xmm0, xmm4
+ rot_mod31 xmm10, 21
+ add_mod31 xmm0, xmm10
+ rot_mod31 xmm13, 17
+ add_mod31 xmm0, xmm13
+ rot_mod31 xmm15, 15
+ add_mod31 xmm0, xmm15
+
+
+
+ vmovdqa [rax + (( 0 + %1) % 16)*16], xmm0
+
+	; LFSR_S16 = (LFSR_S15++) = value stored above from xmm0
+%endmacro
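+
+;; For reference only: a scalar C sketch of the LFSR feedback computed above
+;; (using the add_mod31()/rot_mod31() sketches; W is 0 in working mode):
+;;
+;;     uint32_t v = W;
+;;     v = add_mod31(v, s[0]);
+;;     v = add_mod31(v, rot_mod31(s[0], 8));
+;;     v = add_mod31(v, rot_mod31(s[4], 20));
+;;     v = add_mod31(v, rot_mod31(s[10], 21));
+;;     v = add_mod31(v, rot_mod31(s[13], 17));
+;;     v = add_mod31(v, rot_mod31(s[15], 15));
+;;     s[round % 16] = v;    /* new LFSR_S15 stored in place of old LFSR_S0 */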
+
+
+;
+; key_expand_4()
+;
+%macro key_expand_4 2
+ movzx r8d, byte [rdi + (%1 + 0)]
+ movzx r9d, word [rbx + ((%1 + 0)*2)]
+ movzx r10d, byte [rsi + (%1 + 0)]
+ make_u31 r11d, r8d, r9d, r10d
+ mov [rax + (((%1 + 0)*16)+(%2*4))], r11d
+
+ movzx r12d, byte [rdi + (%1 + 1)]
+ movzx r13d, word [rbx + ((%1 + 1)*2)]
+ movzx r14d, byte [rsi + (%1 + 1)]
+ make_u31 r15d, r12d, r13d, r14d
+ mov [rax + (((%1 + 1)*16)+(%2*4))], r15d
+%endmacro
+
+
+MKGLOBAL(asm_ZucInitialization_4_avx,function,internal)
+asm_ZucInitialization_4_avx:
+
+%ifdef LINUX
+ %define pKe rdi
+ %define pIv rsi
+ %define pState rdx
+%else
+ %define pKe rcx
+ %define pIv rdx
+ %define pState r8
+%endif
+
+ ; Save non-volatile registers
+ push rbx
+ push rdi
+ push rsi
+ push r12
+ push r13
+ push r14
+ push r15
+ push rdx
+
+ lea rax, [pState] ; load pointer to LFSR
+ push pState ; Save LFSR Pointer to stack
+
+ ; setup the key pointer for first buffer key expand
+ mov rbx, [pKe] ; load the pointer to the array of keys into rbx
+
+	push	pKe	; save pKe (key pointer) to the stack
+ lea rdi, [rbx] ; load the pointer to the first key into rdi
+
+
+ ; setup the IV pointer for first buffer key expand
+ mov rcx, [pIv] ; load the pointer to the array of IV's
+ push pIv ; save the IV pointer to the stack
+ lea rsi, [rcx] ; load the first IV pointer
+
+ lea rbx, [EK_d] ; load D variables
+
+ ; Expand key packet 1
+ key_expand_4 0, 0
+ key_expand_4 2, 0
+ key_expand_4 4, 0
+ key_expand_4 6, 0
+ key_expand_4 8, 0
+ key_expand_4 10, 0
+ key_expand_4 12, 0
+ key_expand_4 14, 0
+
+
+ ;second packet key expand here - reset pointers
+ pop rdx ; get IV array pointer from Stack
+ mov rcx, [rdx+8] ; load offset to IV 2 in array
+ lea rsi, [rcx] ; load pointer to IV2
+
+ pop rbx ; get Key array pointer from Stack
+ mov rcx, [rbx+8] ; load offset to key 2 in array
+ lea rdi, [rcx] ; load pointer to Key 2
+
+ push rbx ; save Key pointer
+ push rdx ; save IV pointer
+
+ lea rbx, [EK_d]
+
+ ; Expand key packet 2
+ key_expand_4 0, 1
+ key_expand_4 2, 1
+ key_expand_4 4, 1
+ key_expand_4 6, 1
+ key_expand_4 8, 1
+ key_expand_4 10, 1
+ key_expand_4 12, 1
+ key_expand_4 14, 1
+
+
+
+ ;Third packet key expand here - reset pointers
+ pop rdx ; get IV array pointer from Stack
+ mov rcx, [rdx+16] ; load offset to IV 3 in array
+ lea rsi, [rcx] ; load pointer to IV3
+
+ pop rbx ; get Key array pointer from Stack
+ mov rcx, [rbx+16] ; load offset to key 3 in array
+ lea rdi, [rcx] ; load pointer to Key 3
+
+ push rbx ; save Key pointer
+ push rdx ; save IV pointer
+ lea rbx, [EK_d]
+ ; Expand key packet 3
+ key_expand_4 0, 2
+ key_expand_4 2, 2
+ key_expand_4 4, 2
+ key_expand_4 6, 2
+ key_expand_4 8, 2
+ key_expand_4 10, 2
+ key_expand_4 12, 2
+ key_expand_4 14, 2
+
+
+
+ ;fourth packet key expand here - reset pointers
+ pop rdx ; get IV array pointer from Stack
+ mov rcx, [rdx+24] ; load offset to IV 4 in array
+ lea rsi, [rcx] ; load pointer to IV4
+
+ pop rbx ; get Key array pointer from Stack
+	mov	rcx, [rbx+24]	   ; load offset to key 4 in array
+	lea	rdi, [rcx]	   ; load pointer to Key 4
+ lea rbx, [EK_d]
+ ; Expand key packet 4
+ key_expand_4 0, 3
+ key_expand_4 2, 3
+ key_expand_4 4, 3
+ key_expand_4 6, 3
+ key_expand_4 8, 3
+ key_expand_4 10, 3
+ key_expand_4 12, 3
+ key_expand_4 14, 3
+
+ ; Set R1 and R2 to zero
+ ;xor r10, r10
+ ;xor r11, r11
+
+
+
+ ; Load read-only registers
+ lea rdi, [S0] ; used by sbox_lkup() macro
+ lea rsi, [S1]
+ vmovdqa xmm12, [mask31]
+
+ ; Shift LFSR 32-times, update state variables
+%assign N 0
+%rep 32
+ pop rdx
+ lea rax, [rdx]
+ push rdx
+
+ bits_reorg4 N
+ nonlin_fun4 1
+ vpsrld xmm0,1 ; Shift out LSB of W
+
+ pop rdx
+ lea rax, [rdx]
+ push rdx
+
+ lfsr_updt4 N ; W (xmm0) used in LFSR update - not set to zero
+%assign N N+1
+%endrep
+
+ ; And once more, initial round from keygen phase = 33 times
+ pop rdx
+ lea rax, [rdx]
+ push rdx
+
+ bits_reorg4 0
+ nonlin_fun4 0
+
+ pop rdx
+ lea rax, [rdx]
+
+ vpxor xmm0, xmm0
+ lfsr_updt4 0
+
+
+
+ ; Restore non-volatile registers
+ pop rdx
+ pop r15
+ pop r14
+ pop r13
+ pop r12
+ pop rsi
+ pop rdi
+ pop rbx
+
+ ret
+;
+;
+;
+;;
+;; void asm_ZucGenKeystream64B_4_avx(state4_t *pSta, u32* pKeyStr1, u32* pKeyStr2, u32* pKeyStr3, u32* pKeyStr4);
+;;
+;; WIN64
+;; RCX - pSta
+;; RDX - pKeyStr1
+;; R8 - pKeyStr2
+;; R9 - pKeyStr3
+;; Stack - pKeyStr4
+;;
+;; LIN64
+;; RDI - pSta
+;; RSI - pKeyStr1
+;; RDX - pKeyStr2
+;; RCX - pKeyStr3
+;; R8 - pKeyStr4
+;;
+MKGLOBAL(asm_ZucGenKeystream64B_4_avx,function,internal)
+asm_ZucGenKeystream64B_4_avx:
+
+%ifdef LINUX
+ %define pState rdi
+ %define pKS1 rsi
+ %define pKS2 rdx
+ %define pKS3 rcx
+ %define pKS4 r8
+%else
+ %define pState rcx
+ %define pKS1 rdx
+ %define pKS2 r8
+ %define pKS3 r9
+ %define pKS4 rax
+%endif
+
+%ifndef LINUX
+ mov rax, [rsp + 8*5] ; 5th parameter from stack
+%endif
+
+ ; Save non-volatile registers
+ push rbx
+ push r12
+ push r13
+ push r14
+ push r15
+
+%ifndef LINUX
+ push rdi
+ push rsi
+%endif
+ ; Store 4 keystream pointers on the stack
+
+ push pKS1
+ push pKS2
+ push pKS3
+ push pKS4
+
+
+ ; Load state pointer in RAX
+ mov rax, pState
+
+
+ ; Load read-only registers
+ lea rdi, [S0] ; used by sbox_lkup() macro
+ lea rsi, [S1]
+ vmovdqa xmm12, [mask31]
+
+ ; Generate 64B of keystream in 16 rounds
+%assign N 1
+%rep 16
+ bits_reorg4 N
+ nonlin_fun4 1
+ store_kstr4
+ vpxor xmm0, xmm0
+ lfsr_updt4 N
+%assign N N+1
+%endrep
+
+ ; Take keystream pointers off (#push = #pops)
+ pop rax
+ pop rax
+ pop rax
+ pop rax
+
+%ifndef LINUX
+ pop rsi
+ pop rdi
+%endif
+
+ ; Restore non-volatile registers
+ pop r15
+ pop r14
+ pop r13
+ pop r12
+ pop rbx
+ ret
+
+
+;;
+;; extern uint32_t asm_Eia3RemainderAVX(const void *ks, const void *data, uint64_t n_bits)
+;;
+;; Returns authentication update value to be XOR'ed with current authentication tag
+;;
+;; WIN64
+;; RCX - KS (key stream pointer)
+;; RDX - DATA (data pointer)
+;; R8  - N_BITS (number of data bits to process)
+;; LIN64
+;; RDI - KS (key stream pointer)
+;; RSI - DATA (data pointer)
+;; RDX - N_BITS (number of data bits to process)
+;;
+align 64
+MKGLOBAL(asm_Eia3RemainderAVX,function,internal)
+asm_Eia3RemainderAVX:
+
+%ifdef LINUX
+ %define KS rdi
+ %define DATA rsi
+ %define N_BITS rdx
+%else
+ %define KS rcx
+ %define DATA rdx
+ %define N_BITS r8
+%endif
+ FUNC_SAVE
+
+ vmovdqa xmm5, [bit_reverse_table_l]
+ vmovdqa xmm6, [bit_reverse_table_h]
+ vmovdqa xmm7, [bit_reverse_and_table]
+ vmovdqa xmm10, [data_mask_64bits]
+ vpxor xmm9, xmm9
+
+%rep 3
+ cmp N_BITS, 128
+ jb Eia3RoundsAVX_dq_end
+
+ ;; read 16 bytes and reverse bits
+ vmovdqu xmm0, [DATA]
+ vmovdqa xmm1, xmm0
+ vpand xmm1, xmm7
+
+ vmovdqa xmm2, xmm7
+ vpandn xmm2, xmm0
+ vpsrld xmm2, 4
+
+ vmovdqa xmm8, xmm6 ; bit reverse low nibbles (use high table)
+ vpshufb xmm8, xmm1
+
+ vmovdqa xmm4, xmm5 ; bit reverse high nibbles (use low table)
+ vpshufb xmm4, xmm2
+
+ vpor xmm8, xmm4
+ ; xmm8 - bit reversed data bytes
+
+ ;; ZUC authentication part
+ ;; - 4x32 data bits
+ ;; - set up KS
+ vmovdqu xmm3, [KS + (0*4)]
+ vmovdqu xmm4, [KS + (2*4)]
+ vpshufd xmm0, xmm3, 0x61
+ vpshufd xmm1, xmm4, 0x61
+
+ ;; - set up DATA
+ vmovdqa xmm2, xmm8
+ vpand xmm2, xmm10
+ vpshufd xmm3, xmm2, 0xdc
+ vmovdqa xmm4, xmm3
+
+ vpsrldq xmm8, 8
+ vpshufd xmm13, xmm8, 0xdc
+ vmovdqa xmm14, xmm13
+
+ ;; - clmul
+ ;; - xor the results from 4 32-bit words together
+ vpclmulqdq xmm3, xmm0, 0x00
+ vpclmulqdq xmm4, xmm0, 0x11
+ vpclmulqdq xmm13, xmm1, 0x00
+ vpclmulqdq xmm14, xmm1, 0x11
+
+ vpxor xmm3, xmm4
+ vpxor xmm13, xmm14
+ vpxor xmm9, xmm3
+ vpxor xmm9, xmm13
+ lea DATA, [DATA + 16]
+ lea KS, [KS + 16]
+ sub N_BITS, 128
+%endrep
+Eia3RoundsAVX_dq_end:
+
+%rep 3
+ cmp N_BITS, 32
+ jb Eia3RoundsAVX_dw_end
+
+ ;; swap dwords in KS
+ vmovq xmm1, [KS]
+ vpshufd xmm4, xmm1, 0xf1
+
+ ;; bit-reverse 4 bytes of data
+ vmovdqa xmm2, xmm7
+ vmovd xmm0, [DATA]
+ vmovdqa xmm1, xmm0
+ vpand xmm1, xmm2
+
+ vpandn xmm2, xmm0
+ vpsrld xmm2, 4
+
+ vmovdqa xmm0, xmm6 ; bit reverse low nibbles (use high table)
+ vpshufb xmm0, xmm1
+
+ vmovdqa xmm3, xmm5 ; bit reverse high nibbles (use low table)
+ vpshufb xmm3, xmm2
+
+ vpor xmm0, xmm3
+
+ ;; rol & xor
+ vpclmulqdq xmm0, xmm4, 0
+ vpxor xmm9, xmm0
+
+ lea DATA, [DATA + 4]
+ lea KS, [KS + 4]
+ sub N_BITS, 32
+%endrep
+
+Eia3RoundsAVX_dw_end:
+ vmovq rax, xmm9
+ shr rax, 32
+
+ or N_BITS, N_BITS
+ jz Eia3RoundsAVX_byte_loop_end
+
+ ;; get 64-bit key stream for the last data bits (less than 32)
+ mov KS, [KS]
+
+ ;; process remaining data bytes and bits
+Eia3RoundsAVX_byte_loop:
+ or N_BITS, N_BITS
+ jz Eia3RoundsAVX_byte_loop_end
+
+ cmp N_BITS, 8
+ jb Eia3RoundsAVX_byte_partial
+
+ movzx r11, byte [DATA]
+ sub N_BITS, 8
+ jmp Eia3RoundsAVX_byte_read
+
+Eia3RoundsAVX_byte_partial:
+ ;; process remaining bits (up to 7)
+ lea r11, [bit_mask_table]
+ movzx r10, byte [r11 + N_BITS]
+ movzx r11, byte [DATA]
+ and r11, r10
+ xor N_BITS, N_BITS
+Eia3RoundsAVX_byte_read:
+
+%assign DATATEST 0x80
+%rep 8
+ xor r10, r10
+ test r11, DATATEST
+ cmovne r10, KS
+ xor rax, r10
+ rol KS, 1
+%assign DATATEST (DATATEST >> 1)
+%endrep ; byte boundary
+ lea DATA, [DATA + 1]
+ jmp Eia3RoundsAVX_byte_loop
+
+Eia3RoundsAVX_byte_loop_end:
+
+ ;; eax - holds the return value at this stage
+ FUNC_RESTORE
+
+ ret
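+
+;; For reference only: the per-bit form of the EIA3 update realized above with
+;; CLMUL and the trailing byte/bit loop (a sketch; get_bit()/get_word() are
+;; illustrative helpers returning message bit i and the 32 keystream bits
+;; starting at bit offset i):
+;;
+;;     for (i = 0; i < n_bits; i++)
+;;             if (get_bit(data, i))
+;;                     T ^= get_word(ks, i);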
+
+;;
+;;extern uint32_t asm_Eia3Round64BAVX(uint32_t T, const void *KS, const void *DATA)
+;;
+;; Updates authentication tag T based on keystream KS and DATA.
+;; - it processes 64 bytes of DATA
+;; - reads data in 16 byte chunks and bit reverses them
+;; - reads and re-arranges KS
+;; - employs clmul for the XOR & ROL part
+;; - copies top 64 bytes of KS to bottom (for the next round)
+;;
+;; WIN64
+;; RCX - T
+;; RDX - KS pointer to key stream (2 x 64 bytes)
+;;; R8 - DATA pointer to data
+;; LIN64
+;; RDI - T
+;; RSI - KS pointer to key stream (2 x 64 bytes)
+;; RDX - DATA pointer to data
+;;
+align 64
+MKGLOBAL(asm_Eia3Round64BAVX,function,internal)
+asm_Eia3Round64BAVX:
+
+%ifdef LINUX
+ %define T edi
+ %define KS rsi
+ %define DATA rdx
+%else
+ %define T ecx
+ %define KS rdx
+ %define DATA r8
+%endif
+
+ FUNC_SAVE
+
+ vmovdqa xmm5, [bit_reverse_table_l]
+ vmovdqa xmm6, [bit_reverse_table_h]
+ vmovdqa xmm7, [bit_reverse_and_table]
+ vmovdqa xmm10, [data_mask_64bits]
+
+ vpxor xmm9, xmm9
+%assign I 0
+%rep 4
+ ;; read 16 bytes and reverse bits
+ vmovdqu xmm0, [DATA + 16*I]
+ vpand xmm1, xmm0, xmm7
+
+ vpandn xmm2, xmm7, xmm0
+ vpsrld xmm2, 4
+
+ vpshufb xmm8, xmm6, xmm1 ; bit reverse low nibbles (use high table)
+ vpshufb xmm4, xmm5, xmm2 ; bit reverse high nibbles (use low table)
+
+ vpor xmm8, xmm4
+ ; xmm8 - bit reversed data bytes
+
+ ;; ZUC authentication part
+ ;; - 4x32 data bits
+ ;; - set up KS
+%if I != 0
+ vmovdqa xmm11, xmm12
+ vmovdqu xmm12, [KS + (I*16) + (4*4)]
+%else
+ vmovdqu xmm11, [KS + (I*16) + (0*4)]
+ vmovdqu xmm12, [KS + (I*16) + (4*4)]
+%endif
+ vpalignr xmm13, xmm12, xmm11, 8
+ vpshufd xmm2, xmm11, 0x61
+ vpshufd xmm3, xmm13, 0x61
+
+ ;; - set up DATA
+ vpand xmm13, xmm10, xmm8
+ vpshufd xmm0, xmm13, 0xdc
+
+ vpsrldq xmm8, 8
+ vpshufd xmm1, xmm8, 0xdc
+
+ ;; - clmul
+ ;; - xor the results from 4 32-bit words together
+%if I != 0
+ vpclmulqdq xmm13, xmm0, xmm2, 0x00
+ vpclmulqdq xmm14, xmm0, xmm2, 0x11
+ vpclmulqdq xmm15, xmm1, xmm3, 0x00
+ vpclmulqdq xmm8, xmm1, xmm3, 0x11
+
+ vpxor xmm13, xmm14
+ vpxor xmm15, xmm8
+ vpxor xmm9, xmm13
+ vpxor xmm9, xmm15
+%else
+ vpclmulqdq xmm9, xmm0, xmm2, 0x00
+ vpclmulqdq xmm13, xmm0, xmm2, 0x11
+ vpclmulqdq xmm14, xmm1, xmm3, 0x00
+ vpclmulqdq xmm15, xmm1, xmm3, 0x11
+
+ vpxor xmm14, xmm15
+ vpxor xmm9, xmm13
+ vpxor xmm9, xmm14
+%endif
+
+
+%assign I (I + 1)
+%endrep
+
+ ;; - update T
+ vmovq rax, xmm9
+ shr rax, 32
+ xor eax, T
+
+ FUNC_RESTORE
+
+ ret
+
+
+;----------------------------------------------------------------------------------------
+;----------------------------------------------------------------------------------------
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/avx/zuc_avx_top.c b/src/spdk/intel-ipsec-mb/avx/zuc_avx_top.c
new file mode 100755
index 000000000..b3ba2de81
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx/zuc_avx_top.c
@@ -0,0 +1,548 @@
+/*******************************************************************************
+ Copyright (c) 2009-2019, Intel Corporation
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*******************************************************************************/
+
+/*-----------------------------------------------------------------------
+* zuc_avx_top.c
+*-----------------------------------------------------------------------
+* An implementation of ZUC, the core algorithm for the
+* 3GPP Confidentiality and Integrity algorithms.
+*
+*-----------------------------------------------------------------------*/
+
+#include <string.h>
+
+#include "include/zuc_internal.h"
+#include "include/wireless_common.h"
+#include "include/save_xmms.h"
+#include "include/clear_regs_mem.h"
+#include "intel-ipsec-mb.h"
+
+#define SAVE_XMMS save_xmms_avx
+#define RESTORE_XMMS restore_xmms_avx
+#define CLEAR_SCRATCH_SIMD_REGS clear_scratch_xmms_avx
+
+static inline
+void _zuc_eea3_1_buffer_avx(const void *pKey,
+ const void *pIv,
+ const void *pBufferIn,
+ void *pBufferOut,
+ const uint32_t length)
+{
+ DECLARE_ALIGNED(ZucState_t zucState, 64);
+ DECLARE_ALIGNED(uint8_t keyStream[64], 64);
+ /* buffer to store 64 bytes of keystream */
+ DECLARE_ALIGNED(uint8_t tempSrc[64], 64);
+ DECLARE_ALIGNED(uint8_t tempDst[64], 64);
+
+ const uint64_t *pIn64 = NULL;
+ const uint8_t *pIn8 = NULL;
+ uint8_t *pOut8 = NULL;
+ uint64_t *pOut64 = NULL, *pKeyStream64 = NULL;
+ uint64_t *pTemp64 = NULL, *pdstTemp64 = NULL;
+
+ uint32_t numKeyStreamsPerPkt = length/ ZUC_KEYSTR_LEN;
+ uint32_t numBytesLeftOver = length % ZUC_KEYSTR_LEN;
+
+ /* need to set the LFSR state to zero */
+ memset(&zucState, 0, sizeof(ZucState_t));
+
+ /* initialize the zuc state */
+ asm_ZucInitialization(pKey, pIv, &(zucState));
+
+ /* Loop Over all the Quad-Words in input buffer and XOR with the 64bits
+ * of generated keystream */
+ pOut64 = (uint64_t *) pBufferOut;
+ pIn64 = (const uint64_t *) pBufferIn;
+
+ while (numKeyStreamsPerPkt--) {
+ /* Generate the key stream 64 bytes at a time */
+ asm_ZucGenKeystream64B((uint32_t *) &keyStream[0], &zucState);
+
+ /* XOR The Keystream generated with the input buffer here */
+ pKeyStream64 = (uint64_t *) keyStream;
+ asm_XorKeyStream64B_avx(pIn64, pOut64, pKeyStream64);
+ pIn64 += 8;
+ pOut64 += 8;
+ }
+
+ /* Check for remaining 0 to 63 bytes */
+ pIn8 = (const uint8_t *) pBufferIn;
+ pOut8 = (uint8_t *) pBufferOut;
+ if(numBytesLeftOver) {
+ asm_ZucGenKeystream64B((uint32_t *) &keyStream[0], &zucState);
+
+ /* copy the remaining bytes into temporary buffer and XOR with
+ * the 64-bytes of keystream. Then copy on the valid bytes back
+ * to the output buffer */
+
+ memcpy(&tempSrc[0], &pIn8[length - numBytesLeftOver],
+ numBytesLeftOver);
+ pKeyStream64 = (uint64_t *) &keyStream[0];
+ pTemp64 = (uint64_t *) &tempSrc[0];
+ pdstTemp64 = (uint64_t *) &tempDst[0];
+
+ asm_XorKeyStream64B_avx(pTemp64, pdstTemp64, pKeyStream64);
+ memcpy(&pOut8[length - numBytesLeftOver], &tempDst[0],
+ numBytesLeftOver);
+
+ }
+#ifdef SAFE_DATA
+ /* Clear sensitive data in stack */
+ clear_mem(keyStream, sizeof(keyStream));
+ clear_mem(&zucState, sizeof(zucState));
+#endif
+}
+
+static inline
+void _zuc_eea3_4_buffer_avx(const void * const pKey[4],
+ const void * const pIv[4],
+ const void * const pBufferIn[4],
+ void *pBufferOut[4],
+ const uint32_t length[4])
+{
+ DECLARE_ALIGNED(ZucState4_t state, 64);
+ DECLARE_ALIGNED(ZucState_t singlePktState, 64);
+ unsigned int i = 0;
+ /* Calculate the minimum input packet size */
+ uint32_t bytes1 = (length[0] < length[1] ?
+ length[0] : length[1]);
+ uint32_t bytes2 = (length[2] < length[3] ?
+ length[2] : length[3]);
+ /* min number of bytes */
+ uint32_t bytes = (bytes1 < bytes2) ? bytes1 : bytes2;
+ uint32_t numKeyStreamsPerPkt = bytes/ZUC_KEYSTR_LEN;
+ uint32_t remainBytes[4] = {0};
+ DECLARE_ALIGNED(uint8_t keyStr1[64], 64);
+ DECLARE_ALIGNED(uint8_t keyStr2[64], 64);
+ DECLARE_ALIGNED(uint8_t keyStr3[64], 64);
+ DECLARE_ALIGNED(uint8_t keyStr4[64], 64);
+ DECLARE_ALIGNED(uint8_t tempSrc[64], 64);
+ DECLARE_ALIGNED(uint8_t tempDst[64], 64);
+ /* structure to store the 4 keys */
+ DECLARE_ALIGNED(ZucKey4_t keys, 64);
+ /* structure to store the 4 IV's */
+ DECLARE_ALIGNED(ZucIv4_t ivs, 64);
+ uint32_t numBytesLeftOver = 0;
+ const uint8_t *pTempBufInPtr = NULL;
+ uint8_t *pTempBufOutPtr = NULL;
+
+ const uint64_t *pIn64_0 = NULL;
+ const uint64_t *pIn64_1 = NULL;
+ const uint64_t *pIn64_2 = NULL;
+ const uint64_t *pIn64_3 = NULL;
+ uint64_t *pOut64_0 = NULL;
+ uint64_t *pOut64_1 = NULL;
+ uint64_t *pOut64_2 = NULL;
+ uint64_t *pOut64_3 = NULL;
+ uint64_t *pTempSrc64 = NULL;
+ uint64_t *pTempDst64 = NULL;
+ uint64_t *pKeyStream64 = NULL;
+
+ /* rounded down minimum length */
+ bytes = numKeyStreamsPerPkt * ZUC_KEYSTR_LEN;
+
+ /* Need to set the LFSR state to zero */
+ memset(&state, 0, sizeof(ZucState4_t));
+
+ /* Calculate the number of bytes left over for each packet */
+ for (i=0; i< 4; i++)
+ remainBytes[i] = length[i] - bytes;
+
+ /* Setup the Keys */
+ keys.pKey1 = pKey[0];
+ keys.pKey2 = pKey[1];
+ keys.pKey3 = pKey[2];
+ keys.pKey4 = pKey[3];
+
+ /* setup the IV's */
+ ivs.pIv1 = pIv[0];
+ ivs.pIv2 = pIv[1];
+ ivs.pIv3 = pIv[2];
+ ivs.pIv4 = pIv[3];
+
+ asm_ZucInitialization_4_avx( &keys, &ivs, &state);
+
+ pOut64_0 = (uint64_t *) pBufferOut[0];
+ pOut64_1 = (uint64_t *) pBufferOut[1];
+ pOut64_2 = (uint64_t *) pBufferOut[2];
+ pOut64_3 = (uint64_t *) pBufferOut[3];
+
+ pIn64_0 = (const uint64_t *) pBufferIn[0];
+ pIn64_1 = (const uint64_t *) pBufferIn[1];
+ pIn64_2 = (const uint64_t *) pBufferIn[2];
+ pIn64_3 = (const uint64_t *) pBufferIn[3];
+
+ /* Loop for 64 bytes at a time generating 4 key-streams per loop */
+ while (numKeyStreamsPerPkt) {
+ /* Generate 64 bytes at a time */
+ asm_ZucGenKeystream64B_4_avx(&state,
+ (uint32_t *) keyStr1,
+ (uint32_t *) keyStr2,
+ (uint32_t *) keyStr3,
+ (uint32_t *) keyStr4);
+
+ /* XOR the KeyStream with the input buffers and store in output
+ * buffer*/
+ pKeyStream64 = (uint64_t *) keyStr1;
+ asm_XorKeyStream64B_avx(pIn64_0, pOut64_0, pKeyStream64);
+ pIn64_0 += 8;
+ pOut64_0 += 8;
+
+ pKeyStream64 = (uint64_t *) keyStr2;
+ asm_XorKeyStream64B_avx(pIn64_1, pOut64_1, pKeyStream64);
+ pIn64_1 += 8;
+ pOut64_1 += 8;
+
+ pKeyStream64 = (uint64_t *) keyStr3;
+ asm_XorKeyStream64B_avx(pIn64_2, pOut64_2, pKeyStream64);
+ pIn64_2 += 8;
+ pOut64_2 += 8;
+
+ pKeyStream64 = (uint64_t *) keyStr4;
+ asm_XorKeyStream64B_avx(pIn64_3, pOut64_3, pKeyStream64);
+ pIn64_3 += 8;
+ pOut64_3 += 8;
+
+ /* Update keystream count */
+ numKeyStreamsPerPkt--;
+
+ }
+
+ /* process each packet separately for the remaining bytes */
+ for (i = 0; i < 4; i++) {
+ if (remainBytes[i]) {
+ /* need to copy the zuc state to single packet state */
+ singlePktState.lfsrState[0] = state.lfsrState[0][i];
+ singlePktState.lfsrState[1] = state.lfsrState[1][i];
+ singlePktState.lfsrState[2] = state.lfsrState[2][i];
+ singlePktState.lfsrState[3] = state.lfsrState[3][i];
+ singlePktState.lfsrState[4] = state.lfsrState[4][i];
+ singlePktState.lfsrState[5] = state.lfsrState[5][i];
+ singlePktState.lfsrState[6] = state.lfsrState[6][i];
+ singlePktState.lfsrState[7] = state.lfsrState[7][i];
+ singlePktState.lfsrState[8] = state.lfsrState[8][i];
+ singlePktState.lfsrState[9] = state.lfsrState[9][i];
+ singlePktState.lfsrState[10] = state.lfsrState[10][i];
+ singlePktState.lfsrState[11] = state.lfsrState[11][i];
+ singlePktState.lfsrState[12] = state.lfsrState[12][i];
+ singlePktState.lfsrState[13] = state.lfsrState[13][i];
+ singlePktState.lfsrState[14] = state.lfsrState[14][i];
+ singlePktState.lfsrState[15] = state.lfsrState[15][i];
+
+ singlePktState.fR1 = state.fR1[i];
+ singlePktState.fR2 = state.fR2[i];
+
+ singlePktState.bX0 = state.bX0[i];
+ singlePktState.bX1 = state.bX1[i];
+ singlePktState.bX2 = state.bX2[i];
+ singlePktState.bX3 = state.bX3[i];
+
+ numKeyStreamsPerPkt = remainBytes[i] / ZUC_KEYSTR_LEN;
+ numBytesLeftOver = remainBytes[i] % ZUC_KEYSTR_LEN;
+
+ pTempBufInPtr = pBufferIn[i];
+ pTempBufOutPtr = pBufferOut[i];
+
+ /* update the output and input pointers here to point
+ * to the i'th buffers */
+ pOut64_0 = (uint64_t *) &pTempBufOutPtr[length[i] -
+ remainBytes[i]];
+ pIn64_0 = (const uint64_t *) &pTempBufInPtr[length[i] -
+ remainBytes[i]];
+
+ while (numKeyStreamsPerPkt--) {
+ /* Generate the key stream 64 bytes at a time */
+ asm_ZucGenKeystream64B((uint32_t *) keyStr1,
+ &singlePktState);
+ pKeyStream64 = (uint64_t *) keyStr1;
+ asm_XorKeyStream64B_avx(pIn64_0, pOut64_0,
+ pKeyStream64);
+ pIn64_0 += 8;
+ pOut64_0 += 8;
+ }
+
+
+ /* Check for remaining 0 to 63 bytes */
+ if (numBytesLeftOver) {
+ asm_ZucGenKeystream64B((uint32_t *) &keyStr1,
+ &singlePktState);
+ uint32_t offset = length[i] - numBytesLeftOver;
+
+ /* copy the remaining bytes into temporary
+ * buffer and XOR with the 64-bytes of
+ * keystream. Then copy on the valid bytes back
+ * to the output buffer */
+ memcpy(&tempSrc[0], &pTempBufInPtr[offset],
+ numBytesLeftOver);
+ memset(&tempSrc[numBytesLeftOver], 0,
+ 64 - numBytesLeftOver);
+
+ pKeyStream64 = (uint64_t *) &keyStr1[0];
+ pTempSrc64 = (uint64_t *) &tempSrc[0];
+ pTempDst64 = (uint64_t *) &tempDst[0];
+ asm_XorKeyStream64B_avx(pTempSrc64, pTempDst64,
+ pKeyStream64);
+
+ memcpy(&pTempBufOutPtr[offset],
+ &tempDst[0], numBytesLeftOver);
+ }
+ }
+ }
+#ifdef SAFE_DATA
+ /* Clear sensitive data in stack */
+ clear_mem(keyStr1, sizeof(keyStr1));
+ clear_mem(keyStr2, sizeof(keyStr2));
+ clear_mem(keyStr3, sizeof(keyStr3));
+ clear_mem(keyStr4, sizeof(keyStr4));
+ clear_mem(&singlePktState, sizeof(singlePktState));
+ clear_mem(&state, sizeof(state));
+ clear_mem(&keys, sizeof(keys));
+ clear_mem(&ivs, sizeof(ivs));
+#endif
+}
+
+void zuc_eea3_1_buffer_avx(const void *pKey,
+ const void *pIv,
+ const void *pBufferIn,
+ void *pBufferOut,
+ const uint32_t length)
+{
+#ifndef LINUX
+ DECLARE_ALIGNED(uint128_t xmm_save[10], 16);
+
+ SAVE_XMMS(xmm_save);
+#endif
+#ifdef SAFE_PARAM
+ /* Check for NULL pointers */
+ if (pKey == NULL || pIv == NULL || pBufferIn == NULL ||
+ pBufferOut == NULL)
+ return;
+
+ /* Check input data is in range of supported length */
+ if (length < ZUC_MIN_LEN || length > ZUC_MAX_LEN)
+ return;
+#endif
+ _zuc_eea3_1_buffer_avx(pKey, pIv, pBufferIn, pBufferOut, length);
+
+#ifdef SAFE_DATA
+ /* Clear sensitive data in registers */
+ CLEAR_SCRATCH_GPS();
+ CLEAR_SCRATCH_SIMD_REGS();
+#endif
+#ifndef LINUX
+ RESTORE_XMMS(xmm_save);
+#endif
+}
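+
+/*
+ * Minimal usage sketch for the single-buffer API above (illustrative only;
+ * the all-zero key/IV and the buffer sizes are placeholders):
+ *
+ *     uint8_t key[16] = {0}, iv[16] = {0};
+ *     uint8_t in[128] = {0}, out[128];
+ *
+ *     zuc_eea3_1_buffer_avx(key, iv, in, out, sizeof(in));
+ */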
+
+void zuc_eea3_4_buffer_avx(const void * const pKey[4],
+ const void * const pIv[4],
+ const void * const pBufferIn[4],
+ void *pBufferOut[4],
+ const uint32_t length[4])
+{
+#ifndef LINUX
+ DECLARE_ALIGNED(uint128_t xmm_save[10], 16);
+
+ SAVE_XMMS(xmm_save);
+#endif
+#ifdef SAFE_PARAM
+ unsigned int i;
+
+ /* Check for NULL pointers */
+ if (pKey == NULL || pIv == NULL || pBufferIn == NULL ||
+ pBufferOut == NULL || length == NULL)
+ return;
+
+ for (i = 0; i < 4; i++) {
+ if (pKey[i] == NULL || pIv[i] == NULL ||
+ pBufferIn[i] == NULL || pBufferOut[i] == NULL)
+ return;
+
+ /* Check input data is in range of supported length */
+ if (length[i] < ZUC_MIN_LEN || length[i] > ZUC_MAX_LEN)
+ return;
+ }
+#endif
+
+ _zuc_eea3_4_buffer_avx(pKey, pIv, pBufferIn, pBufferOut, length);
+
+#ifdef SAFE_DATA
+ /* Clear sensitive data in registers */
+ CLEAR_SCRATCH_GPS();
+ CLEAR_SCRATCH_SIMD_REGS();
+#endif
+#ifndef LINUX
+ RESTORE_XMMS(xmm_save);
+#endif
+}
+
+void zuc_eea3_n_buffer_avx(const void * const pKey[], const void * const pIv[],
+ const void * const pBufferIn[], void *pBufferOut[],
+ const uint32_t length[],
+ const uint32_t numBuffers)
+{
+#ifndef LINUX
+ DECLARE_ALIGNED(uint128_t xmm_save[10], 16);
+
+ SAVE_XMMS(xmm_save);
+#endif
+
+ unsigned int i;
+ unsigned int packetCount = numBuffers;
+
+#ifdef SAFE_PARAM
+ /* Check for NULL pointers */
+ if (pKey == NULL || pIv == NULL || pBufferIn == NULL ||
+ pBufferOut == NULL || length == NULL)
+ return;
+
+ for (i = 0; i < numBuffers; i++) {
+ if (pKey[i] == NULL || pIv[i] == NULL ||
+ pBufferIn[i] == NULL || pBufferOut[i] == NULL)
+ return;
+
+ /* Check input data is in range of supported length */
+ if (length[i] < ZUC_MIN_LEN || length[i] > ZUC_MAX_LEN)
+ return;
+ }
+#endif
+ i = 0;
+
+ while(packetCount >= 4) {
+ packetCount -=4;
+ _zuc_eea3_4_buffer_avx(&pKey[i],
+ &pIv[i],
+ &pBufferIn[i],
+ &pBufferOut[i],
+ &length[i]);
+ i+=4;
+ }
+
+ while(packetCount--) {
+ _zuc_eea3_1_buffer_avx(pKey[i],
+ pIv[i],
+ pBufferIn[i],
+ pBufferOut[i],
+ length[i]);
+ i++;
+ }
+#ifdef SAFE_DATA
+ /* Clear sensitive data in registers */
+ CLEAR_SCRATCH_GPS();
+ CLEAR_SCRATCH_SIMD_REGS();
+#endif
+#ifndef LINUX
+ RESTORE_XMMS(xmm_save);
+#endif
+}
+
+static inline uint64_t rotate_left(uint64_t u, size_t r)
+{
+ return (((u) << (r)) | ((u) >> (64 - (r))));
+}
+
+static inline uint64_t load_uint64(const void *ptr)
+{
+ return *((const uint64_t *)ptr);
+}
+
+void zuc_eia3_1_buffer_avx(const void *pKey,
+ const void *pIv,
+ const void *pBufferIn,
+ const uint32_t lengthInBits,
+ uint32_t *pMacI)
+{
+#ifndef LINUX
+ DECLARE_ALIGNED(uint128_t xmm_save[10], 16);
+
+ SAVE_XMMS(xmm_save);
+#endif
+ DECLARE_ALIGNED(ZucState_t zucState, 64);
+ DECLARE_ALIGNED(uint32_t keyStream[16 * 2], 64);
+ const uint32_t keyStreamLengthInBits = ZUC_KEYSTR_LEN * 8;
+ /* generate a key-stream 2 words longer than the input message */
+ const uint32_t N = lengthInBits + (2 * ZUC_WORD);
+ uint32_t L = (N + 31) / ZUC_WORD;
+ uint32_t *pZuc = (uint32_t *) &keyStream[0];
+ uint32_t remainingBits = lengthInBits;
+ uint32_t T = 0;
+ const uint8_t *pIn8 = (const uint8_t *) pBufferIn;
+
+#ifdef SAFE_PARAM
+ /* Check for NULL pointers */
+ if (pKey == NULL || pIv == NULL || pBufferIn == NULL || pMacI == NULL)
+ return;
+
+ /* Check input data is in range of supported length */
+ if (lengthInBits < ZUC_MIN_LEN || lengthInBits > ZUC_MAX_LEN)
+ return;
+#endif
+
+ memset(&zucState, 0, sizeof(ZucState_t));
+
+ asm_ZucInitialization(pKey, pIv, &(zucState));
+ asm_ZucGenKeystream64B(pZuc, &zucState);
+
+ /* loop over the message bits */
+ while (remainingBits >= keyStreamLengthInBits) {
+ remainingBits -= keyStreamLengthInBits;
+ L -= (keyStreamLengthInBits / 32);
+ /* Generate the next key stream 8 bytes or 64 bytes */
+ if (!remainingBits)
+ asm_ZucGenKeystream8B(&keyStream[16], &zucState);
+ else
+ asm_ZucGenKeystream64B(&keyStream[16], &zucState);
+ T = asm_Eia3Round64BAVX(T, &keyStream[0], pIn8);
+ memcpy(&keyStream[0], &keyStream[16], 16 * sizeof(uint32_t));
+ pIn8 = &pIn8[ZUC_KEYSTR_LEN];
+ }
+
+	/*
+	 * If more than 14 ZUC words (double words) of bits remain,
+	 * up to another 2 ZUC words (8B) of keystream are needed
+	 */
+ if (remainingBits > (14 * 32))
+ asm_ZucGenKeystream8B(&keyStream[16], &zucState);
+ T ^= asm_Eia3RemainderAVX(&keyStream[0], pIn8, remainingBits);
+ T ^= rotate_left(load_uint64(&keyStream[remainingBits / 32]),
+ remainingBits % 32);
+
+ /* save the final MAC-I result */
+ uint32_t keyBlock = keyStream[L - 1];
+ *pMacI = bswap4(T ^ keyBlock);
+
+#ifdef SAFE_DATA
+ /* Clear sensitive data (in registers and stack) */
+ clear_mem(keyStream, sizeof(keyStream));
+ clear_mem(&zucState, sizeof(zucState));
+ CLEAR_SCRATCH_GPS();
+ CLEAR_SCRATCH_SIMD_REGS();
+#endif
+#ifndef LINUX
+ RESTORE_XMMS(xmm_save);
+#endif
+}
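+
+/*
+ * Minimal usage sketch for the EIA3 tag computation above (illustrative only;
+ * values are placeholders and the length is given in bits):
+ *
+ *     uint8_t key[16] = {0}, iv[16] = {0}, msg[64] = {0};
+ *     uint32_t mac;
+ *
+ *     zuc_eia3_1_buffer_avx(key, iv, msg, 8 * sizeof(msg), &mac);
+ */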
diff --git a/src/spdk/intel-ipsec-mb/avx2/gcm128_avx_gen4.asm b/src/spdk/intel-ipsec-mb/avx2/gcm128_avx_gen4.asm
new file mode 100644
index 000000000..924602b63
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx2/gcm128_avx_gen4.asm
@@ -0,0 +1,31 @@
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; Copyright(c) 2017-2018, Intel Corporation All rights reserved.
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions
+; are met:
+; * Redistributions of source code must retain the above copyright
+; notice, this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright
+; notice, this list of conditions and the following disclaimer in
+; the documentation and/or other materials provided with the
+; distribution.
+; * Neither the name of Intel Corporation nor the names of its
+; contributors may be used to endorse or promote products derived
+; from this software without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+%define GCM128_MODE 1
+%include "avx2/gcm_avx_gen4.asm"
diff --git a/src/spdk/intel-ipsec-mb/avx2/gcm192_avx_gen4.asm b/src/spdk/intel-ipsec-mb/avx2/gcm192_avx_gen4.asm
new file mode 100644
index 000000000..7295d5b74
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx2/gcm192_avx_gen4.asm
@@ -0,0 +1,31 @@
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; Copyright(c) 2017-2018, Intel Corporation All rights reserved.
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions
+; are met:
+; * Redistributions of source code must retain the above copyright
+; notice, this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright
+; notice, this list of conditions and the following disclaimer in
+; the documentation and/or other materials provided with the
+; distribution.
+; * Neither the name of Intel Corporation nor the names of its
+; contributors may be used to endorse or promote products derived
+; from this software without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+%define GCM192_MODE 1
+%include "avx2/gcm_avx_gen4.asm"
diff --git a/src/spdk/intel-ipsec-mb/avx2/gcm256_avx_gen4.asm b/src/spdk/intel-ipsec-mb/avx2/gcm256_avx_gen4.asm
new file mode 100644
index 000000000..bf2a89cb9
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx2/gcm256_avx_gen4.asm
@@ -0,0 +1,31 @@
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; Copyright(c) 2017-2018, Intel Corporation All rights reserved.
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions
+; are met:
+; * Redistributions of source code must retain the above copyright
+; notice, this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright
+; notice, this list of conditions and the following disclaimer in
+; the documentation and/or other materials provided with the
+; distribution.
+; * Neither the name of Intel Corporation nor the names of its
+; contributors may be used to endorse or promote products derived
+; from this software without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+%define GCM256_MODE 1
+%include "avx2/gcm_avx_gen4.asm"
diff --git a/src/spdk/intel-ipsec-mb/avx2/gcm_avx_gen4.asm b/src/spdk/intel-ipsec-mb/avx2/gcm_avx_gen4.asm
new file mode 100644
index 000000000..88697d9d1
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx2/gcm_avx_gen4.asm
@@ -0,0 +1,3641 @@
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; Copyright(c) 2011-2019, Intel Corporation All rights reserved.
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions
+; are met:
+; * Redistributions of source code must retain the above copyright
+; notice, this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright
+; notice, this list of conditions and the following disclaimer in
+; the documentation and/or other materials provided with the
+; distribution.
+; * Neither the name of Intel Corporation nor the names of its
+; contributors may be used to endorse or promote products derived
+; from this software without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;
+; Authors:
+; Erdinc Ozturk
+; Vinodh Gopal
+; James Guilford
+;
+;
+; References:
+;       This code was derived and highly optimized from the code described in the paper:
+;               Vinodh Gopal et al. Optimized Galois-Counter-Mode Implementation on Intel Architecture Processors. August, 2010
+;       The details of the implementation are explained in:
+;               Erdinc Ozturk et al. Enabling High-Performance Galois-Counter-Mode on Intel Architecture Processors. October, 2012.
+;
+;
+;
+;
+; Assumptions:
+;
+;
+;
+; iv:
+; 0 1 2 3
+; 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+; +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+; | Salt (From the SA) |
+; +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+; | Initialization Vector |
+; | (This is the sequence number from IPSec header) |
+; +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+; | 0x1 |
+; +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+;
+;
+;
+; AAD:
+; AAD will be padded with 0 to the next 16byte multiple
+; for example, assume AAD is a u32 vector
+;
+; if AAD is 8 bytes:
+; AAD[3] = {A0, A1};
+; padded AAD in xmm register = {A1 A0 0 0}
+;
+; 0 1 2 3
+; 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+; +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+; | SPI (A1) |
+; +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+; | 32-bit Sequence Number (A0) |
+; +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+; | 0x0 |
+; +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+;
+; AAD Format with 32-bit Sequence Number
+;
+; if AAD is 12 bytes:
+; AAD[3] = {A0, A1, A2};
+; padded AAD in xmm register = {A2 A1 A0 0}
+;
+; 0 1 2 3
+; 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+; +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+; | SPI (A2) |
+; +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+; | 64-bit Extended Sequence Number {A1,A0} |
+; | |
+; +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+; | 0x0 |
+; +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+;
+; AAD Format with 64-bit Extended Sequence Number
+;
+;
+; aadLen:
+;       Must be a multiple of 4 bytes, as per the definition of the spec.
+;       The code additionally supports aadLen of any length.
+;
+; TLen:
+; from the definition of the spec, TLen can only be 8, 12 or 16 bytes.
+;
+; poly = x^128 + x^127 + x^126 + x^121 + 1
+; Throughout the code, one-tab and two-tab indentations are used: one tab is for the GHASH part, two tabs are for the AES part.
+;
+
+%include "include/os.asm"
+%include "include/reg_sizes.asm"
+%include "include/clear_regs.asm"
+%include "include/gcm_defines.asm"
+%include "include/gcm_keys_avx2_avx512.asm"
+%include "include/memcpy.asm"
+
+%ifndef GCM128_MODE
+%ifndef GCM192_MODE
+%ifndef GCM256_MODE
+%error "No GCM mode selected for gcm_avx_gen4.asm!"
+%endif
+%endif
+%endif
+
+;; Decide on AES-GCM key size to compile for
+%ifdef GCM128_MODE
+%define NROUNDS 9
+%define FN_NAME(x,y) aes_gcm_ %+ x %+ _128 %+ y %+ avx_gen4
+%endif
+
+%ifdef GCM192_MODE
+%define NROUNDS 11
+%define FN_NAME(x,y) aes_gcm_ %+ x %+ _192 %+ y %+ avx_gen4
+%endif
+
+%ifdef GCM256_MODE
+%define NROUNDS 13
+%define FN_NAME(x,y) aes_gcm_ %+ x %+ _256 %+ y %+ avx_gen4
+%endif
+
+section .text
+default rel
+
+; need to push 4 registers into stack to maintain
+%define STACK_OFFSET 8*4
+
+%define TMP2 16*0 ; Temporary storage for AES State 2 (State 1 is stored in an XMM register)
+%define TMP3 16*1 ; Temporary storage for AES State 3
+%define TMP4 16*2 ; Temporary storage for AES State 4
+%define TMP5 16*3 ; Temporary storage for AES State 5
+%define TMP6 16*4 ; Temporary storage for AES State 6
+%define TMP7 16*5 ; Temporary storage for AES State 7
+%define TMP8 16*6 ; Temporary storage for AES State 8
+
+%define LOCAL_STORAGE 16*7
+
+%ifidn __OUTPUT_FORMAT__, win64
+ %define XMM_STORAGE 16*10
+%else
+ %define XMM_STORAGE 0
+%endif
+
+%define VARIABLE_OFFSET LOCAL_STORAGE + XMM_STORAGE
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; Utility Macros
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; GHASH_MUL MACRO to implement: Data*HashKey mod (128,127,126,121,0)
+; Input: A and B (128-bits each, bit-reflected)
+; Output: C = A*B*x mod poly, (i.e. >>1 )
+; To compute GH = GH*HashKey mod poly, give HK = HashKey<<1 mod poly as input
+; GH = GH * HK * x mod poly which is equivalent to GH*HashKey mod poly.
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+%macro GHASH_MUL 7
+%define %%GH %1 ; 16 Bytes
+%define %%HK %2 ; 16 Bytes
+%define %%T1 %3
+%define %%T2 %4
+%define %%T3 %5
+%define %%T4 %6
+%define %%T5 %7
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+ vpclmulqdq %%T1, %%GH, %%HK, 0x11 ; %%T1 = a1*b1
+ vpclmulqdq %%T2, %%GH, %%HK, 0x00 ; %%T2 = a0*b0
+ vpclmulqdq %%T3, %%GH, %%HK, 0x01 ; %%T3 = a1*b0
+ vpclmulqdq %%GH, %%GH, %%HK, 0x10 ; %%GH = a0*b1
+ vpxor %%GH, %%GH, %%T3
+
+
+ vpsrldq %%T3, %%GH, 8 ; shift-R %%GH 2 DWs
+ vpslldq %%GH, %%GH, 8 ; shift-L %%GH 2 DWs
+
+ vpxor %%T1, %%T1, %%T3
+ vpxor %%GH, %%GH, %%T2
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;first phase of the reduction
+ vmovdqa %%T3, [rel POLY2]
+
+ vpclmulqdq %%T2, %%T3, %%GH, 0x01
+ vpslldq %%T2, %%T2, 8 ; shift-L %%T2 2 DWs
+
+ vpxor %%GH, %%GH, %%T2 ; first phase of the reduction complete
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;second phase of the reduction
+ vpclmulqdq %%T2, %%T3, %%GH, 0x00
+ vpsrldq %%T2, %%T2, 4 ; shift-R %%T2 1 DW (Shift-R only 1-DW to obtain 2-DWs shift-R)
+
+ vpclmulqdq %%GH, %%T3, %%GH, 0x10
+ vpslldq %%GH, %%GH, 4 ; shift-L %%GH 1 DW (Shift-L 1-DW to obtain result with no shifts)
+
+ vpxor %%GH, %%GH, %%T2 ; second phase of the reduction complete
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ vpxor %%GH, %%GH, %%T1 ; the result is in %%GH
+
+%endmacro
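+
+; For reference only: the schoolbook split performed by the four VPCLMULQDQ
+; instructions above, as a C-style sketch (clmul64() is an illustrative
+; stand-in for a 64x64-bit carry-less multiply):
+;
+;     hi  = clmul64(a1, b1);                      /* terms x^127..x^64 */
+;     lo  = clmul64(a0, b0);                      /* terms x^63..x^0   */
+;     mid = clmul64(a1, b0) ^ clmul64(a0, b1);    /* folded into hi:lo */
+;     /* then reduce the 256-bit product modulo the GCM polynomial (POLY2) */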
+
+
+; In PRECOMPUTE, the commands filling Hashkey_i_k are not required for avx_gen4
+; functions, but are kept to allow users to switch cpu architectures between calls
+; of pre, init, update, and finalize.
+%macro PRECOMPUTE 8
+%define %%GDATA %1
+%define %%HK %2
+%define %%T1 %3
+%define %%T2 %4
+%define %%T3 %5
+%define %%T4 %6
+%define %%T5 %7
+%define %%T6 %8
+
+	; Hashkey_i_k holds XORed values of the low and high parts of the HashKey_i
+ vmovdqa %%T5, %%HK
+
+ GHASH_MUL %%T5, %%HK, %%T1, %%T3, %%T4, %%T6, %%T2 ; %%T5 = HashKey^2<<1 mod poly
+ vmovdqu [%%GDATA + HashKey_2], %%T5 ; [HashKey_2] = HashKey^2<<1 mod poly
+
+ GHASH_MUL %%T5, %%HK, %%T1, %%T3, %%T4, %%T6, %%T2 ; %%T5 = HashKey^3<<1 mod poly
+ vmovdqu [%%GDATA + HashKey_3], %%T5
+
+ GHASH_MUL %%T5, %%HK, %%T1, %%T3, %%T4, %%T6, %%T2 ; %%T5 = HashKey^4<<1 mod poly
+ vmovdqu [%%GDATA + HashKey_4], %%T5
+
+ GHASH_MUL %%T5, %%HK, %%T1, %%T3, %%T4, %%T6, %%T2 ; %%T5 = HashKey^5<<1 mod poly
+ vmovdqu [%%GDATA + HashKey_5], %%T5
+
+ GHASH_MUL %%T5, %%HK, %%T1, %%T3, %%T4, %%T6, %%T2 ; %%T5 = HashKey^6<<1 mod poly
+ vmovdqu [%%GDATA + HashKey_6], %%T5
+
+ GHASH_MUL %%T5, %%HK, %%T1, %%T3, %%T4, %%T6, %%T2 ; %%T5 = HashKey^7<<1 mod poly
+ vmovdqu [%%GDATA + HashKey_7], %%T5
+
+ GHASH_MUL %%T5, %%HK, %%T1, %%T3, %%T4, %%T6, %%T2 ; %%T5 = HashKey^8<<1 mod poly
+ vmovdqu [%%GDATA + HashKey_8], %%T5
+%endmacro
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; READ_SMALL_DATA_INPUT: Packs xmm register with data when data input is less than 16 bytes.
+; Returns 0 if data has length 0.
+; Input: The input data (INPUT), that data's length (LENGTH).
+; Output: The packed xmm register (OUTPUT).
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+%macro READ_SMALL_DATA_INPUT 6
+%define %%OUTPUT %1 ; %%OUTPUT is an xmm register
+%define %%INPUT %2
+%define %%LENGTH %3
+%define %%END_READ_LOCATION %4 ; All this and the lower inputs are temp registers
+%define %%COUNTER %5
+%define %%TMP1 %6
+
+ vpxor %%OUTPUT, %%OUTPUT
+ mov %%COUNTER, %%LENGTH
+ mov %%END_READ_LOCATION, %%INPUT
+ add %%END_READ_LOCATION, %%LENGTH
+ xor %%TMP1, %%TMP1
+
+
+ cmp %%COUNTER, 8
+ jl %%_byte_loop_2
+	vpinsrq	%%OUTPUT, [%%INPUT], 0	;Read in 8 bytes if they exist
+ je %%_done
+
+ sub %%COUNTER, 8
+
+%%_byte_loop_1: ;Read in data 1 byte at a time while data is left
+ shl %%TMP1, 8 ;This loop handles when 8 bytes were already read in
+ dec %%END_READ_LOCATION
+ mov BYTE(%%TMP1), BYTE [%%END_READ_LOCATION]
+ dec %%COUNTER
+ jg %%_byte_loop_1
+ vpinsrq %%OUTPUT, %%TMP1, 1
+ jmp %%_done
+
+%%_byte_loop_2: ;Read in data 1 byte at a time while data is left
+ ;; NOTE: in current implementation check for zero length is obsolete here.
+ ;; The adequate checks are done by callers of this macro.
+ ;; cmp %%COUNTER, 0
+ ;; je %%_done
+ shl %%TMP1, 8 ;This loop handles when no bytes were already read in
+ dec %%END_READ_LOCATION
+ mov BYTE(%%TMP1), BYTE [%%END_READ_LOCATION]
+ dec %%COUNTER
+ jg %%_byte_loop_2
+ vpinsrq %%OUTPUT, %%TMP1, 0
+%%_done:
+
+%endmacro ; READ_SMALL_DATA_INPUT
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; CALC_AAD_HASH: Calculates the hash of the data which will not be encrypted.
+; Input: The input data (A_IN), that data's length (A_LEN), and the hash key (HASH_KEY).
+; Output: The hash of the data (AAD_HASH).
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+%macro CALC_AAD_HASH 15
+%define %%A_IN %1
+%define %%A_LEN %2
+%define %%AAD_HASH %3
+%define %%GDATA_KEY %4
+%define %%XTMP0 %5 ; xmm temp reg 5
+%define %%XTMP1 %6 ; xmm temp reg 5
+%define %%XTMP2 %7
+%define %%XTMP3 %8
+%define %%XTMP4 %9
+%define %%XTMP5 %10 ; xmm temp reg 5
+%define %%T1 %11 ; temp reg 1
+%define %%T2 %12
+%define %%T3 %13
+%define %%T4 %14
+%define %%T5 %15 ; temp reg 5
+
+
+ mov %%T1, %%A_IN ; T1 = AAD
+ mov %%T2, %%A_LEN ; T2 = aadLen
+ vpxor %%AAD_HASH, %%AAD_HASH
+
+%%_get_AAD_loop128:
+ cmp %%T2, 128
+ jl %%_exit_AAD_loop128
+
+ vmovdqu %%XTMP0, [%%T1 + 16*0]
+ vpshufb %%XTMP0, [rel SHUF_MASK]
+
+ vpxor %%XTMP0, %%AAD_HASH
+
+ vmovdqu %%XTMP5, [%%GDATA_KEY + HashKey_8]
+ vpclmulqdq %%XTMP1, %%XTMP0, %%XTMP5, 0x11 ; %%T1 = a1*b1
+ vpclmulqdq %%XTMP2, %%XTMP0, %%XTMP5, 0x00 ; %%T2 = a0*b0
+ vpclmulqdq %%XTMP3, %%XTMP0, %%XTMP5, 0x01 ; %%T3 = a1*b0
+ vpclmulqdq %%XTMP4, %%XTMP0, %%XTMP5, 0x10 ; %%T4 = a0*b1
+ vpxor %%XTMP3, %%XTMP3, %%XTMP4 ; %%T3 = a1*b0 + a0*b1
+
+%assign i 1
+%assign j 7
+%rep 7
+ vmovdqu %%XTMP0, [%%T1 + 16*i]
+ vpshufb %%XTMP0, [rel SHUF_MASK]
+
+ vmovdqu %%XTMP5, [%%GDATA_KEY + HashKey_ %+ j]
+ vpclmulqdq %%XTMP4, %%XTMP0, %%XTMP5, 0x11 ; %%T1 = T1 + a1*b1
+ vpxor %%XTMP1, %%XTMP1, %%XTMP4
+
+ vpclmulqdq %%XTMP4, %%XTMP0, %%XTMP5, 0x00 ; %%T2 = T2 + a0*b0
+ vpxor %%XTMP2, %%XTMP2, %%XTMP4
+
+ vpclmulqdq %%XTMP4, %%XTMP0, %%XTMP5, 0x01 ; %%T3 = T3 + a1*b0 + a0*b1
+ vpxor %%XTMP3, %%XTMP3, %%XTMP4
+ vpclmulqdq %%XTMP4, %%XTMP0, %%XTMP5, 0x10
+ vpxor %%XTMP3, %%XTMP3, %%XTMP4
+%assign i (i + 1)
+%assign j (j - 1)
+%endrep
+
+ vpslldq %%XTMP4, %%XTMP3, 8 ; shift-L 2 DWs
+ vpsrldq %%XTMP3, %%XTMP3, 8 ; shift-R 2 DWs
+ vpxor %%XTMP2, %%XTMP2, %%XTMP4
+ vpxor %%XTMP1, %%XTMP1, %%XTMP3 ; accumulate the results in %%T1(M):%%T2(L)
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;first phase of the reduction
+ vmovdqa %%XTMP5, [rel POLY2]
+ vpclmulqdq %%XTMP0, %%XTMP5, %%XTMP2, 0x01
+ vpslldq %%XTMP0, %%XTMP0, 8 ; shift-L xmm2 2 DWs
+ vpxor %%XTMP2, %%XTMP2, %%XTMP0 ; first phase of the reduction complete
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;second phase of the reduction
+ vpclmulqdq %%XTMP3, %%XTMP5, %%XTMP2, 0x00
+ vpsrldq %%XTMP3, %%XTMP3, 4 ; shift-R 1 DW (Shift-R only 1-DW to obtain 2-DWs shift-R)
+
+ vpclmulqdq %%XTMP4, %%XTMP5, %%XTMP2, 0x10
+ vpslldq %%XTMP4, %%XTMP4, 4 ; shift-L 1 DW (Shift-L 1-DW to obtain result with no shifts)
+
+ vpxor %%XTMP4, %%XTMP4, %%XTMP3 ; second phase of the reduction complete
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ vpxor %%AAD_HASH, %%XTMP1, %%XTMP4 ; the result is in %%T1
+
+ sub %%T2, 128
+ je %%_CALC_AAD_done
+
+ add %%T1, 128
+ jmp %%_get_AAD_loop128
+
+%%_exit_AAD_loop128:
+ cmp %%T2, 16
+ jl %%_get_small_AAD_block
+
+ ;; calculate hash_key position to start with
+ mov %%T3, %%T2
+ and %%T3, -16 ; 1 to 7 blocks possible here
+ neg %%T3
+ add %%T3, HashKey_1 + 16
+ lea %%T3, [%%GDATA_KEY + %%T3]
+
+ vmovdqu %%XTMP0, [%%T1]
+ vpshufb %%XTMP0, [rel SHUF_MASK]
+
+ vpxor %%XTMP0, %%AAD_HASH
+
+ vmovdqu %%XTMP5, [%%T3]
+ vpclmulqdq %%XTMP1, %%XTMP0, %%XTMP5, 0x11 ; %%T1 = a1*b1
+ vpclmulqdq %%XTMP2, %%XTMP0, %%XTMP5, 0x00 ; %%T2 = a0*b0
+ vpclmulqdq %%XTMP3, %%XTMP0, %%XTMP5, 0x01 ; %%T3 = a1*b0
+ vpclmulqdq %%XTMP4, %%XTMP0, %%XTMP5, 0x10 ; %%T4 = a0*b1
+ vpxor %%XTMP3, %%XTMP3, %%XTMP4 ; %%T3 = a1*b0 + a0*b1
+
+ add %%T3, 16 ; move to next hashkey
+ add %%T1, 16 ; move to next data block
+ sub %%T2, 16
+ cmp %%T2, 16
+ jl %%_AAD_reduce
+
+%%_AAD_blocks:
+ vmovdqu %%XTMP0, [%%T1]
+ vpshufb %%XTMP0, [rel SHUF_MASK]
+
+ vmovdqu %%XTMP5, [%%T3]
+ vpclmulqdq %%XTMP4, %%XTMP0, %%XTMP5, 0x11 ; %%T1 = T1 + a1*b1
+ vpxor %%XTMP1, %%XTMP1, %%XTMP4
+
+ vpclmulqdq %%XTMP4, %%XTMP0, %%XTMP5, 0x00 ; %%T2 = T2 + a0*b0
+ vpxor %%XTMP2, %%XTMP2, %%XTMP4
+
+ vpclmulqdq %%XTMP4, %%XTMP0, %%XTMP5, 0x01 ; %%T3 = T3 + a1*b0 + a0*b1
+ vpxor %%XTMP3, %%XTMP3, %%XTMP4
+ vpclmulqdq %%XTMP4, %%XTMP0, %%XTMP5, 0x10
+ vpxor %%XTMP3, %%XTMP3, %%XTMP4
+
+ add %%T3, 16 ; move to next hashkey
+ add %%T1, 16
+ sub %%T2, 16
+ cmp %%T2, 16
+ jl %%_AAD_reduce
+ jmp %%_AAD_blocks
+
+%%_AAD_reduce:
+ vpslldq %%XTMP4, %%XTMP3, 8 ; shift-L 2 DWs
+ vpsrldq %%XTMP3, %%XTMP3, 8 ; shift-R 2 DWs
+ vpxor %%XTMP2, %%XTMP2, %%XTMP4
+        vpxor   %%XTMP1, %%XTMP1, %%XTMP3              ; accumulate the results in %%XTMP1(M):%%XTMP2(L)
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;first phase of the reduction
+ vmovdqa %%XTMP5, [rel POLY2]
+ vpclmulqdq %%XTMP0, %%XTMP5, %%XTMP2, 0x01
+ vpslldq %%XTMP0, %%XTMP0, 8 ; shift-L xmm2 2 DWs
+ vpxor %%XTMP2, %%XTMP2, %%XTMP0 ; first phase of the reduction complete
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;second phase of the reduction
+ vpclmulqdq %%XTMP3, %%XTMP5, %%XTMP2, 0x00
+ vpsrldq %%XTMP3, %%XTMP3, 4 ; shift-R 1 DW (Shift-R only 1-DW to obtain 2-DWs shift-R)
+
+ vpclmulqdq %%XTMP4, %%XTMP5, %%XTMP2, 0x10
+ vpslldq %%XTMP4, %%XTMP4, 4 ; shift-L 1 DW (Shift-L 1-DW to obtain result with no shifts)
+
+ vpxor %%XTMP4, %%XTMP4, %%XTMP3 ; second phase of the reduction complete
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+        vpxor   %%AAD_HASH, %%XTMP1, %%XTMP4           ; the result is in %%AAD_HASH
+
+ or %%T2, %%T2
+ je %%_CALC_AAD_done
+
+%%_get_small_AAD_block:
+ vmovdqu %%XTMP0, [%%GDATA_KEY + HashKey]
+ READ_SMALL_DATA_INPUT %%XTMP1, %%T1, %%T2, %%T3, %%T4, %%T5
+ ;byte-reflect the AAD data
+ vpshufb %%XTMP1, [rel SHUF_MASK]
+ vpxor %%AAD_HASH, %%XTMP1
+ GHASH_MUL %%AAD_HASH, %%XTMP0, %%XTMP1, %%XTMP2, %%XTMP3, %%XTMP4, %%XTMP5
+
+%%_CALC_AAD_done:
+
+%endmacro ; CALC_AAD_HASH
+
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; PARTIAL_BLOCK: Handles encryption/decryption and the tag partial blocks between update calls.
+; Requires the input data be at least 1 byte long.
+; Input: gcm_key_data * (GDATA_KEY), gcm_context_data *(GDATA_CTX), input text (PLAIN_CYPH_IN),
+; input text length (PLAIN_CYPH_LEN), the current data offset (DATA_OFFSET),
+; and whether encoding or decoding (ENC_DEC)
+; Output: A cypher of the first partial block (CYPH_PLAIN_OUT), and updated GDATA_CTX
+; Clobbers rax, r10, r12, r13, r15, xmm0, xmm1, xmm2, xmm3, xmm5, xmm6, xmm9, xmm10, xmm11, xmm13
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
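+; Flow summary: reload E(K, Yn) saved in PBlockEncKey, align its unused keystream bytes
+; with the incoming data using a mask from SHIFT_MASK, XOR to produce the output text,
+; fold the resulting ciphertext bytes into AAD_HASH (running a GHASH reduction once the
+; 16-byte block is complete) and write out the bytes produced.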
+%macro PARTIAL_BLOCK 8
+%define %%GDATA_KEY %1
+%define %%GDATA_CTX %2
+%define %%CYPH_PLAIN_OUT %3
+%define %%PLAIN_CYPH_IN %4
+%define %%PLAIN_CYPH_LEN %5
+%define %%DATA_OFFSET %6
+%define %%AAD_HASH %7
+%define %%ENC_DEC %8
+
+ mov r13, [%%GDATA_CTX + PBlockLen]
+ cmp r13, 0
+ je %%_partial_block_done ;Leave Macro if no partial blocks
+
+ cmp %%PLAIN_CYPH_LEN, 16 ;Read in input data without over reading
+ jl %%_fewer_than_16_bytes
+        VXLDR   xmm1, [%%PLAIN_CYPH_IN]                ;If at least 16 bytes of data, just fill the xmm register
+ jmp %%_data_read
+
+%%_fewer_than_16_bytes:
+ lea r10, [%%PLAIN_CYPH_IN + %%DATA_OFFSET]
+ READ_SMALL_DATA_INPUT xmm1, r10, %%PLAIN_CYPH_LEN, rax, r12, r15
+
+%%_data_read: ;Finished reading in data
+
+
+ vmovdqu xmm9, [%%GDATA_CTX + PBlockEncKey] ;xmm9 = my_ctx_data.partial_block_enc_key
+ vmovdqu xmm13, [%%GDATA_KEY + HashKey]
+
+ lea r12, [rel SHIFT_MASK]
+
+        add     r12, r13                               ; adjust the shuffle mask pointer to shift right r13 bytes (r13 = number of bytes already in the partial block)
+ vmovdqu xmm2, [r12] ; get the appropriate shuffle mask
+ vpshufb xmm9, xmm2 ;shift right r13 bytes
+
+%ifidn %%ENC_DEC, DEC
+ vmovdqa xmm3, xmm1
+ vpxor xmm9, xmm1 ; Cyphertext XOR E(K, Yn)
+
+ mov r15, %%PLAIN_CYPH_LEN
+ add r15, r13
+ sub r15, 16 ;Set r15 to be the amount of data left in CYPH_PLAIN_IN after filling the block
+        jge     %%_no_extra_mask_1                     ;Determine if partial block is not being filled and shift mask accordingly
+ sub r12, r15
+%%_no_extra_mask_1:
+
+ vmovdqu xmm1, [r12 + ALL_F - SHIFT_MASK]; get the appropriate mask to mask out bottom r13 bytes of xmm9
+ vpand xmm9, xmm1 ; mask out bottom r13 bytes of xmm9
+
+ vpand xmm3, xmm1
+ vpshufb xmm3, [rel SHUF_MASK]
+ vpshufb xmm3, xmm2
+ vpxor %%AAD_HASH, xmm3
+
+
+ cmp r15,0
+ jl %%_partial_incomplete_1
+
+ GHASH_MUL %%AAD_HASH, xmm13, xmm0, xmm10, xmm11, xmm5, xmm6 ;GHASH computation for the last <16 Byte block
+ xor rax,rax
+ mov [%%GDATA_CTX + PBlockLen], rax
+ jmp %%_dec_done
+%%_partial_incomplete_1:
+%ifidn __OUTPUT_FORMAT__, win64
+ mov rax, %%PLAIN_CYPH_LEN
+ add [%%GDATA_CTX + PBlockLen], rax
+%else
+ add [%%GDATA_CTX + PBlockLen], %%PLAIN_CYPH_LEN
+%endif
+%%_dec_done:
+ vmovdqu [%%GDATA_CTX + AadHash], %%AAD_HASH
+
+%else
+ vpxor xmm9, xmm1 ; Plaintext XOR E(K, Yn)
+
+ mov r15, %%PLAIN_CYPH_LEN
+ add r15, r13
+ sub r15, 16 ;Set r15 to be the amount of data left in CYPH_PLAIN_IN after filling the block
+        jge     %%_no_extra_mask_2                     ;Determine if partial block is not being filled and shift mask accordingly
+ sub r12, r15
+%%_no_extra_mask_2:
+
+ vmovdqu xmm1, [r12 + ALL_F-SHIFT_MASK] ; get the appropriate mask to mask out bottom r13 bytes of xmm9
+ vpand xmm9, xmm1 ; mask out bottom r13 bytes of xmm9
+
+ vpshufb xmm9, [rel SHUF_MASK]
+ vpshufb xmm9, xmm2
+ vpxor %%AAD_HASH, xmm9
+
+ cmp r15,0
+ jl %%_partial_incomplete_2
+
+ GHASH_MUL %%AAD_HASH, xmm13, xmm0, xmm10, xmm11, xmm5, xmm6 ;GHASH computation for the last <16 Byte block
+ xor rax,rax
+ mov [%%GDATA_CTX + PBlockLen], rax
+ jmp %%_encode_done
+%%_partial_incomplete_2:
+%ifidn __OUTPUT_FORMAT__, win64
+ mov rax, %%PLAIN_CYPH_LEN
+ add [%%GDATA_CTX + PBlockLen], rax
+%else
+ add [%%GDATA_CTX + PBlockLen], %%PLAIN_CYPH_LEN
+%endif
+%%_encode_done:
+ vmovdqu [%%GDATA_CTX + AadHash], %%AAD_HASH
+
+ vpshufb xmm9, [rel SHUF_MASK] ; shuffle xmm9 back to output as ciphertext
+ vpshufb xmm9, xmm2
+%endif
+
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ; output encrypted Bytes
+ cmp r15,0
+ jl %%_partial_fill
+ mov r12, r13
+ mov r13, 16
+ sub r13, r12 ; Set r13 to be the number of bytes to write out
+ jmp %%_count_set
+%%_partial_fill:
+ mov r13, %%PLAIN_CYPH_LEN
+%%_count_set:
+ vmovq rax, xmm9
+ cmp r13, 8
+ jle %%_less_than_8_bytes_left
+
+ mov [%%CYPH_PLAIN_OUT+ %%DATA_OFFSET], rax
+ add %%DATA_OFFSET, 8
+ vpsrldq xmm9, xmm9, 8
+ vmovq rax, xmm9
+ sub r13, 8
+%%_less_than_8_bytes_left:
+ mov BYTE [%%CYPH_PLAIN_OUT + %%DATA_OFFSET], al
+ add %%DATA_OFFSET, 1
+ shr rax, 8
+ sub r13, 1
+ jne %%_less_than_8_bytes_left
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+%%_partial_block_done:
+%endmacro ; PARTIAL_BLOCK
+
+
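+;;; GHASH_SINGLE_MUL: multiply one ciphertext block (CIPHER) by the hash key power at
+;;; [GDATA + HASHKEY] and accumulate the partial products a1*b1, a0*b0 and
+;;; a1*b0 + a0*b1 in STATE_11, STATE_00 and STATE_MID respectively.
+;;; When FIRST is 'first' the accumulators are initialized rather than XORed into.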
+%macro GHASH_SINGLE_MUL 9
+%define %%GDATA %1
+%define %%HASHKEY %2
+%define %%CIPHER %3
+%define %%STATE_11 %4
+%define %%STATE_00 %5
+%define %%STATE_MID %6
+%define %%T1 %7
+%define %%T2 %8
+%define %%FIRST %9
+
+ vmovdqu %%T1, [%%GDATA + %%HASHKEY]
+%ifidn %%FIRST, first
+ vpclmulqdq %%STATE_11, %%CIPHER, %%T1, 0x11 ; %%T4 = a1*b1
+ vpclmulqdq %%STATE_00, %%CIPHER, %%T1, 0x00 ; %%T4_2 = a0*b0
+ vpclmulqdq %%STATE_MID, %%CIPHER, %%T1, 0x01 ; %%T6 = a1*b0
+ vpclmulqdq %%T2, %%CIPHER, %%T1, 0x10 ; %%T5 = a0*b1
+ vpxor %%STATE_MID, %%STATE_MID, %%T2
+%else
+ vpclmulqdq %%T2, %%CIPHER, %%T1, 0x11
+ vpxor %%STATE_11, %%STATE_11, %%T2
+
+ vpclmulqdq %%T2, %%CIPHER, %%T1, 0x00
+ vpxor %%STATE_00, %%STATE_00, %%T2
+
+ vpclmulqdq %%T2, %%CIPHER, %%T1, 0x01
+ vpxor %%STATE_MID, %%STATE_MID, %%T2
+
+ vpclmulqdq %%T2, %%CIPHER, %%T1, 0x10
+ vpxor %%STATE_MID, %%STATE_MID, %%T2
+%endif
+
+%endmacro
+
+; if a = number of total plaintext bytes
+; b = floor(a/16)
+; %%num_initial_blocks = b mod 8;
+; encrypt the initial %%num_initial_blocks blocks and apply ghash on the ciphertext
+; %%GDATA_KEY, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, r14 are used as a pointer only, not modified.
+; Updated AAD_HASH is returned in %%T3
+
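+; Flow summary: the first %%num_initial_blocks counter blocks are encrypted and XORed
+; with the input; their ciphertext is then GHASHed (via GHASH_SINGLE_MUL) while the
+; AES rounds for the following 8 counter blocks are in flight, hiding the vpclmulqdq
+; latency behind the vaesenc chain.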
+%macro INITIAL_BLOCKS 23
+%define %%GDATA_KEY %1
+%define %%CYPH_PLAIN_OUT %2
+%define %%PLAIN_CYPH_IN %3
+%define %%LENGTH %4
+%define %%DATA_OFFSET %5
+%define %%num_initial_blocks %6 ; can be 0, 1, 2, 3, 4, 5, 6 or 7
+%define %%T1 %7
+%define %%T2 %8
+%define %%T3 %9
+%define %%T4 %10
+%define %%T5 %11
+%define %%CTR %12
+%define %%XMM1 %13
+%define %%XMM2 %14
+%define %%XMM3 %15
+%define %%XMM4 %16
+%define %%XMM5 %17
+%define %%XMM6 %18
+%define %%XMM7 %19
+%define %%XMM8 %20
+%define %%T6 %21
+%define %%T_key %22
+%define %%ENC_DEC %23
+
+%assign i (8-%%num_initial_blocks)
+ ;; Move AAD_HASH to temp reg
+ vmovdqu %%T2, %%XMM8
+ ;; Start AES for %%num_initial_blocks blocks
+ ;; vmovdqu %%CTR, [%%GDATA_CTX + CurCount] ; %%CTR = Y0
+
+%assign i (9-%%num_initial_blocks)
+%rep %%num_initial_blocks
+ vpaddd %%CTR, %%CTR, [rel ONE] ; INCR Y0
+ vmovdqa reg(i), %%CTR
+ vpshufb reg(i), [rel SHUF_MASK] ; perform a 16Byte swap
+%assign i (i+1)
+%endrep
+
+%if(%%num_initial_blocks>0)
+vmovdqu %%T_key, [%%GDATA_KEY+16*0]
+%assign i (9-%%num_initial_blocks)
+%rep %%num_initial_blocks
+ vpxor reg(i),reg(i),%%T_key
+%assign i (i+1)
+%endrep
+
+%assign j 1
+%rep NROUNDS
+vmovdqu %%T_key, [%%GDATA_KEY+16*j]
+%assign i (9-%%num_initial_blocks)
+%rep %%num_initial_blocks
+ vaesenc reg(i),%%T_key
+%assign i (i+1)
+%endrep
+
+%assign j (j+1)
+%endrep
+
+
+vmovdqu %%T_key, [%%GDATA_KEY+16*j]
+%assign i (9-%%num_initial_blocks)
+%rep %%num_initial_blocks
+ vaesenclast reg(i),%%T_key
+%assign i (i+1)
+%endrep
+
+%endif ; %if(%%num_initial_blocks>0)
+
+
+
+%assign i (9-%%num_initial_blocks)
+%rep %%num_initial_blocks
+ VXLDR %%T1, [%%PLAIN_CYPH_IN + %%DATA_OFFSET]
+ vpxor reg(i), reg(i), %%T1
+ ;; Write back ciphertext for %%num_initial_blocks blocks
+ VXSTR [%%CYPH_PLAIN_OUT + %%DATA_OFFSET], reg(i)
+ add %%DATA_OFFSET, 16
+ %ifidn %%ENC_DEC, DEC
+ vmovdqa reg(i), %%T1
+ %endif
+ ;; Prepare ciphertext for GHASH computations
+ vpshufb reg(i), [rel SHUF_MASK]
+%assign i (i+1)
+%endrep
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+%assign i (9-%%num_initial_blocks)
+%if(%%num_initial_blocks>0)
+ vmovdqa %%T3, reg(i)
+%assign i (i+1)
+%endif
+%if(%%num_initial_blocks>1)
+%rep %%num_initial_blocks-1
+ vmovdqu [rsp + TMP %+ i], reg(i)
+%assign i (i+1)
+%endrep
+%endif
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;; Prepare 8 counter blocks and perform rounds of AES cipher on
+ ;; them, load plain/cipher text and store cipher/plain text.
+ ;; Stitch GHASH computation in between AES rounds.
+ vpaddd %%XMM1, %%CTR, [rel ONE] ; INCR Y0
+ vpaddd %%XMM2, %%CTR, [rel TWO] ; INCR Y0
+ vpaddd %%XMM3, %%XMM1, [rel TWO] ; INCR Y0
+ vpaddd %%XMM4, %%XMM2, [rel TWO] ; INCR Y0
+ vpaddd %%XMM5, %%XMM3, [rel TWO] ; INCR Y0
+ vpaddd %%XMM6, %%XMM4, [rel TWO] ; INCR Y0
+ vpaddd %%XMM7, %%XMM5, [rel TWO] ; INCR Y0
+ vpaddd %%XMM8, %%XMM6, [rel TWO] ; INCR Y0
+ vmovdqa %%CTR, %%XMM8
+
+ vpshufb %%XMM1, [rel SHUF_MASK] ; perform a 16Byte swap
+ vpshufb %%XMM2, [rel SHUF_MASK] ; perform a 16Byte swap
+ vpshufb %%XMM3, [rel SHUF_MASK] ; perform a 16Byte swap
+ vpshufb %%XMM4, [rel SHUF_MASK] ; perform a 16Byte swap
+ vpshufb %%XMM5, [rel SHUF_MASK] ; perform a 16Byte swap
+ vpshufb %%XMM6, [rel SHUF_MASK] ; perform a 16Byte swap
+ vpshufb %%XMM7, [rel SHUF_MASK] ; perform a 16Byte swap
+ vpshufb %%XMM8, [rel SHUF_MASK] ; perform a 16Byte swap
+
+ vmovdqu %%T_key, [%%GDATA_KEY+16*0]
+ vpxor %%XMM1, %%XMM1, %%T_key
+ vpxor %%XMM2, %%XMM2, %%T_key
+ vpxor %%XMM3, %%XMM3, %%T_key
+ vpxor %%XMM4, %%XMM4, %%T_key
+ vpxor %%XMM5, %%XMM5, %%T_key
+ vpxor %%XMM6, %%XMM6, %%T_key
+ vpxor %%XMM7, %%XMM7, %%T_key
+ vpxor %%XMM8, %%XMM8, %%T_key
+
+%assign i (8-%%num_initial_blocks)
+%assign j (9-%%num_initial_blocks)
+%assign k (%%num_initial_blocks)
+
+%define %%T4_2 %%T4
+%if(%%num_initial_blocks>0)
+ ;; Hash in AES state
+ ;; T2 - incoming AAD hash
+ vpxor %%T2, %%T3
+
+ ;; GDATA, HASHKEY, CIPHER,
+ ;; STATE_11, STATE_00, STATE_MID, T1, T2
+ GHASH_SINGLE_MUL %%GDATA_KEY, HashKey_ %+ k, %%T2, \
+ %%T1, %%T4, %%T6, %%T5, %%T3, first
+%endif
+
+ vmovdqu %%T_key, [%%GDATA_KEY+16*1]
+ vaesenc %%XMM1, %%T_key
+ vaesenc %%XMM2, %%T_key
+ vaesenc %%XMM3, %%T_key
+ vaesenc %%XMM4, %%T_key
+ vaesenc %%XMM5, %%T_key
+ vaesenc %%XMM6, %%T_key
+ vaesenc %%XMM7, %%T_key
+ vaesenc %%XMM8, %%T_key
+
+ vmovdqu %%T_key, [%%GDATA_KEY+16*2]
+ vaesenc %%XMM1, %%T_key
+ vaesenc %%XMM2, %%T_key
+ vaesenc %%XMM3, %%T_key
+ vaesenc %%XMM4, %%T_key
+ vaesenc %%XMM5, %%T_key
+ vaesenc %%XMM6, %%T_key
+ vaesenc %%XMM7, %%T_key
+ vaesenc %%XMM8, %%T_key
+
+%assign i (i+1)
+%assign j (j+1)
+%assign k (k-1)
+%if(%%num_initial_blocks>1)
+ ;; GDATA, HASHKEY, CIPHER,
+ ;; STATE_11, STATE_00, STATE_MID, T1, T2
+ vmovdqu %%T2, [rsp + TMP %+ j]
+ GHASH_SINGLE_MUL %%GDATA_KEY, HashKey_ %+ k, %%T2, \
+ %%T1, %%T4, %%T6, %%T5, %%T3, not_first
+%endif
+
+ vmovdqu %%T_key, [%%GDATA_KEY+16*3]
+ vaesenc %%XMM1, %%T_key
+ vaesenc %%XMM2, %%T_key
+ vaesenc %%XMM3, %%T_key
+ vaesenc %%XMM4, %%T_key
+ vaesenc %%XMM5, %%T_key
+ vaesenc %%XMM6, %%T_key
+ vaesenc %%XMM7, %%T_key
+ vaesenc %%XMM8, %%T_key
+
+ vmovdqu %%T_key, [%%GDATA_KEY+16*4]
+ vaesenc %%XMM1, %%T_key
+ vaesenc %%XMM2, %%T_key
+ vaesenc %%XMM3, %%T_key
+ vaesenc %%XMM4, %%T_key
+ vaesenc %%XMM5, %%T_key
+ vaesenc %%XMM6, %%T_key
+ vaesenc %%XMM7, %%T_key
+ vaesenc %%XMM8, %%T_key
+
+%assign i (i+1)
+%assign j (j+1)
+%assign k (k-1)
+%if(%%num_initial_blocks>2)
+ ;; GDATA, HASHKEY, CIPHER,
+ ;; STATE_11, STATE_00, STATE_MID, T1, T2
+ vmovdqu %%T2, [rsp + TMP %+ j]
+ GHASH_SINGLE_MUL %%GDATA_KEY, HashKey_ %+ k, %%T2, \
+ %%T1, %%T4, %%T6, %%T5, %%T3, not_first
+%endif
+
+%assign i (i+1)
+%assign j (j+1)
+%assign k (k-1)
+%if(%%num_initial_blocks>3)
+ ;; GDATA, HASHKEY, CIPHER,
+ ;; STATE_11, STATE_00, STATE_MID, T1, T2
+ vmovdqu %%T2, [rsp + TMP %+ j]
+ GHASH_SINGLE_MUL %%GDATA_KEY, HashKey_ %+ k, %%T2, \
+ %%T1, %%T4, %%T6, %%T5, %%T3, not_first
+%endif
+
+ vmovdqu %%T_key, [%%GDATA_KEY+16*5]
+ vaesenc %%XMM1, %%T_key
+ vaesenc %%XMM2, %%T_key
+ vaesenc %%XMM3, %%T_key
+ vaesenc %%XMM4, %%T_key
+ vaesenc %%XMM5, %%T_key
+ vaesenc %%XMM6, %%T_key
+ vaesenc %%XMM7, %%T_key
+ vaesenc %%XMM8, %%T_key
+
+ vmovdqu %%T_key, [%%GDATA_KEY+16*6]
+ vaesenc %%XMM1, %%T_key
+ vaesenc %%XMM2, %%T_key
+ vaesenc %%XMM3, %%T_key
+ vaesenc %%XMM4, %%T_key
+ vaesenc %%XMM5, %%T_key
+ vaesenc %%XMM6, %%T_key
+ vaesenc %%XMM7, %%T_key
+ vaesenc %%XMM8, %%T_key
+
+%assign i (i+1)
+%assign j (j+1)
+%assign k (k-1)
+%if(%%num_initial_blocks>4)
+ ;; GDATA, HASHKEY, CIPHER,
+ ;; STATE_11, STATE_00, STATE_MID, T1, T2
+ vmovdqu %%T2, [rsp + TMP %+ j]
+ GHASH_SINGLE_MUL %%GDATA_KEY, HashKey_ %+ k, %%T2, \
+ %%T1, %%T4, %%T6, %%T5, %%T3, not_first
+%endif
+
+ vmovdqu %%T_key, [%%GDATA_KEY+16*7]
+ vaesenc %%XMM1, %%T_key
+ vaesenc %%XMM2, %%T_key
+ vaesenc %%XMM3, %%T_key
+ vaesenc %%XMM4, %%T_key
+ vaesenc %%XMM5, %%T_key
+ vaesenc %%XMM6, %%T_key
+ vaesenc %%XMM7, %%T_key
+ vaesenc %%XMM8, %%T_key
+
+ vmovdqu %%T_key, [%%GDATA_KEY+16*8]
+ vaesenc %%XMM1, %%T_key
+ vaesenc %%XMM2, %%T_key
+ vaesenc %%XMM3, %%T_key
+ vaesenc %%XMM4, %%T_key
+ vaesenc %%XMM5, %%T_key
+ vaesenc %%XMM6, %%T_key
+ vaesenc %%XMM7, %%T_key
+ vaesenc %%XMM8, %%T_key
+
+%assign i (i+1)
+%assign j (j+1)
+%assign k (k-1)
+%if(%%num_initial_blocks>5)
+ ;; GDATA, HASHKEY, CIPHER,
+ ;; STATE_11, STATE_00, STATE_MID, T1, T2
+ vmovdqu %%T2, [rsp + TMP %+ j]
+ GHASH_SINGLE_MUL %%GDATA_KEY, HashKey_ %+ k, %%T2, \
+ %%T1, %%T4, %%T6, %%T5, %%T3, not_first
+%endif
+
+ vmovdqu %%T_key, [%%GDATA_KEY+16*9]
+ vaesenc %%XMM1, %%T_key
+ vaesenc %%XMM2, %%T_key
+ vaesenc %%XMM3, %%T_key
+ vaesenc %%XMM4, %%T_key
+ vaesenc %%XMM5, %%T_key
+ vaesenc %%XMM6, %%T_key
+ vaesenc %%XMM7, %%T_key
+ vaesenc %%XMM8, %%T_key
+
+%ifndef GCM128_MODE
+ vmovdqu %%T_key, [%%GDATA_KEY+16*10]
+ vaesenc %%XMM1, %%T_key
+ vaesenc %%XMM2, %%T_key
+ vaesenc %%XMM3, %%T_key
+ vaesenc %%XMM4, %%T_key
+ vaesenc %%XMM5, %%T_key
+ vaesenc %%XMM6, %%T_key
+ vaesenc %%XMM7, %%T_key
+ vaesenc %%XMM8, %%T_key
+%endif
+
+%assign i (i+1)
+%assign j (j+1)
+%assign k (k-1)
+%if(%%num_initial_blocks>6)
+ ;; GDATA, HASHKEY, CIPHER,
+ ;; STATE_11, STATE_00, STATE_MID, T1, T2
+ vmovdqu %%T2, [rsp + TMP %+ j]
+ GHASH_SINGLE_MUL %%GDATA_KEY, HashKey_ %+ k, %%T2, \
+ %%T1, %%T4, %%T6, %%T5, %%T3, not_first
+%endif
+
+%ifdef GCM128_MODE
+ vmovdqu %%T_key, [%%GDATA_KEY+16*10]
+ vaesenclast %%XMM1, %%T_key
+ vaesenclast %%XMM2, %%T_key
+ vaesenclast %%XMM3, %%T_key
+ vaesenclast %%XMM4, %%T_key
+ vaesenclast %%XMM5, %%T_key
+ vaesenclast %%XMM6, %%T_key
+ vaesenclast %%XMM7, %%T_key
+ vaesenclast %%XMM8, %%T_key
+%endif
+
+%ifdef GCM192_MODE
+ vmovdqu %%T_key, [%%GDATA_KEY+16*11]
+ vaesenc %%XMM1, %%T_key
+ vaesenc %%XMM2, %%T_key
+ vaesenc %%XMM3, %%T_key
+ vaesenc %%XMM4, %%T_key
+ vaesenc %%XMM5, %%T_key
+ vaesenc %%XMM6, %%T_key
+ vaesenc %%XMM7, %%T_key
+ vaesenc %%XMM8, %%T_key
+
+ vmovdqu %%T_key, [%%GDATA_KEY+16*12]
+ vaesenclast %%XMM1, %%T_key
+ vaesenclast %%XMM2, %%T_key
+ vaesenclast %%XMM3, %%T_key
+ vaesenclast %%XMM4, %%T_key
+ vaesenclast %%XMM5, %%T_key
+ vaesenclast %%XMM6, %%T_key
+ vaesenclast %%XMM7, %%T_key
+ vaesenclast %%XMM8, %%T_key
+%endif
+%ifdef GCM256_MODE
+ vmovdqu %%T_key, [%%GDATA_KEY+16*11]
+ vaesenc %%XMM1, %%T_key
+ vaesenc %%XMM2, %%T_key
+ vaesenc %%XMM3, %%T_key
+ vaesenc %%XMM4, %%T_key
+ vaesenc %%XMM5, %%T_key
+ vaesenc %%XMM6, %%T_key
+ vaesenc %%XMM7, %%T_key
+ vaesenc %%XMM8, %%T_key
+
+ vmovdqu %%T_key, [%%GDATA_KEY+16*12]
+ vaesenc %%XMM1, %%T_key
+ vaesenc %%XMM2, %%T_key
+ vaesenc %%XMM3, %%T_key
+ vaesenc %%XMM4, %%T_key
+ vaesenc %%XMM5, %%T_key
+ vaesenc %%XMM6, %%T_key
+ vaesenc %%XMM7, %%T_key
+ vaesenc %%XMM8, %%T_key
+%endif
+
+%assign i (i+1)
+%assign j (j+1)
+%assign k (k-1)
+%if(%%num_initial_blocks>7)
+ ;; GDATA, HASHKEY, CIPHER,
+ ;; STATE_11, STATE_00, STATE_MID, T1, T2
+ vmovdqu %%T2, [rsp + TMP %+ j]
+ GHASH_SINGLE_MUL %%GDATA_KEY, HashKey_ %+ k, %%T2, \
+ %%T1, %%T4, %%T6, %%T5, %%T3, not_first
+%endif
+
+%ifdef GCM256_MODE ; GCM256
+ vmovdqu %%T_key, [%%GDATA_KEY+16*13]
+ vaesenc %%XMM1, %%T_key
+ vaesenc %%XMM2, %%T_key
+ vaesenc %%XMM3, %%T_key
+ vaesenc %%XMM4, %%T_key
+ vaesenc %%XMM5, %%T_key
+ vaesenc %%XMM6, %%T_key
+ vaesenc %%XMM7, %%T_key
+ vaesenc %%XMM8, %%T_key
+
+ vmovdqu %%T_key, [%%GDATA_KEY+16*14]
+ vaesenclast %%XMM1, %%T_key
+ vaesenclast %%XMM2, %%T_key
+ vaesenclast %%XMM3, %%T_key
+ vaesenclast %%XMM4, %%T_key
+ vaesenclast %%XMM5, %%T_key
+ vaesenclast %%XMM6, %%T_key
+ vaesenclast %%XMM7, %%T_key
+ vaesenclast %%XMM8, %%T_key
+%endif ; GCM256 mode
+
+%if(%%num_initial_blocks>0)
+        vpsrldq %%T3, %%T6, 8                          ; shift-R %%T6 2 DWs
+        vpslldq %%T6, %%T6, 8                          ; shift-L %%T6 2 DWs
+ vpxor %%T1, %%T1, %%T3 ; accumulate the results in %%T1:%%T4
+ vpxor %%T4, %%T6, %%T4
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ; First phase of the reduction
+ vmovdqa %%T3, [rel POLY2]
+
+ vpclmulqdq %%T2, %%T3, %%T4, 0x01
+ vpslldq %%T2, %%T2, 8 ; shift-L xmm2 2 DWs
+
+ ;; First phase of the reduction complete
+ vpxor %%T4, %%T4, %%T2
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ; Second phase of the reduction
+ vpclmulqdq %%T2, %%T3, %%T4, 0x00
+ ;; Shift-R xmm2 1 DW (Shift-R only 1-DW to obtain 2-DWs shift-R)
+ vpsrldq %%T2, %%T2, 4
+
+ vpclmulqdq %%T4, %%T3, %%T4, 0x10
+ ;; Shift-L xmm0 1 DW (Shift-L 1-DW to obtain result with no shifts)
+ vpslldq %%T4, %%T4, 4
+ ;; Second phase of the reduction complete
+ vpxor %%T4, %%T4, %%T2
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ; The result is in %%T3
+ vpxor %%T3, %%T1, %%T4
+%else
+ ;; The hash should end up in T3
+ vmovdqa %%T3, %%T2
+%endif
+
+ ;; Final hash is now in T3
+%if %%num_initial_blocks > 0
+ ;; NOTE: obsolete in case %%num_initial_blocks = 0
+ sub %%LENGTH, 16*%%num_initial_blocks
+%endif
+
+ VXLDR %%T1, [%%PLAIN_CYPH_IN + %%DATA_OFFSET + 16*0]
+ vpxor %%XMM1, %%XMM1, %%T1
+ VXSTR [%%CYPH_PLAIN_OUT + %%DATA_OFFSET + 16*0], %%XMM1
+ %ifidn %%ENC_DEC, DEC
+ vmovdqa %%XMM1, %%T1
+ %endif
+
+ VXLDR %%T1, [%%PLAIN_CYPH_IN + %%DATA_OFFSET + 16*1]
+ vpxor %%XMM2, %%XMM2, %%T1
+ VXSTR [%%CYPH_PLAIN_OUT + %%DATA_OFFSET + 16*1], %%XMM2
+ %ifidn %%ENC_DEC, DEC
+ vmovdqa %%XMM2, %%T1
+ %endif
+
+ VXLDR %%T1, [%%PLAIN_CYPH_IN + %%DATA_OFFSET + 16*2]
+ vpxor %%XMM3, %%XMM3, %%T1
+ VXSTR [%%CYPH_PLAIN_OUT + %%DATA_OFFSET + 16*2], %%XMM3
+ %ifidn %%ENC_DEC, DEC
+ vmovdqa %%XMM3, %%T1
+ %endif
+
+ VXLDR %%T1, [%%PLAIN_CYPH_IN + %%DATA_OFFSET + 16*3]
+ vpxor %%XMM4, %%XMM4, %%T1
+ VXSTR [%%CYPH_PLAIN_OUT + %%DATA_OFFSET + 16*3], %%XMM4
+ %ifidn %%ENC_DEC, DEC
+ vmovdqa %%XMM4, %%T1
+ %endif
+
+ VXLDR %%T1, [%%PLAIN_CYPH_IN + %%DATA_OFFSET + 16*4]
+ vpxor %%XMM5, %%XMM5, %%T1
+ VXSTR [%%CYPH_PLAIN_OUT + %%DATA_OFFSET + 16*4], %%XMM5
+ %ifidn %%ENC_DEC, DEC
+ vmovdqa %%XMM5, %%T1
+ %endif
+
+ VXLDR %%T1, [%%PLAIN_CYPH_IN + %%DATA_OFFSET + 16*5]
+ vpxor %%XMM6, %%XMM6, %%T1
+ VXSTR [%%CYPH_PLAIN_OUT + %%DATA_OFFSET + 16*5], %%XMM6
+ %ifidn %%ENC_DEC, DEC
+ vmovdqa %%XMM6, %%T1
+ %endif
+
+ VXLDR %%T1, [%%PLAIN_CYPH_IN + %%DATA_OFFSET + 16*6]
+ vpxor %%XMM7, %%XMM7, %%T1
+ VXSTR [%%CYPH_PLAIN_OUT + %%DATA_OFFSET + 16*6], %%XMM7
+ %ifidn %%ENC_DEC, DEC
+ vmovdqa %%XMM7, %%T1
+ %endif
+
+%if %%num_initial_blocks > 0
+ ;; NOTE: 'jl' is never taken for %%num_initial_blocks = 0
+        ;; This macro is executed for length 128 and up,
+ ;; zero length is checked in GCM_ENC_DEC.
+ ;; If the last block is partial then the xor will be done later
+ ;; in ENCRYPT_FINAL_PARTIAL_BLOCK.
+ ;; We know it's partial if LENGTH - 16*num_initial_blocks < 128
+ cmp %%LENGTH, 128
+ jl %%_initial_skip_last_word_write
+%endif
+ VXLDR %%T1, [%%PLAIN_CYPH_IN + %%DATA_OFFSET + 16*7]
+ vpxor %%XMM8, %%XMM8, %%T1
+ VXSTR [%%CYPH_PLAIN_OUT + %%DATA_OFFSET + 16*7], %%XMM8
+ %ifidn %%ENC_DEC, DEC
+ vmovdqa %%XMM8, %%T1
+ %endif
+
+ ;; Update %%LENGTH with the number of blocks processed
+ sub %%LENGTH, 16
+ add %%DATA_OFFSET, 16
+%%_initial_skip_last_word_write:
+ sub %%LENGTH, 128-16
+ add %%DATA_OFFSET, 128-16
+
+ vpshufb %%XMM1, [rel SHUF_MASK] ; perform a 16Byte swap
+ ;; Combine GHASHed value with the corresponding ciphertext
+ vpxor %%XMM1, %%XMM1, %%T3
+ vpshufb %%XMM2, [rel SHUF_MASK] ; perform a 16Byte swap
+ vpshufb %%XMM3, [rel SHUF_MASK] ; perform a 16Byte swap
+ vpshufb %%XMM4, [rel SHUF_MASK] ; perform a 16Byte swap
+ vpshufb %%XMM5, [rel SHUF_MASK] ; perform a 16Byte swap
+ vpshufb %%XMM6, [rel SHUF_MASK] ; perform a 16Byte swap
+ vpshufb %%XMM7, [rel SHUF_MASK] ; perform a 16Byte swap
+ vpshufb %%XMM8, [rel SHUF_MASK] ; perform a 16Byte swap
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+%%_initial_blocks_done:
+
+
+%endmacro
+
+;;; INITIAL_BLOCKS macro with support for a partial final block.
+;;; num_initial_blocks is expected to include the partial final block
+;;; in the count.
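+;;; Unlike INITIAL_BLOCKS, the final block here may be shorter than 16 bytes: it is
+;;; encrypted with ENCRYPT_FINAL_PARTIAL_BLOCK, its length is recorded in PBlockLen
+;;; and, for multi_call instances, it is left out of the hash so that it can be
+;;; completed by a later update/finalize call.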
+%macro INITIAL_BLOCKS_PARTIAL 25
+%define %%GDATA_KEY %1
+%define %%GDATA_CTX %2
+%define %%CYPH_PLAIN_OUT %3
+%define %%PLAIN_CYPH_IN %4
+%define %%LENGTH %5
+%define %%DATA_OFFSET %6
+%define %%num_initial_blocks %7 ; can be 1, 2, 3, 4, 5, 6 or 7 (not 0)
+%define %%T1 %8
+%define %%T2 %9
+%define %%T3 %10
+%define %%T4 %11
+%define %%T5 %12
+%define %%CTR %13
+%define %%XMM1 %14
+%define %%XMM2 %15
+%define %%XMM3 %16
+%define %%XMM4 %17
+%define %%XMM5 %18
+%define %%XMM6 %19
+%define %%XMM7 %20
+%define %%XMM8 %21
+%define %%T6 %22
+%define %%T_key %23
+%define %%ENC_DEC %24
+%define %%INSTANCE_TYPE %25
+
+%assign i (8-%%num_initial_blocks)
+ ;; Move AAD_HASH to temp reg
+ vmovdqu %%T2, %%XMM8
+ ;; vmovdqu %%CTR, [%%GDATA_CTX + CurCount] ; %%CTR = Y0
+
+%assign i (9-%%num_initial_blocks)
+%rep %%num_initial_blocks
+ ;; Compute AES counters
+ vpaddd %%CTR, %%CTR, [rel ONE] ; INCR Y0
+ vmovdqa reg(i), %%CTR
+ vpshufb reg(i), [rel SHUF_MASK] ; perform a 16Byte swap
+%assign i (i+1)
+%endrep
+
+vmovdqu %%T_key, [%%GDATA_KEY+16*0]
+%assign i (9-%%num_initial_blocks)
+%rep %%num_initial_blocks
+ ; Start AES for %%num_initial_blocks blocks
+ vpxor reg(i),reg(i),%%T_key
+%assign i (i+1)
+%endrep
+
+%assign j 1
+%rep NROUNDS
+vmovdqu %%T_key, [%%GDATA_KEY+16*j]
+%assign i (9-%%num_initial_blocks)
+%rep %%num_initial_blocks
+ vaesenc reg(i),%%T_key
+%assign i (i+1)
+%endrep
+
+%assign j (j+1)
+%endrep
+
+
+vmovdqu %%T_key, [%%GDATA_KEY+16*j]
+%assign i (9-%%num_initial_blocks)
+%rep %%num_initial_blocks
+ vaesenclast reg(i),%%T_key
+%assign i (i+1)
+%endrep
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;; Hash all but the last block of data
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+%assign i (9-%%num_initial_blocks)
+%rep %%num_initial_blocks-1
+ ;; Encrypt the message for all but the last block
+ VXLDR %%T1, [%%PLAIN_CYPH_IN + %%DATA_OFFSET]
+ vpxor reg(i), reg(i), %%T1
+ ;; write back ciphertext for %%num_initial_blocks blocks
+ VXSTR [%%CYPH_PLAIN_OUT + %%DATA_OFFSET], reg(i)
+ add %%DATA_OFFSET, 16
+ %ifidn %%ENC_DEC, DEC
+ vmovdqa reg(i), %%T1
+ %endif
+ ;; Prepare ciphertext for GHASH computations
+ vpshufb reg(i), [rel SHUF_MASK]
+%assign i (i+1)
+%endrep
+
+ ;; The final block of data may be <16B
+ sub %%LENGTH, 16*(%%num_initial_blocks-1)
+
+%if %%num_initial_blocks < 8
+ ;; NOTE: the 'jl' is always taken for num_initial_blocks = 8.
+ ;; This is run in the context of GCM_ENC_DEC_SMALL for length < 128.
+ cmp %%LENGTH, 16
+ jl %%_small_initial_partial_block
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;; Handle a full length final block - encrypt and hash all blocks
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+ sub %%LENGTH, 16
+ mov [%%GDATA_CTX + PBlockLen], %%LENGTH
+
+ ;; Encrypt the message
+ VXLDR %%T1, [%%PLAIN_CYPH_IN + %%DATA_OFFSET]
+ vpxor reg(i), reg(i), %%T1
+ ;; write back ciphertext for %%num_initial_blocks blocks
+ VXSTR [%%CYPH_PLAIN_OUT + %%DATA_OFFSET], reg(i)
+ add %%DATA_OFFSET, 16
+ %ifidn %%ENC_DEC, DEC
+ vmovdqa reg(i), %%T1
+ %endif
+ ;; Prepare ciphertext for GHASH computations
+ vpshufb reg(i), [rel SHUF_MASK]
+
+ ;; Hash all of the data
+%assign i (8-%%num_initial_blocks)
+%assign j (9-%%num_initial_blocks)
+%assign k (%%num_initial_blocks)
+%assign last_block_to_hash 0
+
+%if(%%num_initial_blocks>last_block_to_hash)
+ ;; Hash in AES state
+ vpxor %%T2, reg(j)
+
+ ;; T2 - incoming AAD hash
+ ;; reg(i) holds ciphertext
+ ;; T5 - hash key
+ ;; T6 - updated xor
+ ;; reg(1)/xmm1 should now be available for tmp use
+ vmovdqu %%T5, [%%GDATA_KEY + HashKey_ %+ k]
+ vpclmulqdq %%T1, %%T2, %%T5, 0x11 ; %%T4 = a1*b1
+ vpclmulqdq %%T4, %%T2, %%T5, 0x00 ; %%T4 = a0*b0
+ vpclmulqdq %%T6, %%T2, %%T5, 0x01 ; %%T6 = a1*b0
+ vpclmulqdq %%T5, %%T2, %%T5, 0x10 ; %%T5 = a0*b1
+ vpxor %%T6, %%T6, %%T5
+%endif
+
+%assign i (i+1)
+%assign j (j+1)
+%assign k (k-1)
+%assign rep_count (%%num_initial_blocks-1)
+%rep rep_count
+
+ vmovdqu %%T5, [%%GDATA_KEY + HashKey_ %+ k]
+ vpclmulqdq %%T3, reg(j), %%T5, 0x11
+ vpxor %%T1, %%T1, %%T3
+
+ vpclmulqdq %%T3, reg(j), %%T5, 0x00
+ vpxor %%T4, %%T4, %%T3
+
+ vpclmulqdq %%T3, reg(j), %%T5, 0x01
+ vpxor %%T6, %%T6, %%T3
+
+ vpclmulqdq %%T3, reg(j), %%T5, 0x10
+ vpxor %%T6, %%T6, %%T3
+
+%assign i (i+1)
+%assign j (j+1)
+%assign k (k-1)
+%endrep
+
+ ;; Record that a reduction is needed
+ mov r12, 1
+
+ jmp %%_small_initial_compute_hash
+
+
+%endif ; %if %%num_initial_blocks < 8
+
+%%_small_initial_partial_block:
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;; Handle ghash for a <16B final block
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+        ;; In this case, if it is a single call to encrypt, all of the data
+        ;; can be hashed; but in an init / update / finalize series of calls
+        ;; the last block must be left out when it is less than a full block
+        ;; of data.
+
+ mov [%%GDATA_CTX + PBlockLen], %%LENGTH
+ vmovdqu [%%GDATA_CTX + PBlockEncKey], reg(i)
+ ;; Handle a partial final block
+ ;; GDATA, KEY, T1, T2
+ ;; r13 - length
+ ;; LT16 - indicates type of read and that the buffer is less than 16 bytes long
+ ;; NOTE: could be replaced with %%LENGTH but at this point
+ ;; %%LENGTH is always less than 16.
+ ;; No PLAIN_CYPH_LEN argument available in this macro.
+ ENCRYPT_FINAL_PARTIAL_BLOCK reg(i), %%T1, %%T3, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, LT16, %%ENC_DEC, %%DATA_OFFSET
+ vpshufb reg(i), [rel SHUF_MASK]
+
+%ifidn %%INSTANCE_TYPE, multi_call
+%assign i (8-%%num_initial_blocks)
+%assign j (9-%%num_initial_blocks)
+%assign k (%%num_initial_blocks-1)
+%assign last_block_to_hash 1
+%else
+%assign i (8-%%num_initial_blocks)
+%assign j (9-%%num_initial_blocks)
+%assign k (%%num_initial_blocks)
+%assign last_block_to_hash 0
+%endif
+
+%if(%%num_initial_blocks>last_block_to_hash)
+ ;; Record that a reduction is needed
+ mov r12, 1
+ ;; Hash in AES state
+ vpxor %%T2, reg(j)
+
+ ;; T2 - incoming AAD hash
+ ;; reg(i) holds ciphertext
+ ;; T5 - hash key
+ ;; T6 - updated xor
+ ;; reg(1)/xmm1 should now be available for tmp use
+ vmovdqu %%T5, [%%GDATA_KEY + HashKey_ %+ k]
+ vpclmulqdq %%T1, %%T2, %%T5, 0x11 ; %%T4 = a1*b1
+ vpclmulqdq %%T4, %%T2, %%T5, 0x00 ; %%T4 = a0*b0
+ vpclmulqdq %%T6, %%T2, %%T5, 0x01 ; %%T6 = a1*b0
+ vpclmulqdq %%T5, %%T2, %%T5, 0x10 ; %%T5 = a0*b1
+ vpxor %%T6, %%T6, %%T5
+%else
+ ;; Record that a reduction is not needed -
+ ;; In this case no hashes are computed because there
+ ;; is only one initial block and it is < 16B in length.
+ mov r12, 0
+%endif
+
+%assign i (i+1)
+%assign j (j+1)
+%assign k (k-1)
+%ifidn %%INSTANCE_TYPE, multi_call
+%assign rep_count (%%num_initial_blocks-2)
+%%_multi_call_hash:
+%else
+%assign rep_count (%%num_initial_blocks-1)
+%endif
+
+%if rep_count < 0
+ ;; quick fix for negative rep_count (to be investigated)
+%assign rep_count 0
+%endif
+
+%rep rep_count
+
+ vmovdqu %%T5, [%%GDATA_KEY + HashKey_ %+ k]
+ vpclmulqdq %%T3, reg(j), %%T5, 0x11
+ vpxor %%T1, %%T1, %%T3
+
+ vpclmulqdq %%T3, reg(j), %%T5, 0x00
+ vpxor %%T4, %%T4, %%T3
+
+ vpclmulqdq %%T3, reg(j), %%T5, 0x01
+ vpxor %%T6, %%T6, %%T3
+
+ vpclmulqdq %%T3, reg(j), %%T5, 0x10
+ vpxor %%T6, %%T6, %%T3
+
+%assign i (i+1)
+%assign j (j+1)
+%assign k (k-1)
+%endrep
+
+%%_small_initial_compute_hash:
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;; Ghash reduction
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+%if(%%num_initial_blocks=1)
+%ifidn %%INSTANCE_TYPE, multi_call
+ ;; We only need to check if a reduction is needed if
+ ;; initial_blocks == 1 and init/update/final is being used.
+ ;; In this case we may just have a partial block, and that
+ ;; gets hashed in finalize.
+ cmp r12, 0
+ je %%_no_reduction_needed
+%endif
+%endif
+
+        vpsrldq %%T3, %%T6, 8                          ; shift-R %%T6 2 DWs
+        vpslldq %%T6, %%T6, 8                          ; shift-L %%T6 2 DWs
+ vpxor %%T1, %%T1, %%T3 ; accumulate the results in %%T1:%%T4
+ vpxor %%T4, %%T6, %%T4
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;; First phase of the reduction
+ vmovdqa %%T3, [rel POLY2]
+
+ vpclmulqdq %%T2, %%T3, %%T4, 0x01
+ ;; shift-L xmm2 2 DWs
+ vpslldq %%T2, %%T2, 8
+ vpxor %%T4, %%T4, %%T2
+
+ ;; First phase of the reduction complete
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;; Second phase of the reduction
+
+ vpclmulqdq %%T2, %%T3, %%T4, 0x00
+ ;; Shift-R xmm2 1 DW (Shift-R only 1-DW to obtain 2-DWs shift-R)
+ vpsrldq %%T2, %%T2, 4
+
+ vpclmulqdq %%T4, %%T3, %%T4, 0x10
+ ;; Shift-L xmm0 1 DW (Shift-L 1-DW to obtain result with no shifts)
+ vpslldq %%T4, %%T4, 4
+
+ vpxor %%T4, %%T4, %%T2
+ ;; Second phase of the reduction complete
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ vpxor %%T3, %%T1, %%T4
+
+%ifidn %%INSTANCE_TYPE, multi_call
+ ;; If using init/update/finalize, we need to xor any partial block data
+ ;; into the hash.
+%if %%num_initial_blocks > 1
+ ;; NOTE: for %%num_initial_blocks = 0 the xor never takes place
+%if %%num_initial_blocks != 8
+ ;; NOTE: for %%num_initial_blocks = 8, %%LENGTH, stored in [PBlockLen] is never zero
+ cmp qword [%%GDATA_CTX + PBlockLen], 0
+ je %%_no_partial_block_xor
+%endif ; %%num_initial_blocks != 8
+ vpxor %%T3, %%T3, reg(8)
+%%_no_partial_block_xor:
+%endif ; %%num_initial_blocks > 1
+%endif ; %%INSTANCE_TYPE, multi_call
+
+%if(%%num_initial_blocks=1)
+%ifidn %%INSTANCE_TYPE, multi_call
+ ;; NOTE: %%_no_reduction_needed case only valid for
+ ;; multi_call with initial_blocks = 1.
+ ;; Look for comment above around '_no_reduction_needed'
+ ;; The jmp below is obsolete as the code will fall through.
+
+ ;; The result is in %%T3
+ jmp %%_after_reduction
+
+%%_no_reduction_needed:
+ ;; The hash should end up in T3. The only way we should get here is if
+ ;; there is a partial block of data, so xor that into the hash.
+ vpxor %%T3, %%T2, reg(8)
+%endif ; %%INSTANCE_TYPE = multi_call
+%endif ; %%num_initial_blocks=1
+
+%%_after_reduction:
+ ;; Final hash is now in T3
+
+%endmacro ; INITIAL_BLOCKS_PARTIAL
+
+
+
+; encrypt 8 blocks at a time
+; ghash the 8 previously encrypted ciphertext blocks
+; %%GDATA (KEY), %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN are used as pointers only, not modified
+; %%DATA_OFFSET is the data offset value
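+; The 8 ciphertext blocks produced by the previous iteration (%%XMM1 plus the copies
+; saved at rsp + TMP2..TMP8) are multiplied by HashKey_8..HashKey_1 and accumulated
+; in between the AES rounds of the 8 new counter blocks, stitching GHASH into the
+; AES round sequence.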
+%macro GHASH_8_ENCRYPT_8_PARALLEL 23
+%define %%GDATA %1
+%define %%CYPH_PLAIN_OUT %2
+%define %%PLAIN_CYPH_IN %3
+%define %%DATA_OFFSET %4
+%define %%T1 %5
+%define %%T2 %6
+%define %%T3 %7
+%define %%T4 %8
+%define %%T5 %9
+%define %%T6 %10
+%define %%CTR %11
+%define %%XMM1 %12
+%define %%XMM2 %13
+%define %%XMM3 %14
+%define %%XMM4 %15
+%define %%XMM5 %16
+%define %%XMM6 %17
+%define %%XMM7 %18
+%define %%XMM8 %19
+%define %%T7 %20
+%define %%loop_idx %21
+%define %%ENC_DEC %22
+%define %%FULL_PARTIAL %23
+
+ vmovdqa %%T2, %%XMM1
+ vmovdqu [rsp + TMP2], %%XMM2
+ vmovdqu [rsp + TMP3], %%XMM3
+ vmovdqu [rsp + TMP4], %%XMM4
+ vmovdqu [rsp + TMP5], %%XMM5
+ vmovdqu [rsp + TMP6], %%XMM6
+ vmovdqu [rsp + TMP7], %%XMM7
+ vmovdqu [rsp + TMP8], %%XMM8
+
+%ifidn %%loop_idx, in_order
+ vpaddd %%XMM1, %%CTR, [rel ONE] ; INCR CNT
+ vmovdqa %%T5, [rel TWO]
+ vpaddd %%XMM2, %%CTR, %%T5
+ vpaddd %%XMM3, %%XMM1, %%T5
+ vpaddd %%XMM4, %%XMM2, %%T5
+ vpaddd %%XMM5, %%XMM3, %%T5
+ vpaddd %%XMM6, %%XMM4, %%T5
+ vpaddd %%XMM7, %%XMM5, %%T5
+ vpaddd %%XMM8, %%XMM6, %%T5
+ vmovdqa %%CTR, %%XMM8
+
+ vmovdqa %%T5, [rel SHUF_MASK]
+ vpshufb %%XMM1, %%T5 ; perform a 16Byte swap
+ vpshufb %%XMM2, %%T5 ; perform a 16Byte swap
+ vpshufb %%XMM3, %%T5 ; perform a 16Byte swap
+ vpshufb %%XMM4, %%T5 ; perform a 16Byte swap
+ vpshufb %%XMM5, %%T5 ; perform a 16Byte swap
+ vpshufb %%XMM6, %%T5 ; perform a 16Byte swap
+ vpshufb %%XMM7, %%T5 ; perform a 16Byte swap
+ vpshufb %%XMM8, %%T5 ; perform a 16Byte swap
+%else
+ vpaddd %%XMM1, %%CTR, [rel ONEf] ; INCR CNT
+ vmovdqa %%T5, [rel TWOf]
+ vpaddd %%XMM2, %%CTR, %%T5
+ vpaddd %%XMM3, %%XMM1, %%T5
+ vpaddd %%XMM4, %%XMM2, %%T5
+ vpaddd %%XMM5, %%XMM3, %%T5
+ vpaddd %%XMM6, %%XMM4, %%T5
+ vpaddd %%XMM7, %%XMM5, %%T5
+ vpaddd %%XMM8, %%XMM6, %%T5
+ vmovdqa %%CTR, %%XMM8
+%endif
+
+
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+ vmovdqu %%T1, [%%GDATA + 16*0]
+ vpxor %%XMM1, %%XMM1, %%T1
+ vpxor %%XMM2, %%XMM2, %%T1
+ vpxor %%XMM3, %%XMM3, %%T1
+ vpxor %%XMM4, %%XMM4, %%T1
+ vpxor %%XMM5, %%XMM5, %%T1
+ vpxor %%XMM6, %%XMM6, %%T1
+ vpxor %%XMM7, %%XMM7, %%T1
+ vpxor %%XMM8, %%XMM8, %%T1
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+ vmovdqu %%T1, [%%GDATA + 16*1]
+ vaesenc %%XMM1, %%T1
+ vaesenc %%XMM2, %%T1
+ vaesenc %%XMM3, %%T1
+ vaesenc %%XMM4, %%T1
+ vaesenc %%XMM5, %%T1
+ vaesenc %%XMM6, %%T1
+ vaesenc %%XMM7, %%T1
+ vaesenc %%XMM8, %%T1
+
+
+ vmovdqu %%T1, [%%GDATA + 16*2]
+ vaesenc %%XMM1, %%T1
+ vaesenc %%XMM2, %%T1
+ vaesenc %%XMM3, %%T1
+ vaesenc %%XMM4, %%T1
+ vaesenc %%XMM5, %%T1
+ vaesenc %%XMM6, %%T1
+ vaesenc %%XMM7, %%T1
+ vaesenc %%XMM8, %%T1
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+ vmovdqu %%T5, [%%GDATA + HashKey_8]
+ vpclmulqdq %%T4, %%T2, %%T5, 0x11 ; %%T4 = a1*b1
+ vpclmulqdq %%T7, %%T2, %%T5, 0x00 ; %%T7 = a0*b0
+ vpclmulqdq %%T6, %%T2, %%T5, 0x01 ; %%T6 = a1*b0
+ vpclmulqdq %%T5, %%T2, %%T5, 0x10 ; %%T5 = a0*b1
+ vpxor %%T6, %%T6, %%T5
+
+ vmovdqu %%T1, [%%GDATA + 16*3]
+ vaesenc %%XMM1, %%T1
+ vaesenc %%XMM2, %%T1
+ vaesenc %%XMM3, %%T1
+ vaesenc %%XMM4, %%T1
+ vaesenc %%XMM5, %%T1
+ vaesenc %%XMM6, %%T1
+ vaesenc %%XMM7, %%T1
+ vaesenc %%XMM8, %%T1
+
+ vmovdqu %%T1, [rsp + TMP2]
+ vmovdqu %%T5, [%%GDATA + HashKey_7]
+ vpclmulqdq %%T3, %%T1, %%T5, 0x11
+ vpxor %%T4, %%T4, %%T3
+
+ vpclmulqdq %%T3, %%T1, %%T5, 0x00
+ vpxor %%T7, %%T7, %%T3
+
+ vpclmulqdq %%T3, %%T1, %%T5, 0x01
+ vpxor %%T6, %%T6, %%T3
+
+ vpclmulqdq %%T3, %%T1, %%T5, 0x10
+ vpxor %%T6, %%T6, %%T3
+
+ vmovdqu %%T1, [%%GDATA + 16*4]
+ vaesenc %%XMM1, %%T1
+ vaesenc %%XMM2, %%T1
+ vaesenc %%XMM3, %%T1
+ vaesenc %%XMM4, %%T1
+ vaesenc %%XMM5, %%T1
+ vaesenc %%XMM6, %%T1
+ vaesenc %%XMM7, %%T1
+ vaesenc %%XMM8, %%T1
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ vmovdqu %%T1, [rsp + TMP3]
+ vmovdqu %%T5, [%%GDATA + HashKey_6]
+ vpclmulqdq %%T3, %%T1, %%T5, 0x11
+ vpxor %%T4, %%T4, %%T3
+
+ vpclmulqdq %%T3, %%T1, %%T5, 0x00
+ vpxor %%T7, %%T7, %%T3
+
+ vpclmulqdq %%T3, %%T1, %%T5, 0x01
+ vpxor %%T6, %%T6, %%T3
+
+ vpclmulqdq %%T3, %%T1, %%T5, 0x10
+ vpxor %%T6, %%T6, %%T3
+
+ vmovdqu %%T1, [%%GDATA + 16*5]
+ vaesenc %%XMM1, %%T1
+ vaesenc %%XMM2, %%T1
+ vaesenc %%XMM3, %%T1
+ vaesenc %%XMM4, %%T1
+ vaesenc %%XMM5, %%T1
+ vaesenc %%XMM6, %%T1
+ vaesenc %%XMM7, %%T1
+ vaesenc %%XMM8, %%T1
+
+
+ vmovdqu %%T1, [rsp + TMP4]
+ vmovdqu %%T5, [%%GDATA + HashKey_5]
+ vpclmulqdq %%T3, %%T1, %%T5, 0x11
+ vpxor %%T4, %%T4, %%T3
+
+ vpclmulqdq %%T3, %%T1, %%T5, 0x00
+ vpxor %%T7, %%T7, %%T3
+
+ vpclmulqdq %%T3, %%T1, %%T5, 0x01
+ vpxor %%T6, %%T6, %%T3
+
+ vpclmulqdq %%T3, %%T1, %%T5, 0x10
+ vpxor %%T6, %%T6, %%T3
+
+ vmovdqu %%T1, [%%GDATA + 16*6]
+ vaesenc %%XMM1, %%T1
+ vaesenc %%XMM2, %%T1
+ vaesenc %%XMM3, %%T1
+ vaesenc %%XMM4, %%T1
+ vaesenc %%XMM5, %%T1
+ vaesenc %%XMM6, %%T1
+ vaesenc %%XMM7, %%T1
+ vaesenc %%XMM8, %%T1
+
+ vmovdqu %%T1, [rsp + TMP5]
+ vmovdqu %%T5, [%%GDATA + HashKey_4]
+ vpclmulqdq %%T3, %%T1, %%T5, 0x11
+ vpxor %%T4, %%T4, %%T3
+
+ vpclmulqdq %%T3, %%T1, %%T5, 0x00
+ vpxor %%T7, %%T7, %%T3
+
+ vpclmulqdq %%T3, %%T1, %%T5, 0x01
+ vpxor %%T6, %%T6, %%T3
+
+ vpclmulqdq %%T3, %%T1, %%T5, 0x10
+ vpxor %%T6, %%T6, %%T3
+
+ vmovdqu %%T1, [%%GDATA + 16*7]
+ vaesenc %%XMM1, %%T1
+ vaesenc %%XMM2, %%T1
+ vaesenc %%XMM3, %%T1
+ vaesenc %%XMM4, %%T1
+ vaesenc %%XMM5, %%T1
+ vaesenc %%XMM6, %%T1
+ vaesenc %%XMM7, %%T1
+ vaesenc %%XMM8, %%T1
+
+ vmovdqu %%T1, [rsp + TMP6]
+ vmovdqu %%T5, [%%GDATA + HashKey_3]
+ vpclmulqdq %%T3, %%T1, %%T5, 0x11
+ vpxor %%T4, %%T4, %%T3
+
+ vpclmulqdq %%T3, %%T1, %%T5, 0x00
+ vpxor %%T7, %%T7, %%T3
+
+ vpclmulqdq %%T3, %%T1, %%T5, 0x01
+ vpxor %%T6, %%T6, %%T3
+
+ vpclmulqdq %%T3, %%T1, %%T5, 0x10
+ vpxor %%T6, %%T6, %%T3
+
+ vmovdqu %%T1, [%%GDATA + 16*8]
+ vaesenc %%XMM1, %%T1
+ vaesenc %%XMM2, %%T1
+ vaesenc %%XMM3, %%T1
+ vaesenc %%XMM4, %%T1
+ vaesenc %%XMM5, %%T1
+ vaesenc %%XMM6, %%T1
+ vaesenc %%XMM7, %%T1
+ vaesenc %%XMM8, %%T1
+
+ vmovdqu %%T1, [rsp + TMP7]
+ vmovdqu %%T5, [%%GDATA + HashKey_2]
+ vpclmulqdq %%T3, %%T1, %%T5, 0x11
+ vpxor %%T4, %%T4, %%T3
+
+ vpclmulqdq %%T3, %%T1, %%T5, 0x00
+ vpxor %%T7, %%T7, %%T3
+
+ vpclmulqdq %%T3, %%T1, %%T5, 0x01
+ vpxor %%T6, %%T6, %%T3
+
+ vpclmulqdq %%T3, %%T1, %%T5, 0x10
+ vpxor %%T6, %%T6, %%T3
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+ vmovdqu %%T5, [%%GDATA + 16*9]
+ vaesenc %%XMM1, %%T5
+ vaesenc %%XMM2, %%T5
+ vaesenc %%XMM3, %%T5
+ vaesenc %%XMM4, %%T5
+ vaesenc %%XMM5, %%T5
+ vaesenc %%XMM6, %%T5
+ vaesenc %%XMM7, %%T5
+ vaesenc %%XMM8, %%T5
+
+ vmovdqu %%T1, [rsp + TMP8]
+ vmovdqu %%T5, [%%GDATA + HashKey]
+
+
+ vpclmulqdq %%T3, %%T1, %%T5, 0x00
+ vpxor %%T7, %%T7, %%T3
+
+ vpclmulqdq %%T3, %%T1, %%T5, 0x01
+ vpxor %%T6, %%T6, %%T3
+
+ vpclmulqdq %%T3, %%T1, %%T5, 0x10
+ vpxor %%T6, %%T6, %%T3
+
+ vpclmulqdq %%T3, %%T1, %%T5, 0x11
+ vpxor %%T1, %%T4, %%T3
+
+
+ vmovdqu %%T5, [%%GDATA + 16*10]
+ %ifndef GCM128_MODE ; GCM192 or GCM256
+ vaesenc %%XMM1, %%T5
+ vaesenc %%XMM2, %%T5
+ vaesenc %%XMM3, %%T5
+ vaesenc %%XMM4, %%T5
+ vaesenc %%XMM5, %%T5
+ vaesenc %%XMM6, %%T5
+ vaesenc %%XMM7, %%T5
+ vaesenc %%XMM8, %%T5
+
+ vmovdqu %%T5, [%%GDATA + 16*11]
+ vaesenc %%XMM1, %%T5
+ vaesenc %%XMM2, %%T5
+ vaesenc %%XMM3, %%T5
+ vaesenc %%XMM4, %%T5
+ vaesenc %%XMM5, %%T5
+ vaesenc %%XMM6, %%T5
+ vaesenc %%XMM7, %%T5
+ vaesenc %%XMM8, %%T5
+
+ vmovdqu %%T5, [%%GDATA + 16*12]
+%endif
+%ifdef GCM256_MODE
+ vaesenc %%XMM1, %%T5
+ vaesenc %%XMM2, %%T5
+ vaesenc %%XMM3, %%T5
+ vaesenc %%XMM4, %%T5
+ vaesenc %%XMM5, %%T5
+ vaesenc %%XMM6, %%T5
+ vaesenc %%XMM7, %%T5
+ vaesenc %%XMM8, %%T5
+
+ vmovdqu %%T5, [%%GDATA + 16*13]
+ vaesenc %%XMM1, %%T5
+ vaesenc %%XMM2, %%T5
+ vaesenc %%XMM3, %%T5
+ vaesenc %%XMM4, %%T5
+ vaesenc %%XMM5, %%T5
+ vaesenc %%XMM6, %%T5
+ vaesenc %%XMM7, %%T5
+ vaesenc %%XMM8, %%T5
+
+ vmovdqu %%T5, [%%GDATA + 16*14]
+%endif ; GCM256
+
+%assign i 0
+%assign j 1
+%rep 8
+
+ ;; SNP TBD: This is pretty ugly - consider whether just XORing the
+ ;; data in after vaesenclast is simpler and performant. Would
+ ;; also have to ripple it through partial block and ghash_mul_8.
+%ifidn %%FULL_PARTIAL, full
+ %ifdef NT_LD
+ VXLDR %%T2, [%%PLAIN_CYPH_IN+%%DATA_OFFSET+16*i]
+ vpxor %%T2, %%T2, %%T5
+ %else
+ vpxor %%T2, %%T5, [%%PLAIN_CYPH_IN+%%DATA_OFFSET+16*i]
+ %endif
+
+ %ifidn %%ENC_DEC, ENC
+ vaesenclast reg(j), reg(j), %%T2
+ %else
+ vaesenclast %%T3, reg(j), %%T2
+ vpxor reg(j), %%T2, %%T5
+ VXSTR [%%CYPH_PLAIN_OUT+%%DATA_OFFSET+16*i], %%T3
+ %endif
+
+%else
+ ; Don't read the final data during partial block processing
+ %ifdef NT_LD
+ %if (i<7)
+ VXLDR %%T2, [%%PLAIN_CYPH_IN+%%DATA_OFFSET+16*i]
+ vpxor %%T2, %%T2, %%T5
+ %else
+ ;; Stage the key directly in T2 rather than hash it with plaintext
+ vmovdqu %%T2, %%T5
+ %endif
+ %else
+ %if (i<7)
+ vpxor %%T2, %%T5, [%%PLAIN_CYPH_IN+%%DATA_OFFSET+16*i]
+ %else
+ ;; Stage the key directly in T2 rather than hash it with plaintext
+ vmovdqu %%T2, %%T5
+ %endif
+ %endif
+
+ %ifidn %%ENC_DEC, ENC
+ vaesenclast reg(j), reg(j), %%T2
+ %else
+ %if (i<7)
+ vaesenclast %%T3, reg(j), %%T2
+ vpxor reg(j), %%T2, %%T5
+ ;; Do not read the data since it could fault
+ VXSTR [%%CYPH_PLAIN_OUT+%%DATA_OFFSET+16*i], %%T3
+ %else
+ vaesenclast reg(j), reg(j), %%T2
+ %endif
+ %endif
+%endif
+
+%assign i (i+1)
+%assign j (j+1)
+%endrep
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+
+        vpslldq %%T3, %%T6, 8                  ; shift-L %%T6 2 DWs
+        vpsrldq %%T6, %%T6, 8                  ; shift-R %%T6 2 DWs
+ vpxor %%T7, %%T7, %%T3
+ vpxor %%T1, %%T1, %%T6 ; accumulate the results in %%T1:%%T7
+
+
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;first phase of the reduction
+ vmovdqa %%T3, [rel POLY2]
+
+ vpclmulqdq %%T2, %%T3, %%T7, 0x01
+ vpslldq %%T2, %%T2, 8 ; shift-L xmm2 2 DWs
+
+ vpxor %%T7, %%T7, %%T2 ; first phase of the reduction complete
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+ %ifidn %%ENC_DEC, ENC
+ ; Write to the Ciphertext buffer
+ VXSTR [%%CYPH_PLAIN_OUT+%%DATA_OFFSET+16*0], %%XMM1
+ VXSTR [%%CYPH_PLAIN_OUT+%%DATA_OFFSET+16*1], %%XMM2
+ VXSTR [%%CYPH_PLAIN_OUT+%%DATA_OFFSET+16*2], %%XMM3
+ VXSTR [%%CYPH_PLAIN_OUT+%%DATA_OFFSET+16*3], %%XMM4
+ VXSTR [%%CYPH_PLAIN_OUT+%%DATA_OFFSET+16*4], %%XMM5
+ VXSTR [%%CYPH_PLAIN_OUT+%%DATA_OFFSET+16*5], %%XMM6
+ VXSTR [%%CYPH_PLAIN_OUT+%%DATA_OFFSET+16*6], %%XMM7
+ %ifidn %%FULL_PARTIAL, full
+ ;; Avoid writing past the buffer if handling a partial block
+ VXSTR [%%CYPH_PLAIN_OUT+%%DATA_OFFSET+16*7], %%XMM8
+ %endif
+ %endif
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;second phase of the reduction
+ vpclmulqdq %%T2, %%T3, %%T7, 0x00
+ vpsrldq %%T2, %%T2, 4 ; shift-R xmm2 1 DW (Shift-R only 1-DW to obtain 2-DWs shift-R)
+
+ vpclmulqdq %%T4, %%T3, %%T7, 0x10
+ vpslldq %%T4, %%T4, 4 ; shift-L xmm0 1 DW (Shift-L 1-DW to obtain result with no shifts)
+
+ vpxor %%T4, %%T4, %%T2 ; second phase of the reduction complete
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ vpxor %%T1, %%T1, %%T4 ; the result is in %%T1
+
+ vpshufb %%XMM1, [rel SHUF_MASK] ; perform a 16Byte swap
+ vpshufb %%XMM2, [rel SHUF_MASK] ; perform a 16Byte swap
+ vpshufb %%XMM3, [rel SHUF_MASK] ; perform a 16Byte swap
+ vpshufb %%XMM4, [rel SHUF_MASK] ; perform a 16Byte swap
+ vpshufb %%XMM5, [rel SHUF_MASK] ; perform a 16Byte swap
+ vpshufb %%XMM6, [rel SHUF_MASK] ; perform a 16Byte swap
+ vpshufb %%XMM7, [rel SHUF_MASK] ; perform a 16Byte swap
+ vpshufb %%XMM8, [rel SHUF_MASK] ; perform a 16Byte swap
+
+
+ vpxor %%XMM1, %%T1
+
+
+%endmacro ; GHASH_8_ENCRYPT_8_PARALLEL
+
+
+; GHASH the last 8 ciphertext blocks.
+%macro GHASH_LAST_8 16
+%define %%GDATA %1
+%define %%T1 %2
+%define %%T2 %3
+%define %%T3 %4
+%define %%T4 %5
+%define %%T5 %6
+%define %%T6 %7
+%define %%T7 %8
+%define %%XMM1 %9
+%define %%XMM2 %10
+%define %%XMM3 %11
+%define %%XMM4 %12
+%define %%XMM5 %13
+%define %%XMM6 %14
+%define %%XMM7 %15
+%define %%XMM8 %16
+
+ ;; Karatsuba Method
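+        ;; Each 128x128-bit carry-less multiply below uses 3 vpclmulqdq instead of 4:
+        ;; hi = a1*b1, lo = a0*b0 and mid = (a1 xor a0)*(b1 xor b0); the cross term
+        ;; a1*b0 xor a0*b1 is recovered at the end as mid xor hi xor lo.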
+
+ vmovdqu %%T5, [%%GDATA + HashKey_8]
+
+ vpshufd %%T2, %%XMM1, 01001110b
+ vpshufd %%T3, %%T5, 01001110b
+ vpxor %%T2, %%T2, %%XMM1
+ vpxor %%T3, %%T3, %%T5
+
+ vpclmulqdq %%T6, %%XMM1, %%T5, 0x11
+ vpclmulqdq %%T7, %%XMM1, %%T5, 0x00
+
+ vpclmulqdq %%XMM1, %%T2, %%T3, 0x00
+
+ ;;;;;;;;;;;;;;;;;;;;;;
+
+ vmovdqu %%T5, [%%GDATA + HashKey_7]
+ vpshufd %%T2, %%XMM2, 01001110b
+ vpshufd %%T3, %%T5, 01001110b
+ vpxor %%T2, %%T2, %%XMM2
+ vpxor %%T3, %%T3, %%T5
+
+ vpclmulqdq %%T4, %%XMM2, %%T5, 0x11
+ vpxor %%T6, %%T6, %%T4
+
+ vpclmulqdq %%T4, %%XMM2, %%T5, 0x00
+ vpxor %%T7, %%T7, %%T4
+
+ vpclmulqdq %%T2, %%T2, %%T3, 0x00
+
+ vpxor %%XMM1, %%XMM1, %%T2
+
+ ;;;;;;;;;;;;;;;;;;;;;;
+
+ vmovdqu %%T5, [%%GDATA + HashKey_6]
+ vpshufd %%T2, %%XMM3, 01001110b
+ vpshufd %%T3, %%T5, 01001110b
+ vpxor %%T2, %%T2, %%XMM3
+ vpxor %%T3, %%T3, %%T5
+
+ vpclmulqdq %%T4, %%XMM3, %%T5, 0x11
+ vpxor %%T6, %%T6, %%T4
+
+ vpclmulqdq %%T4, %%XMM3, %%T5, 0x00
+ vpxor %%T7, %%T7, %%T4
+
+ vpclmulqdq %%T2, %%T2, %%T3, 0x00
+
+ vpxor %%XMM1, %%XMM1, %%T2
+
+ ;;;;;;;;;;;;;;;;;;;;;;
+
+ vmovdqu %%T5, [%%GDATA + HashKey_5]
+ vpshufd %%T2, %%XMM4, 01001110b
+ vpshufd %%T3, %%T5, 01001110b
+ vpxor %%T2, %%T2, %%XMM4
+ vpxor %%T3, %%T3, %%T5
+
+ vpclmulqdq %%T4, %%XMM4, %%T5, 0x11
+ vpxor %%T6, %%T6, %%T4
+
+ vpclmulqdq %%T4, %%XMM4, %%T5, 0x00
+ vpxor %%T7, %%T7, %%T4
+
+ vpclmulqdq %%T2, %%T2, %%T3, 0x00
+
+ vpxor %%XMM1, %%XMM1, %%T2
+
+ ;;;;;;;;;;;;;;;;;;;;;;
+
+ vmovdqu %%T5, [%%GDATA + HashKey_4]
+ vpshufd %%T2, %%XMM5, 01001110b
+ vpshufd %%T3, %%T5, 01001110b
+ vpxor %%T2, %%T2, %%XMM5
+ vpxor %%T3, %%T3, %%T5
+
+ vpclmulqdq %%T4, %%XMM5, %%T5, 0x11
+ vpxor %%T6, %%T6, %%T4
+
+ vpclmulqdq %%T4, %%XMM5, %%T5, 0x00
+ vpxor %%T7, %%T7, %%T4
+
+ vpclmulqdq %%T2, %%T2, %%T3, 0x00
+
+ vpxor %%XMM1, %%XMM1, %%T2
+
+ ;;;;;;;;;;;;;;;;;;;;;;
+
+ vmovdqu %%T5, [%%GDATA + HashKey_3]
+ vpshufd %%T2, %%XMM6, 01001110b
+ vpshufd %%T3, %%T5, 01001110b
+ vpxor %%T2, %%T2, %%XMM6
+ vpxor %%T3, %%T3, %%T5
+
+ vpclmulqdq %%T4, %%XMM6, %%T5, 0x11
+ vpxor %%T6, %%T6, %%T4
+
+ vpclmulqdq %%T4, %%XMM6, %%T5, 0x00
+ vpxor %%T7, %%T7, %%T4
+
+ vpclmulqdq %%T2, %%T2, %%T3, 0x00
+
+ vpxor %%XMM1, %%XMM1, %%T2
+
+ ;;;;;;;;;;;;;;;;;;;;;;
+
+ vmovdqu %%T5, [%%GDATA + HashKey_2]
+ vpshufd %%T2, %%XMM7, 01001110b
+ vpshufd %%T3, %%T5, 01001110b
+ vpxor %%T2, %%T2, %%XMM7
+ vpxor %%T3, %%T3, %%T5
+
+ vpclmulqdq %%T4, %%XMM7, %%T5, 0x11
+ vpxor %%T6, %%T6, %%T4
+
+ vpclmulqdq %%T4, %%XMM7, %%T5, 0x00
+ vpxor %%T7, %%T7, %%T4
+
+ vpclmulqdq %%T2, %%T2, %%T3, 0x00
+
+ vpxor %%XMM1, %%XMM1, %%T2
+
+ ;;;;;;;;;;;;;;;;;;;;;;
+
+ vmovdqu %%T5, [%%GDATA + HashKey]
+ vpshufd %%T2, %%XMM8, 01001110b
+ vpshufd %%T3, %%T5, 01001110b
+ vpxor %%T2, %%T2, %%XMM8
+ vpxor %%T3, %%T3, %%T5
+
+ vpclmulqdq %%T4, %%XMM8, %%T5, 0x11
+ vpxor %%T6, %%T6, %%T4
+
+ vpclmulqdq %%T4, %%XMM8, %%T5, 0x00
+ vpxor %%T7, %%T7, %%T4
+
+ vpclmulqdq %%T2, %%T2, %%T3, 0x00
+
+ vpxor %%XMM1, %%XMM1, %%T2
+ vpxor %%XMM1, %%XMM1, %%T6
+ vpxor %%T2, %%XMM1, %%T7
+
+
+
+
+ vpslldq %%T4, %%T2, 8
+ vpsrldq %%T2, %%T2, 8
+
+ vpxor %%T7, %%T7, %%T4
+ vpxor %%T6, %%T6, %%T2 ; <%%T6:%%T7> holds the result of the accumulated carry-less multiplications
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;first phase of the reduction
+ vmovdqa %%T3, [rel POLY2]
+
+ vpclmulqdq %%T2, %%T3, %%T7, 0x01
+ vpslldq %%T2, %%T2, 8 ; shift-L xmm2 2 DWs
+
+ vpxor %%T7, %%T7, %%T2 ; first phase of the reduction complete
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+
+ ;second phase of the reduction
+ vpclmulqdq %%T2, %%T3, %%T7, 0x00
+ vpsrldq %%T2, %%T2, 4 ; shift-R %%T2 1 DW (Shift-R only 1-DW to obtain 2-DWs shift-R)
+
+ vpclmulqdq %%T4, %%T3, %%T7, 0x10
+ vpslldq %%T4, %%T4, 4 ; shift-L %%T4 1 DW (Shift-L 1-DW to obtain result with no shifts)
+
+ vpxor %%T4, %%T4, %%T2 ; second phase of the reduction complete
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ vpxor %%T6, %%T6, %%T4 ; the result is in %%T6
+%endmacro
+
+
+; GHASH the last 7 ciphertext blocks.
+%macro GHASH_LAST_7 15
+%define %%GDATA %1
+%define %%T1 %2
+%define %%T2 %3
+%define %%T3 %4
+%define %%T4 %5
+%define %%T5 %6
+%define %%T6 %7
+%define %%T7 %8
+%define %%XMM1 %9
+%define %%XMM2 %10
+%define %%XMM3 %11
+%define %%XMM4 %12
+%define %%XMM5 %13
+%define %%XMM6 %14
+%define %%XMM7 %15
+
+ ;; Karatsuba Method
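+        ;; Same 3-multiplication Karatsuba scheme as in GHASH_LAST_8 above, applied
+        ;; to the remaining 7 blocks with HashKey_7 down to HashKey_1.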
+
+ vmovdqu %%T5, [%%GDATA + HashKey_7]
+
+ vpshufd %%T2, %%XMM1, 01001110b
+ vpshufd %%T3, %%T5, 01001110b
+ vpxor %%T2, %%T2, %%XMM1
+ vpxor %%T3, %%T3, %%T5
+
+ vpclmulqdq %%T6, %%XMM1, %%T5, 0x11
+ vpclmulqdq %%T7, %%XMM1, %%T5, 0x00
+
+ vpclmulqdq %%XMM1, %%T2, %%T3, 0x00
+
+ ;;;;;;;;;;;;;;;;;;;;;;
+
+ vmovdqu %%T5, [%%GDATA + HashKey_6]
+ vpshufd %%T2, %%XMM2, 01001110b
+ vpshufd %%T3, %%T5, 01001110b
+ vpxor %%T2, %%T2, %%XMM2
+ vpxor %%T3, %%T3, %%T5
+
+ vpclmulqdq %%T4, %%XMM2, %%T5, 0x11
+ vpxor %%T6, %%T6, %%T4
+
+ vpclmulqdq %%T4, %%XMM2, %%T5, 0x00
+ vpxor %%T7, %%T7, %%T4
+
+ vpclmulqdq %%T2, %%T2, %%T3, 0x00
+
+ vpxor %%XMM1, %%XMM1, %%T2
+
+ ;;;;;;;;;;;;;;;;;;;;;;
+
+ vmovdqu %%T5, [%%GDATA + HashKey_5]
+ vpshufd %%T2, %%XMM3, 01001110b
+ vpshufd %%T3, %%T5, 01001110b
+ vpxor %%T2, %%T2, %%XMM3
+ vpxor %%T3, %%T3, %%T5
+
+ vpclmulqdq %%T4, %%XMM3, %%T5, 0x11
+ vpxor %%T6, %%T6, %%T4
+
+ vpclmulqdq %%T4, %%XMM3, %%T5, 0x00
+ vpxor %%T7, %%T7, %%T4
+
+ vpclmulqdq %%T2, %%T2, %%T3, 0x00
+
+ vpxor %%XMM1, %%XMM1, %%T2
+
+ ;;;;;;;;;;;;;;;;;;;;;;
+
+ vmovdqu %%T5, [%%GDATA + HashKey_4]
+ vpshufd %%T2, %%XMM4, 01001110b
+ vpshufd %%T3, %%T5, 01001110b
+ vpxor %%T2, %%T2, %%XMM4
+ vpxor %%T3, %%T3, %%T5
+
+ vpclmulqdq %%T4, %%XMM4, %%T5, 0x11
+ vpxor %%T6, %%T6, %%T4
+
+ vpclmulqdq %%T4, %%XMM4, %%T5, 0x00
+ vpxor %%T7, %%T7, %%T4
+
+ vpclmulqdq %%T2, %%T2, %%T3, 0x00
+
+ vpxor %%XMM1, %%XMM1, %%T2
+
+ ;;;;;;;;;;;;;;;;;;;;;;
+
+ vmovdqu %%T5, [%%GDATA + HashKey_3]
+ vpshufd %%T2, %%XMM5, 01001110b
+ vpshufd %%T3, %%T5, 01001110b
+ vpxor %%T2, %%T2, %%XMM5
+ vpxor %%T3, %%T3, %%T5
+
+ vpclmulqdq %%T4, %%XMM5, %%T5, 0x11
+ vpxor %%T6, %%T6, %%T4
+
+ vpclmulqdq %%T4, %%XMM5, %%T5, 0x00
+ vpxor %%T7, %%T7, %%T4
+
+ vpclmulqdq %%T2, %%T2, %%T3, 0x00
+
+ vpxor %%XMM1, %%XMM1, %%T2
+
+ ;;;;;;;;;;;;;;;;;;;;;;
+
+ vmovdqu %%T5, [%%GDATA + HashKey_2]
+ vpshufd %%T2, %%XMM6, 01001110b
+ vpshufd %%T3, %%T5, 01001110b
+ vpxor %%T2, %%T2, %%XMM6
+ vpxor %%T3, %%T3, %%T5
+
+ vpclmulqdq %%T4, %%XMM6, %%T5, 0x11
+ vpxor %%T6, %%T6, %%T4
+
+ vpclmulqdq %%T4, %%XMM6, %%T5, 0x00
+ vpxor %%T7, %%T7, %%T4
+
+ vpclmulqdq %%T2, %%T2, %%T3, 0x00
+
+ vpxor %%XMM1, %%XMM1, %%T2
+
+ ;;;;;;;;;;;;;;;;;;;;;;
+
+ vmovdqu %%T5, [%%GDATA + HashKey_1]
+ vpshufd %%T2, %%XMM7, 01001110b
+ vpshufd %%T3, %%T5, 01001110b
+ vpxor %%T2, %%T2, %%XMM7
+ vpxor %%T3, %%T3, %%T5
+
+ vpclmulqdq %%T4, %%XMM7, %%T5, 0x11
+ vpxor %%T6, %%T6, %%T4
+
+ vpclmulqdq %%T4, %%XMM7, %%T5, 0x00
+ vpxor %%T7, %%T7, %%T4
+
+ vpclmulqdq %%T2, %%T2, %%T3, 0x00
+
+ vpxor %%XMM1, %%XMM1, %%T2
+
+ ;;;;;;;;;;;;;;;;;;;;;;
+
+ vpxor %%XMM1, %%XMM1, %%T6
+ vpxor %%T2, %%XMM1, %%T7
+
+
+
+
+ vpslldq %%T4, %%T2, 8
+ vpsrldq %%T2, %%T2, 8
+
+ vpxor %%T7, %%T7, %%T4
+ vpxor %%T6, %%T6, %%T2 ; <%%T6:%%T7> holds the result of the accumulated carry-less multiplications
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;first phase of the reduction
+ vmovdqa %%T3, [rel POLY2]
+
+ vpclmulqdq %%T2, %%T3, %%T7, 0x01
+ vpslldq %%T2, %%T2, 8 ; shift-L xmm2 2 DWs
+
+ vpxor %%T7, %%T7, %%T2 ; first phase of the reduction complete
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+
+ ;second phase of the reduction
+ vpclmulqdq %%T2, %%T3, %%T7, 0x00
+ vpsrldq %%T2, %%T2, 4 ; shift-R %%T2 1 DW (Shift-R only 1-DW to obtain 2-DWs shift-R)
+
+ vpclmulqdq %%T4, %%T3, %%T7, 0x10
+ vpslldq %%T4, %%T4, 4 ; shift-L %%T4 1 DW (Shift-L 1-DW to obtain result with no shifts)
+
+ vpxor %%T4, %%T4, %%T2 ; second phase of the reduction complete
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ vpxor %%T6, %%T6, %%T4 ; the result is in %%T6
+%endmacro
+
+
+
+;;; Handle encryption of the final partial block
+;;; IN:
+;;; r13 - Number of bytes to read
+;;; MODIFIES:
+;;; KEY - Key for encrypting the partial block
+;;; HASH - Current hash value
+;;; SMASHES:
+;;; r10, r12, r15, rax
+;;; T1, T2
+;;; Note:
+;;; PLAIN_CYPH_LEN, %7, is passed only to determine
+;;; if buffer is big enough to do a 16 byte read & shift.
+;;; 'LT16' is passed here only if buffer is known to be smaller
+;;; than 16 bytes.
+;;; Any other value passed here will result in 16 byte read
+;;; code path.
+;;; TBD: Remove HASH from the instantiation
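+;;; Two read paths: when PLAIN_CYPH_LEN is literally 'LT16' the tail is gathered
+;;; byte-safely with READ_SMALL_DATA_INPUT; otherwise a full 16-byte load ending at
+;;; the last input byte is performed and the data is shifted into place via SHIFT_MASK.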
+%macro ENCRYPT_FINAL_PARTIAL_BLOCK 8
+%define %%KEY %1
+%define %%T1 %2
+%define %%T2 %3
+%define %%CYPH_PLAIN_OUT %4
+%define %%PLAIN_CYPH_IN %5
+%define %%PLAIN_CYPH_LEN %6
+%define %%ENC_DEC %7
+%define %%DATA_OFFSET %8
+
+        ;; NOTE: type of read tuned based on %%PLAIN_CYPH_LEN setting
+%ifidn %%PLAIN_CYPH_LEN, LT16
+ ;; Handle the case where the message is < 16 bytes
+ lea r10, [%%PLAIN_CYPH_IN + %%DATA_OFFSET]
+
+ ;; T1 - packed output
+ ;; r10 - input data address
+ ;; r13 - input data length
+ ;; r12, r15, rax - temp registers
+ READ_SMALL_DATA_INPUT %%T1, r10, r13, r12, r15, rax
+
+ lea r12, [SHIFT_MASK + 16]
+ sub r12, r13
+%else
+ ;; Handle the case where the message is >= 16 bytes
+ sub %%DATA_OFFSET, 16
+ add %%DATA_OFFSET, r13
+ ;; Receive the last <16 Byte block
+ vmovdqu %%T1, [%%PLAIN_CYPH_IN+%%DATA_OFFSET]
+ sub %%DATA_OFFSET, r13
+ add %%DATA_OFFSET, 16
+
+ lea r12, [SHIFT_MASK + 16]
+ ;; Adjust the shuffle mask pointer to be able to shift 16-r13 bytes
+ ;; (r13 is the number of bytes in plaintext mod 16)
+ sub r12, r13
+ ;; Get the appropriate shuffle mask
+ vmovdqu %%T2, [r12]
+ ;; shift right 16-r13 bytes
+ vpshufb %%T1, %%T2
+%endif ; %%PLAIN_CYPH_LEN, LT16
+
+ ;; At this point T1 contains the partial block data
+%ifidn %%ENC_DEC, DEC
+ ;; Plaintext XOR E(K, Yn)
+ ;; Set aside the ciphertext
+ vmovdqa %%T2, %%T1
+ vpxor %%KEY, %%KEY, %%T1
+ ;; Get the appropriate mask to mask out top 16-r13 bytes of ciphertext
+ vmovdqu %%T1, [r12 + ALL_F - SHIFT_MASK]
+ ;; Mask out top 16-r13 bytes of ciphertext
+ vpand %%KEY, %%KEY, %%T1
+
+ ;; Prepare the ciphertext for the hash
+ ;; mask out top 16-r13 bytes of the plaintext
+ vpand %%T2, %%T2, %%T1
+%else
+ ;; Plaintext XOR E(K, Yn)
+ vpxor %%KEY, %%KEY, %%T1
+ ;; Get the appropriate mask to mask out top 16-r13 bytes of %%KEY
+ vmovdqu %%T1, [r12 + ALL_F - SHIFT_MASK]
+ ;; Mask out top 16-r13 bytes of %%KEY
+ vpand %%KEY, %%KEY, %%T1
+%endif
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;; Output r13 Bytes
+ vmovq rax, %%KEY
+ cmp r13, 8
+ jle %%_less_than_8_bytes_left
+
+ mov [%%CYPH_PLAIN_OUT + %%DATA_OFFSET], rax
+ add %%DATA_OFFSET, 8
+ vpsrldq %%T1, %%KEY, 8
+ vmovq rax, %%T1
+ sub r13, 8
+
+%%_less_than_8_bytes_left:
+ mov BYTE [%%CYPH_PLAIN_OUT + %%DATA_OFFSET], al
+ add %%DATA_OFFSET, 1
+ shr rax, 8
+ sub r13, 1
+ jne %%_less_than_8_bytes_left
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+%ifidn %%ENC_DEC, DEC
+ ;; If decrypt, restore the ciphertext into %%KEY
+ vmovdqu %%KEY, %%T2
+%endif
+%endmacro ; ENCRYPT_FINAL_PARTIAL_BLOCK
+
+
+
+; Encryption of a single block
+%macro ENCRYPT_SINGLE_BLOCK 2
+%define %%GDATA %1
+%define %%XMM0 %2
+
+ vpxor %%XMM0, %%XMM0, [%%GDATA+16*0]
+%assign i 1
+%rep NROUNDS
+ vaesenc %%XMM0, [%%GDATA+16*i]
+%assign i (i+1)
+%endrep
+ vaesenclast %%XMM0, [%%GDATA+16*i]
+%endmacro
+
+
+;; Start of Stack Setup
+
+%macro FUNC_SAVE 0
+ ;; Required for Update/GCM_ENC
+ ;the number of pushes must equal STACK_OFFSET
+ push r12
+ push r13
+ push r14
+ push r15
+ mov r14, rsp
+
+ sub rsp, VARIABLE_OFFSET
+ and rsp, ~63
+
+%ifidn __OUTPUT_FORMAT__, win64
+ ; xmm6:xmm15 need to be maintained for Windows
+ vmovdqu [rsp + LOCAL_STORAGE + 0*16],xmm6
+ vmovdqu [rsp + LOCAL_STORAGE + 1*16],xmm7
+ vmovdqu [rsp + LOCAL_STORAGE + 2*16],xmm8
+ vmovdqu [rsp + LOCAL_STORAGE + 3*16],xmm9
+ vmovdqu [rsp + LOCAL_STORAGE + 4*16],xmm10
+ vmovdqu [rsp + LOCAL_STORAGE + 5*16],xmm11
+ vmovdqu [rsp + LOCAL_STORAGE + 6*16],xmm12
+ vmovdqu [rsp + LOCAL_STORAGE + 7*16],xmm13
+ vmovdqu [rsp + LOCAL_STORAGE + 8*16],xmm14
+ vmovdqu [rsp + LOCAL_STORAGE + 9*16],xmm15
+%endif
+%endmacro
+
+
+%macro FUNC_RESTORE 0
+
+%ifdef SAFE_DATA
+ clear_scratch_gps_asm
+ clear_scratch_ymms_asm
+%endif
+%ifidn __OUTPUT_FORMAT__, win64
+ vmovdqu xmm15, [rsp + LOCAL_STORAGE + 9*16]
+ vmovdqu xmm14, [rsp + LOCAL_STORAGE + 8*16]
+ vmovdqu xmm13, [rsp + LOCAL_STORAGE + 7*16]
+ vmovdqu xmm12, [rsp + LOCAL_STORAGE + 6*16]
+ vmovdqu xmm11, [rsp + LOCAL_STORAGE + 5*16]
+ vmovdqu xmm10, [rsp + LOCAL_STORAGE + 4*16]
+ vmovdqu xmm9, [rsp + LOCAL_STORAGE + 3*16]
+ vmovdqu xmm8, [rsp + LOCAL_STORAGE + 2*16]
+ vmovdqu xmm7, [rsp + LOCAL_STORAGE + 1*16]
+ vmovdqu xmm6, [rsp + LOCAL_STORAGE + 0*16]
+%endif
+
+;; Required for Update/GCM_ENC
+ mov rsp, r14
+ pop r15
+ pop r14
+ pop r13
+ pop r12
+%endmacro
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; GCM_INIT initializes a gcm_context_data struct to prepare for encoding/decoding.
+; Input: gcm_key_data * (GDATA_KEY), gcm_context_data *(GDATA_CTX), IV,
+; Additional Authentication data (A_IN), Additional Data length (A_LEN).
+; Output: Updated GDATA_CTX with the hash of A_IN (AadHash) and initialized other parts of GDATA_CTX.
+; Clobbers rax, r10-r13, and xmm0-xmm6
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
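+; Note: for the 12-byte IV handled here, the pre-counter block is formed as
+; IV || 0x00000001 (the ONEf constant below), matching J0 in NIST SP 800-38D,
+; and AadHash is the GHASH of the AAD padded to a 16-byte multiple.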
+%macro GCM_INIT 5
+%define %%GDATA_KEY %1
+%define %%GDATA_CTX %2
+%define %%IV %3
+%define %%A_IN %4
+%define %%A_LEN %5
+%define %%AAD_HASH xmm14
+
+
+ mov r10, %%A_LEN
+ cmp r10, 0
+ je %%_aad_is_zero
+
+ CALC_AAD_HASH %%A_IN, %%A_LEN, %%AAD_HASH, %%GDATA_KEY, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, r10, r11, r12, r13, rax
+ jmp %%_after_aad
+
+%%_aad_is_zero:
+ vpxor %%AAD_HASH, %%AAD_HASH
+
+%%_after_aad:
+ mov r10, %%A_LEN
+ vpxor xmm2, xmm3
+
+ vmovdqu [%%GDATA_CTX + AadHash], %%AAD_HASH ; ctx_data.aad hash = aad_hash
+ mov [%%GDATA_CTX + AadLen], r10 ; ctx_data.aad_length = aad_length
+ xor r10, r10
+ mov [%%GDATA_CTX + InLen], r10 ; ctx_data.in_length = 0
+ mov [%%GDATA_CTX + PBlockLen], r10 ; ctx_data.partial_block_length = 0
+ vmovdqu [%%GDATA_CTX + PBlockEncKey], xmm2 ; ctx_data.partial_block_enc_key = 0
+ mov r10, %%IV
+ vmovdqa xmm2, [rel ONEf] ; read 12 IV bytes and pad with 0x00000001
+ vpinsrq xmm2, [r10], 0
+ vpinsrd xmm2, [r10+8], 2
+ vmovdqu [%%GDATA_CTX + OrigIV], xmm2 ; ctx_data.orig_IV = iv
+
+ vpshufb xmm2, [rel SHUF_MASK]
+
+ vmovdqu [%%GDATA_CTX + CurCount], xmm2 ; ctx_data.current_counter = iv
+%endmacro
+
+%macro GCM_ENC_DEC_SMALL 12
+%define %%GDATA_KEY %1
+%define %%GDATA_CTX %2
+%define %%CYPH_PLAIN_OUT %3
+%define %%PLAIN_CYPH_IN %4
+%define %%PLAIN_CYPH_LEN %5
+%define %%ENC_DEC %6
+%define %%DATA_OFFSET %7
+%define %%LENGTH %8
+%define %%NUM_BLOCKS %9
+%define %%CTR %10
+%define %%HASH %11
+%define %%INSTANCE_TYPE %12
+
+ ;; NOTE: the check below is obsolete in current implementation. The check is already done in GCM_ENC_DEC.
+ ;; cmp %%NUM_BLOCKS, 0
+ ;; je %%_small_initial_blocks_encrypted
+ cmp %%NUM_BLOCKS, 8
+ je %%_small_initial_num_blocks_is_8
+ cmp %%NUM_BLOCKS, 7
+ je %%_small_initial_num_blocks_is_7
+ cmp %%NUM_BLOCKS, 6
+ je %%_small_initial_num_blocks_is_6
+ cmp %%NUM_BLOCKS, 5
+ je %%_small_initial_num_blocks_is_5
+ cmp %%NUM_BLOCKS, 4
+ je %%_small_initial_num_blocks_is_4
+ cmp %%NUM_BLOCKS, 3
+ je %%_small_initial_num_blocks_is_3
+ cmp %%NUM_BLOCKS, 2
+ je %%_small_initial_num_blocks_is_2
+
+ jmp %%_small_initial_num_blocks_is_1
+
+
+%%_small_initial_num_blocks_is_8:
+ INITIAL_BLOCKS_PARTIAL %%GDATA_KEY, %%GDATA_CTX, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, r13, %%DATA_OFFSET, 8, xmm12, xmm13, xmm14, xmm15, xmm11, xmm9, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm10, xmm0, %%ENC_DEC, %%INSTANCE_TYPE
+ jmp %%_small_initial_blocks_encrypted
+
+%%_small_initial_num_blocks_is_7:
+ ;; r13 - %%LENGTH
+ ;; xmm12 - T1
+ ;; xmm13 - T2
+ ;; xmm14 - T3 - AAD HASH OUT when not producing 8 AES keys
+ ;; xmm15 - T4
+ ;; xmm11 - T5
+ ;; xmm9 - CTR
+ ;; xmm1 - XMM1 - Cipher + Hash when producing 8 AES keys
+ ;; xmm2 - XMM2
+ ;; xmm3 - XMM3
+ ;; xmm4 - XMM4
+ ;; xmm5 - XMM5
+ ;; xmm6 - XMM6
+ ;; xmm7 - XMM7
+ ;; xmm8 - XMM8 - AAD HASH IN
+ ;; xmm10 - T6
+ ;; xmm0 - T_key
+ INITIAL_BLOCKS_PARTIAL %%GDATA_KEY, %%GDATA_CTX, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, r13, %%DATA_OFFSET, 7, xmm12, xmm13, xmm14, xmm15, xmm11, xmm9, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm10, xmm0, %%ENC_DEC, %%INSTANCE_TYPE
+ jmp %%_small_initial_blocks_encrypted
+
+%%_small_initial_num_blocks_is_6:
+ INITIAL_BLOCKS_PARTIAL %%GDATA_KEY, %%GDATA_CTX, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, r13, %%DATA_OFFSET, 6, xmm12, xmm13, xmm14, xmm15, xmm11, xmm9, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm10, xmm0, %%ENC_DEC, %%INSTANCE_TYPE
+ jmp %%_small_initial_blocks_encrypted
+
+%%_small_initial_num_blocks_is_5:
+ INITIAL_BLOCKS_PARTIAL %%GDATA_KEY, %%GDATA_CTX, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, r13, %%DATA_OFFSET, 5, xmm12, xmm13, xmm14, xmm15, xmm11, xmm9, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm10, xmm0, %%ENC_DEC, %%INSTANCE_TYPE
+ jmp %%_small_initial_blocks_encrypted
+
+%%_small_initial_num_blocks_is_4:
+ INITIAL_BLOCKS_PARTIAL %%GDATA_KEY, %%GDATA_CTX, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, r13, %%DATA_OFFSET, 4, xmm12, xmm13, xmm14, xmm15, xmm11, xmm9, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm10, xmm0, %%ENC_DEC, %%INSTANCE_TYPE
+ jmp %%_small_initial_blocks_encrypted
+
+%%_small_initial_num_blocks_is_3:
+ INITIAL_BLOCKS_PARTIAL %%GDATA_KEY, %%GDATA_CTX, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, r13, %%DATA_OFFSET, 3, xmm12, xmm13, xmm14, xmm15, xmm11, xmm9, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm10, xmm0, %%ENC_DEC, %%INSTANCE_TYPE
+ jmp %%_small_initial_blocks_encrypted
+
+%%_small_initial_num_blocks_is_2:
+ INITIAL_BLOCKS_PARTIAL %%GDATA_KEY, %%GDATA_CTX, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, r13, %%DATA_OFFSET, 2, xmm12, xmm13, xmm14, xmm15, xmm11, xmm9, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm10, xmm0, %%ENC_DEC, %%INSTANCE_TYPE
+ jmp %%_small_initial_blocks_encrypted
+
+%%_small_initial_num_blocks_is_1:
+ INITIAL_BLOCKS_PARTIAL %%GDATA_KEY, %%GDATA_CTX, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, r13, %%DATA_OFFSET, 1, xmm12, xmm13, xmm14, xmm15, xmm11, xmm9, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm10, xmm0, %%ENC_DEC, %%INSTANCE_TYPE
+
+ ;; Note: zero initial blocks not allowed.
+
+%%_small_initial_blocks_encrypted:
+
+%endmacro ; GCM_ENC_DEC_SMALL
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; GCM_ENC_DEC Encodes/Decodes given data. Assumes that the passed gcm_context_data struct
+; has been initialized by GCM_INIT
+; Requires the input data to be at least 1 byte long because of READ_SMALL_DATA_INPUT.
+; Input: gcm_key_data struct* (GDATA_KEY), gcm_context_data *(GDATA_CTX), input text (PLAIN_CYPH_IN),
+; input text length (PLAIN_CYPH_LEN) and whether encoding or decoding (ENC_DEC).
+; Output: A cypher of the given plain text (CYPH_PLAIN_OUT), and updated GDATA_CTX
+; Clobbers rax, r10-r15, and xmm0-xmm15
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
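+; Note: encryption and decryption share this macro because GCM ciphers the data
+; with AES in counter mode; the ENC_DEC argument only selects whether plaintext
+; or ciphertext is fed into the GHASH computation.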
+%macro GCM_ENC_DEC 7
+%define %%GDATA_KEY %1
+%define %%GDATA_CTX %2
+%define %%CYPH_PLAIN_OUT %3
+%define %%PLAIN_CYPH_IN %4
+%define %%PLAIN_CYPH_LEN %5
+%define %%ENC_DEC %6
+%define %%INSTANCE_TYPE %7
+%define %%DATA_OFFSET r11
+
+; Macro flow:
+; calculate the number of 16-byte blocks in the message
+; process (number of 16-byte blocks) mod 8 '%%_initial_num_blocks_is_# .. %%_initial_blocks_encrypted'
+; process 8 16-byte blocks at a time until all are done '%%_encrypt_by_8_new .. %%_eight_cipher_left'
+; if there is a final block of less than 16 bytes, process it '%%_zero_cipher_left .. %%_multiple_of_16_bytes'
+
+ cmp %%PLAIN_CYPH_LEN, 0
+ je %%_enc_dec_done
+
+ xor %%DATA_OFFSET, %%DATA_OFFSET
+ ;; Update length of data processed
+%ifidn __OUTPUT_FORMAT__, win64
+ mov rax, %%PLAIN_CYPH_LEN
+ add [%%GDATA_CTX + InLen], rax
+%else
+ add [%%GDATA_CTX + InLen], %%PLAIN_CYPH_LEN
+%endif
+ vmovdqu xmm13, [%%GDATA_KEY + HashKey]
+ vmovdqu xmm8, [%%GDATA_CTX + AadHash]
+
+%ifidn %%INSTANCE_TYPE, multi_call
+ ;; NOTE: partial block processing only makes sense for multi_call here.
+ ;; Used for the update flow - if there was a previous partial
+ ;; block fill the remaining bytes here.
+ PARTIAL_BLOCK %%GDATA_KEY, %%GDATA_CTX, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, %%PLAIN_CYPH_LEN, %%DATA_OFFSET, xmm8, %%ENC_DEC
+%endif
+
+ ;; lift CTR set from initial_blocks to here
+%ifidn %%INSTANCE_TYPE, single_call
+ vmovdqu xmm9, xmm2
+%else
+ vmovdqu xmm9, [%%GDATA_CTX + CurCount]
+%endif
+
+ ;; Save the amount of data left to process in r13
+ mov r13, %%PLAIN_CYPH_LEN
+%ifidn %%INSTANCE_TYPE, multi_call
+ ;; NOTE: %%DATA_OFFSET is zero in single_call case.
+ ;; Consequently PLAIN_CYPH_LEN will never be zero after
+ ;; %%DATA_OFFSET subtraction below.
+ sub r13, %%DATA_OFFSET
+
+ ;; There may be no more data if it was consumed in the partial block.
+ cmp r13, 0
+ je %%_enc_dec_done
+%endif ; %%INSTANCE_TYPE, multi_call
+ mov r10, r13
+
+ ;; Determine how many blocks to process in INITIAL
+ mov r12, r13
+ shr r12, 4
+ and r12, 7
+
+ ;; Process one additional block in INITIAL if there is a partial block
+ and r10, 0xf
+ blsmsk r10, r10 ; Set CF if zero
+ cmc ; Flip CF
+ adc r12, 0x0 ; Process an additional INITIAL block if CF set
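+ ;; (blsmsk sets CF only when its source is zero, so after cmc CF == 1
+ ;;  exactly when length mod 16 != 0, and adc then bumps the INITIAL
+ ;;  block count by one)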
+
+ ;; Messages shorter than 128B are handled by the small message code,
+ ;; which can process up to 7 full 16B blocks plus a final partial block.
+ cmp r13, 128
+ jge %%_large_message_path
+
+ GCM_ENC_DEC_SMALL %%GDATA_KEY, %%GDATA_CTX, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, %%PLAIN_CYPH_LEN, %%ENC_DEC, %%DATA_OFFSET, r13, r12, xmm9, xmm14, %%INSTANCE_TYPE
+ jmp %%_ghash_done
+
+%%_large_message_path:
+ and r12, 0x7 ; Still, don't allow 8 INITIAL blocks since this case
+ ; can be handled by the x8 partial loop.
+
+ cmp r12, 0
+ je %%_initial_num_blocks_is_0
+ cmp r12, 7
+ je %%_initial_num_blocks_is_7
+ cmp r12, 6
+ je %%_initial_num_blocks_is_6
+ cmp r12, 5
+ je %%_initial_num_blocks_is_5
+ cmp r12, 4
+ je %%_initial_num_blocks_is_4
+ cmp r12, 3
+ je %%_initial_num_blocks_is_3
+ cmp r12, 2
+ je %%_initial_num_blocks_is_2
+
+ jmp %%_initial_num_blocks_is_1
+
+%%_initial_num_blocks_is_7:
+ ;; r13 - %%LENGTH
+ ;; xmm12 - T1
+ ;; xmm13 - T2
+ ;; xmm14 - T3 - AAD HASH OUT when not producing 8 AES keys
+ ;; xmm15 - T4
+ ;; xmm11 - T5
+ ;; xmm9 - CTR
+ ;; xmm1 - XMM1 - Cipher + Hash when producing 8 AES keys
+ ;; xmm2 - XMM2
+ ;; xmm3 - XMM3
+ ;; xmm4 - XMM4
+ ;; xmm5 - XMM5
+ ;; xmm6 - XMM6
+ ;; xmm7 - XMM7
+ ;; xmm8 - XMM8 - AAD HASH IN
+ ;; xmm10 - T6
+ ;; xmm0 - T_key
+ INITIAL_BLOCKS %%GDATA_KEY, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, r13, %%DATA_OFFSET, 7, xmm12, xmm13, xmm14, xmm15, xmm11, xmm9, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm10, xmm0, %%ENC_DEC
+ jmp %%_initial_blocks_encrypted
+
+%%_initial_num_blocks_is_6:
+ INITIAL_BLOCKS %%GDATA_KEY, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, r13, %%DATA_OFFSET, 6, xmm12, xmm13, xmm14, xmm15, xmm11, xmm9, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm10, xmm0, %%ENC_DEC
+ jmp %%_initial_blocks_encrypted
+
+%%_initial_num_blocks_is_5:
+ INITIAL_BLOCKS %%GDATA_KEY, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, r13, %%DATA_OFFSET, 5, xmm12, xmm13, xmm14, xmm15, xmm11, xmm9, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm10, xmm0, %%ENC_DEC
+ jmp %%_initial_blocks_encrypted
+
+%%_initial_num_blocks_is_4:
+ INITIAL_BLOCKS %%GDATA_KEY, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, r13, %%DATA_OFFSET, 4, xmm12, xmm13, xmm14, xmm15, xmm11, xmm9, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm10, xmm0, %%ENC_DEC
+ jmp %%_initial_blocks_encrypted
+
+%%_initial_num_blocks_is_3:
+ INITIAL_BLOCKS %%GDATA_KEY, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, r13, %%DATA_OFFSET, 3, xmm12, xmm13, xmm14, xmm15, xmm11, xmm9, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm10, xmm0, %%ENC_DEC
+ jmp %%_initial_blocks_encrypted
+
+%%_initial_num_blocks_is_2:
+ INITIAL_BLOCKS %%GDATA_KEY, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, r13, %%DATA_OFFSET, 2, xmm12, xmm13, xmm14, xmm15, xmm11, xmm9, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm10, xmm0, %%ENC_DEC
+ jmp %%_initial_blocks_encrypted
+
+%%_initial_num_blocks_is_1:
+ INITIAL_BLOCKS %%GDATA_KEY, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, r13, %%DATA_OFFSET, 1, xmm12, xmm13, xmm14, xmm15, xmm11, xmm9, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm10, xmm0, %%ENC_DEC
+ jmp %%_initial_blocks_encrypted
+
+%%_initial_num_blocks_is_0:
+ INITIAL_BLOCKS %%GDATA_KEY, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, r13, %%DATA_OFFSET, 0, xmm12, xmm13, xmm14, xmm15, xmm11, xmm9, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm10, xmm0, %%ENC_DEC
+
+
+%%_initial_blocks_encrypted:
+ ;; If the entire message was processed in the initial blocks, it now only needs to be hashed
+ cmp r13, 0
+ je %%_encrypt_done
+
+ ;; Encrypt the final <16 byte (partial) block, then hash
+ cmp r13, 16
+ jl %%_encrypt_final_partial
+
+ ;; Process 7 full blocks plus a partial block
+ cmp r13, 128
+ jl %%_encrypt_by_8_partial
+
+
+%%_encrypt_by_8_parallel:
+ ;; in_order vs. out_order is an optimization to increment the counter without shuffling
+ ;; it back into little endian. r15d keeps track of when we need to increment in order so
+ ;; that the carry is handled correctly.
+ vmovd r15d, xmm9
+ and r15d, 255
+ vpshufb xmm9, [rel SHUF_MASK]
+
+
+%%_encrypt_by_8_new:
+ cmp r15d, 255-8
+ jg %%_encrypt_by_8
+
+
+
+ ;; xmm0 - T1
+ ;; xmm10 - T2
+ ;; xmm11 - T3
+ ;; xmm12 - T4
+ ;; xmm13 - T5
+ ;; xmm14 - T6
+ ;; xmm9 - CTR
+ ;; xmm1 - XMM1
+ ;; xmm2 - XMM2
+ ;; xmm3 - XMM3
+ ;; xmm4 - XMM4
+ ;; xmm5 - XMM5
+ ;; xmm6 - XMM6
+ ;; xmm7 - XMM7
+ ;; xmm8 - XMM8
+ ;; xmm15 - T7
+ add r15b, 8
+ GHASH_8_ENCRYPT_8_PARALLEL %%GDATA_KEY, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, %%DATA_OFFSET, xmm0, xmm10, xmm11, xmm12, xmm13, xmm14, xmm9, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm15, out_order, %%ENC_DEC, full
+ add %%DATA_OFFSET, 128
+ sub r13, 128
+ cmp r13, 128
+ jge %%_encrypt_by_8_new
+
+ vpshufb xmm9, [rel SHUF_MASK]
+ jmp %%_encrypt_by_8_parallel_done
+
+%%_encrypt_by_8:
+ vpshufb xmm9, [rel SHUF_MASK]
+ add r15b, 8
+ GHASH_8_ENCRYPT_8_PARALLEL %%GDATA_KEY, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, %%DATA_OFFSET, xmm0, xmm10, xmm11, xmm12, xmm13, xmm14, xmm9, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm15, in_order, %%ENC_DEC, full
+ vpshufb xmm9, [rel SHUF_MASK]
+ add %%DATA_OFFSET, 128
+ sub r13, 128
+ cmp r13, 128
+ jge %%_encrypt_by_8_new
+ vpshufb xmm9, [rel SHUF_MASK]
+
+
+%%_encrypt_by_8_parallel_done:
+ ;; Test to see if we need a by 8 with partial block. At this point
+ ;; bytes remaining should be either zero or between 113-127.
+ cmp r13, 0
+ je %%_encrypt_done
+
+%%_encrypt_by_8_partial:
+ ;; Shuffle needed to align key for partial block xor. out_order
+ ;; is a little faster because it avoids extra shuffles.
+ ;; TBD: Might need to account for when we don't have room to increment the counter.
+
+
+ ;; Process parallel buffers with a final partial block.
+ GHASH_8_ENCRYPT_8_PARALLEL %%GDATA_KEY, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, %%DATA_OFFSET, xmm0, xmm10, xmm11, xmm12, xmm13, xmm14, xmm9, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm15, in_order, %%ENC_DEC, partial
+
+
+ add %%DATA_OFFSET, 128-16
+ sub r13, 128-16
+
+%%_encrypt_final_partial:
+
+ vpshufb xmm8, [rel SHUF_MASK]
+ mov [%%GDATA_CTX + PBlockLen], r13
+ vmovdqu [%%GDATA_CTX + PBlockEncKey], xmm8
+
+ ;; xmm8 - Final encrypted counter - need to hash with partial or full block ciphertext
+ ;; GDATA, KEY, T1, T2
+ ENCRYPT_FINAL_PARTIAL_BLOCK xmm8, xmm0, xmm10, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, %%PLAIN_CYPH_LEN, %%ENC_DEC, %%DATA_OFFSET
+
+ vpshufb xmm8, [rel SHUF_MASK]
+
+
+%%_encrypt_done:
+
+ ;; Mapping to macro parameters
+ ;; IN:
+ ;; xmm9 contains the counter
+ ;; xmm1-xmm8 contain the xor'd ciphertext
+ ;; OUT:
+ ;; xmm14 contains the final hash
+ ;; GDATA, T1, T2, T3, T4, T5, T6, T7, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8
+%ifidn %%INSTANCE_TYPE, multi_call
+ mov r13, [%%GDATA_CTX + PBlockLen]
+ cmp r13, 0
+ jz %%_hash_last_8
+ GHASH_LAST_7 %%GDATA_KEY, xmm0, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7
+ ;; XOR the partial word into the hash
+ vpxor xmm14, xmm14, xmm8
+ jmp %%_ghash_done
+%endif
+%%_hash_last_8:
+ GHASH_LAST_8 %%GDATA_KEY, xmm0, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8
+
+%%_ghash_done:
+ vmovdqu [%%GDATA_CTX + CurCount], xmm9 ; my_ctx_data.current_counter = xmm9
+ vmovdqu [%%GDATA_CTX + AadHash], xmm14 ; my_ctx_data.aad hash = xmm14
+
+%%_enc_dec_done:
+
+
+%endmacro
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; GCM_COMPLETE Finishes Encryption/Decryption of the last partial block after GCM_UPDATE finishes.
+; Input: A gcm_key_data * (GDATA_KEY), gcm_context_data (GDATA_CTX) and whether encoding or decoding (ENC_DEC).
+; Output: Authentication Tag (AUTH_TAG) and Authentication Tag length (AUTH_TAG_LEN)
+; Clobbers rax, r10-r12, and xmm0, xmm1, xmm5, xmm6, xmm9, xmm11, xmm14, xmm15
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
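+; Note: the final GHASH value (with the len(A)||len(C) block folded in) is XORed
+; with E(K, Y0) and truncated to AUTH_TAG_LEN; 8, 12 and 16 byte tags take the
+; fast paths below, other lengths go through simd_store_avx.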
+%macro GCM_COMPLETE 6
+%define %%GDATA_KEY %1
+%define %%GDATA_CTX %2
+%define %%AUTH_TAG %3
+%define %%AUTH_TAG_LEN %4
+%define %%ENC_DEC %5
+%define %%INSTANCE_TYPE %6
+%define %%PLAIN_CYPH_LEN rax
+
+ vmovdqu xmm13, [%%GDATA_KEY + HashKey]
+ ;; Start AES as early as possible
+ vmovdqu xmm9, [%%GDATA_CTX + OrigIV] ; xmm9 = Y0
+ ENCRYPT_SINGLE_BLOCK %%GDATA_KEY, xmm9 ; E(K, Y0)
+
+%ifidn %%INSTANCE_TYPE, multi_call
+ ;; If the GCM function is called as a single function call rather
+ ;; than invoking the individual parts (init, update, finalize) we
+ ;; can remove a write to read dependency on AadHash.
+ vmovdqu xmm14, [%%GDATA_CTX + AadHash]
+
+ ;; Encrypt the final partial block. If we did this as a single call then
+ ;; the partial block was handled in the main GCM_ENC_DEC macro.
+ mov r12, [%%GDATA_CTX + PBlockLen]
+ cmp r12, 0
+
+ je %%_partial_done
+
+ GHASH_MUL xmm14, xmm13, xmm0, xmm10, xmm11, xmm5, xmm6 ;GHASH computation for the last <16 Byte block
+ vmovdqu [%%GDATA_CTX + AadHash], xmm14
+
+%%_partial_done:
+
+%endif
+
+ mov r12, [%%GDATA_CTX + AadLen] ; r12 = aadLen (number of bytes)
+ mov %%PLAIN_CYPH_LEN, [%%GDATA_CTX + InLen]
+
+ shl r12, 3 ; convert into number of bits
+ vmovd xmm15, r12d ; len(A) in xmm15
+
+ shl %%PLAIN_CYPH_LEN, 3 ; len(C) in bits (*128)
+ vmovq xmm1, %%PLAIN_CYPH_LEN
+ vpslldq xmm15, xmm15, 8 ; xmm15 = len(A)|| 0x0000000000000000
+ vpxor xmm15, xmm15, xmm1 ; xmm15 = len(A)||len(C)
+
+ vpxor xmm14, xmm15
+ GHASH_MUL xmm14, xmm13, xmm0, xmm10, xmm11, xmm5, xmm6
+ vpshufb xmm14, [rel SHUF_MASK] ; perform a 16Byte swap
+
+ vpxor xmm9, xmm9, xmm14
+
+
+%%_return_T:
+ mov r10, %%AUTH_TAG ; r10 = authTag
+ mov r11, %%AUTH_TAG_LEN ; r11 = auth_tag_len
+
+ cmp r11, 16
+ je %%_T_16
+
+ cmp r11, 12
+ je %%_T_12
+
+ cmp r11, 8
+ je %%_T_8
+
+ simd_store_avx r10, xmm9, r11, r12, rax
+ jmp %%_return_T_done
+%%_T_8:
+ vmovq rax, xmm9
+ mov [r10], rax
+ jmp %%_return_T_done
+%%_T_12:
+ vmovq rax, xmm9
+ mov [r10], rax
+ vpsrldq xmm9, xmm9, 8
+ vmovd eax, xmm9
+ mov [r10 + 8], eax
+ jmp %%_return_T_done
+%%_T_16:
+ vmovdqu [r10], xmm9
+
+%%_return_T_done:
+
+%ifdef SAFE_DATA
+ ;; Clear sensitive data from context structure
+ vpxor xmm0, xmm0
+ vmovdqu [%%GDATA_CTX + AadHash], xmm0
+ vmovdqu [%%GDATA_CTX + PBlockEncKey], xmm0
+%endif
+%endmacro ; GCM_COMPLETE
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;void aes_gcm_precomp_128_avx_gen4 /
+; aes_gcm_precomp_192_avx_gen4 /
+; aes_gcm_precomp_256_avx_gen4
+; (struct gcm_key_data *key_data)
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+MKGLOBAL(FN_NAME(precomp,_),function,)
+FN_NAME(precomp,_):
+
+%ifdef SAFE_PARAM
+ ;; Check key_data != NULL
+ cmp arg1, 0
+ jz exit_precomp
+%endif
+
+ push r12
+ push r13
+ push r14
+ push r15
+
+ mov r14, rsp
+
+
+
+ sub rsp, VARIABLE_OFFSET
+ and rsp, ~63 ; align rsp to 64 bytes
+
+%ifidn __OUTPUT_FORMAT__, win64
+ ; only xmm6 needs to be maintained
+ vmovdqu [rsp + LOCAL_STORAGE + 0*16],xmm6
+%endif
+
+ vpxor xmm6, xmm6
+ ENCRYPT_SINGLE_BLOCK arg1, xmm6 ; xmm6 = HashKey
+
+ vpshufb xmm6, [rel SHUF_MASK]
+ ;;;;;;;;;;;;;;; PRECOMPUTATION of HashKey<<1 mod poly from the HashKey;;;;;;;;;;;;;;;
+ vmovdqa xmm2, xmm6
+ vpsllq xmm6, xmm6, 1
+ vpsrlq xmm2, xmm2, 63
+ vmovdqa xmm1, xmm2
+ vpslldq xmm2, xmm2, 8
+ vpsrldq xmm1, xmm1, 8
+ vpor xmm6, xmm6, xmm2
+ ;reduction
+ vpshufd xmm2, xmm1, 00100100b
+ vpcmpeqd xmm2, [rel TWOONE]
+ vpand xmm2, xmm2, [rel POLY]
+ vpxor xmm6, xmm6, xmm2 ; xmm6 holds the HashKey<<1 mod poly
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ vmovdqu [arg1 + HashKey], xmm6 ; store HashKey<<1 mod poly
+
+
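+ ;; PRECOMPUTE derives the higher powers of the hash key (HashKey_2 ..
+ ;; HashKey_8 in this implementation) used by the parallel GHASH code paths.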
+ PRECOMPUTE arg1, xmm6, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5
+
+%ifidn __OUTPUT_FORMAT__, win64
+ vmovdqu xmm6, [rsp + LOCAL_STORAGE + 0*16]
+%endif
+ mov rsp, r14
+
+ pop r15
+ pop r14
+ pop r13
+ pop r12
+
+%ifdef SAFE_DATA
+ clear_scratch_gps_asm
+ clear_scratch_ymms_asm
+%endif
+exit_precomp:
+
+ ret
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;void aes_gcm_init_128_avx_gen4 / aes_gcm_init_192_avx_gen4 / aes_gcm_init_256_avx_gen4
+; (const struct gcm_key_data *key_data,
+; struct gcm_context_data *context_data,
+; u8 *iv,
+; const u8 *aad,
+; u64 aad_len);
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
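+; Typical multi-call usage (illustrative sketch; see the library headers for
+; the exact prototypes):
+;     aes_gcm_init_128_avx_gen4(key_data, ctx, iv, aad, aad_len);
+;     aes_gcm_enc_128_update_avx_gen4(key_data, ctx, out, in, len);  ; repeat as needed
+;     aes_gcm_enc_128_finalize_avx_gen4(key_data, ctx, auth_tag, auth_tag_len);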
+MKGLOBAL(FN_NAME(init,_),function,)
+FN_NAME(init,_):
+ push r12
+ push r13
+%ifidn __OUTPUT_FORMAT__, win64
+ push r14
+ push r15
+ mov r14, rsp
+ ; xmm6:xmm15 need to be maintained for Windows
+ sub rsp, 1*16
+ movdqu [rsp + 0*16], xmm6
+%endif
+
+%ifdef SAFE_PARAM
+ ;; Check key_data != NULL
+ cmp arg1, 0
+ jz exit_init
+
+ ;; Check context_data != NULL
+ cmp arg2, 0
+ jz exit_init
+
+ ;; Check IV != NULL
+ cmp arg3, 0
+ jz exit_init
+
+ ;; Check if aad_len == 0
+ cmp arg5, 0
+ jz skip_aad_check_init
+
+ ;; Check aad != NULL (aad_len != 0)
+ cmp arg4, 0
+ jz exit_init
+
+skip_aad_check_init:
+%endif
+ GCM_INIT arg1, arg2, arg3, arg4, arg5
+
+%ifdef SAFE_DATA
+ clear_scratch_gps_asm
+ clear_scratch_ymms_asm
+%endif
+exit_init:
+
+%ifidn __OUTPUT_FORMAT__, win64
+ movdqu xmm6, [rsp + 0*16]
+ mov rsp, r14
+ pop r15
+ pop r14
+%endif
+ pop r13
+ pop r12
+ ret
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;void aes_gcm_enc_128_update_avx_gen4 / aes_gcm_enc_192_update_avx_gen4 /
+; aes_gcm_enc_256_update_avx_gen4
+; (const struct gcm_key_data *key_data,
+; struct gcm_context_data *context_data,
+; u8 *out,
+; const u8 *in,
+; u64 plaintext_len);
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+MKGLOBAL(FN_NAME(enc,_update_),function,)
+FN_NAME(enc,_update_):
+
+ FUNC_SAVE
+
+%ifdef SAFE_PARAM
+ ;; Check key_data != NULL
+ cmp arg1, 0
+ jz exit_update_enc
+
+ ;; Check context_data != NULL
+ cmp arg2, 0
+ jz exit_update_enc
+
+ ;; Check if plaintext_len == 0
+ cmp arg5, 0
+ jz skip_in_out_check_update_enc
+
+ ;; Check out != NULL (plaintext_len != 0)
+ cmp arg3, 0
+ jz exit_update_enc
+
+ ;; Check in != NULL (plaintext_len != 0)
+ cmp arg4, 0
+ jz exit_update_enc
+
+skip_in_out_check_update_enc:
+%endif
+ GCM_ENC_DEC arg1, arg2, arg3, arg4, arg5, ENC, multi_call
+
+exit_update_enc:
+ FUNC_RESTORE
+
+ ret
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;void aes_gcm_dec_128_update_avx_gen4 / aes_gcm_dec_192_update_avx_gen4 /
+; aes_gcm_dec_256_update_avx_gen4
+; (const struct gcm_key_data *key_data,
+; struct gcm_context_data *context_data,
+; u8 *out,
+; const u8 *in,
+; u64 plaintext_len);
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+MKGLOBAL(FN_NAME(dec,_update_),function,)
+FN_NAME(dec,_update_):
+
+ FUNC_SAVE
+
+%ifdef SAFE_PARAM
+ ;; Check key_data != NULL
+ cmp arg1, 0
+ jz exit_update_dec
+
+ ;; Check context_data != NULL
+ cmp arg2, 0
+ jz exit_update_dec
+
+ ;; Check if plaintext_len == 0
+ cmp arg5, 0
+ jz skip_in_out_check_update_dec
+
+ ;; Check out != NULL (plaintext_len != 0)
+ cmp arg3, 0
+ jz exit_update_dec
+
+ ;; Check in != NULL (plaintext_len != 0)
+ cmp arg4, 0
+ jz exit_update_dec
+
+skip_in_out_check_update_dec:
+%endif
+
+ GCM_ENC_DEC arg1, arg2, arg3, arg4, arg5, DEC, multi_call
+
+exit_update_dec:
+ FUNC_RESTORE
+
+ ret
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;void aes_gcm_enc_128_finalize_avx_gen4 / aes_gcm_enc_192_finalize_avx_gen4 /
+; aes_gcm_enc_256_finalize_avx_gen4
+; (const struct gcm_key_data *key_data,
+; struct gcm_context_data *context_data,
+; u8 *auth_tag,
+; u64 auth_tag_len);
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+MKGLOBAL(FN_NAME(enc,_finalize_),function,)
+FN_NAME(enc,_finalize_):
+
+%ifdef SAFE_PARAM
+ ;; Check key_data != NULL
+ cmp arg1, 0
+ jz exit_enc_fin
+
+ ;; Check context_data != NULL
+ cmp arg2, 0
+ jz exit_enc_fin
+
+ ;; Check auth_tag != NULL
+ cmp arg3, 0
+ jz exit_enc_fin
+
+ ;; Check auth_tag_len == 0 or > 16
+ cmp arg4, 0
+ jz exit_enc_fin
+
+ cmp arg4, 16
+ ja exit_enc_fin
+%endif
+ push r12
+
+%ifidn __OUTPUT_FORMAT__, win64
+ ; xmm6:xmm15 need to be maintained for Windows
+ sub rsp, 5*16
+ vmovdqu [rsp + 0*16], xmm6
+ vmovdqu [rsp + 1*16], xmm9
+ vmovdqu [rsp + 2*16], xmm11
+ vmovdqu [rsp + 3*16], xmm14
+ vmovdqu [rsp + 4*16], xmm15
+%endif
+ GCM_COMPLETE arg1, arg2, arg3, arg4, ENC, multi_call
+
+%ifdef SAFE_DATA
+ clear_scratch_gps_asm
+ clear_scratch_ymms_asm
+%endif
+%ifidn __OUTPUT_FORMAT__, win64
+ vmovdqu xmm15, [rsp + 4*16]
+ vmovdqu xmm14, [rsp + 3*16]
+ vmovdqu xmm11, [rsp + 2*16]
+ vmovdqu xmm9, [rsp + 1*16]
+ vmovdqu xmm6, [rsp + 0*16]
+ add rsp, 5*16
+%endif
+ pop r12
+exit_enc_fin:
+
+ ret
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;void aes_gcm_dec_128_finalize_avx_gen4 / aes_gcm_dec_192_finalize_avx_gen4 /
+; aes_gcm_dec_256_finalize_avx_gen4
+; (const struct gcm_key_data *key_data,
+; struct gcm_context_data *context_data,
+; u8 *auth_tag,
+; u64 auth_tag_len);
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+MKGLOBAL(FN_NAME(dec,_finalize_),function,)
+FN_NAME(dec,_finalize_):
+
+%ifdef SAFE_PARAM
+ ;; Check key_data != NULL
+ cmp arg1, 0
+ jz exit_dec_fin
+
+ ;; Check context_data != NULL
+ cmp arg2, 0
+ jz exit_dec_fin
+
+ ;; Check auth_tag != NULL
+ cmp arg3, 0
+ jz exit_dec_fin
+
+ ;; Check auth_tag_len == 0 or > 16
+ cmp arg4, 0
+ jz exit_dec_fin
+
+ cmp arg4, 16
+ ja exit_dec_fin
+%endif
+
+ push r12
+
+%ifidn __OUTPUT_FORMAT__, win64
+ ; xmm6:xmm15 need to be maintained for Windows
+ sub rsp, 5*16
+ vmovdqu [rsp + 0*16], xmm6
+ vmovdqu [rsp + 1*16], xmm9
+ vmovdqu [rsp + 2*16], xmm11
+ vmovdqu [rsp + 3*16], xmm14
+ vmovdqu [rsp + 4*16], xmm15
+%endif
+ GCM_COMPLETE arg1, arg2, arg3, arg4, DEC, multi_call
+
+%ifdef SAFE_DATA
+ clear_scratch_gps_asm
+ clear_scratch_ymms_asm
+%endif
+%ifidn __OUTPUT_FORMAT__, win64
+ vmovdqu xmm15, [rsp + 4*16]
+ vmovdqu xmm14, [rsp + 3*16]
+ vmovdqu xmm11, [rsp + 2*16]
+ vmovdqu xmm9, [rsp + 1*16]
+ vmovdqu xmm6, [rsp + 0*16]
+ add rsp, 5*16
+%endif
+
+ pop r12
+
+exit_dec_fin:
+ ret
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;void aes_gcm_enc_128_avx_gen4 / aes_gcm_enc_192_avx_gen4 / aes_gcm_enc_256_avx_gen4
+; (const struct gcm_key_data *key_data,
+; struct gcm_context_data *context_data,
+; u8 *out,
+; const u8 *in,
+; u64 plaintext_len,
+; u8 *iv,
+; const u8 *aad,
+; u64 aad_len,
+; u8 *auth_tag,
+; u64 auth_tag_len);
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+MKGLOBAL(FN_NAME(enc,_),function,)
+FN_NAME(enc,_):
+
+ FUNC_SAVE
+
+%ifdef SAFE_PARAM
+ ;; Check key_data != NULL
+ cmp arg1, 0
+ jz exit_enc
+
+ ;; Check context_data != NULL
+ cmp arg2, 0
+ jz exit_enc
+
+ ;; Check IV != NULL
+ cmp arg6, 0
+ jz exit_enc
+
+ ;; Check auth_tag != NULL
+ cmp arg9, 0
+ jz exit_enc
+
+ ;; Check auth_tag_len == 0 or > 16
+ cmp arg10, 0
+ jz exit_enc
+
+ cmp arg10, 16
+ ja exit_enc
+
+ ;; Check if plaintext_len == 0
+ cmp arg5, 0
+ jz skip_in_out_check_enc
+
+ ;; Check out != NULL (plaintext_len != 0)
+ cmp arg3, 0
+ jz exit_enc
+
+ ;; Check in != NULL (plaintext_len != 0)
+ cmp arg4, 0
+ jz exit_enc
+
+skip_in_out_check_enc:
+ ;; Check if aad_len == 0
+ cmp arg8, 0
+ jz skip_aad_check_enc
+
+ ;; Check aad != NULL (aad_len != 0)
+ cmp arg7, 0
+ jz exit_enc
+
+skip_aad_check_enc:
+%endif
+ GCM_INIT arg1, arg2, arg6, arg7, arg8
+
+ GCM_ENC_DEC arg1, arg2, arg3, arg4, arg5, ENC, single_call
+
+ GCM_COMPLETE arg1, arg2, arg9, arg10, ENC, single_call
+
+exit_enc:
+ FUNC_RESTORE
+
+ ret
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;void aes_gcm_dec_128_avx_gen4 / aes_gcm_dec_192_avx_gen4 / aes_gcm_dec_256_avx_gen4
+; (const struct gcm_key_data *key_data,
+; struct gcm_context_data *context_data,
+; u8 *out,
+; const u8 *in,
+; u64 plaintext_len,
+; u8 *iv,
+; const u8 *aad,
+; u64 aad_len,
+; u8 *auth_tag,
+; u64 auth_tag_len);
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+MKGLOBAL(FN_NAME(dec,_),function,)
+FN_NAME(dec,_):
+
+ FUNC_SAVE
+
+%ifdef SAFE_PARAM
+ ;; Check key_data != NULL
+ cmp arg1, 0
+ jz exit_dec
+
+ ;; Check context_data != NULL
+ cmp arg2, 0
+ jz exit_dec
+
+ ;; Check IV != NULL
+ cmp arg6, 0
+ jz exit_dec
+
+ ;; Check auth_tag != NULL
+ cmp arg9, 0
+ jz exit_dec
+
+ ;; Check auth_tag_len == 0 or > 16
+ cmp arg10, 0
+ jz exit_dec
+
+ cmp arg10, 16
+ ja exit_dec
+
+ ;; Check if plaintext_len == 0
+ cmp arg5, 0
+ jz skip_in_out_check_dec
+
+ ;; Check out != NULL (plaintext_len != 0)
+ cmp arg3, 0
+ jz exit_dec
+
+ ;; Check in != NULL (plaintext_len != 0)
+ cmp arg4, 0
+ jz exit_dec
+
+skip_in_out_check_dec:
+ ;; Check if aad_len == 0
+ cmp arg8, 0
+ jz skip_aad_check_dec
+
+ ;; Check aad != NULL (aad_len != 0)
+ cmp arg7, 0
+ jz exit_dec
+
+skip_aad_check_dec:
+%endif
+
+
+ GCM_INIT arg1, arg2, arg6, arg7, arg8
+
+ GCM_ENC_DEC arg1, arg2, arg3, arg4, arg5, DEC, single_call
+
+ GCM_COMPLETE arg1, arg2, arg9, arg10, DEC, single_call
+
+exit_dec:
+ FUNC_RESTORE
+
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/avx2/mb_mgr_avx2.c b/src/spdk/intel-ipsec-mb/avx2/mb_mgr_avx2.c
new file mode 100644
index 000000000..7133e64c6
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx2/mb_mgr_avx2.c
@@ -0,0 +1,676 @@
+/*******************************************************************************
+ Copyright (c) 2012-2018, Intel Corporation
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*******************************************************************************/
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#define AVX2
+#define CLEAR_SCRATCH_SIMD_REGS clear_scratch_ymms
+
+#include "intel-ipsec-mb.h"
+#include "include/kasumi_internal.h"
+#include "include/zuc_internal.h"
+#include "include/snow3g.h"
+
+#include "save_xmms.h"
+#include "asm.h"
+#include "des.h"
+#include "cpu_feature.h"
+#include "noaesni.h"
+
+JOB_AES_HMAC *submit_job_aes128_enc_avx(MB_MGR_AES_OOO *state,
+ JOB_AES_HMAC *job);
+JOB_AES_HMAC *flush_job_aes128_enc_avx(MB_MGR_AES_OOO *state);
+
+JOB_AES_HMAC *submit_job_aes192_enc_avx(MB_MGR_AES_OOO *state,
+ JOB_AES_HMAC *job);
+JOB_AES_HMAC *flush_job_aes192_enc_avx(MB_MGR_AES_OOO *state);
+
+JOB_AES_HMAC *submit_job_aes256_enc_avx(MB_MGR_AES_OOO *state,
+ JOB_AES_HMAC *job);
+JOB_AES_HMAC *flush_job_aes256_enc_avx(MB_MGR_AES_OOO *state);
+
+JOB_AES_HMAC *submit_job_aes_xcbc_avx(MB_MGR_AES_XCBC_OOO *state,
+ JOB_AES_HMAC *job);
+JOB_AES_HMAC *flush_job_aes_xcbc_avx(MB_MGR_AES_XCBC_OOO *state);
+
+JOB_AES_HMAC *submit_job_aes_cntr_avx(JOB_AES_HMAC *job);
+
+JOB_AES_HMAC *submit_job_aes_cntr_bit_avx(JOB_AES_HMAC *job);
+
+#define SAVE_XMMS save_xmms_avx
+#define RESTORE_XMMS restore_xmms_avx
+
+#define SUBMIT_JOB_AES128_ENC submit_job_aes128_enc_avx
+#define SUBMIT_JOB_AES128_DEC submit_job_aes128_dec_avx
+#define FLUSH_JOB_AES128_ENC flush_job_aes128_enc_avx
+
+#define SUBMIT_JOB_AES192_ENC submit_job_aes192_enc_avx
+#define SUBMIT_JOB_AES192_DEC submit_job_aes192_dec_avx
+#define FLUSH_JOB_AES192_ENC flush_job_aes192_enc_avx
+
+#define SUBMIT_JOB_AES256_ENC submit_job_aes256_enc_avx
+#define SUBMIT_JOB_AES256_DEC submit_job_aes256_dec_avx
+#define FLUSH_JOB_AES256_ENC flush_job_aes256_enc_avx
+
+#define SUBMIT_JOB_AES_ECB_128_ENC submit_job_aes_ecb_128_enc_avx
+#define SUBMIT_JOB_AES_ECB_128_DEC submit_job_aes_ecb_128_dec_avx
+#define SUBMIT_JOB_AES_ECB_192_ENC submit_job_aes_ecb_192_enc_avx
+#define SUBMIT_JOB_AES_ECB_192_DEC submit_job_aes_ecb_192_dec_avx
+#define SUBMIT_JOB_AES_ECB_256_ENC submit_job_aes_ecb_256_enc_avx
+#define SUBMIT_JOB_AES_ECB_256_DEC submit_job_aes_ecb_256_dec_avx
+
+#define SUBMIT_JOB_AES_CNTR submit_job_aes_cntr_avx
+#define SUBMIT_JOB_AES_CNTR_BIT submit_job_aes_cntr_bit_avx
+
+#define AES_CBC_DEC_128 aes_cbc_dec_128_avx
+#define AES_CBC_DEC_192 aes_cbc_dec_192_avx
+#define AES_CBC_DEC_256 aes_cbc_dec_256_avx
+
+#define AES_CNTR_128 aes_cntr_128_avx
+#define AES_CNTR_192 aes_cntr_192_avx
+#define AES_CNTR_256 aes_cntr_256_avx
+
+#define AES_CNTR_CCM_128 aes_cntr_ccm_128_avx
+
+#define AES_ECB_ENC_128 aes_ecb_enc_128_avx
+#define AES_ECB_ENC_192 aes_ecb_enc_192_avx
+#define AES_ECB_ENC_256 aes_ecb_enc_256_avx
+#define AES_ECB_DEC_128 aes_ecb_dec_128_avx
+#define AES_ECB_DEC_192 aes_ecb_dec_192_avx
+#define AES_ECB_DEC_256 aes_ecb_dec_256_avx
+
+#define SUBMIT_JOB_PON_ENC submit_job_pon_enc_avx
+#define SUBMIT_JOB_PON_DEC submit_job_pon_dec_avx
+#define SUBMIT_JOB_PON_ENC_NO_CTR submit_job_pon_enc_no_ctr_avx
+#define SUBMIT_JOB_PON_DEC_NO_CTR submit_job_pon_dec_no_ctr_avx
+
+#ifndef NO_GCM
+#define AES_GCM_DEC_128 aes_gcm_dec_128_avx_gen4
+#define AES_GCM_ENC_128 aes_gcm_enc_128_avx_gen4
+#define AES_GCM_DEC_192 aes_gcm_dec_192_avx_gen4
+#define AES_GCM_ENC_192 aes_gcm_enc_192_avx_gen4
+#define AES_GCM_DEC_256 aes_gcm_dec_256_avx_gen4
+#define AES_GCM_ENC_256 aes_gcm_enc_256_avx_gen4
+
+#define SUBMIT_JOB_AES_GCM_DEC submit_job_aes_gcm_dec_avx2
+#define FLUSH_JOB_AES_GCM_DEC flush_job_aes_gcm_dec_avx2
+#define SUBMIT_JOB_AES_GCM_ENC submit_job_aes_gcm_enc_avx2
+#define FLUSH_JOB_AES_GCM_ENC flush_job_aes_gcm_enc_avx2
+#endif /* NO_GCM */
+
+#define SUBMIT_JOB_AES_XCBC submit_job_aes_xcbc_avx
+#define FLUSH_JOB_AES_XCBC flush_job_aes_xcbc_avx
+
+#define SUBMIT_JOB_AES128_DEC submit_job_aes128_dec_avx
+#define SUBMIT_JOB_AES192_DEC submit_job_aes192_dec_avx
+#define SUBMIT_JOB_AES256_DEC submit_job_aes256_dec_avx
+#define QUEUE_SIZE queue_size_avx2
+
+#define SUBMIT_JOB_AES_ENC SUBMIT_JOB_AES_ENC_AVX2
+#define FLUSH_JOB_AES_ENC FLUSH_JOB_AES_ENC_AVX2
+#define SUBMIT_JOB_AES_DEC SUBMIT_JOB_AES_DEC_AVX2
+
+
+
+JOB_AES_HMAC *submit_job_hmac_avx2(MB_MGR_HMAC_SHA_1_OOO *state,
+ JOB_AES_HMAC *job);
+JOB_AES_HMAC *flush_job_hmac_avx2(MB_MGR_HMAC_SHA_1_OOO *state);
+
+JOB_AES_HMAC *submit_job_hmac_sha_224_avx2(MB_MGR_HMAC_SHA_256_OOO *state,
+ JOB_AES_HMAC *job);
+JOB_AES_HMAC *flush_job_hmac_sha_224_avx2(MB_MGR_HMAC_SHA_256_OOO *state);
+
+JOB_AES_HMAC *submit_job_hmac_sha_256_avx2(MB_MGR_HMAC_SHA_256_OOO *state,
+ JOB_AES_HMAC *job);
+JOB_AES_HMAC *flush_job_hmac_sha_256_avx2(MB_MGR_HMAC_SHA_256_OOO *state);
+
+JOB_AES_HMAC *submit_job_hmac_sha_384_avx2(MB_MGR_HMAC_SHA_512_OOO *state,
+ JOB_AES_HMAC *job);
+JOB_AES_HMAC *flush_job_hmac_sha_384_avx2(MB_MGR_HMAC_SHA_512_OOO *state);
+
+JOB_AES_HMAC *submit_job_hmac_sha_512_avx2(MB_MGR_HMAC_SHA_512_OOO *state,
+ JOB_AES_HMAC *job);
+JOB_AES_HMAC *flush_job_hmac_sha_512_avx2(MB_MGR_HMAC_SHA_512_OOO *state);
+
+JOB_AES_HMAC *submit_job_hmac_md5_avx2(MB_MGR_HMAC_MD5_OOO *state,
+ JOB_AES_HMAC *job);
+JOB_AES_HMAC *flush_job_hmac_md5_avx2(MB_MGR_HMAC_MD5_OOO *state);
+
+JOB_AES_HMAC *submit_job_aes_cmac_auth_avx(MB_MGR_CMAC_OOO *state,
+ JOB_AES_HMAC *job);
+
+JOB_AES_HMAC *flush_job_aes_cmac_auth_avx(MB_MGR_CMAC_OOO *state);
+
+JOB_AES_HMAC *submit_job_aes_ccm_auth_avx(MB_MGR_CCM_OOO *state,
+ JOB_AES_HMAC *job);
+
+JOB_AES_HMAC *flush_job_aes_ccm_auth_avx(MB_MGR_CCM_OOO *state);
+
+#define SUBMIT_JOB_HMAC submit_job_hmac_avx2
+#define FLUSH_JOB_HMAC flush_job_hmac_avx2
+#define SUBMIT_JOB_HMAC_SHA_224 submit_job_hmac_sha_224_avx2
+#define FLUSH_JOB_HMAC_SHA_224 flush_job_hmac_sha_224_avx2
+#define SUBMIT_JOB_HMAC_SHA_256 submit_job_hmac_sha_256_avx2
+#define FLUSH_JOB_HMAC_SHA_256 flush_job_hmac_sha_256_avx2
+#define SUBMIT_JOB_HMAC_SHA_384 submit_job_hmac_sha_384_avx2
+#define FLUSH_JOB_HMAC_SHA_384 flush_job_hmac_sha_384_avx2
+#define SUBMIT_JOB_HMAC_SHA_512 submit_job_hmac_sha_512_avx2
+#define FLUSH_JOB_HMAC_SHA_512 flush_job_hmac_sha_512_avx2
+#define SUBMIT_JOB_HMAC_MD5 submit_job_hmac_md5_avx2
+#define FLUSH_JOB_HMAC_MD5 flush_job_hmac_md5_avx2
+
+/* ====================================================================== */
+
+#define SUBMIT_JOB submit_job_avx2
+#define FLUSH_JOB flush_job_avx2
+#define SUBMIT_JOB_NOCHECK submit_job_nocheck_avx2
+#define QUEUE_SIZE queue_size_avx2
+#define GET_NEXT_JOB get_next_job_avx2
+#define GET_COMPLETED_JOB get_completed_job_avx2
+
+/* ====================================================================== */
+
+#define SUBMIT_JOB_HASH SUBMIT_JOB_HASH_AVX2
+#define FLUSH_JOB_HASH FLUSH_JOB_HASH_AVX2
+
+/* ====================================================================== */
+
+#define AES_CFB_128_ONE aes_cfb_128_one_avx2
+
+void aes128_cbc_mac_x8(AES_ARGS *args, uint64_t len);
+
+#define AES128_CBC_MAC aes128_cbc_mac_x8
+
+#define FLUSH_JOB_AES_CCM_AUTH flush_job_aes_ccm_auth_avx
+#define SUBMIT_JOB_AES_CCM_AUTH submit_job_aes_ccm_auth_avx
+
+#define FLUSH_JOB_AES_CMAC_AUTH flush_job_aes_cmac_auth_avx
+#define SUBMIT_JOB_AES_CMAC_AUTH submit_job_aes_cmac_auth_avx
+
+/* ====================================================================== */
+
+/*
+ * GCM submit / flush API for AVX2 arch
+ */
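+/*
+ * Note: GCM jobs are processed synchronously in the submit call (there is no
+ * out-of-order lane state kept for GCM), which is why the flush functions
+ * below simply return NULL.
+ */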
+#ifndef NO_GCM
+static JOB_AES_HMAC *
+submit_job_aes_gcm_dec_avx2(MB_MGR *state, JOB_AES_HMAC *job)
+{
+ DECLARE_ALIGNED(struct gcm_context_data ctx, 16);
+ (void) state;
+
+ if (16 == job->aes_key_len_in_bytes)
+ AES_GCM_DEC_128(job->aes_dec_key_expanded, &ctx, job->dst,
+ job->src +
+ job->cipher_start_src_offset_in_bytes,
+ job->msg_len_to_cipher_in_bytes,
+ job->iv,
+ job->u.GCM.aad, job->u.GCM.aad_len_in_bytes,
+ job->auth_tag_output,
+ job->auth_tag_output_len_in_bytes);
+ else if (24 == job->aes_key_len_in_bytes)
+ AES_GCM_DEC_192(job->aes_dec_key_expanded, &ctx, job->dst,
+ job->src +
+ job->cipher_start_src_offset_in_bytes,
+ job->msg_len_to_cipher_in_bytes,
+ job->iv,
+ job->u.GCM.aad, job->u.GCM.aad_len_in_bytes,
+ job->auth_tag_output,
+ job->auth_tag_output_len_in_bytes);
+ else /* assume 32 bytes */
+ AES_GCM_DEC_256(job->aes_dec_key_expanded, &ctx, job->dst,
+ job->src +
+ job->cipher_start_src_offset_in_bytes,
+ job->msg_len_to_cipher_in_bytes,
+ job->iv,
+ job->u.GCM.aad, job->u.GCM.aad_len_in_bytes,
+ job->auth_tag_output,
+ job->auth_tag_output_len_in_bytes);
+
+ job->status = STS_COMPLETED;
+ return job;
+}
+
+static JOB_AES_HMAC *
+flush_job_aes_gcm_dec_avx2(MB_MGR *state, JOB_AES_HMAC *job)
+{
+ (void) state;
+ (void) job;
+ return NULL;
+}
+
+static JOB_AES_HMAC *
+submit_job_aes_gcm_enc_avx2(MB_MGR *state, JOB_AES_HMAC *job)
+{
+ DECLARE_ALIGNED(struct gcm_context_data ctx, 16);
+ (void) state;
+
+ if (16 == job->aes_key_len_in_bytes)
+ AES_GCM_ENC_128(job->aes_enc_key_expanded, &ctx, job->dst,
+ job->src +
+ job->cipher_start_src_offset_in_bytes,
+ job->msg_len_to_cipher_in_bytes, job->iv,
+ job->u.GCM.aad, job->u.GCM.aad_len_in_bytes,
+ job->auth_tag_output,
+ job->auth_tag_output_len_in_bytes);
+ else if (24 == job->aes_key_len_in_bytes)
+ AES_GCM_ENC_192(job->aes_enc_key_expanded, &ctx, job->dst,
+ job->src +
+ job->cipher_start_src_offset_in_bytes,
+ job->msg_len_to_cipher_in_bytes, job->iv,
+ job->u.GCM.aad, job->u.GCM.aad_len_in_bytes,
+ job->auth_tag_output,
+ job->auth_tag_output_len_in_bytes);
+ else /* assume 32 bytes */
+ AES_GCM_ENC_256(job->aes_enc_key_expanded, &ctx, job->dst,
+ job->src +
+ job->cipher_start_src_offset_in_bytes,
+ job->msg_len_to_cipher_in_bytes, job->iv,
+ job->u.GCM.aad, job->u.GCM.aad_len_in_bytes,
+ job->auth_tag_output,
+ job->auth_tag_output_len_in_bytes);
+
+ job->status = STS_COMPLETED;
+ return job;
+}
+
+static JOB_AES_HMAC *
+flush_job_aes_gcm_enc_avx2(MB_MGR *state, JOB_AES_HMAC *job)
+{
+ (void) state;
+ (void) job;
+ return NULL;
+}
+#endif /* NO_GCM */
+
+/* ====================================================================== */
+
+void
+init_mb_mgr_avx2(MB_MGR *state)
+{
+ unsigned int j;
+ uint8_t *p;
+ size_t size;
+
+ state->features = cpu_feature_adjust(state->flags,
+ cpu_feature_detect());
+
+ if (!(state->features & IMB_FEATURE_AESNI)) {
+ init_mb_mgr_sse_no_aesni(state);
+ return;
+ }
+
+ /* Init AES out-of-order fields */
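+ /*
+ * Note: the unused_lanes values below appear to be nibble-packed
+ * stacks of free lane indices (0..7), with the 0xF nibble marking
+ * the end of the list.
+ */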
+ memset(state->aes128_ooo.lens, 0xFF,
+ sizeof(state->aes128_ooo.lens));
+ memset(&state->aes128_ooo.lens[0], 0,
+ sizeof(state->aes128_ooo.lens[0]) * 8);
+ memset(state->aes128_ooo.job_in_lane, 0,
+ sizeof(state->aes128_ooo.job_in_lane));
+ state->aes128_ooo.unused_lanes = 0xF76543210;
+ state->aes128_ooo.num_lanes_inuse = 0;
+
+ memset(state->aes192_ooo.lens, 0xFF,
+ sizeof(state->aes192_ooo.lens));
+ memset(&state->aes192_ooo.lens[0], 0,
+ sizeof(state->aes192_ooo.lens[0]) * 8);
+ memset(state->aes192_ooo.job_in_lane, 0,
+ sizeof(state->aes192_ooo.job_in_lane));
+ state->aes192_ooo.unused_lanes = 0xF76543210;
+ state->aes192_ooo.num_lanes_inuse = 0;
+
+ memset(&state->aes256_ooo.lens, 0xFF,
+ sizeof(state->aes256_ooo.lens));
+ memset(&state->aes256_ooo.lens[0], 0,
+ sizeof(state->aes256_ooo.lens[0]) * 8);
+ memset(state->aes256_ooo.job_in_lane, 0,
+ sizeof(state->aes256_ooo.job_in_lane));
+ state->aes256_ooo.unused_lanes = 0xF76543210;
+ state->aes256_ooo.num_lanes_inuse = 0;
+
+ /* DOCSIS SEC BPI (AES CBC + AES CFB for partial block)
+ * uses same settings as AES128 CBC.
+ */
+ memset(state->docsis_sec_ooo.lens, 0xFF,
+ sizeof(state->docsis_sec_ooo.lens));
+ memset(&state->docsis_sec_ooo.lens[0], 0,
+ sizeof(state->docsis_sec_ooo.lens[0]) * 8);
+ memset(state->docsis_sec_ooo.job_in_lane, 0,
+ sizeof(state->docsis_sec_ooo.job_in_lane));
+ state->docsis_sec_ooo.unused_lanes = 0xF76543210;
+ state->docsis_sec_ooo.num_lanes_inuse = 0;
+
+
+ /* Init HMAC/SHA1 out-of-order fields */
+ state->hmac_sha_1_ooo.lens[0] = 0;
+ state->hmac_sha_1_ooo.lens[1] = 0;
+ state->hmac_sha_1_ooo.lens[2] = 0;
+ state->hmac_sha_1_ooo.lens[3] = 0;
+ state->hmac_sha_1_ooo.lens[4] = 0;
+ state->hmac_sha_1_ooo.lens[5] = 0;
+ state->hmac_sha_1_ooo.lens[6] = 0;
+ state->hmac_sha_1_ooo.lens[7] = 0;
+ state->hmac_sha_1_ooo.unused_lanes = 0xF76543210;
+ for (j = 0; j < AVX2_NUM_SHA1_LANES; j++) {
+ state->hmac_sha_1_ooo.ldata[j].job_in_lane = NULL;
+ state->hmac_sha_1_ooo.ldata[j].extra_block[64] = 0x80;
+ memset(state->hmac_sha_1_ooo.ldata[j].extra_block + 65,
+ 0x00,
+ 64 + 7);
+ p = state->hmac_sha_1_ooo.ldata[j].outer_block;
+ memset(p + 5*4 + 1,
+ 0x00,
+ 64 - 5*4 - 1 - 2);
+ p[5 * 4] = 0x80;
+ p[64 - 2] = 0x02;
+ p[64 - 1] = 0xA0;
+ }
+ /* Init HMAC/SHA224 out-of-order fields */
+ state->hmac_sha_224_ooo.lens[0] = 0;
+ state->hmac_sha_224_ooo.lens[1] = 0;
+ state->hmac_sha_224_ooo.lens[2] = 0;
+ state->hmac_sha_224_ooo.lens[3] = 0;
+ state->hmac_sha_224_ooo.lens[4] = 0;
+ state->hmac_sha_224_ooo.lens[5] = 0;
+ state->hmac_sha_224_ooo.lens[6] = 0;
+ state->hmac_sha_224_ooo.lens[7] = 0;
+ state->hmac_sha_224_ooo.unused_lanes = 0xF76543210;
+ /* sha256 and sha224 are very similar except for
+ * digest constants and output size
+ */
+ for (j = 0; j < AVX2_NUM_SHA256_LANES; j++) {
+ state->hmac_sha_224_ooo.ldata[j].job_in_lane = NULL;
+
+ p = state->hmac_sha_224_ooo.ldata[j].extra_block;
+ size = sizeof(state->hmac_sha_224_ooo.ldata[j].extra_block);
+ memset (p, 0x00, size);
+ p[64] = 0x80;
+
+ p = state->hmac_sha_224_ooo.ldata[j].outer_block;
+ size = sizeof(state->hmac_sha_224_ooo.ldata[j].outer_block);
+ memset(p, 0x00, size);
+ p[7 * 4] = 0x80; /* digest 7 words long */
+ p[64 - 2] = 0x02; /* length in little endian = 0x02E0 */
+ p[64 - 1] = 0xE0;
+ }
+
+ /* Init HMAC/SHA256 out-of-order fields */
+ state->hmac_sha_256_ooo.lens[0] = 0;
+ state->hmac_sha_256_ooo.lens[1] = 0;
+ state->hmac_sha_256_ooo.lens[2] = 0;
+ state->hmac_sha_256_ooo.lens[3] = 0;
+ state->hmac_sha_256_ooo.lens[4] = 0;
+ state->hmac_sha_256_ooo.lens[5] = 0;
+ state->hmac_sha_256_ooo.lens[6] = 0;
+ state->hmac_sha_256_ooo.lens[7] = 0;
+ state->hmac_sha_256_ooo.unused_lanes = 0xF76543210;
+ for (j = 0; j < AVX2_NUM_SHA256_LANES; j++) {
+ state->hmac_sha_256_ooo.ldata[j].job_in_lane = NULL;
+ state->hmac_sha_256_ooo.ldata[j].extra_block[64] = 0x80;
+ memset(state->hmac_sha_256_ooo.ldata[j].extra_block + 65,
+ 0x00,
+ 64 + 7);
+ /* hmac related */
+ p = state->hmac_sha_256_ooo.ldata[j].outer_block;
+ memset(p + 8*4 + 1,
+ 0x00,
+ 64 - 8*4 - 1 - 2);
+ p[8 * 4] = 0x80; /* 8 digest words */
+ p[64 - 2] = 0x03; /* length */
+ p[64 - 1] = 0x00;
+ }
+
+ /* Init HMAC/SHA384 out-of-order fields */
+ state->hmac_sha_384_ooo.lens[0] = 0;
+ state->hmac_sha_384_ooo.lens[1] = 0;
+ state->hmac_sha_384_ooo.lens[2] = 0;
+ state->hmac_sha_384_ooo.lens[3] = 0;
+ state->hmac_sha_384_ooo.lens[4] = 0xFFFF;
+ state->hmac_sha_384_ooo.lens[5] = 0xFFFF;
+ state->hmac_sha_384_ooo.lens[6] = 0xFFFF;
+ state->hmac_sha_384_ooo.lens[7] = 0xFFFF;
+ state->hmac_sha_384_ooo.unused_lanes = 0xFF03020100;
+ for (j = 0; j < AVX2_NUM_SHA512_LANES; j++) {
+ MB_MGR_HMAC_SHA_512_OOO *ctx = &state->hmac_sha_384_ooo;
+
+ ctx->ldata[j].job_in_lane = NULL;
+ ctx->ldata[j].extra_block[SHA_384_BLOCK_SIZE] = 0x80;
+ memset(ctx->ldata[j].extra_block + (SHA_384_BLOCK_SIZE + 1),
+ 0x00, SHA_384_BLOCK_SIZE + 7);
+ p = ctx->ldata[j].outer_block;
+ /* special end point because this length is constant */
+ memset(p + SHA384_DIGEST_SIZE_IN_BYTES + 1, 0x00,
+ SHA_384_BLOCK_SIZE -
+ SHA384_DIGEST_SIZE_IN_BYTES - 1 - 2);
+ /* mark the end */
+ p[SHA384_DIGEST_SIZE_IN_BYTES] = 0x80;
+ /* The hmac outer block length is always of fixed size:
+ * the OKey block (one whole message block, 1024 bits)
+ * plus the length of the inner digest (384 bits),
+ * i.e. 1408 bits == 0x0580.
+ * The input message block needs to be converted to big endian
+ * within the sha implementation before use.
+ */
+ p[SHA_384_BLOCK_SIZE - 2] = 0x05;
+ p[SHA_384_BLOCK_SIZE - 1] = 0x80;
+ }
+
+ /* Init HMAC/SHA512 out-of-order fields */
+ state->hmac_sha_512_ooo.lens[0] = 0;
+ state->hmac_sha_512_ooo.lens[1] = 0;
+ state->hmac_sha_512_ooo.lens[2] = 0;
+ state->hmac_sha_512_ooo.lens[3] = 0;
+ state->hmac_sha_512_ooo.lens[4] = 0xFFFF;
+ state->hmac_sha_512_ooo.lens[5] = 0xFFFF;
+ state->hmac_sha_512_ooo.lens[6] = 0xFFFF;
+ state->hmac_sha_512_ooo.lens[7] = 0xFFFF;
+ state->hmac_sha_512_ooo.unused_lanes = 0xFF03020100;
+ for (j = 0; j < AVX2_NUM_SHA512_LANES; j++) {
+ MB_MGR_HMAC_SHA_512_OOO *ctx = &state->hmac_sha_512_ooo;
+
+ ctx->ldata[j].job_in_lane = NULL;
+ ctx->ldata[j].extra_block[SHA_512_BLOCK_SIZE] = 0x80;
+ memset(ctx->ldata[j].extra_block + (SHA_512_BLOCK_SIZE + 1),
+ 0x00, SHA_512_BLOCK_SIZE + 7);
+ p = ctx->ldata[j].outer_block;
+ /* special end point because this length is constant */
+ memset(p + SHA512_DIGEST_SIZE_IN_BYTES + 1, 0x00,
+ SHA_512_BLOCK_SIZE -
+ SHA512_DIGEST_SIZE_IN_BYTES - 1 - 2);
+ /* mark the end */
+ p[SHA512_DIGEST_SIZE_IN_BYTES] = 0x80;
+ /* The hmac outer block length is always of fixed size:
+ * the OKey block (one whole message block, 1024 bits)
+ * plus the length of the inner digest (512 bits),
+ * i.e. 1536 bits == 0x600.
+ * The input message block needs to be converted to big endian
+ * within the sha implementation before use.
+ */
+ p[SHA_512_BLOCK_SIZE - 2] = 0x06;
+ p[SHA_512_BLOCK_SIZE - 1] = 0x00;
+ }
+
+ /* Init HMAC/MD5 out-of-order fields */
+ state->hmac_md5_ooo.lens[0] = 0;
+ state->hmac_md5_ooo.lens[1] = 0;
+ state->hmac_md5_ooo.lens[2] = 0;
+ state->hmac_md5_ooo.lens[3] = 0;
+ state->hmac_md5_ooo.lens[4] = 0;
+ state->hmac_md5_ooo.lens[5] = 0;
+ state->hmac_md5_ooo.lens[6] = 0;
+ state->hmac_md5_ooo.lens[7] = 0;
+ state->hmac_md5_ooo.lens[8] = 0;
+ state->hmac_md5_ooo.lens[9] = 0;
+ state->hmac_md5_ooo.lens[10] = 0;
+ state->hmac_md5_ooo.lens[11] = 0;
+ state->hmac_md5_ooo.lens[12] = 0;
+ state->hmac_md5_ooo.lens[13] = 0;
+ state->hmac_md5_ooo.lens[14] = 0;
+ state->hmac_md5_ooo.lens[15] = 0;
+ state->hmac_md5_ooo.unused_lanes = 0xFEDCBA9876543210;
+ state->hmac_md5_ooo.num_lanes_inuse = 0;
+ for (j = 0; j < AVX2_NUM_MD5_LANES; j++) {
+ state->hmac_md5_ooo.ldata[j].job_in_lane = NULL;
+
+ p = state->hmac_md5_ooo.ldata[j].extra_block;
+ size = sizeof(state->hmac_md5_ooo.ldata[j].extra_block);
+ memset (p, 0x00, size);
+ p[64] = 0x80;
+
+ p = state->hmac_md5_ooo.ldata[j].outer_block;
+ size = sizeof(state->hmac_md5_ooo.ldata[j].outer_block);
+ memset(p, 0x00, size);
+ p[4 * 4] = 0x80;
+ p[64 - 7] = 0x02;
+ p[64 - 8] = 0x80;
+ }
+
+ /* Init AES/XCBC OOO fields */
+ state->aes_xcbc_ooo.lens[0] = 0;
+ state->aes_xcbc_ooo.lens[1] = 0;
+ state->aes_xcbc_ooo.lens[2] = 0;
+ state->aes_xcbc_ooo.lens[3] = 0;
+ state->aes_xcbc_ooo.lens[4] = 0;
+ state->aes_xcbc_ooo.lens[5] = 0;
+ state->aes_xcbc_ooo.lens[6] = 0;
+ state->aes_xcbc_ooo.lens[7] = 0;
+ state->aes_xcbc_ooo.unused_lanes = 0xF76543210;
+ for (j = 0; j < 8 ; j++) {
+ state->aes_xcbc_ooo.ldata[j].job_in_lane = NULL;
+ state->aes_xcbc_ooo.ldata[j].final_block[16] = 0x80;
+ memset(state->aes_xcbc_ooo.ldata[j].final_block + 17, 0x00, 15);
+ }
+
+ /* Init AES-CCM auth out-of-order fields */
+ for (j = 0; j < 8; j++) {
+ state->aes_ccm_ooo.init_done[j] = 0;
+ state->aes_ccm_ooo.lens[j] = 0;
+ state->aes_ccm_ooo.job_in_lane[j] = NULL;
+ }
+ state->aes_ccm_ooo.unused_lanes = 0xF76543210;
+
+ /* Init AES-CMAC auth out-of-order fields */
+ for (j = 0; j < 8; j++) {
+ state->aes_cmac_ooo.init_done[j] = 0;
+ state->aes_cmac_ooo.lens[j] = 0;
+ state->aes_cmac_ooo.job_in_lane[j] = NULL;
+ }
+ state->aes_cmac_ooo.unused_lanes = 0xF76543210;
+
+ /* Init "in order" components */
+ state->next_job = 0;
+ state->earliest_job = -1;
+
+ /* set handlers */
+ state->get_next_job = get_next_job_avx2;
+ state->submit_job = submit_job_avx2;
+ state->submit_job_nocheck = submit_job_nocheck_avx2;
+ state->get_completed_job = get_completed_job_avx2;
+ state->flush_job = flush_job_avx2;
+ state->queue_size = queue_size_avx2;
+ state->keyexp_128 = aes_keyexp_128_avx2;
+ state->keyexp_192 = aes_keyexp_192_avx2;
+ state->keyexp_256 = aes_keyexp_256_avx2;
+ state->cmac_subkey_gen_128 = aes_cmac_subkey_gen_avx2;
+ state->xcbc_keyexp = aes_xcbc_expand_key_avx2;
+ state->des_key_sched = des_key_schedule;
+ state->sha1_one_block = sha1_one_block_avx2;
+ state->sha1 = sha1_avx2;
+ state->sha224_one_block = sha224_one_block_avx2;
+ state->sha224 = sha224_avx2;
+ state->sha256_one_block = sha256_one_block_avx2;
+ state->sha256 = sha256_avx2;
+ state->sha384_one_block = sha384_one_block_avx2;
+ state->sha384 = sha384_avx2;
+ state->sha512_one_block = sha512_one_block_avx2;
+ state->sha512 = sha512_avx2;
+ state->md5_one_block = md5_one_block_avx2;
+ state->aes128_cfb_one = aes_cfb_128_one_avx2;
+
+ state->eea3_1_buffer = zuc_eea3_1_buffer_avx;
+ state->eea3_4_buffer = zuc_eea3_4_buffer_avx;
+ state->eea3_n_buffer = zuc_eea3_n_buffer_avx;
+ state->eia3_1_buffer = zuc_eia3_1_buffer_avx;
+
+ state->f8_1_buffer = kasumi_f8_1_buffer_avx;
+ state->f8_1_buffer_bit = kasumi_f8_1_buffer_bit_avx;
+ state->f8_2_buffer = kasumi_f8_2_buffer_avx;
+ state->f8_3_buffer = kasumi_f8_3_buffer_avx;
+ state->f8_4_buffer = kasumi_f8_4_buffer_avx;
+ state->f8_n_buffer = kasumi_f8_n_buffer_avx;
+ state->f9_1_buffer = kasumi_f9_1_buffer_avx;
+ state->f9_1_buffer_user = kasumi_f9_1_buffer_user_avx;
+ state->kasumi_init_f8_key_sched = kasumi_init_f8_key_sched_avx;
+ state->kasumi_init_f9_key_sched = kasumi_init_f9_key_sched_avx;
+ state->kasumi_key_sched_size = kasumi_key_sched_size_avx;
+
+ state->snow3g_f8_1_buffer_bit = snow3g_f8_1_buffer_bit_avx2;
+ state->snow3g_f8_1_buffer = snow3g_f8_1_buffer_avx2;
+ state->snow3g_f8_2_buffer = snow3g_f8_2_buffer_avx2;
+ state->snow3g_f8_4_buffer = snow3g_f8_4_buffer_avx2;
+ state->snow3g_f8_8_buffer = snow3g_f8_8_buffer_avx2;
+ state->snow3g_f8_n_buffer = snow3g_f8_n_buffer_avx2;
+ state->snow3g_f8_8_buffer_multikey = snow3g_f8_8_buffer_multikey_avx2;
+ state->snow3g_f8_n_buffer_multikey = snow3g_f8_n_buffer_multikey_avx2;
+ state->snow3g_f9_1_buffer = snow3g_f9_1_buffer_avx2;
+ state->snow3g_init_key_sched = snow3g_init_key_sched_avx2;
+ state->snow3g_key_sched_size = snow3g_key_sched_size_avx2;
+
+#ifndef NO_GCM
+ state->gcm128_enc = aes_gcm_enc_128_avx_gen4;
+ state->gcm192_enc = aes_gcm_enc_192_avx_gen4;
+ state->gcm256_enc = aes_gcm_enc_256_avx_gen4;
+ state->gcm128_dec = aes_gcm_dec_128_avx_gen4;
+ state->gcm192_dec = aes_gcm_dec_192_avx_gen4;
+ state->gcm256_dec = aes_gcm_dec_256_avx_gen4;
+ state->gcm128_init = aes_gcm_init_128_avx_gen4;
+ state->gcm192_init = aes_gcm_init_192_avx_gen4;
+ state->gcm256_init = aes_gcm_init_256_avx_gen4;
+ state->gcm128_enc_update = aes_gcm_enc_128_update_avx_gen4;
+ state->gcm192_enc_update = aes_gcm_enc_192_update_avx_gen4;
+ state->gcm256_enc_update = aes_gcm_enc_256_update_avx_gen4;
+ state->gcm128_dec_update = aes_gcm_dec_128_update_avx_gen4;
+ state->gcm192_dec_update = aes_gcm_dec_192_update_avx_gen4;
+ state->gcm256_dec_update = aes_gcm_dec_256_update_avx_gen4;
+ state->gcm128_enc_finalize = aes_gcm_enc_128_finalize_avx_gen4;
+ state->gcm192_enc_finalize = aes_gcm_enc_192_finalize_avx_gen4;
+ state->gcm256_enc_finalize = aes_gcm_enc_256_finalize_avx_gen4;
+ state->gcm128_dec_finalize = aes_gcm_dec_128_finalize_avx_gen4;
+ state->gcm192_dec_finalize = aes_gcm_dec_192_finalize_avx_gen4;
+ state->gcm256_dec_finalize = aes_gcm_dec_256_finalize_avx_gen4;
+ state->gcm128_precomp = aes_gcm_precomp_128_avx_gen4;
+ state->gcm192_precomp = aes_gcm_precomp_192_avx_gen4;
+ state->gcm256_precomp = aes_gcm_precomp_256_avx_gen4;
+ state->gcm128_pre = aes_gcm_pre_128_avx_gen4;
+ state->gcm192_pre = aes_gcm_pre_192_avx_gen4;
+ state->gcm256_pre = aes_gcm_pre_256_avx_gen4;
+#endif
+}
+
+#include "mb_mgr_code.h"
diff --git a/src/spdk/intel-ipsec-mb/avx2/mb_mgr_hmac_flush_avx2.asm b/src/spdk/intel-ipsec-mb/avx2/mb_mgr_hmac_flush_avx2.asm
new file mode 100644
index 000000000..88fac0c64
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx2/mb_mgr_hmac_flush_avx2.asm
@@ -0,0 +1,315 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%include "include/os.asm"
+%include "job_aes_hmac.asm"
+%include "mb_mgr_datastruct.asm"
+%include "include/reg_sizes.asm"
+;%define DO_DBGPRINT
+%include "include/dbgprint.asm"
+extern sha1_x8_avx2
+
+section .data
+default rel
+
+align 16
+byteswap: ;ddq 0x0c0d0e0f08090a0b0405060700010203
+ dq 0x0405060700010203, 0x0c0d0e0f08090a0b
+x80: ;ddq 0x00000000000000000000000000000080
+ dq 0x0000000000000080, 0x0000000000000000
+x00: ;ddq 0x00000000000000000000000000000000
+ dq 0x0000000000000000, 0x0000000000000000
+len_masks:
+ ;ddq 0x0000000000000000000000000000FFFF
+ dq 0x000000000000FFFF, 0x0000000000000000
+ ;ddq 0x000000000000000000000000FFFF0000
+ dq 0x00000000FFFF0000, 0x0000000000000000
+ ;ddq 0x00000000000000000000FFFF00000000
+ dq 0x0000FFFF00000000, 0x0000000000000000
+ ;ddq 0x0000000000000000FFFF000000000000
+ dq 0xFFFF000000000000, 0x0000000000000000
+ ;ddq 0x000000000000FFFF0000000000000000
+ dq 0x0000000000000000, 0x000000000000FFFF
+ ;ddq 0x00000000FFFF00000000000000000000
+ dq 0x0000000000000000, 0x00000000FFFF0000
+ ;ddq 0x0000FFFF000000000000000000000000
+ dq 0x0000000000000000, 0x0000FFFF00000000
+ ;ddq 0xFFFF0000000000000000000000000000
+ dq 0x0000000000000000, 0xFFFF000000000000
+lane_1: dq 1
+lane_2: dq 2
+lane_3: dq 3
+lane_4: dq 4
+lane_5: dq 5
+lane_6: dq 6
+lane_7: dq 7
+
+section .text
+
+%if 1
+%ifdef LINUX
+%define arg1 rdi
+%define arg2 rsi
+%else
+%define arg1 rcx
+%define arg2 rdx
+%endif
+
+%define state arg1
+%define job arg2
+%define len2 arg2
+
+
+; idx needs to be in rbx, rdi, rbp
+%define idx rbp
+
+%define unused_lanes r9
+%define lane_data r9
+%define tmp2 r9
+
+%define job_rax rax
+%define tmp1 rax
+%define size_offset rax
+%define tmp rax
+%define start_offset rax
+
+%define tmp3 arg1
+
+%define extra_blocks arg2
+%define p arg2
+
+%define tmp4 r8
+
+%endif
+
+; we clobber rbp, called routine clobbers r12-r15
+struc STACK
+_gpr_save: resq 5
+_rsp_save: resq 1
+endstruc
+
+%define APPEND(a,b) a %+ b
+
+; JOB* flush_job_hmac_avx2(MB_MGR_HMAC_SHA_1_OOO *state)
+; arg 1 : rcx : state
+MKGLOBAL(flush_job_hmac_avx2,function,internal)
+flush_job_hmac_avx2:
+
+ mov rax, rsp
+ sub rsp, STACK_size
+ and rsp, -32 ; align stack to 32 byte boundary
+ mov [rsp + _gpr_save + 8*0], rbp
+ mov [rsp + _gpr_save + 8*1], r12
+ mov [rsp + _gpr_save + 8*2], r13
+ mov [rsp + _gpr_save + 8*3], r14
+ mov [rsp + _gpr_save + 8*4], r15
+ mov [rsp + _rsp_save], rax
+
+ mov unused_lanes, [state + _unused_lanes]
+ bt unused_lanes, 32+3
+ jc return_null
+
+ ; find a lane with a non-null job
+ xor idx, idx
+%assign I 1
+%rep 7
+ cmp qword [state + _ldata + (I * _HMAC_SHA1_LANE_DATA_size) + _job_in_lane], 0
+ cmovne idx, [rel APPEND(lane_,I)]
+%assign I (I+1)
+%endrep
+
+copy_lane_data:
+ ; copy valid lane (idx) to empty lanes
+ vmovdqa xmm0, [state + _lens]
+ mov tmp, [state + _args_data_ptr + PTR_SZ*idx]
+
+%assign I 0
+%rep 8
+ cmp qword [state + _ldata + I * _HMAC_SHA1_LANE_DATA_size + _job_in_lane], 0
+ jne APPEND(skip_,I)
+ mov [state + _args_data_ptr + PTR_SZ*I], tmp
+ vpor xmm0, xmm0, [rel len_masks + 16*I]
+APPEND(skip_,I):
+%assign I (I+1)
+%endrep
+
+ vmovdqa [state + _lens], xmm0
+
+ vphminposuw xmm1, xmm0
+ vpextrw DWORD(len2), xmm1, 0 ; min value
+ vpextrw DWORD(idx), xmm1, 1 ; min index (0...7)
+ DBGPRINTL64 "FLUSH min_length", len2
+ DBGPRINTL64 "FLUSH min_length index ", idx
+ cmp len2, 0
+ je len_is_0
+
+ vpbroadcastw xmm1, xmm1
+ DBGPRINTL_XMM "FLUSH lens after shuffle", xmm1
+
+ vpsubw xmm0, xmm0, xmm1
+ vmovdqa [state + _lens], xmm0
+ DBGPRINTL_XMM "FLUSH lens immediately after min subtraction", xmm0
+
+ ; "state" and "args" are the same address, arg1
+ ; len is arg2
+ call sha1_x8_avx2
+ ; state and idx are intact
+
+len_is_0:
+ ; process completed job "idx"
+ imul lane_data, idx, _HMAC_SHA1_LANE_DATA_size
+ lea lane_data, [state + _ldata + lane_data]
+ mov DWORD(extra_blocks), [lane_data + _extra_blocks]
+ cmp extra_blocks, 0
+ jne proc_extra_blocks
+ cmp dword [lane_data + _outer_done], 0
+ jne end_loop
+
+proc_outer:
+ mov dword [lane_data + _outer_done], 1
+ mov DWORD(size_offset), [lane_data + _size_offset]
+ mov qword [lane_data + _extra_block + size_offset], 0
+ mov word [state + _lens + 2*idx], 1
+ lea tmp, [lane_data + _outer_block]
+ mov job, [lane_data + _job_in_lane]
+ mov [state + _args_data_ptr + PTR_SZ*idx], tmp
+
+ vmovd xmm0, [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 0*SHA1_DIGEST_ROW_SIZE]
+ vpinsrd xmm0, xmm0, [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 1*SHA1_DIGEST_ROW_SIZE], 1
+ vpinsrd xmm0, xmm0, [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 2*SHA1_DIGEST_ROW_SIZE], 2
+ vpinsrd xmm0, xmm0, [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 3*SHA1_DIGEST_ROW_SIZE], 3
+ vpshufb xmm0, xmm0, [rel byteswap]
+ mov DWORD(tmp), [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 4*SHA1_DIGEST_ROW_SIZE]
+ bswap DWORD(tmp)
+ vmovdqa [lane_data + _outer_block], xmm0
+ mov [lane_data + _outer_block + 4*4], DWORD(tmp)
+
+ mov tmp, [job + _auth_key_xor_opad]
+ vmovdqu xmm0, [tmp]
+ mov DWORD(tmp), [tmp + 4*4]
+ vmovd [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 0*SHA1_DIGEST_ROW_SIZE], xmm0
+ vpextrd [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 1*SHA1_DIGEST_ROW_SIZE], xmm0, 1
+ vpextrd [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 2*SHA1_DIGEST_ROW_SIZE], xmm0, 2
+ vpextrd [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 3*SHA1_DIGEST_ROW_SIZE], xmm0, 3
+ mov [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 4*SHA1_DIGEST_ROW_SIZE], DWORD(tmp)
+ jmp copy_lane_data
+
+ align 16
+proc_extra_blocks:
+ mov DWORD(start_offset), [lane_data + _start_offset]
+ mov [state + _lens + 2*idx], WORD(extra_blocks)
+ lea tmp, [lane_data + _extra_block + start_offset]
+ mov [state + _args_data_ptr + PTR_SZ*idx], tmp
+ mov dword [lane_data + _extra_blocks], 0
+ jmp copy_lane_data
+
+return_null:
+ xor job_rax, job_rax
+ jmp return
+
+ align 16
+end_loop:
+ mov job_rax, [lane_data + _job_in_lane]
+ mov qword [lane_data + _job_in_lane], 0
+ or dword [job_rax + _status], STS_COMPLETED_HMAC
+ mov unused_lanes, [state + _unused_lanes]
+ shl unused_lanes, 4 ;; a nibble
+ or unused_lanes, idx
+ mov [state + _unused_lanes], unused_lanes
+
+ mov p, [job_rax + _auth_tag_output]
+
+ ; copy 12 bytes
+ mov DWORD(tmp2), [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 0*SHA1_DIGEST_ROW_SIZE]
+ mov DWORD(tmp4), [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 1*SHA1_DIGEST_ROW_SIZE]
+ mov DWORD(r12), [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 2*SHA1_DIGEST_ROW_SIZE]
+ bswap DWORD(tmp2)
+ bswap DWORD(tmp4)
+ bswap DWORD(r12)
+ mov [p + 0*4], DWORD(tmp2)
+ mov [p + 1*4], DWORD(tmp4)
+ mov [p + 2*4], DWORD(r12)
+
+ cmp qword [job_rax + _auth_tag_output_len_in_bytes], 12
+ je clear_ret
+
+ ;; copy remaining 8 bytes to return 20 byte digest
+ mov DWORD(r13), [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 3*SHA1_DIGEST_ROW_SIZE]
+ mov DWORD(r14), [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 4*SHA1_DIGEST_ROW_SIZE]
+ bswap DWORD(r13)
+ bswap DWORD(r14)
+ mov [p + 3*SHA1_DIGEST_WORD_SIZE], DWORD(r13)
+ mov [p + 4*SHA1_DIGEST_WORD_SIZE], DWORD(r14)
+
+clear_ret:
+
+%ifdef SAFE_DATA
+ vpxor ymm0, ymm0
+
+ ;; Clear digest (20B), outer_block (20B) and extra_block (64B)
+ ;; of returned job and NULL jobs
+%assign I 0
+%rep 8
+ cmp qword [state + _ldata + (I*_HMAC_SHA1_LANE_DATA_size) + _job_in_lane], 0
+ jne APPEND(skip_clear_,I)
+
+ ;; Clear digest
+ mov dword [state + _args_digest + SHA1_DIGEST_WORD_SIZE*I + 0*SHA1_DIGEST_ROW_SIZE], 0
+ mov dword [state + _args_digest + SHA1_DIGEST_WORD_SIZE*I + 1*SHA1_DIGEST_ROW_SIZE], 0
+ mov dword [state + _args_digest + SHA1_DIGEST_WORD_SIZE*I + 2*SHA1_DIGEST_ROW_SIZE], 0
+ mov dword [state + _args_digest + SHA1_DIGEST_WORD_SIZE*I + 3*SHA1_DIGEST_ROW_SIZE], 0
+ mov dword [state + _args_digest + SHA1_DIGEST_WORD_SIZE*I + 4*SHA1_DIGEST_ROW_SIZE], 0
+
+ lea lane_data, [state + _ldata + (I*_HMAC_SHA1_LANE_DATA_size)]
+
+ ;; Clear first 64 bytes of extra_block
+ vmovdqa [lane_data + _extra_block], ymm0
+ vmovdqa [lane_data + _extra_block + 32], ymm0
+
+ ;; Clear first 20 bytes of outer_block
+ vmovdqa [lane_data + _outer_block], xmm0
+ mov dword [lane_data + _outer_block + 16], 0
+
+APPEND(skip_clear_,I):
+%assign I (I+1)
+%endrep
+
+%endif ;; SAFE_DATA
+
+return:
+ vzeroupper
+ mov rbp, [rsp + _gpr_save + 8*0]
+ mov r12, [rsp + _gpr_save + 8*1]
+ mov r13, [rsp + _gpr_save + 8*2]
+ mov r14, [rsp + _gpr_save + 8*3]
+ mov r15, [rsp + _gpr_save + 8*4]
+ mov rsp, [rsp + _rsp_save]
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
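The flush routine above handles a partially filled lane set by cloning one valid lane into every empty one: a NULL lane inherits the valid lane's data pointer and gets 0xFFFF OR-ed into its 16-bit length (via the len_masks table), so vphminposuw never selects it as the minimum and sha1_x8_avx2 can always run all eight lanes. A rough C sketch of that fix-up step, assuming the same eight-lane layout (names here are illustrative, not library API):

    #include <stddef.h>
    #include <stdint.h>

    #define NUM_LANES 8

    /* Point empty lanes at a valid lane's data and mask their lengths so the
     * minimum-length search ignores them. */
    static void fixup_empty_lanes(void *job_in_lane[NUM_LANES],
                                  const void *data_ptr[NUM_LANES],
                                  uint16_t lens[NUM_LANES],
                                  unsigned valid_idx)
    {
            unsigned i;

            for (i = 0; i < NUM_LANES; i++) {
                    if (job_in_lane[i] == NULL) {
                            data_ptr[i] = data_ptr[valid_idx];
                            lens[i] = 0xFFFF; /* never the minimum */
                    }
            }
    }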
diff --git a/src/spdk/intel-ipsec-mb/avx2/mb_mgr_hmac_md5_flush_avx2.asm b/src/spdk/intel-ipsec-mb/avx2/mb_mgr_hmac_md5_flush_avx2.asm
new file mode 100644
index 000000000..f123157b7
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx2/mb_mgr_hmac_md5_flush_avx2.asm
@@ -0,0 +1,362 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%include "include/os.asm"
+%include "job_aes_hmac.asm"
+%include "mb_mgr_datastruct.asm"
+%include "include/reg_sizes.asm"
+;%define DO_DBGPRINT
+%include "include/dbgprint.asm"
+extern md5_x8x2_avx2
+
+section .data
+default rel
+align 16
+dupw: ;ddq 0x01000100010001000100010001000100
+ dq 0x0100010001000100, 0x0100010001000100
+x80: ;ddq 0x00000000000000000000000000000080
+ dq 0x0000000000000080, 0x0000000000000000
+x00: ;ddq 0x00000000000000000000000000000000
+ dq 0x0000000000000000, 0x0000000000000000
+len_masks:
+ ;ddq 0x0000000000000000000000000000FFFF
+ dq 0x000000000000FFFF, 0x0000000000000000
+ ;ddq 0x000000000000000000000000FFFF0000
+ dq 0x00000000FFFF0000, 0x0000000000000000
+ ;ddq 0x00000000000000000000FFFF00000000
+ dq 0x0000FFFF00000000, 0x0000000000000000
+ ;ddq 0x0000000000000000FFFF000000000000
+ dq 0xFFFF000000000000, 0x0000000000000000
+ ;ddq 0x000000000000FFFF0000000000000000
+ dq 0x0000000000000000, 0x000000000000FFFF
+ ;ddq 0x00000000FFFF00000000000000000000
+ dq 0x0000000000000000, 0x00000000FFFF0000
+ ;ddq 0x0000FFFF000000000000000000000000
+ dq 0x0000000000000000, 0x0000FFFF00000000
+ ;ddq 0xFFFF0000000000000000000000000000
+ dq 0x0000000000000000, 0xFFFF000000000000
+
+lane_1: dq 1
+lane_2: dq 2
+lane_3: dq 3
+lane_4: dq 4
+lane_5: dq 5
+lane_6: dq 6
+lane_7: dq 7
+lane_8: dq 8
+lane_9: dq 9
+lane_10: dq 10
+lane_11: dq 11
+lane_12: dq 12
+lane_13: dq 13
+lane_14: dq 14
+lane_15: dq 15
+
+section .text
+
+%if 1
+%ifdef LINUX
+%define arg1 rdi
+%define arg2 rsi
+%else
+%define arg1 rcx
+%define arg2 rdx
+%endif
+
+%define state arg1
+%define job arg2
+%define len2 arg2
+
+
+; idx needs to be in rbp
+%define idx rbp
+
+%define unused_lanes rbx
+%define lane_data rbx
+%define tmp2 rbx
+
+%define job_rax rax
+%define tmp1 rax
+%define size_offset rax
+%define tmp rax
+%define start_offset rax
+
+%define tmp3 arg1
+
+%define extra_blocks arg2
+%define p arg2
+
+%define tmp4 r8
+%define tmp5 r9
+%define num_lanes_inuse r12
+%define len_upper r13
+%define idx_upper r14
+%endif
+
+; This routine and/or the called routine clobbers all GPRs
+struc STACK
+_gpr_save: resq 8
+_rsp_save: resq 1
+endstruc
+
+%define APPEND(a,b) a %+ b
+
+; JOB* flush_job_hmac_md5_avx2(MB_MGR_HMAC_MD5_OOO *state)
+; arg 1 : rcx : state
+MKGLOBAL(flush_job_hmac_md5_avx2,function,internal)
+flush_job_hmac_md5_avx2:
+
+ mov rax, rsp
+ sub rsp, STACK_size
+ and rsp, -32
+
+ mov [rsp + _gpr_save + 8*0], rbx
+ mov [rsp + _gpr_save + 8*1], rbp
+ mov [rsp + _gpr_save + 8*2], r12
+ mov [rsp + _gpr_save + 8*3], r13
+ mov [rsp + _gpr_save + 8*4], r14
+ mov [rsp + _gpr_save + 8*5], r15
+%ifndef LINUX
+ mov [rsp + _gpr_save + 8*6], rsi
+ mov [rsp + _gpr_save + 8*7], rdi
+%endif
+ mov [rsp + _rsp_save], rax ; original SP
+
+ DBGPRINTL "---------- enter md5 flush -----------"
+ mov DWORD(num_lanes_inuse), [state + _num_lanes_inuse_md5] ;; empty?
+ cmp num_lanes_inuse, 0
+ jz return_null
+
+ ; find a lane with a non-null job -- flush does not have to be efficient!
+ mov idx, 0
+ %assign I 1
+%rep 15
+ cmp qword [state + _ldata_md5 + I * _HMAC_SHA1_LANE_DATA_size + _job_in_lane], 0
+ cmovne idx, [rel APPEND(lane_,I)]
+%assign I (I+1)
+%endrep
+
+
+copy_lane_data:
+ ; copy good lane (idx) to empty lanes
+ mov tmp, [state + _args_data_ptr_md5 + PTR_SZ*idx]
+ ;; tackle lower 8 lanes
+ vmovdqa xmm0, [state + _lens_md5 + 0*16] ;; lower 8 lengths
+%assign I 0
+%rep 8
+ cmp qword [state + _ldata_md5 + I * _HMAC_SHA1_LANE_DATA_size + _job_in_lane], 0
+ jne APPEND(lower_skip_,I)
+ mov [state + _args_data_ptr_md5 + PTR_SZ*I], tmp
+ vpor xmm0, xmm0, [rel len_masks + 16*I]
+APPEND(lower_skip_,I):
+%assign I (I+1)
+%endrep
+ ;; tackle upper lanes
+ vmovdqa xmm1, [state + _lens_md5 + 1*16] ;; upper 8 lengths
+%assign I 0
+%rep 8
+ cmp qword [state + _ldata_md5 + (8 + I) * _HMAC_SHA1_LANE_DATA_size + _job_in_lane], 0
+ jne APPEND(upper_skip_,I)
+ mov [state + _args_data_ptr_md5 + PTR_SZ*(8+I)], tmp
+ vpor xmm1, xmm1, [rel len_masks + 16*I]
+APPEND(upper_skip_,I):
+%assign I (I+1)
+%endrep
+ jmp start_loop0
+
+ align 32
+start_loop0:
+ ; Find min length
+ vphminposuw xmm2, xmm0
+ vpextrw DWORD(len2), xmm2, 0 ; min value
+ vpextrw DWORD(idx), xmm2, 1 ; min index (0...7)
+
+ vphminposuw xmm3, xmm1
+ vpextrw DWORD(len_upper), xmm3, 0 ; min value
+ vpextrw DWORD(idx_upper), xmm3, 1 ; min index (8...F)
+
+ cmp len2, len_upper
+ jle use_min
+
+min_in_high:
+ vmovdqa xmm2, xmm3
+ mov len2, len_upper
+ mov idx, idx_upper
+	or idx, 0x8 ; to reflect that the index is in lanes 8-F
+use_min:
+ and len2, len2 ; to set flags
+ jz len_is_0
+ DBGPRINTL64 "min_length min_index ", len2, idx
+ DBGPRINTL_XMM "FLUSH md5 lens before sub lower", xmm0
+ vpbroadcastw xmm2, xmm2 ; duplicate words across all lanes
+ vpsubw xmm0, xmm0, xmm2
+ DBGPRINTL_XMM "FLUSH md5 lens after sub lower", xmm0
+ vmovdqa [state + _lens_md5 + 0*16], xmm0
+
+ vpsubw xmm1, xmm1, xmm2
+ DBGPRINTL_XMM "FLUSH md5 lens after sub upper", xmm1
+ vmovdqa [state + _lens_md5 + 1*16], xmm1
+
+ ; "state" and "args" are the same address, arg1
+ ; len is arg2
+ call md5_x8x2_avx2
+ ; state and idx are intact
+
+len_is_0:
+ ; process completed job "idx"
+ imul lane_data, idx, _HMAC_SHA1_LANE_DATA_size
+ lea lane_data, [state + _ldata_md5 + lane_data]
+ mov DWORD(extra_blocks), [lane_data + _extra_blocks]
+ cmp extra_blocks, 0
+ jne proc_extra_blocks
+ cmp dword [lane_data + _outer_done], 0
+ jne end_loop
+
+proc_outer:
+ mov dword [lane_data + _outer_done], 1
+ mov DWORD(size_offset), [lane_data + _size_offset]
+ mov qword [lane_data + _extra_block + size_offset], 0
+ mov word [state + _lens_md5 + 2*idx], 1
+ lea tmp, [lane_data + _outer_block]
+ mov job, [lane_data + _job_in_lane]
+ mov [state + _args_data_ptr_md5 + PTR_SZ*idx], tmp
+
+ vmovd xmm0, [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 0*MD5_DIGEST_ROW_SIZE]
+ vpinsrd xmm0, [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 1*MD5_DIGEST_ROW_SIZE], 1
+ vpinsrd xmm0, [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 2*MD5_DIGEST_ROW_SIZE], 2
+ vpinsrd xmm0, [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 3*MD5_DIGEST_ROW_SIZE], 3
+ vmovdqa [lane_data + _outer_block], xmm0
+
+ mov tmp, [job + _auth_key_xor_opad]
+ vmovdqu xmm0, [tmp]
+ vmovd [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 0*MD5_DIGEST_ROW_SIZE], xmm0
+ vpextrd [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 1*MD5_DIGEST_ROW_SIZE], xmm0, 1
+ vpextrd [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 2*MD5_DIGEST_ROW_SIZE], xmm0, 2
+ vpextrd [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 3*MD5_DIGEST_ROW_SIZE], xmm0, 3
+ jmp copy_lane_data
+
+ align 16
+proc_extra_blocks:
+ mov DWORD(start_offset), [lane_data + _start_offset]
+ mov [state + _lens_md5 + 2*idx], WORD(extra_blocks)
+ lea tmp, [lane_data + _extra_block + start_offset]
+ mov [state + _args_data_ptr_md5 + PTR_SZ*idx], tmp
+ mov dword [lane_data + _extra_blocks], 0
+ jmp copy_lane_data
+
+return_null:
+ xor job_rax, job_rax
+ jmp return
+
+ align 16
+end_loop:
+ mov job_rax, [lane_data + _job_in_lane]
+ mov qword [lane_data + _job_in_lane], 0
+ or dword [job_rax + _status], STS_COMPLETED_HMAC
+ mov unused_lanes, [state + _unused_lanes_md5]
+ shl unused_lanes, 4
+ or unused_lanes, idx
+ mov [state + _unused_lanes_md5], unused_lanes
+
+ mov DWORD(num_lanes_inuse), [state + _num_lanes_inuse_md5] ;; update lanes inuse
+ sub num_lanes_inuse, 1
+ mov [state + _num_lanes_inuse_md5], DWORD(num_lanes_inuse)
+
+ mov p, [job_rax + _auth_tag_output]
+
+ ; copy 12 bytes
+ mov DWORD(tmp2), [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 0*MD5_DIGEST_ROW_SIZE]
+ mov DWORD(tmp4), [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 1*MD5_DIGEST_ROW_SIZE]
+ mov DWORD(tmp5), [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 2*MD5_DIGEST_ROW_SIZE]
+; bswap DWORD(tmp2)
+; bswap DWORD(tmp4)
+; bswap DWORD(tmp5) (not needed: MD5 digest words are little-endian)
+ mov [p + 0*4], DWORD(tmp2)
+ mov [p + 1*4], DWORD(tmp4)
+ mov [p + 2*4], DWORD(tmp5)
+
+ cmp DWORD [job_rax + _auth_tag_output_len_in_bytes], 12
+ je clear_ret
+
+	; copy remaining 4 bytes to return 16 byte digest
+ mov DWORD(tmp5), [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 3*MD5_DIGEST_ROW_SIZE]
+ mov [p + 3*4], DWORD(tmp5)
+
+clear_ret:
+
+%ifdef SAFE_DATA
+ vpxor ymm0, ymm0
+
+ ;; Clear digest (16B), outer_block (16B) and extra_block (64B)
+ ;; of returned job and NULL jobs
+%assign I 0
+%rep 16
+ cmp qword [state + _ldata_md5 + (I*_HMAC_SHA1_LANE_DATA_size) + _job_in_lane], 0
+ jne APPEND(skip_clear_,I)
+
+ ;; Clear digest (16 bytes)
+%assign J 0
+%rep 4
+ mov dword [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*I + J*MD5_DIGEST_ROW_SIZE], 0
+%assign J (J+1)
+%endrep
+
+ lea lane_data, [state + _ldata_md5 + (I*_HMAC_SHA1_LANE_DATA_size)]
+ ;; Clear first 64 bytes of extra_block
+ vmovdqa [lane_data + _extra_block], ymm0
+ vmovdqa [lane_data + _extra_block + 32], ymm0
+
+ ;; Clear first 16 bytes of outer_block
+ vmovdqa [lane_data + _outer_block], xmm0
+
+APPEND(skip_clear_,I):
+%assign I (I+1)
+%endrep
+
+%endif ;; SAFE_DATA
+
+return:
+ DBGPRINTL "---------- exit md5 flush -----------"
+ vzeroupper
+
+ mov rbx, [rsp + _gpr_save + 8*0]
+ mov rbp, [rsp + _gpr_save + 8*1]
+ mov r12, [rsp + _gpr_save + 8*2]
+ mov r13, [rsp + _gpr_save + 8*3]
+ mov r14, [rsp + _gpr_save + 8*4]
+ mov r15, [rsp + _gpr_save + 8*5]
+%ifndef LINUX
+ mov rsi, [rsp + _gpr_save + 8*6]
+ mov rdi, [rsp + _gpr_save + 8*7]
+%endif
+ mov rsp, [rsp + _rsp_save] ; original SP
+
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
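MD5 runs sixteen lanes, so the minimum-length search above works on two eight-lane halves: vphminposuw yields (value, index 0-7) for each half, the smaller value wins, and 0x8 is OR-ed into the index when the winner comes from the upper half (ties go to the lower half, matching the jle). The same selection in C, as a sketch only:

    #include <stdint.h>

    /* Pick the lane with the minimum length out of 16, via two 8-lane halves. */
    static unsigned min_lane16(const uint16_t lens[16], uint16_t *min_len)
    {
            unsigned i, idx_lo = 0, idx_hi = 8;

            for (i = 1; i < 8; i++)
                    if (lens[i] < lens[idx_lo])
                            idx_lo = i;
            for (i = 9; i < 16; i++)
                    if (lens[i] < lens[idx_hi])
                            idx_hi = i;

            if (lens[idx_lo] <= lens[idx_hi]) {
                    *min_len = lens[idx_lo];
                    return idx_lo;
            }
            *min_len = lens[idx_hi];
            return idx_hi; /* already in 8-F; the asm gets 0-7 back and ORs in 0x8 */
    }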
diff --git a/src/spdk/intel-ipsec-mb/avx2/mb_mgr_hmac_md5_submit_avx2.asm b/src/spdk/intel-ipsec-mb/avx2/mb_mgr_hmac_md5_submit_avx2.asm
new file mode 100644
index 000000000..661ae4eba
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx2/mb_mgr_hmac_md5_submit_avx2.asm
@@ -0,0 +1,373 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%include "include/os.asm"
+%include "job_aes_hmac.asm"
+%include "mb_mgr_datastruct.asm"
+%include "include/memcpy.asm"
+%include "include/reg_sizes.asm"
+%include "include/const.inc"
+
+;%define DO_DBGPRINT
+%include "include/dbgprint.asm"
+extern md5_x8x2_avx2
+
+%if 1
+%ifdef LINUX
+%define arg1 rdi
+%define arg2 rsi
+%define reg3 rcx
+%define reg4 rdx
+%else
+%define arg1 rcx
+%define arg2 rdx
+%define reg3 rdi
+%define reg4 rsi
+%endif
+
+%define state arg1
+%define job arg2
+%define len2 arg2
+
+
+; idx needs to be in rbp
+%define last_len rbp
+%define idx rbp
+
+%define p r11
+%define start_offset r11
+
+%define unused_lanes rbx
+%define tmp4 rbx
+
+%define job_rax rax
+%define len rax
+
+%define size_offset reg3
+%define tmp2 reg3
+
+%define lane reg4
+%define tmp3 reg4
+
+%define extra_blocks r8
+
+%define tmp r9
+%define p2 r9
+
+%define lane_data r10
+%define num_lanes_inuse r12
+%define len_upper r13
+%define idx_upper r14
+
+%endif
+
+; This routine and/or the called routine clobbers all GPRs
+struc STACK
+_gpr_save: resq 8
+_rsp_save: resq 1
+endstruc
+
+section .text
+
+; JOB* submit_job_hmac_md5_avx2(MB_MGR_HMAC_MD5_OOO *state, JOB_AES_HMAC *job)
+; arg 1 : rcx : state
+; arg 2 : rdx : job
+MKGLOBAL(submit_job_hmac_md5_avx2,function,internal)
+submit_job_hmac_md5_avx2:
+
+ mov rax, rsp
+ sub rsp, STACK_size
+ and rsp, -32
+
+ mov [rsp + _gpr_save + 8*0], rbx
+ mov [rsp + _gpr_save + 8*1], rbp
+ mov [rsp + _gpr_save + 8*2], r12
+ mov [rsp + _gpr_save + 8*3], r13
+ mov [rsp + _gpr_save + 8*4], r14
+ mov [rsp + _gpr_save + 8*5], r15
+%ifndef LINUX
+ mov [rsp + _gpr_save + 8*6], rsi
+ mov [rsp + _gpr_save + 8*7], rdi
+%endif
+ mov [rsp + _rsp_save], rax ; original SP
+
+ DBGPRINTL "---------- enter md5 submit -----------"
+ mov unused_lanes, [state + _unused_lanes_md5]
+ mov DWORD(num_lanes_inuse), [state + _num_lanes_inuse_md5]
+ mov lane, unused_lanes
+
+ and lane, 0xF
+ shr unused_lanes, 4
+ mov [state + _unused_lanes_md5], unused_lanes
+ add num_lanes_inuse, 1
+ mov [state + _num_lanes_inuse_md5], DWORD(num_lanes_inuse)
+ DBGPRINTL64 "SUBMIT ********** num_lanes_in_use", num_lanes_inuse
+ imul lane_data, lane, _HMAC_SHA1_LANE_DATA_size
+ lea lane_data, [state + _ldata_md5 + lane_data]
+ mov len, [job + _msg_len_to_hash_in_bytes]
+ mov tmp, len
+ shr tmp, 6 ; divide by 64, len in terms of blocks
+ DBGPRINTL64 "SUBMIT job len, num_blks ", len, tmp
+ mov [lane_data + _job_in_lane], job
+ mov dword [lane_data + _outer_done], 0
+
+ VPINSRW_M256 state + _lens_md5, xmm0, xmm1, last_len, p, lane, tmp, scale_x16
+
+ mov last_len, len
+ and last_len, 63
+ lea extra_blocks, [last_len + 9 + 63]
+ shr extra_blocks, 6
+ mov [lane_data + _extra_blocks], DWORD(extra_blocks)
+
+ mov p, [job + _src]
+ add p, [job + _hash_start_src_offset_in_bytes]
+ mov [state + _args_data_ptr_md5 + PTR_SZ*lane], p
+
+ cmp len, 64
+ jb copy_lt64
+
+fast_copy:
+ add p, len
+ vmovdqu ymm0, [p - 64 + 0 * 32]
+ vmovdqu ymm1, [p - 64 + 1 * 32]
+ vmovdqu [lane_data + _extra_block + 0*32], ymm0
+ vmovdqu [lane_data + _extra_block + 1*32], ymm1
+end_fast_copy:
+
+ mov size_offset, extra_blocks
+ shl size_offset, 6
+ sub size_offset, last_len
+ add size_offset, 64-8
+ mov [lane_data + _size_offset], DWORD(size_offset)
+ mov start_offset, 64
+ sub start_offset, last_len
+ mov [lane_data + _start_offset], DWORD(start_offset)
+
+ lea tmp, [8*64 + 8*len]
+; bswap tmp (not needed: MD5 stores the length field little-endian)
+ mov [lane_data + _extra_block + size_offset], tmp
+
+ mov tmp, [job + _auth_key_xor_ipad]
+ vmovdqu xmm0, [tmp]
+ vmovd [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*lane + 0*MD5_DIGEST_ROW_SIZE], xmm0
+ vpextrd [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*lane + 1*MD5_DIGEST_ROW_SIZE], xmm0, 1
+ vpextrd [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*lane + 2*MD5_DIGEST_ROW_SIZE], xmm0, 2
+ vpextrd [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*lane + 3*MD5_DIGEST_ROW_SIZE], xmm0, 3
+
+ test len, ~63
+ jnz ge64_bytes
+
+lt64_bytes:
+ VPINSRW_M256 state + _lens_md5, xmm0, xmm1, tmp, len2, lane, extra_blocks, scale_x16
+
+ lea tmp, [lane_data + _extra_block + start_offset]
+ mov [state + _args_data_ptr_md5 + PTR_SZ*lane], tmp
+ mov dword [lane_data + _extra_blocks], 0
+
+ge64_bytes:
+ DBGPRINTL64 "SUBMIT md5 all lanes loaded? ********** num_lanes_in_use", num_lanes_inuse
+ cmp num_lanes_inuse, 0x10 ; all 16 lanes loaded?
+ jne return_null
+ jmp start_loop
+
+ align 16
+start_loop:
+ ; Find min length
+ vmovdqa xmm0, [state + _lens_md5]
+ vphminposuw xmm1, xmm0
+ vpextrw DWORD(len2), xmm1, 0 ; min value
+ vpextrw DWORD(idx), xmm1, 1 ; min index (0...7)
+
+ vmovdqa xmm2, [state + _lens_md5 + 1*16] ;; second 8 lengths
+ vphminposuw xmm3, xmm2
+ vpextrw DWORD(len_upper), xmm3, 0 ; min value
+ vpextrw DWORD(idx_upper), xmm3, 1 ; min index (8...F)
+
+ cmp len2, len_upper
+ jle use_min
+
+min_in_high:
+
+ vmovdqa xmm1, xmm3
+ mov len2, len_upper
+	mov idx, idx_upper ;; retrieved idx is in range 0-7
+	or idx, 0x8 ;; to reflect that the index is in lanes 8-F
+
+use_min:
+
+ cmp len2, 0
+ je len_is_0
+ DBGPRINTL64 "min_length min_index ", len2, idx
+ vpbroadcastw xmm1, xmm1 ; duplicate words across all lanes
+ vpsubw xmm0, xmm0, xmm1
+ vmovdqa [state + _lens_md5 + 0*16], xmm0
+ DBGPRINTL_XMM "SUBMIT lens after sub lower", xmm0
+
+ vpsubw xmm2, xmm2, xmm1
+ vmovdqa [state + _lens_md5 + 1*16], xmm2
+ DBGPRINTL_XMM "SUBMIT lens after sub upper", xmm2
+
+ ; "state" and "args" are the same address, arg1
+ ; len is arg2
+ call md5_x8x2_avx2
+ ; state and idx are intact
+
+len_is_0:
+ ; process completed job "idx"
+ imul lane_data, idx, _HMAC_SHA1_LANE_DATA_size
+ lea lane_data, [state + _ldata_md5 + lane_data]
+ mov DWORD(extra_blocks), [lane_data + _extra_blocks]
+ cmp extra_blocks, 0
+ jne proc_extra_blocks
+ cmp dword [lane_data + _outer_done], 0
+ jne end_loop
+
+proc_outer:
+ mov dword [lane_data + _outer_done], 1
+ mov DWORD(size_offset), [lane_data + _size_offset]
+ mov qword [lane_data + _extra_block + size_offset], 0
+
+ VPINSRW_M256 state + _lens_md5, xmm0, xmm1, tmp, job, idx, 1, scale_x16
+
+ lea tmp, [lane_data + _outer_block]
+ mov job, [lane_data + _job_in_lane]
+ mov [state + _args_data_ptr_md5 + PTR_SZ*idx], tmp
+
+ vmovd xmm0, [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 0*MD5_DIGEST_ROW_SIZE]
+ vpinsrd xmm0, [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 1*MD5_DIGEST_ROW_SIZE], 1
+ vpinsrd xmm0, [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 2*MD5_DIGEST_ROW_SIZE], 2
+ vpinsrd xmm0, [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 3*MD5_DIGEST_ROW_SIZE], 3
+ vmovdqa [lane_data + _outer_block], xmm0
+
+ mov tmp, [job + _auth_key_xor_opad]
+ vmovdqu xmm0, [tmp]
+ vmovd [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 0*MD5_DIGEST_ROW_SIZE], xmm0
+ vpextrd [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 1*MD5_DIGEST_ROW_SIZE], xmm0, 1
+ vpextrd [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 2*MD5_DIGEST_ROW_SIZE], xmm0, 2
+ vpextrd [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 3*MD5_DIGEST_ROW_SIZE], xmm0, 3
+ jmp start_loop
+
+ align 16
+proc_extra_blocks:
+ mov DWORD(start_offset), [lane_data + _start_offset]
+
+ VPINSRW_M256 state + _lens_md5, xmm0, xmm1, tmp, len2, idx, extra_blocks, scale_x16
+
+ lea tmp, [lane_data + _extra_block + start_offset]
+ mov [state + _args_data_ptr_md5 + PTR_SZ*idx], tmp
+ mov dword [lane_data + _extra_blocks], 0
+ jmp start_loop
+
+ align 16
+
+copy_lt64:
+	;; less than one full message block of data
+	;; source: start of the message
+	;; destination: end of extra_block, i.e. len bytes before the pre-populated 0x80 byte
+	;; the copy clobbers unused_lanes (tmp4 aliases rbx), so it is reloaded afterwards
+ lea p2, [lane_data + _extra_block + 64]
+ sub p2, len
+ memcpy_avx2_64_1 p2, p, len, tmp4, tmp2, ymm0, ymm1
+ mov unused_lanes, [state + _unused_lanes_md5]
+ jmp end_fast_copy
+
+return_null:
+ xor job_rax, job_rax
+ jmp return
+
+ align 16
+end_loop:
+ mov job_rax, [lane_data + _job_in_lane]
+ mov unused_lanes, [state + _unused_lanes_md5]
+ mov qword [lane_data + _job_in_lane], 0
+ or dword [job_rax + _status], STS_COMPLETED_HMAC
+ shl unused_lanes, 4
+ or unused_lanes, idx
+ mov [state + _unused_lanes_md5], unused_lanes
+
+ mov DWORD(num_lanes_inuse), [state + _num_lanes_inuse_md5]
+ sub num_lanes_inuse, 1
+ mov [state + _num_lanes_inuse_md5], DWORD(num_lanes_inuse)
+
+ mov p, [job_rax + _auth_tag_output]
+
+ ; copy 12 bytes
+ mov DWORD(tmp), [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 0*MD5_DIGEST_ROW_SIZE]
+ mov DWORD(tmp2), [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 1*MD5_DIGEST_ROW_SIZE]
+ mov DWORD(tmp3), [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 2*MD5_DIGEST_ROW_SIZE]
+ mov [p + 0*4], DWORD(tmp)
+ mov [p + 1*4], DWORD(tmp2)
+ mov [p + 2*4], DWORD(tmp3)
+
+ cmp DWORD [job_rax + _auth_tag_output_len_in_bytes], 12
+ je clear_ret
+
+	; copy remaining 4 bytes to return 16 byte digest
+ mov DWORD(tmp3), [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 3*MD5_DIGEST_ROW_SIZE]
+ mov [p + 3*4], DWORD(tmp3)
+
+clear_ret:
+
+%ifdef SAFE_DATA
+ ;; Clear digest (16B), outer_block (16B) and extra_block (64B) of returned job
+ mov dword [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 0*MD5_DIGEST_ROW_SIZE], 0
+ mov dword [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 1*MD5_DIGEST_ROW_SIZE], 0
+ mov dword [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 2*MD5_DIGEST_ROW_SIZE], 0
+ mov dword [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 3*MD5_DIGEST_ROW_SIZE], 0
+
+ vpxor ymm0, ymm0
+ imul lane_data, idx, _HMAC_SHA1_LANE_DATA_size
+ lea lane_data, [state + _ldata_md5 + lane_data]
+ ;; Clear first 64 bytes of extra_block
+ vmovdqa [lane_data + _extra_block], ymm0
+ vmovdqa [lane_data + _extra_block + 32], ymm0
+
+ ;; Clear first 16 bytes of outer_block
+ vmovdqa [lane_data + _outer_block], xmm0
+%endif
+
+return:
+ DBGPRINTL "---------- exit md5 submit -----------"
+
+ vzeroupper
+ mov rbx, [rsp + _gpr_save + 8*0]
+ mov rbp, [rsp + _gpr_save + 8*1]
+ mov r12, [rsp + _gpr_save + 8*2]
+ mov r13, [rsp + _gpr_save + 8*3]
+ mov r14, [rsp + _gpr_save + 8*4]
+ mov r15, [rsp + _gpr_save + 8*5]
+%ifndef LINUX
+ mov rsi, [rsp + _gpr_save + 8*6]
+ mov rdi, [rsp + _gpr_save + 8*7]
+%endif
+ mov rsp, [rsp + _rsp_save] ; original SP
+
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
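The submit path above derives three values from the message length that drive the HMAC tail handling: extra_blocks, the number of blocks still owed once the full 64-byte blocks are done (the partial tail plus the 0x80 byte plus the 8-byte length); start_offset, where the copied tail begins inside the pre-padded extra_block buffer; and size_offset, where the length field is written. The stored bit-length is 8*(64+len) because the ipad block counts toward the inner hash. A small C sketch of the same arithmetic (struct and field names are illustrative):

    #include <stdint.h>

    struct pad_info {
            uint64_t extra_blocks; /* padded blocks left after the full ones       */
            uint64_t start_offset; /* where the message tail starts in extra_block */
            uint64_t size_offset;  /* where the 8-byte length field is written     */
    };

    static struct pad_info compute_padding(uint64_t len)
    {
            struct pad_info p;
            uint64_t last_len = len & 63; /* bytes in the final partial block */

            p.extra_blocks = (last_len + 9 + 63) >> 6; /* tail + 0x80 + length */
            p.start_offset = 64 - last_len;
            p.size_offset  = p.start_offset + p.extra_blocks * 64 - 8;
            return p;
    }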
diff --git a/src/spdk/intel-ipsec-mb/avx2/mb_mgr_hmac_sha_224_flush_avx2.asm b/src/spdk/intel-ipsec-mb/avx2/mb_mgr_hmac_sha_224_flush_avx2.asm
new file mode 100644
index 000000000..b5d0a1b6b
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx2/mb_mgr_hmac_sha_224_flush_avx2.asm
@@ -0,0 +1,31 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%define FUNC flush_job_hmac_sha_224_avx2
+%define SHA224
+
+%include "avx2/mb_mgr_hmac_sha_256_flush_avx2.asm"
diff --git a/src/spdk/intel-ipsec-mb/avx2/mb_mgr_hmac_sha_224_submit_avx2.asm b/src/spdk/intel-ipsec-mb/avx2/mb_mgr_hmac_sha_224_submit_avx2.asm
new file mode 100644
index 000000000..e4b254b95
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx2/mb_mgr_hmac_sha_224_submit_avx2.asm
@@ -0,0 +1,31 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%define FUNC submit_job_hmac_sha_224_avx2
+%define SHA224
+
+%include "avx2/mb_mgr_hmac_sha_256_submit_avx2.asm"
diff --git a/src/spdk/intel-ipsec-mb/avx2/mb_mgr_hmac_sha_256_flush_avx2.asm b/src/spdk/intel-ipsec-mb/avx2/mb_mgr_hmac_sha_256_flush_avx2.asm
new file mode 100644
index 000000000..f41c9329b
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx2/mb_mgr_hmac_sha_256_flush_avx2.asm
@@ -0,0 +1,379 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%include "include/os.asm"
+%include "job_aes_hmac.asm"
+%include "mb_mgr_datastruct.asm"
+%include "include/reg_sizes.asm"
+
+extern sha256_oct_avx2
+
+section .data
+default rel
+align 16
+byteswap: ;ddq 0x0c0d0e0f08090a0b0405060700010203
+ dq 0x0405060700010203, 0x0c0d0e0f08090a0b
+len_masks:
+ ;ddq 0x0000000000000000000000000000FFFF
+ dq 0x000000000000FFFF, 0x0000000000000000
+ ;ddq 0x000000000000000000000000FFFF0000
+ dq 0x00000000FFFF0000, 0x0000000000000000
+ ;ddq 0x00000000000000000000FFFF00000000
+ dq 0x0000FFFF00000000, 0x0000000000000000
+ ;ddq 0x0000000000000000FFFF000000000000
+ dq 0xFFFF000000000000, 0x0000000000000000
+ ;ddq 0x000000000000FFFF0000000000000000
+ dq 0x0000000000000000, 0x000000000000FFFF
+ ;ddq 0x00000000FFFF00000000000000000000
+ dq 0x0000000000000000, 0x00000000FFFF0000
+ ;ddq 0x0000FFFF000000000000000000000000
+ dq 0x0000000000000000, 0x0000FFFF00000000
+ ;ddq 0xFFFF0000000000000000000000000000
+ dq 0x0000000000000000, 0xFFFF000000000000
+lane_1: dq 1
+lane_2: dq 2
+lane_3: dq 3
+lane_4: dq 4
+lane_5: dq 5
+lane_6: dq 6
+lane_7: dq 7
+
+section .text
+
+%ifndef FUNC
+%define FUNC flush_job_hmac_sha_256_avx2
+%endif
+
+%if 1
+%ifdef LINUX
+%define arg1 rdi
+%define arg2 rsi
+%define reg3 rdx
+%else
+%define arg1 rcx
+%define arg2 rdx
+%define reg3 rsi
+%endif
+
+%define state arg1
+%define job arg2
+%define len2 arg2
+
+
+; idx needs to be in rbp, r15
+%define idx rbp
+
+%define unused_lanes r10
+%define tmp5 r10
+
+%define lane_data rbx
+%define tmp2 rbx
+
+%define job_rax rax
+%define tmp1 rax
+%define size_offset rax
+%define start_offset rax
+
+%define tmp3 arg1
+
+%define extra_blocks arg2
+%define p arg2
+
+%define tmp4 reg3
+%define tmp r9
+%endif
+
+; we clobber rsi, rbp; called routine also clobbers rbx, rdi, r12, r13, r14
+struc STACK
+_gpr_save: resq 7
+_rsp_save: resq 1
+endstruc
+
+%define APPEND(a,b) a %+ b
+
+; JOB* FUNC(MB_MGR_HMAC_SHA_256_OOO *state)
+; arg 1 : state
+MKGLOBAL(FUNC,function,internal)
+FUNC:
+
+ mov rax, rsp
+ sub rsp, STACK_size
+ and rsp, -32
+ mov [rsp + _gpr_save + 8*0], rbx
+ mov [rsp + _gpr_save + 8*1], rbp
+ mov [rsp + _gpr_save + 8*2], r12
+ mov [rsp + _gpr_save + 8*3], r13
+ mov [rsp + _gpr_save + 8*4], r14
+%ifndef LINUX
+ mov [rsp + _gpr_save + 8*5], rsi
+ mov [rsp + _gpr_save + 8*6], rdi
+%endif
+ mov [rsp + _rsp_save], rax ; original SP
+
+ ; if bit (32+3) is set, then all lanes are empty
+ mov unused_lanes, [state + _unused_lanes_sha256]
+ bt unused_lanes, 32+3
+ jc return_null
+
+ ; find a lane with a non-null job
+ xor idx, idx
+
+%assign I 1
+%rep 7
+ cmp qword [state + _ldata_sha256 + (I * _HMAC_SHA1_LANE_DATA_size) + _job_in_lane], 0
+ cmovne idx, [rel APPEND(lane_,I)]
+%assign I (I+1)
+%endrep
+
+copy_lane_data:
+ ; copy idx to empty lanes
+ vmovdqa xmm0, [state + _lens_sha256]
+ mov tmp, [state + _args_data_ptr_sha256 + 8*idx]
+
+%assign I 0
+%rep 8
+ cmp qword [state + _ldata_sha256 + I * _HMAC_SHA1_LANE_DATA_size + _job_in_lane], 0
+ jne APPEND(skip_,I)
+ mov [state + _args_data_ptr_sha256 + 8*I], tmp
+ vpor xmm0, xmm0, [rel len_masks + 16*I]
+APPEND(skip_,I):
+%assign I (I+1)
+%endrep
+
+ vmovdqa [state + _lens_sha256 ], xmm0
+
+ vphminposuw xmm1, xmm0
+ vpextrw DWORD(len2), xmm1, 0 ; min value
+ vpextrw DWORD(idx), xmm1, 1 ; min index (0...7)
+ cmp len2, 0
+ je len_is_0
+
+ vpbroadcastw xmm1, xmm1 ; duplicate words across all lanes
+ vpsubw xmm0, xmm0, xmm1
+ vmovdqa [state + _lens_sha256], xmm0
+
+ ; "state" and "args" are the same address, arg1
+ ; len is arg2
+ call sha256_oct_avx2
+ ; state and idx are intact
+
+len_is_0:
+ ; process completed job "idx"
+ imul lane_data, idx, _HMAC_SHA1_LANE_DATA_size
+ lea lane_data, [state + _ldata_sha256 + lane_data]
+ mov DWORD(extra_blocks), [lane_data + _extra_blocks]
+ cmp extra_blocks, 0
+ jne proc_extra_blocks
+ cmp dword [lane_data + _outer_done], 0
+ jne end_loop
+
+proc_outer:
+ mov dword [lane_data + _outer_done], 1
+ mov DWORD(size_offset), [lane_data + _size_offset]
+ mov qword [lane_data + _extra_block + size_offset], 0
+ mov word [state + _lens_sha256 + 2*idx], 1
+ lea tmp, [lane_data + _outer_block]
+ mov job, [lane_data + _job_in_lane]
+ mov [state + _args_data_ptr_sha256 + 8*idx], tmp
+
+ vmovd xmm0, [state + _args_digest_sha256 + 4*idx + 0*SHA256_DIGEST_ROW_SIZE]
+ vpinsrd xmm0, xmm0, [state + _args_digest_sha256 + 4*idx + 1*SHA256_DIGEST_ROW_SIZE], 1
+ vpinsrd xmm0, xmm0, [state + _args_digest_sha256 + 4*idx + 2*SHA256_DIGEST_ROW_SIZE], 2
+ vpinsrd xmm0, xmm0, [state + _args_digest_sha256 + 4*idx + 3*SHA256_DIGEST_ROW_SIZE], 3
+ vpshufb xmm0, xmm0, [rel byteswap]
+ vmovd xmm1, [state + _args_digest_sha256 + 4*idx + 4*SHA256_DIGEST_ROW_SIZE]
+ vpinsrd xmm1, xmm1, [state + _args_digest_sha256 + 4*idx + 5*SHA256_DIGEST_ROW_SIZE], 1
+ vpinsrd xmm1, xmm1, [state + _args_digest_sha256 + 4*idx + 6*SHA256_DIGEST_ROW_SIZE], 2
+%ifndef SHA224
+ vpinsrd xmm1, xmm1, [state + _args_digest_sha256 + 4*idx + 7*SHA256_DIGEST_ROW_SIZE], 3
+%endif
+ vpshufb xmm1, xmm1, [rel byteswap]
+
+ vmovdqa [lane_data + _outer_block], xmm0
+ vmovdqa [lane_data + _outer_block + 4*4], xmm1
+%ifdef SHA224
+ mov dword [lane_data + _outer_block + 7*4], 0x80
+%endif
+
+ mov tmp, [job + _auth_key_xor_opad]
+ vmovdqu xmm0, [tmp]
+ vmovdqu xmm1, [tmp + 4*4]
+ vmovd [state + _args_digest_sha256 + 4*idx + 0*SHA256_DIGEST_ROW_SIZE], xmm0
+ vpextrd [state + _args_digest_sha256 + 4*idx + 1*SHA256_DIGEST_ROW_SIZE], xmm0, 1
+ vpextrd [state + _args_digest_sha256 + 4*idx + 2*SHA256_DIGEST_ROW_SIZE], xmm0, 2
+ vpextrd [state + _args_digest_sha256 + 4*idx + 3*SHA256_DIGEST_ROW_SIZE], xmm0, 3
+ vmovd [state + _args_digest_sha256 + 4*idx + 4*SHA256_DIGEST_ROW_SIZE], xmm1
+ vpextrd [state + _args_digest_sha256 + 4*idx + 5*SHA256_DIGEST_ROW_SIZE], xmm1, 1
+ vpextrd [state + _args_digest_sha256 + 4*idx + 6*SHA256_DIGEST_ROW_SIZE], xmm1, 2
+ vpextrd [state + _args_digest_sha256 + 4*idx + 7*SHA256_DIGEST_ROW_SIZE], xmm1, 3
+ jmp copy_lane_data
+
+ align 16
+proc_extra_blocks:
+ mov DWORD(start_offset), [lane_data + _start_offset]
+ mov [state + _lens_sha256 + 2*idx], WORD(extra_blocks)
+ lea tmp, [lane_data + _extra_block + start_offset]
+ mov [state + _args_data_ptr_sha256 + 8*idx], tmp
+ mov dword [lane_data + _extra_blocks], 0
+ jmp copy_lane_data
+
+return_null:
+ xor job_rax, job_rax
+ jmp return
+
+ align 16
+end_loop:
+ mov job_rax, [lane_data + _job_in_lane]
+ mov qword [lane_data + _job_in_lane], 0
+ or dword [job_rax + _status], STS_COMPLETED_HMAC
+ mov unused_lanes, [state + _unused_lanes_sha256]
+ shl unused_lanes, 4
+ or unused_lanes, idx
+ mov [state + _unused_lanes_sha256], unused_lanes
+
+ mov p, [job_rax + _auth_tag_output]
+
+%ifdef SHA224
+ cmp qword [job_rax + _auth_tag_output_len_in_bytes], 14
+ jne copy_full_digest
+%else
+ cmp qword [job_rax + _auth_tag_output_len_in_bytes], 16
+ jne copy_full_digest
+%endif
+	;; copy 14 bytes for SHA-224 / 16 bytes for SHA-256
+ mov DWORD(tmp), [state + _args_digest_sha256 + 4*idx + 0*SHA256_DIGEST_ROW_SIZE]
+ mov DWORD(tmp2), [state + _args_digest_sha256 + 4*idx + 1*SHA256_DIGEST_ROW_SIZE]
+ mov DWORD(tmp4), [state + _args_digest_sha256 + 4*idx + 2*SHA256_DIGEST_ROW_SIZE]
+ mov DWORD(tmp5), [state + _args_digest_sha256 + 4*idx + 3*SHA256_DIGEST_ROW_SIZE]
+ bswap DWORD(tmp)
+ bswap DWORD(tmp2)
+ bswap DWORD(tmp4)
+ bswap DWORD(tmp5)
+ mov [p + 0*4], DWORD(tmp)
+ mov [p + 1*4], DWORD(tmp2)
+ mov [p + 2*4], DWORD(tmp4)
+%ifdef SHA224
+ mov [p + 3*4], WORD(tmp5)
+%else
+ mov [p + 3*4], DWORD(tmp5)
+%endif
+ jmp clear_ret
+
+copy_full_digest:
+ mov DWORD(tmp), [state + _args_digest_sha256 + 4*idx + 0*SHA256_DIGEST_ROW_SIZE]
+ mov DWORD(tmp2), [state + _args_digest_sha256 + 4*idx + 1*SHA256_DIGEST_ROW_SIZE]
+ mov DWORD(tmp4), [state + _args_digest_sha256 + 4*idx + 2*SHA256_DIGEST_ROW_SIZE]
+ mov DWORD(tmp5), [state + _args_digest_sha256 + 4*idx + 3*SHA256_DIGEST_ROW_SIZE]
+ bswap DWORD(tmp)
+ bswap DWORD(tmp2)
+ bswap DWORD(tmp4)
+ bswap DWORD(tmp5)
+ mov [p + 0*4], DWORD(tmp)
+ mov [p + 1*4], DWORD(tmp2)
+ mov [p + 2*4], DWORD(tmp4)
+ mov [p + 3*4], DWORD(tmp5)
+
+ mov DWORD(tmp), [state + _args_digest_sha256 + 4*idx + 4*SHA256_DIGEST_ROW_SIZE]
+ mov DWORD(tmp2), [state + _args_digest_sha256 + 4*idx + 5*SHA256_DIGEST_ROW_SIZE]
+ mov DWORD(tmp4), [state + _args_digest_sha256 + 4*idx + 6*SHA256_DIGEST_ROW_SIZE]
+%ifndef SHA224
+ mov DWORD(tmp5), [state + _args_digest_sha256 + 4*idx + 7*SHA256_DIGEST_ROW_SIZE]
+%endif
+ bswap DWORD(tmp)
+ bswap DWORD(tmp2)
+ bswap DWORD(tmp4)
+%ifndef SHA224
+ bswap DWORD(tmp5)
+%endif
+ mov [p + 4*4], DWORD(tmp)
+ mov [p + 5*4], DWORD(tmp2)
+ mov [p + 6*4], DWORD(tmp4)
+%ifndef SHA224
+ mov [p + 7*4], DWORD(tmp5)
+%endif
+
+clear_ret:
+
+%ifdef SAFE_DATA
+ vpxor ymm0, ymm0
+
+ ;; Clear digest (28B/32B), outer_block (28B/32B) and extra_block (64B)
+ ;; of returned job and NULL jobs
+%assign I 0
+%rep 8
+ cmp qword [state + _ldata_sha256 + (I*_HMAC_SHA1_LANE_DATA_size) + _job_in_lane], 0
+ jne APPEND(skip_clear_,I)
+
+	;; Clear digest (28 bytes for SHA-224, 32 bytes for SHA-256)
+%assign J 0
+%rep 7
+ mov dword [state + _args_digest_sha256 + SHA256_DIGEST_WORD_SIZE*I + J*SHA256_DIGEST_ROW_SIZE], 0
+%assign J (J+1)
+%endrep
+%ifndef SHA224
+ mov dword [state + _args_digest_sha256 + SHA256_DIGEST_WORD_SIZE*I + 7*SHA256_DIGEST_ROW_SIZE], 0
+%endif
+
+ lea lane_data, [state + _ldata_sha256 + (I*_HMAC_SHA1_LANE_DATA_size)]
+ ;; Clear first 64 bytes of extra_block
+ vmovdqa [lane_data + _extra_block], ymm0
+ vmovdqa [lane_data + _extra_block + 32], ymm0
+
+ ;; Clear first 28 bytes (SHA-224) or 32 bytes (SHA-256) of outer_block
+%ifdef SHA224
+ vmovdqa [lane_data + _outer_block], xmm0
+ mov qword [lane_data + _outer_block + 16], 0
+ mov dword [lane_data + _outer_block + 24], 0
+%else
+ vmovdqu [lane_data + _outer_block], ymm0
+%endif
+
+APPEND(skip_clear_,I):
+%assign I (I+1)
+%endrep
+
+%endif ;; SAFE_DATA
+
+return:
+ vzeroupper
+
+ mov rbx, [rsp + _gpr_save + 8*0]
+ mov rbp, [rsp + _gpr_save + 8*1]
+ mov r12, [rsp + _gpr_save + 8*2]
+ mov r13, [rsp + _gpr_save + 8*3]
+ mov r14, [rsp + _gpr_save + 8*4]
+%ifndef LINUX
+ mov rsi, [rsp + _gpr_save + 8*5]
+ mov rdi, [rsp + _gpr_save + 8*6]
+%endif
+ mov rsp, [rsp + _rsp_save] ; original SP
+
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
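proc_outer above is how the second HMAC pass reuses the same multi-buffer core: once a lane's inner hash completes, its digest is byte-swapped into the lane's outer_block (whose padding and length were prepared at submit time), the lane length is reset to a single block, and the lane's working digest is reloaded from the precomputed key-xor-opad state (_auth_key_xor_opad), so the next call into sha256_oct_avx2 yields the outer hash. Schematically, for one SHA-256 lane (a sketch; helper name and layout are illustrative):

    #include <stdint.h>
    #include <string.h>

    /* Switch one lane from the inner to the outer HMAC hash. */
    static void start_outer_hash(uint32_t lane_digest[8],        /* working SHA-256 state   */
                                 uint8_t outer_block[64],        /* pre-padded outer block  */
                                 const uint32_t key_xor_opad[8], /* precomputed opad state  */
                                 uint16_t *lane_len_blocks)
    {
            int i;

            /* The finished inner digest becomes the outer message (big-endian words). */
            for (i = 0; i < 8; i++) {
                    uint32_t w = lane_digest[i];

                    outer_block[4 * i + 0] = (uint8_t)(w >> 24);
                    outer_block[4 * i + 1] = (uint8_t)(w >> 16);
                    outer_block[4 * i + 2] = (uint8_t)(w >> 8);
                    outer_block[4 * i + 3] = (uint8_t)(w >> 0);
            }

            /* Restart the lane from the opad state for exactly one more block. */
            memcpy(lane_digest, key_xor_opad, 8 * sizeof(uint32_t));
            *lane_len_blocks = 1;
    }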
diff --git a/src/spdk/intel-ipsec-mb/avx2/mb_mgr_hmac_sha_256_submit_avx2.asm b/src/spdk/intel-ipsec-mb/avx2/mb_mgr_hmac_sha_256_submit_avx2.asm
new file mode 100644
index 000000000..46cea27bb
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx2/mb_mgr_hmac_sha_256_submit_avx2.asm
@@ -0,0 +1,426 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%include "include/os.asm"
+%include "job_aes_hmac.asm"
+%include "mb_mgr_datastruct.asm"
+%include "include/reg_sizes.asm"
+%include "include/memcpy.asm"
+%include "include/const.inc"
+
+extern sha256_oct_avx2
+
+section .data
+default rel
+align 16
+byteswap: ;ddq 0x0c0d0e0f08090a0b0405060700010203
+ dq 0x0405060700010203, 0x0c0d0e0f08090a0b
+
+section .text
+
+%ifndef FUNC
+%define FUNC submit_job_hmac_sha_256_avx2
+%endif
+
+%if 1
+%ifdef LINUX
+%define arg1 rdi
+%define arg2 rsi
+%define reg3 rcx
+%define reg4 rdx
+%else
+%define arg1 rcx
+%define arg2 rdx
+%define reg3 rdi
+%define reg4 rsi
+%endif
+
+%define state arg1
+%define job arg2
+%define len2 arg2
+
+
+; idx needs to be in rbp, r15
+%define last_len rbp
+%define idx rbp
+
+%define p r11
+%define start_offset r11
+
+%define unused_lanes rbx
+%define p2 rbx
+%define tmp4 rbx
+
+%define job_rax rax
+%define len rax
+
+%define size_offset reg3
+%define tmp2 reg3
+
+%define lane reg4
+%define tmp3 reg4
+
+%define extra_blocks r8
+
+%define tmp r9
+
+%define lane_data r10
+
+%endif
+
+
+; we clobber rbx, rsi, rdi, rbp; called routine also clobbers r12, r13, r14
+struc STACK
+_gpr_save: resq 7
+_rsp_save: resq 1
+endstruc
+
+; JOB* FUNC(MB_MGR_HMAC_SHA_256_OOO *state, JOB_AES_HMAC *job)
+; arg 1 : rcx : state
+; arg 2 : rdx : job
+MKGLOBAL(FUNC,function,internal)
+FUNC:
+
+ mov rax, rsp
+ sub rsp, STACK_size
+ and rsp, -32
+ mov [rsp + _gpr_save + 8*0], rbx
+ mov [rsp + _gpr_save + 8*1], rbp
+ mov [rsp + _gpr_save + 8*2], r12
+ mov [rsp + _gpr_save + 8*3], r13
+ mov [rsp + _gpr_save + 8*4], r14
+%ifndef LINUX
+ mov [rsp + _gpr_save + 8*5], rsi
+ mov [rsp + _gpr_save + 8*6], rdi
+%endif
+ mov [rsp + _rsp_save], rax ; original SP
+
+ mov unused_lanes, [state + _unused_lanes_sha256]
+ mov lane, unused_lanes
+ and lane, 0xF ;; just a nibble
+ shr unused_lanes, 4
+
+ imul lane_data, lane, _HMAC_SHA1_LANE_DATA_size
+ lea lane_data, [state + _ldata_sha256 + lane_data]
+ mov [state + _unused_lanes_sha256], unused_lanes
+ mov len, [job + _msg_len_to_hash_in_bytes]
+ mov tmp, len
+ shr tmp, 6 ; divide by 64, len in terms of blocks
+
+ mov [lane_data + _job_in_lane], job
+ mov dword [lane_data + _outer_done], 0
+
+ vmovdqa xmm0, [state + _lens_sha256]
+ XVPINSRW xmm0, xmm1, extra_blocks, lane, tmp, scale_x16
+ vmovdqa [state + _lens_sha256], xmm0
+
+ mov last_len, len
+ and last_len, 63
+ lea extra_blocks, [last_len + 9 + 63]
+ shr extra_blocks, 6
+ mov [lane_data + _extra_blocks], DWORD(extra_blocks)
+
+ mov p, [job + _src]
+ add p, [job + _hash_start_src_offset_in_bytes]
+ mov [state + _args_data_ptr_sha256 + 8*lane], p
+
+ cmp len, 64
+ jb copy_lt64
+
+fast_copy:
+ add p, len
+ vmovdqu ymm0, [p - 64 + 0 * 32]
+ vmovdqu ymm1, [p - 64 + 1 * 32]
+ vmovdqu [lane_data + _extra_block + 0*32], ymm0
+ vmovdqu [lane_data + _extra_block + 1*32], ymm1
+
+end_fast_copy:
+ mov size_offset, extra_blocks
+ shl size_offset, 6
+ sub size_offset, last_len
+ add size_offset, 64-8
+ mov [lane_data + _size_offset], DWORD(size_offset)
+ mov start_offset, 64
+ sub start_offset, last_len
+ mov [lane_data + _start_offset], DWORD(start_offset)
+
+ lea tmp, [8*64 + 8*len]
+ bswap tmp
+ mov [lane_data + _extra_block + size_offset], tmp
+
+ mov tmp, [job + _auth_key_xor_ipad]
+ vmovdqu xmm0, [tmp]
+ vmovdqu xmm1, [tmp + 4*4]
+ vmovd [state + _args_digest_sha256 + 4*lane + 0*SHA256_DIGEST_ROW_SIZE], xmm0
+ vpextrd [state + _args_digest_sha256 + 4*lane + 1*SHA256_DIGEST_ROW_SIZE], xmm0, 1
+ vpextrd [state + _args_digest_sha256 + 4*lane + 2*SHA256_DIGEST_ROW_SIZE], xmm0, 2
+ vpextrd [state + _args_digest_sha256 + 4*lane + 3*SHA256_DIGEST_ROW_SIZE], xmm0, 3
+ vmovd [state + _args_digest_sha256 + 4*lane + 4*SHA256_DIGEST_ROW_SIZE], xmm1
+ vpextrd [state + _args_digest_sha256 + 4*lane + 5*SHA256_DIGEST_ROW_SIZE], xmm1, 1
+ vpextrd [state + _args_digest_sha256 + 4*lane + 6*SHA256_DIGEST_ROW_SIZE], xmm1, 2
+ vpextrd [state + _args_digest_sha256 + 4*lane + 7*SHA256_DIGEST_ROW_SIZE], xmm1, 3
+
+ test len, ~63
+ jnz ge64_bytes
+
+lt64_bytes:
+ vmovdqa xmm0, [state + _lens_sha256]
+ XVPINSRW xmm0, xmm1, tmp, lane, extra_blocks, scale_x16
+ vmovdqa [state + _lens_sha256], xmm0
+
+ lea tmp, [lane_data + _extra_block + start_offset]
+ mov [state + _args_data_ptr_sha256 + 8*lane], tmp
+ mov dword [lane_data + _extra_blocks], 0
+
+ge64_bytes:
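+	;; if any lane is still free (unused_lanes != just the 0xF sentinel),
+	;; keep buffering jobs and return NULL; hashing starts only once all
+	;; 8 lanes have work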
+ cmp unused_lanes, 0xf
+ jne return_null
+ jmp start_loop
+
+ align 16
+start_loop:
+ ; Find min length
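+	;; vphminposuw returns the smallest lane length (in blocks) and its index;
+	;; all 8 lanes are hashed for that many blocks, then the minimum is
+	;; subtracted from every lane's remaining length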
+ vmovdqa xmm0, [state + _lens_sha256]
+ vphminposuw xmm1, xmm0
+ vpextrw DWORD(len2), xmm1, 0 ; min value
+ vpextrw DWORD(idx), xmm1, 1 ; min index (0...7)
+ cmp len2, 0
+ je len_is_0
+
+ vpbroadcastw xmm1, xmm1 ; duplicate words across all lanes
+ vpsubw xmm0, xmm0, xmm1
+ vmovdqa [state + _lens_sha256], xmm0
+
+ ; "state" and "args" are the same address, arg1
+ ; len is arg2
+ call sha256_oct_avx2
+ ; state and idx are intact
+
+len_is_0:
+ ; process completed job "idx"
+ imul lane_data, idx, _HMAC_SHA1_LANE_DATA_size
+ lea lane_data, [state + _ldata_sha256 + lane_data]
+ mov DWORD(extra_blocks), [lane_data + _extra_blocks]
+ cmp extra_blocks, 0
+ jne proc_extra_blocks
+ cmp dword [lane_data + _outer_done], 0
+ jne end_loop
+
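+	;; inner hash of lane "idx" is complete: write the byte-swapped inner
+	;; digest into outer_block, set the lane length to one block, point the
+	;; lane at outer_block and reload the digest with the opad state, so the
+	;; next kernel pass produces the outer HMAC hash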
+proc_outer:
+ mov dword [lane_data + _outer_done], 1
+ mov DWORD(size_offset), [lane_data + _size_offset]
+ mov qword [lane_data + _extra_block + size_offset], 0
+
+ vmovdqa xmm0, [state + _lens_sha256]
+ XVPINSRW xmm0, xmm1, tmp, idx, 1, scale_x16
+ vmovdqa [state + _lens_sha256], xmm0
+
+ lea tmp, [lane_data + _outer_block]
+ mov job, [lane_data + _job_in_lane]
+ mov [state + _args_data_ptr_sha256 + 8*idx], tmp
+
+ vmovd xmm0, [state + _args_digest_sha256 + 4*idx + 0*SHA256_DIGEST_ROW_SIZE]
+ vpinsrd xmm0, xmm0, [state + _args_digest_sha256 + 4*idx + 1*SHA256_DIGEST_ROW_SIZE], 1
+ vpinsrd xmm0, xmm0, [state + _args_digest_sha256 + 4*idx + 2*SHA256_DIGEST_ROW_SIZE], 2
+ vpinsrd xmm0, xmm0, [state + _args_digest_sha256 + 4*idx + 3*SHA256_DIGEST_ROW_SIZE], 3
+ vpshufb xmm0, xmm0, [rel byteswap]
+ vmovd xmm1, [state + _args_digest_sha256 + 4*idx + 4*SHA256_DIGEST_ROW_SIZE]
+ vpinsrd xmm1, xmm1, [state + _args_digest_sha256 + 4*idx + 5*SHA256_DIGEST_ROW_SIZE], 1
+ vpinsrd xmm1, xmm1, [state + _args_digest_sha256 + 4*idx + 6*SHA256_DIGEST_ROW_SIZE], 2
+%ifndef SHA224
+ vpinsrd xmm1, xmm1, [state + _args_digest_sha256 + 4*idx + 7*SHA256_DIGEST_ROW_SIZE], 3
+%endif
+ vpshufb xmm1, xmm1, [rel byteswap]
+ vmovdqa [lane_data + _outer_block], xmm0
+ vmovdqa [lane_data + _outer_block + 4*4], xmm1
+%ifdef SHA224
+ mov dword [lane_data + _outer_block + 7*4], 0x80
+%endif
+
+ mov tmp, [job + _auth_key_xor_opad]
+ vmovdqu xmm0, [tmp]
+ vmovdqu xmm1, [tmp + 4*4]
+ vmovd [state + _args_digest_sha256 + 4*idx + 0*SHA256_DIGEST_ROW_SIZE], xmm0
+ vpextrd [state + _args_digest_sha256 + 4*idx + 1*SHA256_DIGEST_ROW_SIZE], xmm0, 1
+ vpextrd [state + _args_digest_sha256 + 4*idx + 2*SHA256_DIGEST_ROW_SIZE], xmm0, 2
+ vpextrd [state + _args_digest_sha256 + 4*idx + 3*SHA256_DIGEST_ROW_SIZE], xmm0, 3
+ vmovd [state + _args_digest_sha256 + 4*idx + 4*SHA256_DIGEST_ROW_SIZE], xmm1
+ vpextrd [state + _args_digest_sha256 + 4*idx + 5*SHA256_DIGEST_ROW_SIZE], xmm1, 1
+ vpextrd [state + _args_digest_sha256 + 4*idx + 6*SHA256_DIGEST_ROW_SIZE], xmm1, 2
+ vpextrd [state + _args_digest_sha256 + 4*idx + 7*SHA256_DIGEST_ROW_SIZE], xmm1, 3
+
+ jmp start_loop
+
+ align 16
+proc_extra_blocks:
+ mov DWORD(start_offset), [lane_data + _start_offset]
+
+ vmovdqa xmm0, [state + _lens_sha256]
+ XVPINSRW xmm0, xmm1, tmp, idx, extra_blocks, scale_x16
+ vmovdqa [state + _lens_sha256], xmm0
+
+ lea tmp, [lane_data + _extra_block + start_offset]
+ mov [state + _args_data_ptr_sha256 + 8*idx], tmp
+ mov dword [lane_data + _extra_blocks], 0
+ jmp start_loop
+
+ align 16
+copy_lt64:
+ ;; less than one message block of data
+	;; p points at the beginning of the source block
+	;; destination is the extra block, written backwards by len from where the 0x80 pad byte is pre-populated
+ lea p2, [lane_data + _extra_block + 64]
+ sub p2, len
+ memcpy_avx2_64_1 p2, p, len, tmp, tmp2, ymm0, ymm1
+ mov unused_lanes, [state + _unused_lanes_sha256]
+ jmp end_fast_copy
+
+return_null:
+ xor job_rax, job_rax
+ jmp return
+
+ align 16
+end_loop:
+ mov job_rax, [lane_data + _job_in_lane]
+ mov unused_lanes, [state + _unused_lanes_sha256]
+ mov qword [lane_data + _job_in_lane], 0
+ or dword [job_rax + _status], STS_COMPLETED_HMAC
+ shl unused_lanes, 4
+ or unused_lanes, idx
+ mov [state + _unused_lanes_sha256], unused_lanes
+
+ mov p, [job_rax + _auth_tag_output]
+
+ vzeroupper
+
+%ifdef SHA224
+ cmp qword [job_rax + _auth_tag_output_len_in_bytes], 14
+ jne copy_full_digest
+%else
+ cmp qword [job_rax + _auth_tag_output_len_in_bytes], 16
+ jne copy_full_digest
+%endif
+ ;; copy 14 bytes for SHA224 / 16 bytes for SHA256
+ mov DWORD(tmp), [state + _args_digest_sha256 + 4*idx + 0*SHA256_DIGEST_ROW_SIZE]
+ mov DWORD(tmp2), [state + _args_digest_sha256 + 4*idx + 1*SHA256_DIGEST_ROW_SIZE]
+ mov DWORD(tmp3), [state + _args_digest_sha256 + 4*idx + 2*SHA256_DIGEST_ROW_SIZE]
+ mov DWORD(tmp4), [state + _args_digest_sha256 + 4*idx + 3*SHA256_DIGEST_ROW_SIZE]
+ bswap DWORD(tmp)
+ bswap DWORD(tmp2)
+ bswap DWORD(tmp3)
+ bswap DWORD(tmp4)
+ mov [p + 0*4], DWORD(tmp)
+ mov [p + 1*4], DWORD(tmp2)
+ mov [p + 2*4], DWORD(tmp3)
+%ifdef SHA224
+ mov [p + 3*4], WORD(tmp4)
+%else
+ mov [p + 3*4], DWORD(tmp4)
+%endif
+ jmp clear_ret
+
+copy_full_digest:
+ ;; copy 28 bytes for SHA224 / 32 bytes for SHA256
+ mov DWORD(tmp), [state + _args_digest_sha256 + 4*idx + 0*SHA256_DIGEST_ROW_SIZE]
+ mov DWORD(tmp2), [state + _args_digest_sha256 + 4*idx + 1*SHA256_DIGEST_ROW_SIZE]
+ mov DWORD(tmp3), [state + _args_digest_sha256 + 4*idx + 2*SHA256_DIGEST_ROW_SIZE]
+ mov DWORD(tmp4), [state + _args_digest_sha256 + 4*idx + 3*SHA256_DIGEST_ROW_SIZE]
+ bswap DWORD(tmp)
+ bswap DWORD(tmp2)
+ bswap DWORD(tmp3)
+ bswap DWORD(tmp4)
+ mov [p + 0*4], DWORD(tmp)
+ mov [p + 1*4], DWORD(tmp2)
+ mov [p + 2*4], DWORD(tmp3)
+ mov [p + 3*4], DWORD(tmp4)
+
+ mov DWORD(tmp), [state + _args_digest_sha256 + 4*idx + 4*SHA256_DIGEST_ROW_SIZE]
+ mov DWORD(tmp2), [state + _args_digest_sha256 + 4*idx + 5*SHA256_DIGEST_ROW_SIZE]
+ mov DWORD(tmp3), [state + _args_digest_sha256 + 4*idx + 6*SHA256_DIGEST_ROW_SIZE]
+%ifndef SHA224
+ mov DWORD(tmp4), [state + _args_digest_sha256 + 4*idx + 7*SHA256_DIGEST_ROW_SIZE]
+%endif
+ bswap DWORD(tmp)
+ bswap DWORD(tmp2)
+ bswap DWORD(tmp3)
+%ifndef SHA224
+ bswap DWORD(tmp4)
+%endif
+ mov [p + 4*4], DWORD(tmp)
+ mov [p + 5*4], DWORD(tmp2)
+ mov [p + 6*4], DWORD(tmp3)
+%ifndef SHA224
+ mov [p + 7*4], DWORD(tmp4)
+%endif
+
+clear_ret:
+
+%ifdef SAFE_DATA
+ ;; Clear digest (28B/32B), outer_block (28B/32B) and extra_block (64B) of returned job
+%assign J 0
+%rep 7
+ mov dword [state + _args_digest_sha256 + SHA256_DIGEST_WORD_SIZE*idx + J*SHA256_DIGEST_ROW_SIZE], 0
+%assign J (J+1)
+%endrep
+%ifndef SHA224
+ mov dword [state + _args_digest_sha256 + SHA256_DIGEST_WORD_SIZE*idx + 7*SHA256_DIGEST_ROW_SIZE], 0
+%endif
+
+ vpxor ymm0, ymm0
+ imul lane_data, idx, _HMAC_SHA1_LANE_DATA_size
+ lea lane_data, [state + _ldata_sha256 + lane_data]
+ ;; Clear first 64 bytes of extra_block
+ vmovdqa [lane_data + _extra_block], ymm0
+ vmovdqa [lane_data + _extra_block + 32], ymm0
+
+ ;; Clear first 28 bytes (SHA-224) or 32 bytes (SHA-256) of outer_block
+%ifdef SHA224
+ vmovdqa [lane_data + _outer_block], xmm0
+ mov qword [lane_data + _outer_block + 16], 0
+ mov dword [lane_data + _outer_block + 24], 0
+%else
+ vmovdqu [lane_data + _outer_block], ymm0
+%endif
+%endif ;; SAFE_DATA
+
+return:
+ mov rbx, [rsp + _gpr_save + 8*0]
+ mov rbp, [rsp + _gpr_save + 8*1]
+ mov r12, [rsp + _gpr_save + 8*2]
+ mov r13, [rsp + _gpr_save + 8*3]
+ mov r14, [rsp + _gpr_save + 8*4]
+%ifndef LINUX
+ mov rsi, [rsp + _gpr_save + 8*5]
+ mov rdi, [rsp + _gpr_save + 8*6]
+%endif
+ mov rsp, [rsp + _rsp_save] ; original SP
+
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/avx2/mb_mgr_hmac_sha_384_flush_avx2.asm b/src/spdk/intel-ipsec-mb/avx2/mb_mgr_hmac_sha_384_flush_avx2.asm
new file mode 100644
index 000000000..b354cdff3
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx2/mb_mgr_hmac_sha_384_flush_avx2.asm
@@ -0,0 +1,31 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%define FUNC flush_job_hmac_sha_384_avx2
+%define SHA_X_DIGEST_SIZE 384
+
+%include "avx2/mb_mgr_hmac_sha_512_flush_avx2.asm"
diff --git a/src/spdk/intel-ipsec-mb/avx2/mb_mgr_hmac_sha_384_submit_avx2.asm b/src/spdk/intel-ipsec-mb/avx2/mb_mgr_hmac_sha_384_submit_avx2.asm
new file mode 100644
index 000000000..46cd3c54f
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx2/mb_mgr_hmac_sha_384_submit_avx2.asm
@@ -0,0 +1,31 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%define FUNC submit_job_hmac_sha_384_avx2
+%define SHA_X_DIGEST_SIZE 384
+
+%include "avx2/mb_mgr_hmac_sha_512_submit_avx2.asm"
diff --git a/src/spdk/intel-ipsec-mb/avx2/mb_mgr_hmac_sha_512_flush_avx2.asm b/src/spdk/intel-ipsec-mb/avx2/mb_mgr_hmac_sha_512_flush_avx2.asm
new file mode 100644
index 000000000..14a28c43a
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx2/mb_mgr_hmac_sha_512_flush_avx2.asm
@@ -0,0 +1,353 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%include "include/os.asm"
+%include "job_aes_hmac.asm"
+%include "mb_mgr_datastruct.asm"
+%include "include/reg_sizes.asm"
+
+extern sha512_x4_avx2
+
+section .data
+default rel
+
+align 16
+byteswap: ;ddq 0x08090a0b0c0d0e0f0001020304050607
+ dq 0x0001020304050607, 0x08090a0b0c0d0e0f
+len_masks:
+ ;ddq 0x0000000000000000000000000000FFFF
+ dq 0x000000000000FFFF, 0x0000000000000000
+ ;ddq 0x000000000000000000000000FFFF0000
+ dq 0x00000000FFFF0000, 0x0000000000000000
+ ;ddq 0x00000000000000000000FFFF00000000
+ dq 0x0000FFFF00000000, 0x0000000000000000
+ ;ddq 0x0000000000000000FFFF000000000000
+ dq 0xFFFF000000000000, 0x0000000000000000
+lane_1: dq 1
+lane_2: dq 2
+lane_3: dq 3
+
+section .text
+
+%ifndef FUNC
+%define FUNC flush_job_hmac_sha_512_avx2
+%define SHA_X_DIGEST_SIZE 512
+%endif
+
+%if 1
+%ifdef LINUX
+%define arg1 rdi
+%define arg2 rsi
+%else
+%define arg1 rcx
+%define arg2 rdx
+%endif
+
+%define state arg1
+%define job arg2
+%define len2 arg2
+
+
+; idx needs to be in rbp, r15
+%define idx rbp
+
+%define unused_lanes rbx
+%define lane_data rbx
+%define tmp2 rbx
+
+%define job_rax rax
+%define tmp1 rax
+%define size_offset rax
+%define tmp rax
+%define start_offset rax
+
+%define tmp3 arg1
+
+%define extra_blocks arg2
+%define p arg2
+
+%define tmp4 r8
+
+%define tmp5 r9
+
+%define tmp6 r10
+
+%endif
+
+; we clobber rbx, rbp; called routine also clobbers r12
+struc STACK
+_gpr_save: resq 3
+_rsp_save: resq 1
+endstruc
+
+%define APPEND(a,b) a %+ b
+
+; JOB* FUNC(MB_MGR_HMAC_SHA_512_OOO *state)
+; arg 1 : rcx : state
+MKGLOBAL(FUNC,function,internal)
+FUNC:
+
+ mov rax, rsp
+ sub rsp, STACK_size
+ and rsp, -32
+ mov [rsp + _gpr_save + 8*0], rbx
+ mov [rsp + _gpr_save + 8*1], rbp
+ mov [rsp + _gpr_save + 8*2], r12
+ mov [rsp + _rsp_save], rax ; original SP
+
+ mov unused_lanes, [state + _unused_lanes_sha512]
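+	;; _unused_lanes_sha512 is a byte stack of free lane ids topped by an 0xFF
+	;; sentinel; if bit 39 is still set the sentinel has not been shifted down,
+	;; i.e. no lane holds a job and there is nothing to flush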
+ bt unused_lanes, 32+7
+ jc return_null
+
+ ; find a lane with a non-null job
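+	;; (cmov cannot take an immediate source, hence the lane_1..lane_3
+	;; constants in the data section)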
+ xor idx, idx
+%assign I 1
+%rep 3
+ cmp qword [state + _ldata_sha512 + I * _SHA512_LANE_DATA_size + _job_in_lane_sha512], 0
+ cmovne idx, [rel APPEND(lane_, I)]
+%assign I (I+1)
+%endrep
+
+copy_lane_data:
+ ; copy good lane (idx) to empty lanes
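+	;; empty lanes get the good lane's data pointer and a length of 0xFFFF
+	;; (via len_masks), so vphminposuw never selects them as the minimum and
+	;; they merely re-hash the duplicated data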
+ vmovdqa xmm0, [state + _lens_sha512]
+ mov tmp, [state + _args_sha512 + _data_ptr_sha512 + PTR_SZ*idx]
+
+%assign I 0
+%rep 4
+ cmp qword [state + _ldata_sha512 + I * _SHA512_LANE_DATA_size + _job_in_lane_sha512], 0
+ jne APPEND(skip_,I)
+ mov [state + _args_sha512 + _data_ptr_sha512 + PTR_SZ*I], tmp
+ vpor xmm0, xmm0, [rel len_masks + 16*I]
+APPEND(skip_,I):
+%assign I (I+1)
+%endrep
+
+ vmovdqa [state + _lens_sha512], xmm0
+
+ vphminposuw xmm1, xmm0
+ vpextrw DWORD(len2), xmm1, 0 ; min value
+ vpextrw DWORD(idx), xmm1, 1 ; min index (0...3)
+ cmp len2, 0
+ je len_is_0
+
+ vpshuflw xmm1, xmm1, 0x00
+ vpsubw xmm0, xmm0, xmm1
+ vmovdqa [state + _lens_sha512], xmm0
+
+ ; "state" and "args" are the same address, arg1
+ ; len is arg2
+ call sha512_x4_avx2
+ ; state and idx are intact
+
+len_is_0:
+ ; process completed job "idx"
+ imul lane_data, idx, _SHA512_LANE_DATA_size
+ lea lane_data, [state + _ldata_sha512 + lane_data]
+ mov DWORD(extra_blocks), [lane_data + _extra_blocks_sha512]
+ cmp extra_blocks, 0
+ jne proc_extra_blocks
+ cmp dword [lane_data + _outer_done_sha512], 0
+ jne end_loop
+
+proc_outer:
+ mov dword [lane_data + _outer_done_sha512], 1
+ mov DWORD(size_offset), [lane_data + _size_offset_sha512]
+ mov qword [lane_data + _extra_block_sha512 + size_offset], 0
+ mov word [state + _lens_sha512 + 2*idx], 1
+ lea tmp, [lane_data + _outer_block_sha512]
+ mov job, [lane_data + _job_in_lane_sha512]
+ mov [state + _args_data_ptr_sha512 + PTR_SZ*idx], tmp
+
+ ; move digest into data location
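+	;; each iteration moves two 64-bit digest words (16 bytes):
+	;; 4 iterations copy 64 bytes for SHA-512, 3 copy 48 bytes for SHA-384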
+ %assign I 0
+ %rep (SHA_X_DIGEST_SIZE / (8*16))
+ vmovq xmm0, [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 2*I*SHA512_DIGEST_ROW_SIZE]
+ vpinsrq xmm0, [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + (2*I + 1)*SHA512_DIGEST_ROW_SIZE], 1
+ vpshufb xmm0, [rel byteswap]
+ vmovdqa [lane_data + _outer_block_sha512 + I*2*SHA512_DIGEST_WORD_SIZE], xmm0
+ %assign I (I+1)
+ %endrep
+
+ ; move the opad key into digest
+ mov tmp, [job + _auth_key_xor_opad]
+
+ %assign I 0
+ %rep 4
+ vmovdqu xmm0, [tmp + I * 16]
+ vmovq [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + (2*I + 0)*SHA512_DIGEST_ROW_SIZE], xmm0
+ vpextrq [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + (2*I + 1)*SHA512_DIGEST_ROW_SIZE], xmm0, 1
+ %assign I (I+1)
+ %endrep
+
+ jmp copy_lane_data
+
+ align 16
+proc_extra_blocks:
+ mov DWORD(start_offset), [lane_data + _start_offset_sha512]
+ mov [state + _lens_sha512 + 2*idx], WORD(extra_blocks)
+ lea tmp, [lane_data + _extra_block_sha512 + start_offset]
+ mov [state + _args_data_ptr_sha512 + PTR_SZ*idx], tmp
+ mov dword [lane_data + _extra_blocks_sha512], 0
+ jmp copy_lane_data
+
+return_null:
+ xor job_rax, job_rax
+ jmp return
+
+ align 16
+end_loop:
+ mov job_rax, [lane_data + _job_in_lane_sha512]
+ mov qword [lane_data + _job_in_lane_sha512], 0
+ or dword [job_rax + _status], STS_COMPLETED_HMAC
+ mov unused_lanes, [state + _unused_lanes_sha512]
+ shl unused_lanes, 8
+ or unused_lanes, idx
+ mov [state + _unused_lanes_sha512], unused_lanes
+
+ mov p, [job_rax + _auth_tag_output]
+
+%if (SHA_X_DIGEST_SIZE != 384)
+ cmp qword [job_rax + _auth_tag_output_len_in_bytes], 32
+ jne copy_full_digest
+%else
+ cmp qword [job_rax + _auth_tag_output_len_in_bytes], 24
+ jne copy_full_digest
+%endif
+
+ ;; copy 32 bytes for SHA512 / 24 bytes for SHA384
+ mov QWORD(tmp2), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 0*SHA512_DIGEST_ROW_SIZE]
+ mov QWORD(tmp4), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 1*SHA512_DIGEST_ROW_SIZE]
+ mov QWORD(tmp6), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 2*SHA512_DIGEST_ROW_SIZE]
+%if (SHA_X_DIGEST_SIZE != 384)
+ mov QWORD(tmp5), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 3*SHA512_DIGEST_ROW_SIZE]
+%endif
+ bswap QWORD(tmp2)
+ bswap QWORD(tmp4)
+ bswap QWORD(tmp6)
+%if (SHA_X_DIGEST_SIZE != 384)
+ bswap QWORD(tmp5)
+%endif
+ mov [p + 0*8], QWORD(tmp2)
+ mov [p + 1*8], QWORD(tmp4)
+ mov [p + 2*8], QWORD(tmp6)
+%if (SHA_X_DIGEST_SIZE != 384)
+ mov [p + 3*8], QWORD(tmp5)
+%endif
+ jmp clear_ret
+
+copy_full_digest:
+ ;; copy 64 bytes for SHA512 / 48 bytes for SHA384
+ mov QWORD(tmp2), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 0*SHA512_DIGEST_ROW_SIZE]
+ mov QWORD(tmp4), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 1*SHA512_DIGEST_ROW_SIZE]
+ mov QWORD(tmp6), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 2*SHA512_DIGEST_ROW_SIZE]
+ mov QWORD(tmp5), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 3*SHA512_DIGEST_ROW_SIZE]
+ bswap QWORD(tmp2)
+ bswap QWORD(tmp4)
+ bswap QWORD(tmp6)
+ bswap QWORD(tmp5)
+ mov [p + 0*8], QWORD(tmp2)
+ mov [p + 1*8], QWORD(tmp4)
+ mov [p + 2*8], QWORD(tmp6)
+ mov [p + 3*8], QWORD(tmp5)
+
+ mov QWORD(tmp2), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 4*SHA512_DIGEST_ROW_SIZE]
+ mov QWORD(tmp4), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 5*SHA512_DIGEST_ROW_SIZE]
+%if (SHA_X_DIGEST_SIZE != 384)
+ mov QWORD(tmp6), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 6*SHA512_DIGEST_ROW_SIZE]
+ mov QWORD(tmp5), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 7*SHA512_DIGEST_ROW_SIZE]
+%endif
+ bswap QWORD(tmp2)
+ bswap QWORD(tmp4)
+%if (SHA_X_DIGEST_SIZE != 384)
+ bswap QWORD(tmp6)
+ bswap QWORD(tmp5)
+%endif
+ mov [p + 4*8], QWORD(tmp2)
+ mov [p + 5*8], QWORD(tmp4)
+%if (SHA_X_DIGEST_SIZE != 384)
+ mov [p + 6*8], QWORD(tmp6)
+ mov [p + 7*8], QWORD(tmp5)
+%endif
+
+clear_ret:
+
+%ifdef SAFE_DATA
+ vpxor ymm0, ymm0
+
+ ;; Clear digest (48B/64B), outer_block (48B/64B) and extra_block (128B) of returned job
+%assign I 0
+%rep 4
+ cmp qword [state + _ldata_sha512 + (I*_SHA512_LANE_DATA_size) + _job_in_lane_sha512], 0
+ jne APPEND(skip_clear_,I)
+
+	;; Clear digest (48 bytes for SHA-384, 64 bytes for SHA-512)
+%assign J 0
+%rep 6
+ mov qword [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*I + J*SHA512_DIGEST_ROW_SIZE], 0
+%assign J (J+1)
+%endrep
+%if (SHA_X_DIGEST_SIZE != 384)
+ mov qword [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*I + 6*SHA512_DIGEST_ROW_SIZE], 0
+ mov qword [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*I + 7*SHA512_DIGEST_ROW_SIZE], 0
+%endif
+
+ lea lane_data, [state + _ldata_sha512 + (I*_SHA512_LANE_DATA_size)]
+ ;; Clear first 128 bytes of extra_block
+%assign offset 0
+%rep 4
+	vmovdqa [lane_data + _extra_block_sha512 + offset], ymm0
+%assign offset (offset + 32)
+%endrep
+
+ ;; Clear first 48 bytes (SHA-384) or 64 bytes (SHA-512) of outer_block
+	vmovdqu [lane_data + _outer_block_sha512], ymm0
+%if (SHA_X_DIGEST_SIZE == 384)
+	vmovdqa [lane_data + _outer_block_sha512 + 32], xmm0
+%else
+	vmovdqu [lane_data + _outer_block_sha512 + 32], ymm0
+%endif
+
+APPEND(skip_clear_,I):
+%assign I (I+1)
+%endrep
+
+%endif ;; SAFE_DATA
+
+return:
+ vzeroupper
+
+ mov rbx, [rsp + _gpr_save + 8*0]
+ mov rbp, [rsp + _gpr_save + 8*1]
+ mov r12, [rsp + _gpr_save + 8*2]
+ mov rsp, [rsp + _rsp_save] ; original SP
+
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/avx2/mb_mgr_hmac_sha_512_submit_avx2.asm b/src/spdk/intel-ipsec-mb/avx2/mb_mgr_hmac_sha_512_submit_avx2.asm
new file mode 100644
index 000000000..a7c3e249b
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx2/mb_mgr_hmac_sha_512_submit_avx2.asm
@@ -0,0 +1,416 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%include "include/os.asm"
+%include "job_aes_hmac.asm"
+%include "mb_mgr_datastruct.asm"
+%include "include/reg_sizes.asm"
+%include "include/memcpy.asm"
+%include "include/const.inc"
+
+extern sha512_x4_avx2
+
+section .data
+default rel
+align 16
+byteswap: ;ddq 0x08090a0b0c0d0e0f0001020304050607
+ dq 0x0001020304050607, 0x08090a0b0c0d0e0f
+
+section .text
+
+%ifndef FUNC
+%define FUNC submit_job_hmac_sha_512_avx2
+%define SHA_X_DIGEST_SIZE 512
+%endif
+
+%if 1
+%ifdef LINUX
+%define arg1 rdi
+%define arg2 rsi
+%define reg3 rcx
+%define reg4 rdx
+%else
+%define arg1 rcx
+%define arg2 rdx
+%define reg3 rdi
+%define reg4 rsi
+%endif
+
+%define state arg1
+%define job arg2
+%define len2 arg2
+
+
+; idx needs to be in rbp, r13, r14, r15
+%define last_len rbp
+%define idx rbp
+
+%define p r11
+%define start_offset r11
+
+%define unused_lanes rbx
+%define tmp4 rbx
+
+%define job_rax rax
+%define len rax
+
+%define size_offset reg3
+%define tmp2 reg3
+
+%define lane reg4
+%define tmp3 reg4
+
+%define extra_blocks r8
+
+%define tmp r9
+%define p2 r9
+
+%define lane_data r10
+
+%endif
+
+; Define stack usage
+
+; we clobber rbx, rsi, rdi, rbp; called routine also clobbers r12
+struc STACK
+_gpr_save: resq 5
+_rsp_save: resq 1
+endstruc
+
+; JOB* FUNC(MB_MGR_HMAC_SHA_512_OOO *state, JOB_AES_HMAC *job)
+; arg 1 : rcx : state
+; arg 2 : rdx : job
+MKGLOBAL(FUNC,function,internal)
+FUNC:
+
+ mov rax, rsp
+ sub rsp, STACK_size
+ and rsp, -32
+ mov [rsp + _gpr_save + 8*0], rbx
+ mov [rsp + _gpr_save + 8*1], rbp
+ mov [rsp + _gpr_save + 8*2], r12
+%ifndef LINUX
+ mov [rsp + _gpr_save + 8*3], rsi
+ mov [rsp + _gpr_save + 8*4], rdi
+%endif
+ mov [rsp + _rsp_save], rax ; original SP
+
+ mov unused_lanes, [state + _unused_lanes_sha512]
+ movzx lane, BYTE(unused_lanes)
+ shr unused_lanes, 8
+ imul lane_data, lane, _SHA512_LANE_DATA_size
+ lea lane_data, [state + _ldata_sha512 + lane_data]
+ mov [state + _unused_lanes_sha512], unused_lanes
+ mov len, [job + _msg_len_to_hash_in_bytes]
+ mov tmp, len
+ shr tmp, 7 ; divide by 128, len in terms of blocks
+
+ mov [lane_data + _job_in_lane_sha512], job
+ mov dword [lane_data + _outer_done_sha512], 0
+
+ vmovdqa xmm0, [state + _lens_sha512]
+ XVPINSRW xmm0, xmm1, extra_blocks, lane, tmp, scale_x16
+ vmovdqa [state + _lens_sha512], xmm0
+
+
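+	;; number of extra 128-byte blocks needed to finish the inner hash:
+	;; tail bytes (len mod 128) + 1 byte of 0x80 padding + 16 bytes of length,
+	;; rounded up to a multiple of the 128-byte SHA-512 block size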
+ mov last_len, len
+ and last_len, 127
+ lea extra_blocks, [last_len + 17 + 127]
+ shr extra_blocks, 7
+ mov [lane_data + _extra_blocks_sha512], DWORD(extra_blocks)
+
+ mov p, [job + _src]
+ add p, [job + _hash_start_src_offset_in_bytes]
+ mov [state + _args_data_ptr_sha512 + PTR_SZ*lane], p
+
+ cmp len, 128
+ jb copy_lt128
+
+fast_copy:
+ add p, len
+ vmovdqu ymm0, [p - 128 + 0*32]
+ vmovdqu ymm1, [p - 128 + 1*32]
+ vmovdqu ymm2, [p - 128 + 2*32]
+ vmovdqu ymm3, [p - 128 + 3*32]
+ vmovdqu [lane_data + _extra_block_sha512 + 0*32], ymm0
+ vmovdqu [lane_data + _extra_block_sha512 + 1*32], ymm1
+ vmovdqu [lane_data + _extra_block_sha512 + 2*32], ymm2
+ vmovdqu [lane_data + _extra_block_sha512 + 3*32], ymm3
+end_fast_copy:
+
+ mov size_offset, extra_blocks
+ shl size_offset, 7
+ sub size_offset, last_len
+ add size_offset, 128-8
+ mov [lane_data + _size_offset_sha512], DWORD(size_offset)
+ mov start_offset, 128
+ sub start_offset, last_len
+ mov [lane_data + _start_offset_sha512], DWORD(start_offset)
+
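+	;; message length in bits, including the 128-byte ipad block; stored
+	;; big-endian in the final 8 bytes of the last padded block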
+ lea tmp, [8*128 + 8*len]
+ bswap tmp
+ mov [lane_data + _extra_block_sha512 + size_offset], tmp
+
+ mov tmp, [job + _auth_key_xor_ipad]
+
+%assign I 0
+%rep 4
+ vmovdqu xmm0, [tmp + I * 2 * SHA512_DIGEST_WORD_SIZE]
+ vmovq [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*lane + (2*I + 0)*SHA512_DIGEST_ROW_SIZE], xmm0
+ vpextrq [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*lane + (2*I + 1)*SHA512_DIGEST_ROW_SIZE], xmm0, 1
+%assign I (I+1)
+%endrep
+
+ test len, ~127
+ jnz ge128_bytes
+
+lt128_bytes:
+ vmovdqa xmm0, [state + _lens_sha512]
+ XVPINSRW xmm0, xmm1, tmp, lane, extra_blocks, scale_x16
+ vmovdqa [state + _lens_sha512], xmm0
+
+ lea tmp, [lane_data + _extra_block_sha512 + start_offset]
+	mov	[state + _args_data_ptr_sha512 + PTR_SZ*lane], tmp ;; point this lane's data pointer at the extra block
+ mov dword [lane_data + _extra_blocks_sha512], 0
+
+ge128_bytes:
+ cmp unused_lanes, 0xff
+ jne return_null
+ jmp start_loop
+
+ align 16
+start_loop:
+ ; Find min length
+ vmovdqa xmm0, [state + _lens_sha512]
+ vphminposuw xmm1, xmm0
+ vpextrw DWORD(len2), xmm1, 0 ; min value
+	vpextrw	DWORD(idx), xmm1, 1	; min index (0...3)
+ cmp len2, 0
+ je len_is_0
+
+ vpshuflw xmm1, xmm1, 0x00
+ vpsubw xmm0, xmm0, xmm1
+ vmovdqa [state + _lens_sha512], xmm0
+
+ ; "state" and "args" are the same address, arg1
+ ; len is arg2
+ call sha512_x4_avx2
+ ; state and idx are intact
+
+len_is_0:
+ ; process completed job "idx"
+ imul lane_data, idx, _SHA512_LANE_DATA_size
+ lea lane_data, [state + _ldata_sha512 + lane_data]
+ mov DWORD(extra_blocks), [lane_data + _extra_blocks_sha512]
+ cmp extra_blocks, 0
+ jne proc_extra_blocks
+ cmp dword [lane_data + _outer_done_sha512], 0
+ jne end_loop
+
+proc_outer:
+ mov dword [lane_data + _outer_done_sha512], 1
+ mov DWORD(size_offset), [lane_data + _size_offset_sha512]
+ mov qword [lane_data + _extra_block_sha512 + size_offset], 0
+
+ vmovdqa xmm0, [state + _lens_sha512]
+ XVPINSRW xmm0, xmm1, tmp, idx, 1, scale_x16
+ vmovdqa [state + _lens_sha512], xmm0
+
+ lea tmp, [lane_data + _outer_block_sha512]
+ mov job, [lane_data + _job_in_lane_sha512]
+ mov [state + _args_data_ptr_sha512 + PTR_SZ*idx], tmp
+
+%assign I 0
+%rep (SHA_X_DIGEST_SIZE / (8 * 16))
+ vmovq xmm0, [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + (2*I + 0)*SHA512_DIGEST_ROW_SIZE]
+ vpinsrq xmm0, [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + (2*I + 1)*SHA512_DIGEST_ROW_SIZE], 1
+ vpshufb xmm0, [rel byteswap]
+ vmovdqa [lane_data + _outer_block_sha512 + I * 2 * SHA512_DIGEST_WORD_SIZE], xmm0
+%assign I (I+1)
+%endrep
+
+ mov tmp, [job + _auth_key_xor_opad]
+%assign I 0
+%rep 4
+ vmovdqu xmm0, [tmp + I * 16]
+ vmovq [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + (2*I+0)*SHA512_DIGEST_ROW_SIZE], xmm0
+ vpextrq [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + (2*I + 1)*SHA512_DIGEST_ROW_SIZE], xmm0, 1
+%assign I (I+1)
+%endrep
+
+ jmp start_loop
+
+ align 16
+proc_extra_blocks:
+ mov DWORD(start_offset), [lane_data + _start_offset_sha512]
+
+ vmovdqa xmm0, [state + _lens_sha512]
+ XVPINSRW xmm0, xmm1, tmp, idx, extra_blocks, scale_x16
+ vmovdqa [state + _lens_sha512], xmm0
+
+ lea tmp, [lane_data + _extra_block_sha512 + start_offset]
+ mov [state + _args_data_ptr_sha512 + PTR_SZ*idx], tmp ;; idx is index of shortest length message
+ mov dword [lane_data + _extra_blocks_sha512], 0
+ jmp start_loop
+
+ align 16
+copy_lt128:
+ ;; less than one message block of data
+	;; destination is the extra block, written backwards by len from where the 0x80 pad byte is pre-populated
+	lea	p2, [lane_data + _extra_block_sha512 + 128]
+ sub p2, len
+ memcpy_avx2_128_1 p2, p, len, tmp4, tmp2, ymm0, ymm1, ymm2, ymm3
+ mov unused_lanes, [state + _unused_lanes_sha512]
+ jmp end_fast_copy
+
+return_null:
+ xor job_rax, job_rax
+ jmp return
+
+ align 16
+end_loop:
+ mov job_rax, [lane_data + _job_in_lane_sha512]
+ mov unused_lanes, [state + _unused_lanes_sha512]
+ mov qword [lane_data + _job_in_lane_sha512], 0
+ or dword [job_rax + _status], STS_COMPLETED_HMAC
+ shl unused_lanes, 8
+ or unused_lanes, idx
+ mov [state + _unused_lanes_sha512], unused_lanes
+
+ mov p, [job_rax + _auth_tag_output]
+
+ vzeroupper
+
+%if (SHA_X_DIGEST_SIZE != 384)
+ cmp qword [job_rax + _auth_tag_output_len_in_bytes], 32
+ jne copy_full_digest
+%else
+ cmp qword [job_rax + _auth_tag_output_len_in_bytes], 24
+ jne copy_full_digest
+%endif
+ ;; copy 32 bytes for SHA512 / 24 bytes for SHA384
+ mov QWORD(tmp), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 0*SHA512_DIGEST_ROW_SIZE]
+ mov QWORD(tmp2), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 1*SHA512_DIGEST_ROW_SIZE]
+ mov QWORD(tmp3), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 2*SHA512_DIGEST_ROW_SIZE]
+%if (SHA_X_DIGEST_SIZE != 384)
+ mov QWORD(tmp4), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 3*SHA512_DIGEST_ROW_SIZE]
+%endif
+ bswap QWORD(tmp)
+ bswap QWORD(tmp2)
+ bswap QWORD(tmp3)
+%if (SHA_X_DIGEST_SIZE != 384)
+ bswap QWORD(tmp4)
+%endif
+ mov [p + 0*8], QWORD(tmp)
+ mov [p + 1*8], QWORD(tmp2)
+ mov [p + 2*8], QWORD(tmp3)
+%if (SHA_X_DIGEST_SIZE != 384)
+ mov [p + 3*8], QWORD(tmp4)
+%endif
+ jmp clear_ret
+
+copy_full_digest:
+ ;; copy 64 bytes for SHA512 / 48 bytes for SHA384
+ mov QWORD(tmp), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 0*SHA512_DIGEST_ROW_SIZE]
+ mov QWORD(tmp2), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 1*SHA512_DIGEST_ROW_SIZE]
+ mov QWORD(tmp3), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 2*SHA512_DIGEST_ROW_SIZE]
+ mov QWORD(tmp4), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 3*SHA512_DIGEST_ROW_SIZE]
+ bswap QWORD(tmp)
+ bswap QWORD(tmp2)
+ bswap QWORD(tmp3)
+ bswap QWORD(tmp4)
+ mov [p + 0*8], QWORD(tmp)
+ mov [p + 1*8], QWORD(tmp2)
+ mov [p + 2*8], QWORD(tmp3)
+ mov [p + 3*8], QWORD(tmp4)
+
+ mov QWORD(tmp), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 4*SHA512_DIGEST_ROW_SIZE]
+ mov QWORD(tmp2), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 5*SHA512_DIGEST_ROW_SIZE]
+%if (SHA_X_DIGEST_SIZE != 384)
+ mov QWORD(tmp3), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 6*SHA512_DIGEST_ROW_SIZE]
+ mov QWORD(tmp4), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 7*SHA512_DIGEST_ROW_SIZE]
+%endif
+ bswap QWORD(tmp)
+ bswap QWORD(tmp2)
+%if (SHA_X_DIGEST_SIZE != 384)
+ bswap QWORD(tmp3)
+ bswap QWORD(tmp4)
+%endif
+ mov [p + 4*8], QWORD(tmp)
+ mov [p + 5*8], QWORD(tmp2)
+%if (SHA_X_DIGEST_SIZE != 384)
+ mov [p + 6*8], QWORD(tmp3)
+ mov [p + 7*8], QWORD(tmp4)
+%endif
+
+clear_ret:
+
+%ifdef SAFE_DATA
+ ;; Clear digest (48B/64B), outer_block (48B/64B) and extra_block (128B) of returned job
+%assign J 0
+%rep 6
+ mov qword [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + J*SHA512_DIGEST_ROW_SIZE], 0
+%assign J (J+1)
+%endrep
+%if (SHA_X_DIGEST_SIZE != 384)
+	mov	qword [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 6*SHA512_DIGEST_ROW_SIZE], 0
+	mov	qword [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 7*SHA512_DIGEST_ROW_SIZE], 0
+%endif
+
+ vpxor ymm0, ymm0
+ imul lane_data, idx, _SHA512_LANE_DATA_size
+ lea lane_data, [state + _ldata_sha512 + lane_data]
+ ;; Clear first 128 bytes of extra_block
+%assign offset 0
+%rep 4
+	vmovdqa [lane_data + _extra_block_sha512 + offset], ymm0
+%assign offset (offset + 32)
+%endrep
+
+ ;; Clear first 48 bytes (SHA-384) or 64 bytes (SHA-512) of outer_block
+	vmovdqu [lane_data + _outer_block_sha512], ymm0
+%if (SHA_X_DIGEST_SIZE == 384)
+	vmovdqa [lane_data + _outer_block_sha512 + 32], xmm0
+%else
+	vmovdqu [lane_data + _outer_block_sha512 + 32], ymm0
+%endif
+%endif ;; SAFE_DATA
+
+return:
+ mov rbx, [rsp + _gpr_save + 8*0]
+ mov rbp, [rsp + _gpr_save + 8*1]
+ mov r12, [rsp + _gpr_save + 8*2]
+%ifndef LINUX
+ mov rsi, [rsp + _gpr_save + 8*3]
+ mov rdi, [rsp + _gpr_save + 8*4]
+%endif
+ mov rsp, [rsp + _rsp_save] ; original SP
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/avx2/mb_mgr_hmac_submit_avx2.asm b/src/spdk/intel-ipsec-mb/avx2/mb_mgr_hmac_submit_avx2.asm
new file mode 100644
index 000000000..92b129f74
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx2/mb_mgr_hmac_submit_avx2.asm
@@ -0,0 +1,369 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%include "include/os.asm"
+%include "job_aes_hmac.asm"
+%include "mb_mgr_datastruct.asm"
+%include "include/reg_sizes.asm"
+%include "include/memcpy.asm"
+;%define DO_DBGPRINT
+%include "include/dbgprint.asm"
+%include "include/const.inc"
+
+extern sha1_x8_avx2
+
+section .data
+default rel
+align 16
+byteswap: ;ddq 0x0c0d0e0f08090a0b0405060700010203
+ dq 0x0405060700010203, 0x0c0d0e0f08090a0b
+
+section .text
+
+%if 1
+%ifdef LINUX
+%define arg1 rdi
+%define arg2 rsi
+%define reg3 rcx
+%define reg4 rdx
+%else
+%define arg1 rcx
+%define arg2 rdx
+%define reg3 rdi
+%define reg4 rsi
+%endif
+
+%define state arg1
+%define job arg2
+%define len2 arg2
+
+
+; idx needs to be in rbx, rdi, rbp
+%define last_len rbp
+%define idx rbp
+
+%define p r11
+%define start_offset r11
+
+%define unused_lanes r12
+%define tmp4 r12
+
+%define job_rax rax
+%define len rax
+
+%define size_offset reg3
+%define tmp2 reg3
+
+%define lane reg4
+%define tmp3 reg4
+
+%define extra_blocks r8
+
+%define tmp r9
+%define p2 r9
+
+%define lane_data r10
+
+%endif
+
+; we clobber rsi, rdi, rbp, r12; called routine clobbers also r13-r15
+struc STACK
+_gpr_save: resq 7
+_rsp_save: resq 1
+endstruc
+
+; JOB* submit_job_hmac_avx2(MB_MGR_HMAC_SHA_1_OOO *state, JOB_AES_HMAC *job)
+; arg 1 : rcx : state
+; arg 2 : rdx : job
+MKGLOBAL(submit_job_hmac_avx2,function,internal)
+submit_job_hmac_avx2:
+
+ mov rax, rsp
+ sub rsp, STACK_size
+ and rsp, -32 ; align to 32 byte boundary
+ mov [rsp + _gpr_save + 8*0], rbp
+ mov [rsp + _gpr_save + 8*1], r12
+ mov [rsp + _gpr_save + 8*2], r13
+ mov [rsp + _gpr_save + 8*3], r14
+ mov [rsp + _gpr_save + 8*4], r15
+%ifndef LINUX
+ mov [rsp + _gpr_save + 8*5], rsi
+ mov [rsp + _gpr_save + 8*6], rdi
+%endif
+ mov [rsp + _rsp_save], rax
+ DBGPRINTL "---------- enter sha1 submit -----------"
+
+ mov unused_lanes, [state + _unused_lanes]
+ mov lane, unused_lanes
+ and lane, 0xF ;; just a nibble
+ shr unused_lanes, 4
+ imul lane_data, lane, _HMAC_SHA1_LANE_DATA_size
+ lea lane_data, [state + _ldata + lane_data]
+ mov [state + _unused_lanes], unused_lanes
+
+ mov len, [job + _msg_len_to_hash_in_bytes]
+ mov tmp, len
+ shr tmp, 6 ; divide by 64, len in terms of blocks
+
+ mov [lane_data + _job_in_lane], job
+ mov dword [lane_data + _outer_done], 0
+
+ vmovdqa xmm0, [state + _lens]
+ XVPINSRW xmm0, xmm1, extra_blocks, lane, tmp, scale_x16
+ vmovdqa [state + _lens], xmm0
+
+ mov last_len, len
+ and last_len, 63
+ lea extra_blocks, [last_len + 9 + 63]
+ shr extra_blocks, 6
+ mov [lane_data + _extra_blocks], DWORD(extra_blocks)
+
+ mov p, [job + _src]
+ add p, [job + _hash_start_src_offset_in_bytes]
+ mov [state + _args_data_ptr + PTR_SZ*lane], p
+ cmp len, 64
+ jb copy_lt64
+
+fast_copy:
+ add p, len
+ vmovdqu ymm0, [p - 64 + 0 * 32]
+ vmovdqu ymm1, [p - 64 + 1 * 32]
+ vmovdqu [lane_data + _extra_block + 0*32], ymm0
+ vmovdqu [lane_data + _extra_block + 1*32], ymm1
+end_fast_copy:
+
+ mov size_offset, extra_blocks
+ shl size_offset, 6
+ sub size_offset, last_len
+ add size_offset, 64-8
+ mov [lane_data + _size_offset], DWORD(size_offset)
+ mov start_offset, 64
+ sub start_offset, last_len
+ mov [lane_data + _start_offset], DWORD(start_offset)
+
+ lea tmp, [8*64 + 8*len]
+ bswap tmp
+ mov [lane_data + _extra_block + size_offset], tmp
+
+ mov tmp, [job + _auth_key_xor_ipad]
+ vmovdqu xmm0, [tmp]
+ mov DWORD(tmp), [tmp + 4*4]
+ vmovd [state + _args_digest + SHA1_DIGEST_WORD_SIZE*lane + 0*SHA1_DIGEST_ROW_SIZE], xmm0
+ vpextrd [state + _args_digest + SHA1_DIGEST_WORD_SIZE*lane + 1*SHA1_DIGEST_ROW_SIZE], xmm0, 1
+ vpextrd [state + _args_digest + SHA1_DIGEST_WORD_SIZE*lane + 2*SHA1_DIGEST_ROW_SIZE], xmm0, 2
+ vpextrd [state + _args_digest + SHA1_DIGEST_WORD_SIZE*lane + 3*SHA1_DIGEST_ROW_SIZE], xmm0, 3
+ mov [state + _args_digest + SHA1_DIGEST_WORD_SIZE*lane + 4*SHA1_DIGEST_ROW_SIZE], DWORD(tmp)
+
+ test len, ~63
+ jnz ge64_bytes
+
+lt64_bytes:
+ vmovdqa xmm0, [state + _lens]
+ XVPINSRW xmm0, xmm1, tmp, lane, extra_blocks, scale_x16
+ vmovdqa [state + _lens], xmm0
+
+ lea tmp, [lane_data + _extra_block + start_offset]
+ mov [state + _args_data_ptr + PTR_SZ*lane], tmp
+ mov dword [lane_data + _extra_blocks], 0
+
+ge64_bytes:
+ cmp unused_lanes, 0xf
+ jne return_null
+ jmp start_loop
+
+ align 16
+start_loop:
+ ; Find min length
+ vmovdqa xmm0, [state + _lens]
+ vphminposuw xmm1, xmm0
+ vpextrw DWORD(len2), xmm1, 0 ; min value
+	vpextrw	DWORD(idx), xmm1, 1	; min index (0...7)
+ DBGPRINTL64 "min_length", len2
+ DBGPRINTL64 "min_length index ", idx
+ cmp len2, 0
+ je len_is_0
+
+ vpbroadcastw xmm1, xmm1
+ DBGPRINTL_XMM "SUBMIT lens after shuffle", xmm1
+
+ vpsubw xmm0, xmm0, xmm1
+ vmovdqa [state + _lens], xmm0
+ DBGPRINTL_XMM "lengths after subtraction", xmm0
+
+ ; "state" and "args" are the same address, arg1
+ ; len is arg2
+ call sha1_x8_avx2
+ ; state and idx are intact
+
+len_is_0:
+ ; process completed job "idx"
+ imul lane_data, idx, _HMAC_SHA1_LANE_DATA_size
+ lea lane_data, [state + _ldata + lane_data]
+ mov DWORD(extra_blocks), [lane_data + _extra_blocks]
+ cmp extra_blocks, 0
+ jne proc_extra_blocks
+ cmp dword [lane_data + _outer_done], 0
+ jne end_loop
+
+proc_outer:
+ mov dword [lane_data + _outer_done], 1
+ mov DWORD(size_offset), [lane_data + _size_offset]
+ mov qword [lane_data + _extra_block + size_offset], 0
+
+ vmovdqa xmm0, [state + _lens]
+ XVPINSRW xmm0, xmm1, tmp, idx, 1, scale_x16
+ vmovdqa [state + _lens], xmm0
+
+ lea tmp, [lane_data + _outer_block]
+ mov job, [lane_data + _job_in_lane]
+ mov [state + _args_data_ptr + PTR_SZ*idx], tmp
+
+ vmovd xmm0, [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 0*SHA1_DIGEST_ROW_SIZE]
+ vpinsrd xmm0, xmm0, [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 1*SHA1_DIGEST_ROW_SIZE], 1
+ vpinsrd xmm0, xmm0, [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 2*SHA1_DIGEST_ROW_SIZE], 2
+ vpinsrd xmm0, xmm0, [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 3*SHA1_DIGEST_ROW_SIZE], 3
+ vpshufb xmm0, xmm0, [rel byteswap]
+ mov DWORD(tmp), [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 4*SHA1_DIGEST_ROW_SIZE]
+ bswap DWORD(tmp)
+ vmovdqa [lane_data + _outer_block], xmm0
+ mov [lane_data + _outer_block + 4*SHA1_DIGEST_WORD_SIZE], DWORD(tmp)
+
+ mov tmp, [job + _auth_key_xor_opad]
+ vmovdqu xmm0, [tmp]
+ mov DWORD(tmp), [tmp + 4*4]
+ vmovd [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 0*SHA1_DIGEST_ROW_SIZE], xmm0
+ vpextrd [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 1*SHA1_DIGEST_ROW_SIZE], xmm0, 1
+ vpextrd [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 2*SHA1_DIGEST_ROW_SIZE], xmm0, 2
+ vpextrd [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 3*SHA1_DIGEST_ROW_SIZE], xmm0, 3
+ mov [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 4*SHA1_DIGEST_ROW_SIZE], DWORD(tmp)
+ jmp start_loop
+
+ align 16
+proc_extra_blocks:
+ mov DWORD(start_offset), [lane_data + _start_offset]
+
+ vmovdqa xmm0, [state + _lens]
+ XVPINSRW xmm0, xmm1, tmp, idx, extra_blocks, scale_x16
+ vmovdqa [state + _lens], xmm0
+
+ lea tmp, [lane_data + _extra_block + start_offset]
+ mov [state + _args_data_ptr + PTR_SZ*idx], tmp
+ mov dword [lane_data + _extra_blocks], 0
+ jmp start_loop
+
+ align 16
+copy_lt64:
+ ;; less than one message block of data
+	;; p points at the beginning of the source block
+	;; destination is the extra block, written backwards by len from where the 0x80 pad byte is pre-populated
+ lea p2, [lane_data + _extra_block + 64]
+ sub p2, len
+ memcpy_avx2_64_1 p2, p, len, tmp4, tmp2, ymm0, ymm1
+ mov unused_lanes, [state + _unused_lanes]
+ jmp end_fast_copy
+
+return_null:
+ xor job_rax, job_rax
+ jmp return
+
+ align 16
+end_loop:
+ mov job_rax, [lane_data + _job_in_lane]
+ mov unused_lanes, [state + _unused_lanes]
+ mov qword [lane_data + _job_in_lane], 0
+ or dword [job_rax + _status], STS_COMPLETED_HMAC
+ shl unused_lanes, 4
+ or unused_lanes, idx
+ mov [state + _unused_lanes], unused_lanes
+
+ mov p, [job_rax + _auth_tag_output]
+
+ vzeroupper
+
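+	;; write the first 12 bytes unconditionally (HMAC-SHA1-96, the common
+	;; truncated tag); the remaining 8 bytes are copied below only when the
+	;; full 20-byte digest was requested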
+ ; copy 12 bytes
+ mov DWORD(tmp), [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 0*SHA1_DIGEST_ROW_SIZE]
+ mov DWORD(tmp2), [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 1*SHA1_DIGEST_ROW_SIZE]
+ mov DWORD(tmp3), [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 2*SHA1_DIGEST_ROW_SIZE]
+ bswap DWORD(tmp)
+ bswap DWORD(tmp2)
+ bswap DWORD(tmp3)
+ mov [p + 0*SHA1_DIGEST_WORD_SIZE], DWORD(tmp)
+ mov [p + 1*SHA1_DIGEST_WORD_SIZE], DWORD(tmp2)
+ mov [p + 2*SHA1_DIGEST_WORD_SIZE], DWORD(tmp3)
+
+ cmp qword [job_rax + _auth_tag_output_len_in_bytes], 12
+ je clear_ret
+
+ ;; copy remaining 8 bytes to return 20 byte digest
+ mov DWORD(tmp), [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 3*SHA1_DIGEST_ROW_SIZE]
+ mov DWORD(tmp2), [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 4*SHA1_DIGEST_ROW_SIZE]
+ bswap DWORD(tmp)
+ bswap DWORD(tmp2)
+ mov [p + 3*SHA1_DIGEST_WORD_SIZE], DWORD(tmp)
+ mov [p + 4*SHA1_DIGEST_WORD_SIZE], DWORD(tmp2)
+
+clear_ret:
+
+%ifdef SAFE_DATA
+ ;; Clear digest (20B), outer_block (20B) and extra_block (64B) of returned job
+ mov dword [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 0*SHA1_DIGEST_ROW_SIZE], 0
+ mov dword [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 1*SHA1_DIGEST_ROW_SIZE], 0
+ mov dword [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 2*SHA1_DIGEST_ROW_SIZE], 0
+ mov dword [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 3*SHA1_DIGEST_ROW_SIZE], 0
+ mov dword [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 4*SHA1_DIGEST_ROW_SIZE], 0
+
+ vpxor ymm0, ymm0
+ imul lane_data, idx, _HMAC_SHA1_LANE_DATA_size
+ lea lane_data, [state + _ldata + lane_data]
+
+ ;; Clear first 64 bytes of extra_block
+ vmovdqa [lane_data + _extra_block], ymm0
+ vmovdqa [lane_data + _extra_block + 32], ymm0
+
+ ;; Clear first 20 bytes of outer_block
+ vmovdqa [lane_data + _outer_block], xmm0
+ mov dword [lane_data + _outer_block + 16], 0
+%endif
+
+return:
+ DBGPRINTL "---------- exit sha1 submit -----------"
+ mov rbp, [rsp + _gpr_save + 8*0]
+ mov r12, [rsp + _gpr_save + 8*1]
+ mov r13, [rsp + _gpr_save + 8*2]
+ mov r14, [rsp + _gpr_save + 8*3]
+ mov r15, [rsp + _gpr_save + 8*4]
+%ifndef LINUX
+ mov rsi, [rsp + _gpr_save + 8*5]
+ mov rdi, [rsp + _gpr_save + 8*6]
+%endif
+ mov rsp, [rsp + _rsp_save]
+
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/avx2/md5_x8x2_avx2.asm b/src/spdk/intel-ipsec-mb/avx2/md5_x8x2_avx2.asm
new file mode 100644
index 000000000..6d6830a99
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx2/md5_x8x2_avx2.asm
@@ -0,0 +1,820 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+;; code to compute double octal (2 x 8 lanes) MD5 using AVX2
+
+;; Stack must be aligned to 32 bytes before call
+;; Windows clobbers: rax rbx rdx rsi rdi r8 r9 r10 r11 r12 r13 r14 r15
+;; Windows preserves: rcx rbp
+;;
+;; Linux clobbers: rax rbx rcx rdx rsi r8 r9 r10 r11 r12 r13 r14 r15
+;; Linux preserves: rdi rbp
+;;
+;; clobbers ymm0-15
+
+%include "include/os.asm"
+%include "mb_mgr_datastruct.asm"
+%include "include/transpose_avx2.asm"
+
+section .data
+default rel
+align 64
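+;; each RFC 1321 T[i] constant is replicated 8 times (one dword per lane) so
+;; a full ymm of the round constant can be loaded for the 8-wide SIMD
+;; computation; two such 8-lane sets run interleaved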
+MD5_TABLE:
+ dd 0xd76aa478, 0xd76aa478, 0xd76aa478, 0xd76aa478
+ dd 0xd76aa478, 0xd76aa478, 0xd76aa478, 0xd76aa478
+ dd 0xe8c7b756, 0xe8c7b756, 0xe8c7b756, 0xe8c7b756
+ dd 0xe8c7b756, 0xe8c7b756, 0xe8c7b756, 0xe8c7b756
+ dd 0x242070db, 0x242070db, 0x242070db, 0x242070db
+ dd 0x242070db, 0x242070db, 0x242070db, 0x242070db
+ dd 0xc1bdceee, 0xc1bdceee, 0xc1bdceee, 0xc1bdceee
+ dd 0xc1bdceee, 0xc1bdceee, 0xc1bdceee, 0xc1bdceee
+ dd 0xf57c0faf, 0xf57c0faf, 0xf57c0faf, 0xf57c0faf
+ dd 0xf57c0faf, 0xf57c0faf, 0xf57c0faf, 0xf57c0faf
+ dd 0x4787c62a, 0x4787c62a, 0x4787c62a, 0x4787c62a
+ dd 0x4787c62a, 0x4787c62a, 0x4787c62a, 0x4787c62a
+ dd 0xa8304613, 0xa8304613, 0xa8304613, 0xa8304613
+ dd 0xa8304613, 0xa8304613, 0xa8304613, 0xa8304613
+ dd 0xfd469501, 0xfd469501, 0xfd469501, 0xfd469501
+ dd 0xfd469501, 0xfd469501, 0xfd469501, 0xfd469501
+ dd 0x698098d8, 0x698098d8, 0x698098d8, 0x698098d8
+ dd 0x698098d8, 0x698098d8, 0x698098d8, 0x698098d8
+ dd 0x8b44f7af, 0x8b44f7af, 0x8b44f7af, 0x8b44f7af
+ dd 0x8b44f7af, 0x8b44f7af, 0x8b44f7af, 0x8b44f7af
+ dd 0xffff5bb1, 0xffff5bb1, 0xffff5bb1, 0xffff5bb1
+ dd 0xffff5bb1, 0xffff5bb1, 0xffff5bb1, 0xffff5bb1
+ dd 0x895cd7be, 0x895cd7be, 0x895cd7be, 0x895cd7be
+ dd 0x895cd7be, 0x895cd7be, 0x895cd7be, 0x895cd7be
+ dd 0x6b901122, 0x6b901122, 0x6b901122, 0x6b901122
+ dd 0x6b901122, 0x6b901122, 0x6b901122, 0x6b901122
+ dd 0xfd987193, 0xfd987193, 0xfd987193, 0xfd987193
+ dd 0xfd987193, 0xfd987193, 0xfd987193, 0xfd987193
+ dd 0xa679438e, 0xa679438e, 0xa679438e, 0xa679438e
+ dd 0xa679438e, 0xa679438e, 0xa679438e, 0xa679438e
+ dd 0x49b40821, 0x49b40821, 0x49b40821, 0x49b40821
+ dd 0x49b40821, 0x49b40821, 0x49b40821, 0x49b40821
+ dd 0xf61e2562, 0xf61e2562, 0xf61e2562, 0xf61e2562
+ dd 0xf61e2562, 0xf61e2562, 0xf61e2562, 0xf61e2562
+ dd 0xc040b340, 0xc040b340, 0xc040b340, 0xc040b340
+ dd 0xc040b340, 0xc040b340, 0xc040b340, 0xc040b340
+ dd 0x265e5a51, 0x265e5a51, 0x265e5a51, 0x265e5a51
+ dd 0x265e5a51, 0x265e5a51, 0x265e5a51, 0x265e5a51
+ dd 0xe9b6c7aa, 0xe9b6c7aa, 0xe9b6c7aa, 0xe9b6c7aa
+ dd 0xe9b6c7aa, 0xe9b6c7aa, 0xe9b6c7aa, 0xe9b6c7aa
+ dd 0xd62f105d, 0xd62f105d, 0xd62f105d, 0xd62f105d
+ dd 0xd62f105d, 0xd62f105d, 0xd62f105d, 0xd62f105d
+ dd 0x02441453, 0x02441453, 0x02441453, 0x02441453
+ dd 0x02441453, 0x02441453, 0x02441453, 0x02441453
+ dd 0xd8a1e681, 0xd8a1e681, 0xd8a1e681, 0xd8a1e681
+ dd 0xd8a1e681, 0xd8a1e681, 0xd8a1e681, 0xd8a1e681
+ dd 0xe7d3fbc8, 0xe7d3fbc8, 0xe7d3fbc8, 0xe7d3fbc8
+ dd 0xe7d3fbc8, 0xe7d3fbc8, 0xe7d3fbc8, 0xe7d3fbc8
+ dd 0x21e1cde6, 0x21e1cde6, 0x21e1cde6, 0x21e1cde6
+ dd 0x21e1cde6, 0x21e1cde6, 0x21e1cde6, 0x21e1cde6
+ dd 0xc33707d6, 0xc33707d6, 0xc33707d6, 0xc33707d6
+ dd 0xc33707d6, 0xc33707d6, 0xc33707d6, 0xc33707d6
+ dd 0xf4d50d87, 0xf4d50d87, 0xf4d50d87, 0xf4d50d87
+ dd 0xf4d50d87, 0xf4d50d87, 0xf4d50d87, 0xf4d50d87
+ dd 0x455a14ed, 0x455a14ed, 0x455a14ed, 0x455a14ed
+ dd 0x455a14ed, 0x455a14ed, 0x455a14ed, 0x455a14ed
+ dd 0xa9e3e905, 0xa9e3e905, 0xa9e3e905, 0xa9e3e905
+ dd 0xa9e3e905, 0xa9e3e905, 0xa9e3e905, 0xa9e3e905
+ dd 0xfcefa3f8, 0xfcefa3f8, 0xfcefa3f8, 0xfcefa3f8
+ dd 0xfcefa3f8, 0xfcefa3f8, 0xfcefa3f8, 0xfcefa3f8
+ dd 0x676f02d9, 0x676f02d9, 0x676f02d9, 0x676f02d9
+ dd 0x676f02d9, 0x676f02d9, 0x676f02d9, 0x676f02d9
+ dd 0x8d2a4c8a, 0x8d2a4c8a, 0x8d2a4c8a, 0x8d2a4c8a
+ dd 0x8d2a4c8a, 0x8d2a4c8a, 0x8d2a4c8a, 0x8d2a4c8a
+ dd 0xfffa3942, 0xfffa3942, 0xfffa3942, 0xfffa3942
+ dd 0xfffa3942, 0xfffa3942, 0xfffa3942, 0xfffa3942
+ dd 0x8771f681, 0x8771f681, 0x8771f681, 0x8771f681
+ dd 0x8771f681, 0x8771f681, 0x8771f681, 0x8771f681
+ dd 0x6d9d6122, 0x6d9d6122, 0x6d9d6122, 0x6d9d6122
+ dd 0x6d9d6122, 0x6d9d6122, 0x6d9d6122, 0x6d9d6122
+ dd 0xfde5380c, 0xfde5380c, 0xfde5380c, 0xfde5380c
+ dd 0xfde5380c, 0xfde5380c, 0xfde5380c, 0xfde5380c
+ dd 0xa4beea44, 0xa4beea44, 0xa4beea44, 0xa4beea44
+ dd 0xa4beea44, 0xa4beea44, 0xa4beea44, 0xa4beea44
+ dd 0x4bdecfa9, 0x4bdecfa9, 0x4bdecfa9, 0x4bdecfa9
+ dd 0x4bdecfa9, 0x4bdecfa9, 0x4bdecfa9, 0x4bdecfa9
+ dd 0xf6bb4b60, 0xf6bb4b60, 0xf6bb4b60, 0xf6bb4b60
+ dd 0xf6bb4b60, 0xf6bb4b60, 0xf6bb4b60, 0xf6bb4b60
+ dd 0xbebfbc70, 0xbebfbc70, 0xbebfbc70, 0xbebfbc70
+ dd 0xbebfbc70, 0xbebfbc70, 0xbebfbc70, 0xbebfbc70
+ dd 0x289b7ec6, 0x289b7ec6, 0x289b7ec6, 0x289b7ec6
+ dd 0x289b7ec6, 0x289b7ec6, 0x289b7ec6, 0x289b7ec6
+ dd 0xeaa127fa, 0xeaa127fa, 0xeaa127fa, 0xeaa127fa
+ dd 0xeaa127fa, 0xeaa127fa, 0xeaa127fa, 0xeaa127fa
+ dd 0xd4ef3085, 0xd4ef3085, 0xd4ef3085, 0xd4ef3085
+ dd 0xd4ef3085, 0xd4ef3085, 0xd4ef3085, 0xd4ef3085
+ dd 0x04881d05, 0x04881d05, 0x04881d05, 0x04881d05
+ dd 0x04881d05, 0x04881d05, 0x04881d05, 0x04881d05
+ dd 0xd9d4d039, 0xd9d4d039, 0xd9d4d039, 0xd9d4d039
+ dd 0xd9d4d039, 0xd9d4d039, 0xd9d4d039, 0xd9d4d039
+ dd 0xe6db99e5, 0xe6db99e5, 0xe6db99e5, 0xe6db99e5
+ dd 0xe6db99e5, 0xe6db99e5, 0xe6db99e5, 0xe6db99e5
+ dd 0x1fa27cf8, 0x1fa27cf8, 0x1fa27cf8, 0x1fa27cf8
+ dd 0x1fa27cf8, 0x1fa27cf8, 0x1fa27cf8, 0x1fa27cf8
+ dd 0xc4ac5665, 0xc4ac5665, 0xc4ac5665, 0xc4ac5665
+ dd 0xc4ac5665, 0xc4ac5665, 0xc4ac5665, 0xc4ac5665
+ dd 0xf4292244, 0xf4292244, 0xf4292244, 0xf4292244
+ dd 0xf4292244, 0xf4292244, 0xf4292244, 0xf4292244
+ dd 0x432aff97, 0x432aff97, 0x432aff97, 0x432aff97
+ dd 0x432aff97, 0x432aff97, 0x432aff97, 0x432aff97
+ dd 0xab9423a7, 0xab9423a7, 0xab9423a7, 0xab9423a7
+ dd 0xab9423a7, 0xab9423a7, 0xab9423a7, 0xab9423a7
+ dd 0xfc93a039, 0xfc93a039, 0xfc93a039, 0xfc93a039
+ dd 0xfc93a039, 0xfc93a039, 0xfc93a039, 0xfc93a039
+ dd 0x655b59c3, 0x655b59c3, 0x655b59c3, 0x655b59c3
+ dd 0x655b59c3, 0x655b59c3, 0x655b59c3, 0x655b59c3
+ dd 0x8f0ccc92, 0x8f0ccc92, 0x8f0ccc92, 0x8f0ccc92
+ dd 0x8f0ccc92, 0x8f0ccc92, 0x8f0ccc92, 0x8f0ccc92
+ dd 0xffeff47d, 0xffeff47d, 0xffeff47d, 0xffeff47d
+ dd 0xffeff47d, 0xffeff47d, 0xffeff47d, 0xffeff47d
+ dd 0x85845dd1, 0x85845dd1, 0x85845dd1, 0x85845dd1
+ dd 0x85845dd1, 0x85845dd1, 0x85845dd1, 0x85845dd1
+ dd 0x6fa87e4f, 0x6fa87e4f, 0x6fa87e4f, 0x6fa87e4f
+ dd 0x6fa87e4f, 0x6fa87e4f, 0x6fa87e4f, 0x6fa87e4f
+ dd 0xfe2ce6e0, 0xfe2ce6e0, 0xfe2ce6e0, 0xfe2ce6e0
+ dd 0xfe2ce6e0, 0xfe2ce6e0, 0xfe2ce6e0, 0xfe2ce6e0
+ dd 0xa3014314, 0xa3014314, 0xa3014314, 0xa3014314
+ dd 0xa3014314, 0xa3014314, 0xa3014314, 0xa3014314
+ dd 0x4e0811a1, 0x4e0811a1, 0x4e0811a1, 0x4e0811a1
+ dd 0x4e0811a1, 0x4e0811a1, 0x4e0811a1, 0x4e0811a1
+ dd 0xf7537e82, 0xf7537e82, 0xf7537e82, 0xf7537e82
+ dd 0xf7537e82, 0xf7537e82, 0xf7537e82, 0xf7537e82
+ dd 0xbd3af235, 0xbd3af235, 0xbd3af235, 0xbd3af235
+ dd 0xbd3af235, 0xbd3af235, 0xbd3af235, 0xbd3af235
+ dd 0x2ad7d2bb, 0x2ad7d2bb, 0x2ad7d2bb, 0x2ad7d2bb
+ dd 0x2ad7d2bb, 0x2ad7d2bb, 0x2ad7d2bb, 0x2ad7d2bb
+ dd 0xeb86d391, 0xeb86d391, 0xeb86d391, 0xeb86d391
+ dd 0xeb86d391, 0xeb86d391, 0xeb86d391, 0xeb86d391
+ONES: dd 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff
+ dd 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff
+
+section .text
+
+%ifndef LINUX
+ %define arg1 rcx
+ %define arg2 rdx
+ %define reg3 rdi
+ %define reg4 rsi
+%else
+ %define arg1 rdi
+ %define arg2 rsi
+ %define reg3 rcx
+ %define reg4 rdx
+%endif
+
+;; rbp is not clobbered
+
+%define state arg1
+%define num_blks arg2
+
+%define inp0 r8
+%define inp1 r9
+%define inp2 r10
+%define inp3 r11
+%define inp4 r12
+%define inp5 r13
+%define inp6 r14
+%define inp7 r15
+
+;; These are pointers to data block1 and block2 on the stack,
+;; which ping-pong back and forth as blocks are processed
+%define DPTR1 rbx
+%define DPTR2 reg3
+
+%define TBL rax
+%define IDX reg4
+
+;; Transposed Digest Storage
+%define Y_A ymm0
+%define Y_B ymm1
+%define Y_C ymm2
+%define Y_D ymm3
+%define Y_A2 ymm4
+%define Y_B2 ymm5
+%define Y_C2 ymm6
+%define Y_D2 ymm7
+
+;; Temp YMM registers corresponding to the Temp XMM registers
+;; used during the transposition of the digests
+%define Y_KTMP1 ymm12
+%define Y_KTMP2 ymm13
+;; Temporary registers used during MD5 round operations
+%define Y_FUN ymm8
+%define Y_TMP ymm9
+%define Y_FUN2 ymm10
+%define Y_TMP2 ymm11
+
+;; YMM registers used during data fetching.
+;; Data are stored onto the stack after transposition
+%define Y_DAT0 ymm8
+%define Y_DAT1 ymm9
+%define Y_DAT2 ymm10
+%define Y_DAT3 ymm11
+%define Y_DAT4 ymm12
+%define Y_DAT5 ymm13
+%define Y_DAT6 ymm14
+%define Y_DAT7 ymm15
+
+;; Temporary registers used during data transposition
+%define Y_DTMP1 ymm0
+%define Y_DTMP2 ymm1
+
+
+%define RESY resb 32*
+;; Assume stack aligned to 32 bytes before call
+;; Therefore STACK_size mod 32 must be 32-8 = 24
+struc STACK
+_DATA: RESY 2*2*16 ; 2 blocks * 2 sets of lanes * 16 regs
+_DIGEST: RESY 8 ; stores Y_AA-Y_DD, Y_AA2-Y_DD2
+_TMPDIGEST: RESY 2 ; stores Y_AA, Y_BB temporarily
+ resb 24 ; align
+endstruc
+
+
+%define Y_AA rsp + _DIGEST + 32*0
+%define Y_BB rsp + _DIGEST + 32*1
+%define Y_CC rsp + _DIGEST + 32*2
+%define Y_DD rsp + _DIGEST + 32*3
+%define Y_AA2 rsp + _DIGEST + 32*4
+%define Y_BB2 rsp + _DIGEST + 32*5
+%define Y_CC2 rsp + _DIGEST + 32*6
+%define Y_DD2 rsp + _DIGEST + 32*7
+
+;;
+;; MD5 left rotations (number of bits)
+;;
+rot11 equ 7
+rot12 equ 12
+rot13 equ 17
+rot14 equ 22
+rot21 equ 5
+rot22 equ 9
+rot23 equ 14
+rot24 equ 20
+rot31 equ 4
+rot32 equ 11
+rot33 equ 16
+rot34 equ 23
+rot41 equ 6
+rot42 equ 10
+rot43 equ 15
+rot44 equ 21
+
+
+;;
+;; Magic functions defined in RFC 1321
+;;
+; macro MAGIC_F F,X,Y,Z ;; F = ((Z) ^ ((X) & ((Y) ^ (Z))))
+%macro MAGIC_F 4
+%define %%F %1
+%define %%X %2
+%define %%Y %3
+%define %%Z %4
+ vpxor %%F,%%Z, %%Y
+ vpand %%F,%%F,%%X
+ vpxor %%F,%%F,%%Z
+%endmacro
+
+; macro MAGIC_G F,X,Y,Z ;; F = F((Z),(X),(Y))
+%macro MAGIC_G 4
+%define %%F %1
+%define %%X %2
+%define %%Y %3
+%define %%Z %4
+ MAGIC_F %%F,%%Z,%%X,%%Y
+%endmacro
+
+; macro MAGIC_H F,X,Y,Z ;; F = ((X) ^ (Y) ^ (Z))
+%macro MAGIC_H 4
+%define %%F %1
+%define %%X %2
+%define %%Y %3
+%define %%Z %4
+ vpxor %%F,%%Z, %%Y
+ vpxor %%F,%%F, %%X
+%endmacro
+
+; macro MAGIC_I F,X,Y,Z ;; F = ((Y) ^ ((X) | ~(Z)))
+%macro MAGIC_I 4
+%define %%F %1
+%define %%X %2
+%define %%Y %3
+%define %%Z %4
+ vpxor %%F,%%Z,[rel ONES] ; pnot %%F
+ vpor %%F,%%F,%%X
+ vpxor %%F,%%F,%%Y
+%endmacro
+
+; PROLD reg, imm, tmp
+%macro PROLD 3
+%define %%reg %1
+%define %%imm %2
+%define %%tmp %3
+ vpsrld %%tmp, %%reg, (32-%%imm)
+ vpslld %%reg, %%reg, %%imm
+ vpor %%reg, %%reg, %%tmp
+%endmacro
+
+;;
+;; single MD5 step
+;;
+;; A = B + ROL32((A + MAGIC(B,C,D) + data + const), nrot)
+;;
+; macro MD5_STEP MAGIC_FUN, A,B,C,D, A2,B2,C2,D2, FUN, TMP, FUN2, TMP2, data,
+; MD5const, nrot
+%macro MD5_STEP 16
+%define %%MAGIC_FUN %1
+%define %%rA %2
+%define %%rB %3
+%define %%rC %4
+%define %%rD %5
+%define %%rA2 %6
+%define %%rB2 %7
+%define %%rC2 %8
+%define %%rD2 %9
+%define %%FUN %10
+%define %%TMP %11
+%define %%FUN2 %12
+%define %%TMP2 %13
+%define %%data %14
+%define %%MD5const %15
+%define %%nrot %16
+
+ vpaddd %%rA, %%rA, %%MD5const
+ vpaddd %%rA2, %%rA2, %%MD5const
+ vpaddd %%rA, %%rA, [%%data]
+ vpaddd %%rA2, %%rA2, [%%data + 16*32]
+ %%MAGIC_FUN %%FUN, %%rB,%%rC,%%rD
+ %%MAGIC_FUN %%FUN2, %%rB2,%%rC2,%%rD2
+ vpaddd %%rA, %%rA, %%FUN
+ vpaddd %%rA2, %%rA2, %%FUN2
+ PROLD %%rA,%%nrot, %%TMP
+ PROLD %%rA2,%%nrot, %%TMP2
+ vpaddd %%rA, %%rA, %%rB
+ vpaddd %%rA2, %%rA2, %%rB2
+%endmacro
+
+align 32
+
+; void md5_x8x2_avx2(MD5_ARGS *args, UINT64 num_blks)
+; arg 1 : pointer to MD5_ARGS structure
+; arg 2 : number of blocks (>=1)
+
+MKGLOBAL(md5_x8x2_avx2,function,internal)
+md5_x8x2_avx2:
+ sub rsp, STACK_size
+
+ mov DPTR1, rsp
+ lea DPTR2, [rsp + 32*32]
+
+ ;; Load MD5 constant pointer to register
+ lea TBL, [rel MD5_TABLE]
+
+ ; Initialize index for data retrieval
+ xor IDX, IDX
+
+ ;; Fetch Pointers to Data Stream 1 to 8
+ mov inp0,[state + _data_ptr_md5+0*PTR_SZ]
+ mov inp1,[state + _data_ptr_md5+1*PTR_SZ]
+ mov inp2,[state + _data_ptr_md5+2*PTR_SZ]
+ mov inp3,[state + _data_ptr_md5+3*PTR_SZ]
+ mov inp4,[state + _data_ptr_md5+4*PTR_SZ]
+ mov inp5,[state + _data_ptr_md5+5*PTR_SZ]
+ mov inp6,[state + _data_ptr_md5+6*PTR_SZ]
+ mov inp7,[state + _data_ptr_md5+7*PTR_SZ]
+
+%assign I 0
+%rep 2
+ TRANSPOSE8_U32_LOAD8 Y_DAT0, Y_DAT1, Y_DAT2, Y_DAT3, Y_DAT4, Y_DAT5, Y_DAT6, Y_DAT7, \
+ inp0, inp1, inp2, inp3, inp4, inp5, inp6, inp7, IDX+I*32
+
+ TRANSPOSE8_U32 Y_DAT0, Y_DAT1, Y_DAT2, Y_DAT3, Y_DAT4, Y_DAT5, Y_DAT6, Y_DAT7, Y_DTMP1, Y_DTMP2
+ vmovdqa [DPTR1+_DATA+(I*8+0)*32],Y_DAT0
+ vmovdqa [DPTR1+_DATA+(I*8+1)*32],Y_DAT1
+ vmovdqa [DPTR1+_DATA+(I*8+2)*32],Y_DAT2
+ vmovdqa [DPTR1+_DATA+(I*8+3)*32],Y_DAT3
+ vmovdqa [DPTR1+_DATA+(I*8+4)*32],Y_DAT4
+ vmovdqa [DPTR1+_DATA+(I*8+5)*32],Y_DAT5
+ vmovdqa [DPTR1+_DATA+(I*8+6)*32],Y_DAT6
+ vmovdqa [DPTR1+_DATA+(I*8+7)*32],Y_DAT7
+
+%assign I (I+1)
+%endrep
+
+ ;; Fetch Pointers to Data Stream 9 to 16
+ mov inp0,[state + _data_ptr_md5 + 8*8]
+ mov inp1,[state + _data_ptr_md5 + 9*8]
+ mov inp2,[state + _data_ptr_md5 + 10*8]
+ mov inp3,[state + _data_ptr_md5 + 11*8]
+ mov inp4,[state + _data_ptr_md5 + 12*8]
+ mov inp5,[state + _data_ptr_md5 + 13*8]
+ mov inp6,[state + _data_ptr_md5 + 14*8]
+ mov inp7,[state + _data_ptr_md5 + 15*8]
+
+%assign I 0
+%rep 2
+ TRANSPOSE8_U32_LOAD8 Y_DAT0, Y_DAT1, Y_DAT2, Y_DAT3, Y_DAT4, Y_DAT5, Y_DAT6, Y_DAT7, \
+ inp0, inp1, inp2, inp3, inp4, inp5, inp6, inp7, IDX+I*32
+
+ TRANSPOSE8_U32 Y_DAT0, Y_DAT1, Y_DAT2, Y_DAT3, Y_DAT4, Y_DAT5, Y_DAT6, Y_DAT7, Y_DTMP1, Y_DTMP2
+ vmovdqa [DPTR1+_DATA+((I+2)*8+0)*32],Y_DAT0
+ vmovdqa [DPTR1+_DATA+((I+2)*8+1)*32],Y_DAT1
+ vmovdqa [DPTR1+_DATA+((I+2)*8+2)*32],Y_DAT2
+ vmovdqa [DPTR1+_DATA+((I+2)*8+3)*32],Y_DAT3
+ vmovdqa [DPTR1+_DATA+((I+2)*8+4)*32],Y_DAT4
+ vmovdqa [DPTR1+_DATA+((I+2)*8+5)*32],Y_DAT5
+ vmovdqa [DPTR1+_DATA+((I+2)*8+6)*32],Y_DAT6
+ vmovdqa [DPTR1+_DATA+((I+2)*8+7)*32],Y_DAT7
+
+%assign I (I+1)
+%endrep
+ ;; digests are already transposed
+ vmovdqu Y_A,[state + 0 * MD5_DIGEST_ROW_SIZE ]
+ vmovdqu Y_B,[state + 1 * MD5_DIGEST_ROW_SIZE ]
+ vmovdqu Y_C,[state + 2 * MD5_DIGEST_ROW_SIZE ]
+ vmovdqu Y_D,[state + 3 * MD5_DIGEST_ROW_SIZE ]
+
+ ; Load the digest for each stream (9-16)
+ vmovdqu Y_A2,[state + 0 * MD5_DIGEST_ROW_SIZE + 32]
+ vmovdqu Y_B2,[state + 1 * MD5_DIGEST_ROW_SIZE + 32]
+ vmovdqu Y_C2,[state + 2 * MD5_DIGEST_ROW_SIZE + 32]
+ vmovdqu Y_D2,[state + 3 * MD5_DIGEST_ROW_SIZE + 32]
+
+lloop:
+
+ ; save old digests to stack
+ vmovdqa [Y_AA], Y_A
+ vmovdqa [Y_BB], Y_B
+ vmovdqa [Y_CC], Y_C
+ vmovdqa [Y_DD], Y_D
+
+ vmovdqa [Y_AA2], Y_A2
+ vmovdqa [Y_BB2], Y_B2
+ vmovdqa [Y_CC2], Y_C2
+ vmovdqa [Y_DD2], Y_D2
+
+ ;; Increment IDX to point to next data block (64 bytes per block)
+ add IDX, 64
+
+ ;; Update size of remaining blocks to process
+ sub num_blks, 1
+ je lastblock
+
+ ; Perform the 64 rounds of processing ...
+ MD5_STEP MAGIC_F, Y_A,Y_B,Y_C,Y_D, Y_A2,Y_B2,Y_C2,Y_D2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 0*32, [TBL+ 0*32], rot11
+ MD5_STEP MAGIC_F, Y_D,Y_A,Y_B,Y_C, Y_D2,Y_A2,Y_B2,Y_C2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 1*32, [TBL+ 1*32], rot12
+ MD5_STEP MAGIC_F, Y_C,Y_D,Y_A,Y_B, Y_C2,Y_D2,Y_A2,Y_B2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 2*32, [TBL+ 2*32], rot13
+ MD5_STEP MAGIC_F, Y_B,Y_C,Y_D,Y_A, Y_B2,Y_C2,Y_D2,Y_A2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 3*32, [TBL+ 3*32], rot14
+ MD5_STEP MAGIC_F, Y_A,Y_B,Y_C,Y_D, Y_A2,Y_B2,Y_C2,Y_D2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 4*32, [TBL+ 4*32], rot11
+ MD5_STEP MAGIC_F, Y_D,Y_A,Y_B,Y_C, Y_D2,Y_A2,Y_B2,Y_C2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 5*32, [TBL+ 5*32], rot12
+ MD5_STEP MAGIC_F, Y_C,Y_D,Y_A,Y_B, Y_C2,Y_D2,Y_A2,Y_B2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 6*32, [TBL+ 6*32], rot13
+ MD5_STEP MAGIC_F, Y_B,Y_C,Y_D,Y_A, Y_B2,Y_C2,Y_D2,Y_A2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 7*32, [TBL+ 7*32], rot14
+
+
+	;; Fetch Pointers to Data Stream 1 to 8
+ mov inp0,[state + _data_ptr_md5 + 0*8]
+ mov inp1,[state + _data_ptr_md5 + 1*8]
+ mov inp2,[state + _data_ptr_md5 + 2*8]
+ mov inp3,[state + _data_ptr_md5 + 3*8]
+ mov inp4,[state + _data_ptr_md5 + 4*8]
+ mov inp5,[state + _data_ptr_md5 + 5*8]
+ mov inp6,[state + _data_ptr_md5 + 6*8]
+ mov inp7,[state + _data_ptr_md5 + 7*8]
+
+ MD5_STEP MAGIC_F, Y_A,Y_B,Y_C,Y_D, Y_A2,Y_B2,Y_C2,Y_D2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 8*32, [TBL+ 8*32], rot11
+ MD5_STEP MAGIC_F, Y_D,Y_A,Y_B,Y_C, Y_D2,Y_A2,Y_B2,Y_C2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 9*32, [TBL+ 9*32], rot12
+ MD5_STEP MAGIC_F, Y_C,Y_D,Y_A,Y_B, Y_C2,Y_D2,Y_A2,Y_B2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+10*32, [TBL+10*32], rot13
+ MD5_STEP MAGIC_F, Y_B,Y_C,Y_D,Y_A, Y_B2,Y_C2,Y_D2,Y_A2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+11*32, [TBL+11*32], rot14
+ MD5_STEP MAGIC_F, Y_A,Y_B,Y_C,Y_D, Y_A2,Y_B2,Y_C2,Y_D2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+12*32, [TBL+12*32], rot11
+ MD5_STEP MAGIC_F, Y_D,Y_A,Y_B,Y_C, Y_D2,Y_A2,Y_B2,Y_C2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+13*32, [TBL+13*32], rot12
+ MD5_STEP MAGIC_F, Y_C,Y_D,Y_A,Y_B, Y_C2,Y_D2,Y_A2,Y_B2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+14*32, [TBL+14*32], rot13
+ MD5_STEP MAGIC_F, Y_B,Y_C,Y_D,Y_A, Y_B2,Y_C2,Y_D2,Y_A2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+15*32, [TBL+15*32], rot14
+
+%assign I 0
+
+ ; Y_A and Y_B share the same registers with Y_DTMP1 and Y_DTMP2
+ ; Therefore we need to save these to stack and restore after transpose
+ vmovdqa [rsp + _TMPDIGEST + 0*32], Y_A
+ vmovdqa [rsp + _TMPDIGEST + 1*32], Y_B
+
+ TRANSPOSE8_U32_LOAD8 Y_DAT0, Y_DAT1, Y_DAT2, Y_DAT3, Y_DAT4, Y_DAT5, Y_DAT6, Y_DAT7, \
+ inp0, inp1, inp2, inp3, inp4, inp5, inp6, inp7, IDX+I*32
+
+ TRANSPOSE8_U32 Y_DAT0, Y_DAT1, Y_DAT2, Y_DAT3, Y_DAT4, Y_DAT5, Y_DAT6, Y_DAT7, Y_DTMP1, Y_DTMP2
+ vmovdqa [DPTR2+_DATA+(I*8+0)*32],Y_DAT0
+ vmovdqa [DPTR2+_DATA+(I*8+1)*32],Y_DAT1
+ vmovdqa [DPTR2+_DATA+(I*8+2)*32],Y_DAT2
+ vmovdqa [DPTR2+_DATA+(I*8+3)*32],Y_DAT3
+ vmovdqa [DPTR2+_DATA+(I*8+4)*32],Y_DAT4
+ vmovdqa [DPTR2+_DATA+(I*8+5)*32],Y_DAT5
+ vmovdqa [DPTR2+_DATA+(I*8+6)*32],Y_DAT6
+ vmovdqa [DPTR2+_DATA+(I*8+7)*32],Y_DAT7
+
+ ; Restore Y_A and Y_B
+ vmovdqa Y_A, [rsp + _TMPDIGEST + 0*32]
+ vmovdqa Y_B, [rsp + _TMPDIGEST + 1*32]
+
+
+ MD5_STEP MAGIC_G, Y_A,Y_B,Y_C,Y_D, Y_A2,Y_B2,Y_C2,Y_D2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 1*32, [TBL+16*32], rot21
+ MD5_STEP MAGIC_G, Y_D,Y_A,Y_B,Y_C, Y_D2,Y_A2,Y_B2,Y_C2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 6*32, [TBL+17*32], rot22
+ MD5_STEP MAGIC_G, Y_C,Y_D,Y_A,Y_B, Y_C2,Y_D2,Y_A2,Y_B2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+11*32, [TBL+18*32], rot23
+ MD5_STEP MAGIC_G, Y_B,Y_C,Y_D,Y_A, Y_B2,Y_C2,Y_D2,Y_A2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 0*32, [TBL+19*32], rot24
+ MD5_STEP MAGIC_G, Y_A,Y_B,Y_C,Y_D, Y_A2,Y_B2,Y_C2,Y_D2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 5*32, [TBL+20*32], rot21
+ MD5_STEP MAGIC_G, Y_D,Y_A,Y_B,Y_C, Y_D2,Y_A2,Y_B2,Y_C2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+10*32, [TBL+21*32], rot22
+ MD5_STEP MAGIC_G, Y_C,Y_D,Y_A,Y_B, Y_C2,Y_D2,Y_A2,Y_B2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+15*32, [TBL+22*32], rot23
+ MD5_STEP MAGIC_G, Y_B,Y_C,Y_D,Y_A, Y_B2,Y_C2,Y_D2,Y_A2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 4*32, [TBL+23*32], rot24
+ MD5_STEP MAGIC_G, Y_A,Y_B,Y_C,Y_D, Y_A2,Y_B2,Y_C2,Y_D2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 9*32, [TBL+24*32], rot21
+ MD5_STEP MAGIC_G, Y_D,Y_A,Y_B,Y_C, Y_D2,Y_A2,Y_B2,Y_C2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+14*32, [TBL+25*32], rot22
+ MD5_STEP MAGIC_G, Y_C,Y_D,Y_A,Y_B, Y_C2,Y_D2,Y_A2,Y_B2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 3*32, [TBL+26*32], rot23
+ MD5_STEP MAGIC_G, Y_B,Y_C,Y_D,Y_A, Y_B2,Y_C2,Y_D2,Y_A2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 8*32, [TBL+27*32], rot24
+ MD5_STEP MAGIC_G, Y_A,Y_B,Y_C,Y_D, Y_A2,Y_B2,Y_C2,Y_D2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+13*32, [TBL+28*32], rot21
+ MD5_STEP MAGIC_G, Y_D,Y_A,Y_B,Y_C, Y_D2,Y_A2,Y_B2,Y_C2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 2*32, [TBL+29*32], rot22
+ MD5_STEP MAGIC_G, Y_C,Y_D,Y_A,Y_B, Y_C2,Y_D2,Y_A2,Y_B2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 7*32, [TBL+30*32], rot23
+ MD5_STEP MAGIC_G, Y_B,Y_C,Y_D,Y_A, Y_B2,Y_C2,Y_D2,Y_A2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+12*32, [TBL+31*32], rot24
+
+%assign I (I+1)
+
+ ; Y_A and Y_B share the same registers with Y_DTMP1 and Y_DTMP2
+ ; Therefore we need to save these to stack and restore after transpose
+ vmovdqa [rsp + _TMPDIGEST + 0*32], Y_A
+ vmovdqa [rsp + _TMPDIGEST + 1*32], Y_B
+
+ TRANSPOSE8_U32_LOAD8 Y_DAT0, Y_DAT1, Y_DAT2, Y_DAT3, Y_DAT4, Y_DAT5, Y_DAT6, Y_DAT7, \
+ inp0, inp1, inp2, inp3, inp4, inp5, inp6, inp7, IDX+I*32
+
+ TRANSPOSE8_U32 Y_DAT0, Y_DAT1, Y_DAT2, Y_DAT3, Y_DAT4, Y_DAT5, Y_DAT6, Y_DAT7, Y_DTMP1, Y_DTMP2
+ vmovdqa [DPTR2+_DATA+(I*8+0)*32],Y_DAT0
+ vmovdqa [DPTR2+_DATA+(I*8+1)*32],Y_DAT1
+ vmovdqa [DPTR2+_DATA+(I*8+2)*32],Y_DAT2
+ vmovdqa [DPTR2+_DATA+(I*8+3)*32],Y_DAT3
+ vmovdqa [DPTR2+_DATA+(I*8+4)*32],Y_DAT4
+ vmovdqa [DPTR2+_DATA+(I*8+5)*32],Y_DAT5
+ vmovdqa [DPTR2+_DATA+(I*8+6)*32],Y_DAT6
+ vmovdqa [DPTR2+_DATA+(I*8+7)*32],Y_DAT7
+
+ ; Restore Y_A and Y_B
+ vmovdqa Y_A, [rsp + _TMPDIGEST + 0*32]
+ vmovdqa Y_B, [rsp + _TMPDIGEST + 1*32]
+
+ MD5_STEP MAGIC_H, Y_A,Y_B,Y_C,Y_D, Y_A2,Y_B2,Y_C2,Y_D2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 5*32, [TBL+32*32], rot31
+ MD5_STEP MAGIC_H, Y_D,Y_A,Y_B,Y_C, Y_D2,Y_A2,Y_B2,Y_C2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 8*32, [TBL+33*32], rot32
+ MD5_STEP MAGIC_H, Y_C,Y_D,Y_A,Y_B, Y_C2,Y_D2,Y_A2,Y_B2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+11*32, [TBL+34*32], rot33
+ MD5_STEP MAGIC_H, Y_B,Y_C,Y_D,Y_A, Y_B2,Y_C2,Y_D2,Y_A2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+14*32, [TBL+35*32], rot34
+ MD5_STEP MAGIC_H, Y_A,Y_B,Y_C,Y_D, Y_A2,Y_B2,Y_C2,Y_D2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 1*32, [TBL+36*32], rot31
+ MD5_STEP MAGIC_H, Y_D,Y_A,Y_B,Y_C, Y_D2,Y_A2,Y_B2,Y_C2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 4*32, [TBL+37*32], rot32
+ MD5_STEP MAGIC_H, Y_C,Y_D,Y_A,Y_B, Y_C2,Y_D2,Y_A2,Y_B2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 7*32, [TBL+38*32], rot33
+ MD5_STEP MAGIC_H, Y_B,Y_C,Y_D,Y_A, Y_B2,Y_C2,Y_D2,Y_A2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+10*32, [TBL+39*32], rot34
+
+ ;; Fetch Pointers to Data Stream 9 to 16
+ mov inp0,[state + _data_ptr_md5 + 8*8]
+ mov inp1,[state + _data_ptr_md5 + 9*8]
+ mov inp2,[state + _data_ptr_md5 + 10*8]
+ mov inp3,[state + _data_ptr_md5 + 11*8]
+ mov inp4,[state + _data_ptr_md5 + 12*8]
+ mov inp5,[state + _data_ptr_md5 + 13*8]
+ mov inp6,[state + _data_ptr_md5 + 14*8]
+ mov inp7,[state + _data_ptr_md5 + 15*8]
+
+ MD5_STEP MAGIC_H, Y_A,Y_B,Y_C,Y_D, Y_A2,Y_B2,Y_C2,Y_D2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+13*32, [TBL+40*32], rot31
+ MD5_STEP MAGIC_H, Y_D,Y_A,Y_B,Y_C, Y_D2,Y_A2,Y_B2,Y_C2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 0*32, [TBL+41*32], rot32
+ MD5_STEP MAGIC_H, Y_C,Y_D,Y_A,Y_B, Y_C2,Y_D2,Y_A2,Y_B2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 3*32, [TBL+42*32], rot33
+ MD5_STEP MAGIC_H, Y_B,Y_C,Y_D,Y_A, Y_B2,Y_C2,Y_D2,Y_A2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 6*32, [TBL+43*32], rot34
+ MD5_STEP MAGIC_H, Y_A,Y_B,Y_C,Y_D, Y_A2,Y_B2,Y_C2,Y_D2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 9*32, [TBL+44*32], rot31
+ MD5_STEP MAGIC_H, Y_D,Y_A,Y_B,Y_C, Y_D2,Y_A2,Y_B2,Y_C2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+12*32, [TBL+45*32], rot32
+ MD5_STEP MAGIC_H, Y_C,Y_D,Y_A,Y_B, Y_C2,Y_D2,Y_A2,Y_B2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+15*32, [TBL+46*32], rot33
+ MD5_STEP MAGIC_H, Y_B,Y_C,Y_D,Y_A, Y_B2,Y_C2,Y_D2,Y_A2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 2*32, [TBL+47*32], rot34
+
+%assign I 0
+
+ ; Y_A and Y_B share the same registers with Y_DTMP1 and Y_DTMP2
+ ; Therefore we need to save these to stack and restore after transpose
+ vmovdqa [rsp + _TMPDIGEST + 0*32], Y_A
+ vmovdqa [rsp + _TMPDIGEST + 1*32], Y_B
+
+ TRANSPOSE8_U32_LOAD8 Y_DAT0, Y_DAT1, Y_DAT2, Y_DAT3, Y_DAT4, Y_DAT5, Y_DAT6, Y_DAT7, \
+ inp0, inp1, inp2, inp3, inp4, inp5, inp6, inp7, IDX+I*32
+
+ TRANSPOSE8_U32 Y_DAT0, Y_DAT1, Y_DAT2, Y_DAT3, Y_DAT4, Y_DAT5, Y_DAT6, Y_DAT7, Y_DTMP1, Y_DTMP2
+ vmovdqa [DPTR2+_DATA+((I+2)*8+0)*32],Y_DAT0
+ vmovdqa [DPTR2+_DATA+((I+2)*8+1)*32],Y_DAT1
+ vmovdqa [DPTR2+_DATA+((I+2)*8+2)*32],Y_DAT2
+ vmovdqa [DPTR2+_DATA+((I+2)*8+3)*32],Y_DAT3
+ vmovdqa [DPTR2+_DATA+((I+2)*8+4)*32],Y_DAT4
+ vmovdqa [DPTR2+_DATA+((I+2)*8+5)*32],Y_DAT5
+ vmovdqa [DPTR2+_DATA+((I+2)*8+6)*32],Y_DAT6
+ vmovdqa [DPTR2+_DATA+((I+2)*8+7)*32],Y_DAT7
+
+ ; Restore Y_A and Y_B
+ vmovdqa Y_A, [rsp + _TMPDIGEST + 0*32]
+ vmovdqa Y_B, [rsp + _TMPDIGEST + 1*32]
+
+ MD5_STEP MAGIC_I, Y_A,Y_B,Y_C,Y_D, Y_A2,Y_B2,Y_C2,Y_D2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 0*32, [TBL+48*32], rot41
+ MD5_STEP MAGIC_I, Y_D,Y_A,Y_B,Y_C, Y_D2,Y_A2,Y_B2,Y_C2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 7*32, [TBL+49*32], rot42
+ MD5_STEP MAGIC_I, Y_C,Y_D,Y_A,Y_B, Y_C2,Y_D2,Y_A2,Y_B2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+14*32, [TBL+50*32], rot43
+ MD5_STEP MAGIC_I, Y_B,Y_C,Y_D,Y_A, Y_B2,Y_C2,Y_D2,Y_A2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 5*32, [TBL+51*32], rot44
+ MD5_STEP MAGIC_I, Y_A,Y_B,Y_C,Y_D, Y_A2,Y_B2,Y_C2,Y_D2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+12*32, [TBL+52*32], rot41
+ MD5_STEP MAGIC_I, Y_D,Y_A,Y_B,Y_C, Y_D2,Y_A2,Y_B2,Y_C2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 3*32, [TBL+53*32], rot42
+ MD5_STEP MAGIC_I, Y_C,Y_D,Y_A,Y_B, Y_C2,Y_D2,Y_A2,Y_B2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+10*32, [TBL+54*32], rot43
+ MD5_STEP MAGIC_I, Y_B,Y_C,Y_D,Y_A, Y_B2,Y_C2,Y_D2,Y_A2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 1*32, [TBL+55*32], rot44
+ MD5_STEP MAGIC_I, Y_A,Y_B,Y_C,Y_D, Y_A2,Y_B2,Y_C2,Y_D2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 8*32, [TBL+56*32], rot41
+ MD5_STEP MAGIC_I, Y_D,Y_A,Y_B,Y_C, Y_D2,Y_A2,Y_B2,Y_C2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+15*32, [TBL+57*32], rot42
+ MD5_STEP MAGIC_I, Y_C,Y_D,Y_A,Y_B, Y_C2,Y_D2,Y_A2,Y_B2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 6*32, [TBL+58*32], rot43
+ MD5_STEP MAGIC_I, Y_B,Y_C,Y_D,Y_A, Y_B2,Y_C2,Y_D2,Y_A2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+13*32, [TBL+59*32], rot44
+ MD5_STEP MAGIC_I, Y_A,Y_B,Y_C,Y_D, Y_A2,Y_B2,Y_C2,Y_D2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 4*32, [TBL+60*32], rot41
+ MD5_STEP MAGIC_I, Y_D,Y_A,Y_B,Y_C, Y_D2,Y_A2,Y_B2,Y_C2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+11*32, [TBL+61*32], rot42
+ MD5_STEP MAGIC_I, Y_C,Y_D,Y_A,Y_B, Y_C2,Y_D2,Y_A2,Y_B2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 2*32, [TBL+62*32], rot43
+ MD5_STEP MAGIC_I, Y_B,Y_C,Y_D,Y_A, Y_B2,Y_C2,Y_D2,Y_A2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 9*32, [TBL+63*32], rot44
+
+%assign I (I+1)
+
+ ; Y_A and Y_B share the same registers with Y_DTMP1 and Y_DTMP2
+ ; Therefore we need to save these to stack and restore after transpose
+ vmovdqa [rsp + _TMPDIGEST + 0*32], Y_A
+ vmovdqa [rsp + _TMPDIGEST + 1*32], Y_B
+
+ TRANSPOSE8_U32_LOAD8 Y_DAT0, Y_DAT1, Y_DAT2, Y_DAT3, Y_DAT4, Y_DAT5, Y_DAT6, Y_DAT7, \
+ inp0, inp1, inp2, inp3, inp4, inp5, inp6, inp7, IDX+I*32
+
+ TRANSPOSE8_U32 Y_DAT0, Y_DAT1, Y_DAT2, Y_DAT3, Y_DAT4, Y_DAT5, Y_DAT6, Y_DAT7, Y_DTMP1, Y_DTMP2
+ vmovdqa [DPTR2+_DATA+((I+2)*8+0)*32],Y_DAT0
+ vmovdqa [DPTR2+_DATA+((I+2)*8+1)*32],Y_DAT1
+ vmovdqa [DPTR2+_DATA+((I+2)*8+2)*32],Y_DAT2
+ vmovdqa [DPTR2+_DATA+((I+2)*8+3)*32],Y_DAT3
+ vmovdqa [DPTR2+_DATA+((I+2)*8+4)*32],Y_DAT4
+ vmovdqa [DPTR2+_DATA+((I+2)*8+5)*32],Y_DAT5
+ vmovdqa [DPTR2+_DATA+((I+2)*8+6)*32],Y_DAT6
+ vmovdqa [DPTR2+_DATA+((I+2)*8+7)*32],Y_DAT7
+
+ ; Restore Y_A and Y_B
+ vmovdqa Y_A, [rsp + _TMPDIGEST + 0*32]
+ vmovdqa Y_B, [rsp + _TMPDIGEST + 1*32]
+
+ ; Add results to old digest values
+
+ vpaddd Y_A,Y_A,[Y_AA]
+ vpaddd Y_B,Y_B,[Y_BB]
+ vpaddd Y_C,Y_C,[Y_CC]
+ vpaddd Y_D,Y_D,[Y_DD]
+
+ vpaddd Y_A2,Y_A2,[Y_AA2]
+ vpaddd Y_B2,Y_B2,[Y_BB2]
+ vpaddd Y_C2,Y_C2,[Y_CC2]
+ vpaddd Y_D2,Y_D2,[Y_DD2]
+
+ ; Swap DPTR1 and DPTR2
+ xchg DPTR1, DPTR2
+
+ ;; Proceed to processing of next block
+ jmp lloop
+
+lastblock:
+
+ ; Perform the 64 rounds of processing ...
+ MD5_STEP MAGIC_F, Y_A,Y_B,Y_C,Y_D, Y_A2,Y_B2,Y_C2,Y_D2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 0*32, [TBL+ 0*32], rot11
+ MD5_STEP MAGIC_F, Y_D,Y_A,Y_B,Y_C, Y_D2,Y_A2,Y_B2,Y_C2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 1*32, [TBL+ 1*32], rot12
+ MD5_STEP MAGIC_F, Y_C,Y_D,Y_A,Y_B, Y_C2,Y_D2,Y_A2,Y_B2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 2*32, [TBL+ 2*32], rot13
+ MD5_STEP MAGIC_F, Y_B,Y_C,Y_D,Y_A, Y_B2,Y_C2,Y_D2,Y_A2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 3*32, [TBL+ 3*32], rot14
+ MD5_STEP MAGIC_F, Y_A,Y_B,Y_C,Y_D, Y_A2,Y_B2,Y_C2,Y_D2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 4*32, [TBL+ 4*32], rot11
+ MD5_STEP MAGIC_F, Y_D,Y_A,Y_B,Y_C, Y_D2,Y_A2,Y_B2,Y_C2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 5*32, [TBL+ 5*32], rot12
+ MD5_STEP MAGIC_F, Y_C,Y_D,Y_A,Y_B, Y_C2,Y_D2,Y_A2,Y_B2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 6*32, [TBL+ 6*32], rot13
+ MD5_STEP MAGIC_F, Y_B,Y_C,Y_D,Y_A, Y_B2,Y_C2,Y_D2,Y_A2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 7*32, [TBL+ 7*32], rot14
+ MD5_STEP MAGIC_F, Y_A,Y_B,Y_C,Y_D, Y_A2,Y_B2,Y_C2,Y_D2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 8*32, [TBL+ 8*32], rot11
+ MD5_STEP MAGIC_F, Y_D,Y_A,Y_B,Y_C, Y_D2,Y_A2,Y_B2,Y_C2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 9*32, [TBL+ 9*32], rot12
+ MD5_STEP MAGIC_F, Y_C,Y_D,Y_A,Y_B, Y_C2,Y_D2,Y_A2,Y_B2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+10*32, [TBL+10*32], rot13
+ MD5_STEP MAGIC_F, Y_B,Y_C,Y_D,Y_A, Y_B2,Y_C2,Y_D2,Y_A2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+11*32, [TBL+11*32], rot14
+ MD5_STEP MAGIC_F, Y_A,Y_B,Y_C,Y_D, Y_A2,Y_B2,Y_C2,Y_D2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+12*32, [TBL+12*32], rot11
+ MD5_STEP MAGIC_F, Y_D,Y_A,Y_B,Y_C, Y_D2,Y_A2,Y_B2,Y_C2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+13*32, [TBL+13*32], rot12
+ MD5_STEP MAGIC_F, Y_C,Y_D,Y_A,Y_B, Y_C2,Y_D2,Y_A2,Y_B2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+14*32, [TBL+14*32], rot13
+ MD5_STEP MAGIC_F, Y_B,Y_C,Y_D,Y_A, Y_B2,Y_C2,Y_D2,Y_A2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+15*32, [TBL+15*32], rot14
+
+ MD5_STEP MAGIC_G, Y_A,Y_B,Y_C,Y_D, Y_A2,Y_B2,Y_C2,Y_D2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 1*32, [TBL+16*32], rot21
+ MD5_STEP MAGIC_G, Y_D,Y_A,Y_B,Y_C, Y_D2,Y_A2,Y_B2,Y_C2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 6*32, [TBL+17*32], rot22
+ MD5_STEP MAGIC_G, Y_C,Y_D,Y_A,Y_B, Y_C2,Y_D2,Y_A2,Y_B2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+11*32, [TBL+18*32], rot23
+ MD5_STEP MAGIC_G, Y_B,Y_C,Y_D,Y_A, Y_B2,Y_C2,Y_D2,Y_A2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 0*32, [TBL+19*32], rot24
+ MD5_STEP MAGIC_G, Y_A,Y_B,Y_C,Y_D, Y_A2,Y_B2,Y_C2,Y_D2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 5*32, [TBL+20*32], rot21
+ MD5_STEP MAGIC_G, Y_D,Y_A,Y_B,Y_C, Y_D2,Y_A2,Y_B2,Y_C2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+10*32, [TBL+21*32], rot22
+ MD5_STEP MAGIC_G, Y_C,Y_D,Y_A,Y_B, Y_C2,Y_D2,Y_A2,Y_B2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+15*32, [TBL+22*32], rot23
+ MD5_STEP MAGIC_G, Y_B,Y_C,Y_D,Y_A, Y_B2,Y_C2,Y_D2,Y_A2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 4*32, [TBL+23*32], rot24
+ MD5_STEP MAGIC_G, Y_A,Y_B,Y_C,Y_D, Y_A2,Y_B2,Y_C2,Y_D2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 9*32, [TBL+24*32], rot21
+ MD5_STEP MAGIC_G, Y_D,Y_A,Y_B,Y_C, Y_D2,Y_A2,Y_B2,Y_C2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+14*32, [TBL+25*32], rot22
+ MD5_STEP MAGIC_G, Y_C,Y_D,Y_A,Y_B, Y_C2,Y_D2,Y_A2,Y_B2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 3*32, [TBL+26*32], rot23
+ MD5_STEP MAGIC_G, Y_B,Y_C,Y_D,Y_A, Y_B2,Y_C2,Y_D2,Y_A2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 8*32, [TBL+27*32], rot24
+ MD5_STEP MAGIC_G, Y_A,Y_B,Y_C,Y_D, Y_A2,Y_B2,Y_C2,Y_D2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+13*32, [TBL+28*32], rot21
+ MD5_STEP MAGIC_G, Y_D,Y_A,Y_B,Y_C, Y_D2,Y_A2,Y_B2,Y_C2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 2*32, [TBL+29*32], rot22
+ MD5_STEP MAGIC_G, Y_C,Y_D,Y_A,Y_B, Y_C2,Y_D2,Y_A2,Y_B2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 7*32, [TBL+30*32], rot23
+ MD5_STEP MAGIC_G, Y_B,Y_C,Y_D,Y_A, Y_B2,Y_C2,Y_D2,Y_A2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+12*32, [TBL+31*32], rot24
+
+ MD5_STEP MAGIC_H, Y_A,Y_B,Y_C,Y_D, Y_A2,Y_B2,Y_C2,Y_D2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 5*32, [TBL+32*32], rot31
+ MD5_STEP MAGIC_H, Y_D,Y_A,Y_B,Y_C, Y_D2,Y_A2,Y_B2,Y_C2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 8*32, [TBL+33*32], rot32
+ MD5_STEP MAGIC_H, Y_C,Y_D,Y_A,Y_B, Y_C2,Y_D2,Y_A2,Y_B2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+11*32, [TBL+34*32], rot33
+ MD5_STEP MAGIC_H, Y_B,Y_C,Y_D,Y_A, Y_B2,Y_C2,Y_D2,Y_A2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+14*32, [TBL+35*32], rot34
+ MD5_STEP MAGIC_H, Y_A,Y_B,Y_C,Y_D, Y_A2,Y_B2,Y_C2,Y_D2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 1*32, [TBL+36*32], rot31
+ MD5_STEP MAGIC_H, Y_D,Y_A,Y_B,Y_C, Y_D2,Y_A2,Y_B2,Y_C2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 4*32, [TBL+37*32], rot32
+ MD5_STEP MAGIC_H, Y_C,Y_D,Y_A,Y_B, Y_C2,Y_D2,Y_A2,Y_B2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 7*32, [TBL+38*32], rot33
+ MD5_STEP MAGIC_H, Y_B,Y_C,Y_D,Y_A, Y_B2,Y_C2,Y_D2,Y_A2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+10*32, [TBL+39*32], rot34
+ MD5_STEP MAGIC_H, Y_A,Y_B,Y_C,Y_D, Y_A2,Y_B2,Y_C2,Y_D2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+13*32, [TBL+40*32], rot31
+ MD5_STEP MAGIC_H, Y_D,Y_A,Y_B,Y_C, Y_D2,Y_A2,Y_B2,Y_C2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 0*32, [TBL+41*32], rot32
+ MD5_STEP MAGIC_H, Y_C,Y_D,Y_A,Y_B, Y_C2,Y_D2,Y_A2,Y_B2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 3*32, [TBL+42*32], rot33
+ MD5_STEP MAGIC_H, Y_B,Y_C,Y_D,Y_A, Y_B2,Y_C2,Y_D2,Y_A2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 6*32, [TBL+43*32], rot34
+ MD5_STEP MAGIC_H, Y_A,Y_B,Y_C,Y_D, Y_A2,Y_B2,Y_C2,Y_D2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 9*32, [TBL+44*32], rot31
+ MD5_STEP MAGIC_H, Y_D,Y_A,Y_B,Y_C, Y_D2,Y_A2,Y_B2,Y_C2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+12*32, [TBL+45*32], rot32
+ MD5_STEP MAGIC_H, Y_C,Y_D,Y_A,Y_B, Y_C2,Y_D2,Y_A2,Y_B2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+15*32, [TBL+46*32], rot33
+ MD5_STEP MAGIC_H, Y_B,Y_C,Y_D,Y_A, Y_B2,Y_C2,Y_D2,Y_A2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 2*32, [TBL+47*32], rot34
+
+ MD5_STEP MAGIC_I, Y_A,Y_B,Y_C,Y_D, Y_A2,Y_B2,Y_C2,Y_D2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 0*32, [TBL+48*32], rot41
+ MD5_STEP MAGIC_I, Y_D,Y_A,Y_B,Y_C, Y_D2,Y_A2,Y_B2,Y_C2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 7*32, [TBL+49*32], rot42
+ MD5_STEP MAGIC_I, Y_C,Y_D,Y_A,Y_B, Y_C2,Y_D2,Y_A2,Y_B2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+14*32, [TBL+50*32], rot43
+ MD5_STEP MAGIC_I, Y_B,Y_C,Y_D,Y_A, Y_B2,Y_C2,Y_D2,Y_A2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 5*32, [TBL+51*32], rot44
+ MD5_STEP MAGIC_I, Y_A,Y_B,Y_C,Y_D, Y_A2,Y_B2,Y_C2,Y_D2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+12*32, [TBL+52*32], rot41
+ MD5_STEP MAGIC_I, Y_D,Y_A,Y_B,Y_C, Y_D2,Y_A2,Y_B2,Y_C2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 3*32, [TBL+53*32], rot42
+ MD5_STEP MAGIC_I, Y_C,Y_D,Y_A,Y_B, Y_C2,Y_D2,Y_A2,Y_B2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+10*32, [TBL+54*32], rot43
+ MD5_STEP MAGIC_I, Y_B,Y_C,Y_D,Y_A, Y_B2,Y_C2,Y_D2,Y_A2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 1*32, [TBL+55*32], rot44
+ MD5_STEP MAGIC_I, Y_A,Y_B,Y_C,Y_D, Y_A2,Y_B2,Y_C2,Y_D2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 8*32, [TBL+56*32], rot41
+ MD5_STEP MAGIC_I, Y_D,Y_A,Y_B,Y_C, Y_D2,Y_A2,Y_B2,Y_C2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+15*32, [TBL+57*32], rot42
+ MD5_STEP MAGIC_I, Y_C,Y_D,Y_A,Y_B, Y_C2,Y_D2,Y_A2,Y_B2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 6*32, [TBL+58*32], rot43
+ MD5_STEP MAGIC_I, Y_B,Y_C,Y_D,Y_A, Y_B2,Y_C2,Y_D2,Y_A2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+13*32, [TBL+59*32], rot44
+ MD5_STEP MAGIC_I, Y_A,Y_B,Y_C,Y_D, Y_A2,Y_B2,Y_C2,Y_D2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 4*32, [TBL+60*32], rot41
+ MD5_STEP MAGIC_I, Y_D,Y_A,Y_B,Y_C, Y_D2,Y_A2,Y_B2,Y_C2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+11*32, [TBL+61*32], rot42
+ MD5_STEP MAGIC_I, Y_C,Y_D,Y_A,Y_B, Y_C2,Y_D2,Y_A2,Y_B2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 2*32, [TBL+62*32], rot43
+ MD5_STEP MAGIC_I, Y_B,Y_C,Y_D,Y_A, Y_B2,Y_C2,Y_D2,Y_A2, Y_FUN,Y_TMP, Y_FUN2,Y_TMP2, DPTR1+ 9*32, [TBL+63*32], rot44
+
+	;; update the data pointers
+%assign I 0
+%rep 8
+ mov inp0, [state + _data_ptr_md5 + (2*I)*8]
+ mov inp1, [state + _data_ptr_md5 + (2*I +1)*8]
+ add inp0, IDX
+ add inp1, IDX
+ mov [state + _data_ptr_md5 + (2*I)*8], inp0
+ mov [state + _data_ptr_md5 + (2*I+1)*8], inp1
+%assign I (I+1)
+%endrep
+
+ vpaddd Y_A,Y_A,[Y_AA]
+ vpaddd Y_B,Y_B,[Y_BB]
+ vpaddd Y_C,Y_C,[Y_CC]
+ vpaddd Y_D,Y_D,[Y_DD]
+
+ vpaddd Y_A2,Y_A2,[Y_AA2]
+ vpaddd Y_B2,Y_B2,[Y_BB2]
+ vpaddd Y_C2,Y_C2,[Y_CC2]
+ vpaddd Y_D2,Y_D2,[Y_DD2]
+
+ vmovdqu [state + 0*MD5_DIGEST_ROW_SIZE ],Y_A
+ vmovdqu [state + 1*MD5_DIGEST_ROW_SIZE ],Y_B
+ vmovdqu [state + 2*MD5_DIGEST_ROW_SIZE ],Y_C
+ vmovdqu [state + 3*MD5_DIGEST_ROW_SIZE ],Y_D
+
+
+ vmovdqu [state + 0*MD5_DIGEST_ROW_SIZE + 32 ],Y_A2 ;; 32 is YMM width
+ vmovdqu [state + 1*MD5_DIGEST_ROW_SIZE + 32 ],Y_B2
+ vmovdqu [state + 2*MD5_DIGEST_ROW_SIZE + 32 ],Y_C2
+ vmovdqu [state + 3*MD5_DIGEST_ROW_SIZE + 32 ],Y_D2
+
+ ;;;;;;;;;;;;;;;;
+ ;; Postamble
+
+ ;; Clear stack frame ((64+8+2)*32 bytes)
+%ifdef SAFE_DATA
+ vpxor ymm0, ymm0
+%assign i 0
+%rep (2*2*16+8+2)
+ vmovdqa [rsp + i*32], ymm0
+%assign i (i+1)
+%endrep
+%endif
+
+ add rsp, STACK_size
+
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
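
For reference, the MAGIC_F/G/H/I and MD5_STEP macros above are a lane-parallel form of the plain RFC 1321 step; each per-round constant in MD5_TABLE is simply replicated across all eight dwords of a YMM register so a single vpaddd feeds every lane. A minimal scalar sketch in C for comparison (helper names here are ours, for illustration only; the assembly applies the same update to 16 independent lanes, two groups of eight per YMM register):

    #include <stdint.h>

    static inline uint32_t rol32(uint32_t x, unsigned n)
    {
            return (x << n) | (x >> (32 - n));
    }

    /* RFC 1321 auxiliary functions, in the same algebraic form as MAGIC_F/G/H/I */
    static inline uint32_t md5_f(uint32_t x, uint32_t y, uint32_t z)
    {
            return z ^ (x & (y ^ z));       /* same value as (x & y) | (~x & z) */
    }

    static inline uint32_t md5_g(uint32_t x, uint32_t y, uint32_t z)
    {
            return md5_f(z, x, y);          /* (x & z) | (y & ~z) */
    }

    static inline uint32_t md5_h(uint32_t x, uint32_t y, uint32_t z)
    {
            return x ^ y ^ z;
    }

    static inline uint32_t md5_i(uint32_t x, uint32_t y, uint32_t z)
    {
            return y ^ (x | ~z);
    }

    /* One MD5 step: A = B + ROL32(A + MAGIC(B,C,D) + data + const, nrot) */
    static inline uint32_t md5_step(uint32_t a, uint32_t b, uint32_t c, uint32_t d,
                                    uint32_t data, uint32_t k, unsigned nrot,
                                    uint32_t (*magic)(uint32_t, uint32_t, uint32_t))
    {
            return b + rol32(a + magic(b, c, d) + data + k, nrot);
    }
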
diff --git a/src/spdk/intel-ipsec-mb/avx2/sha1_x8_avx2.asm b/src/spdk/intel-ipsec-mb/avx2/sha1_x8_avx2.asm
new file mode 100644
index 000000000..d614e1b0e
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx2/sha1_x8_avx2.asm
@@ -0,0 +1,466 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+;; Stack must be aligned to 32 bytes before call
+;; Windows clobbers: rax rdx r8 r9 r10 r11 r12 r13 r14 r15
+;; Windows preserves: rbx rcx rsi rdi rbp
+;;
+;; Linux clobbers: rax rdx rsi r9 r10 r11 r12 r13 r14 r15
+;; Linux preserves: rbx rcx rdi rbp r8
+;;
+;; clobbers ymm0-15
+
+%include "include/os.asm"
+;%define DO_DBGPRINT
+%include "include/dbgprint.asm"
+%include "mb_mgr_datastruct.asm"
+%include "include/transpose_avx2.asm"
+
+section .data
+default rel
+align 32
+PSHUFFLE_BYTE_FLIP_MASK: ;ddq 0x0c0d0e0f08090a0b0405060700010203
+ ;ddq 0x0c0d0e0f08090a0b0405060700010203
+ dq 0x0405060700010203, 0x0c0d0e0f08090a0b
+ dq 0x0405060700010203, 0x0c0d0e0f08090a0b
+K00_19: ;ddq 0x5A8279995A8279995A8279995A827999
+ ;ddq 0x5A8279995A8279995A8279995A827999
+ dq 0x5A8279995A827999, 0x5A8279995A827999
+ dq 0x5A8279995A827999, 0x5A8279995A827999
+K20_39: ;ddq 0x6ED9EBA16ED9EBA16ED9EBA16ED9EBA1
+ ;ddq 0x6ED9EBA16ED9EBA16ED9EBA16ED9EBA1
+ dq 0x6ED9EBA16ED9EBA1, 0x6ED9EBA16ED9EBA1
+ dq 0x6ED9EBA16ED9EBA1, 0x6ED9EBA16ED9EBA1
+K40_59: ;ddq 0x8F1BBCDC8F1BBCDC8F1BBCDC8F1BBCDC
+ ;ddq 0x8F1BBCDC8F1BBCDC8F1BBCDC8F1BBCDC
+ dq 0x8F1BBCDC8F1BBCDC, 0x8F1BBCDC8F1BBCDC
+ dq 0x8F1BBCDC8F1BBCDC, 0x8F1BBCDC8F1BBCDC
+K60_79: ;ddq 0xCA62C1D6CA62C1D6CA62C1D6CA62C1D6
+ ;ddq 0xCA62C1D6CA62C1D6CA62C1D6CA62C1D6
+ dq 0xCA62C1D6CA62C1D6, 0xCA62C1D6CA62C1D6
+ dq 0xCA62C1D6CA62C1D6, 0xCA62C1D6CA62C1D6
+
+section .text
+
+%ifdef LINUX
+%define arg1 rdi
+%define arg2 rsi
+%define reg3 rdx
+%else
+%define arg1 rcx
+%define arg2 rdx
+%define reg3 r8
+%endif
+
+%define state arg1
+%define num_blks arg2
+
+%define inp0 r9
+%define inp1 r10
+%define inp2 r11
+%define inp3 r12
+%define inp4 r13
+%define inp5 r14
+%define inp6 r15
+%define inp7 reg3
+
+%define IDX rax
+
+; ymm0 A
+; ymm1 B
+; ymm2 C
+; ymm3 D
+; ymm4 E
+; ymm5 F AA
+; ymm6 T0 BB
+; ymm7 T1 CC
+; ymm8 T2 DD
+; ymm9 T3 EE
+; ymm10 T4 TMP
+; ymm11 T5 FUN
+; ymm12 T6 K
+; ymm13 T7 W14
+; ymm14 T8 W15
+; ymm15 T9 W16
+
+%define A ymm0
+%define B ymm1
+%define C ymm2
+%define D ymm3
+%define E ymm4
+
+%define F ymm5
+%define T0 ymm6
+%define T1 ymm7
+%define T2 ymm8
+%define T3 ymm9
+%define T4 ymm10
+%define T5 ymm11
+%define T6 ymm12
+%define T7 ymm13
+%define T8 ymm14
+%define T9 ymm15
+
+%define AA ymm5
+%define BB ymm6
+%define CC ymm7
+%define DD ymm8
+%define EE ymm9
+%define TMP ymm10
+%define FUN ymm11
+%define K ymm12
+%define W14 ymm13
+%define W15 ymm14
+%define W16 ymm15
+
+
+;; Assume stack aligned to 32 bytes before call
+;; Therefore FRAMESZ mod 32 must be 32-8 = 24
+%define FRAMESZ 32*16 + 24
+
+%define VMOVPS vmovups
+
+;;
+;; Magic functions defined in FIPS 180-1
+;;
+;MAGIC_F0 MACRO regF:REQ,regB:REQ,regC:REQ,regD:REQ,regT:REQ ;; (D ^ (B & (C ^ D)))
+%macro MAGIC_F0 5
+%define %%regF %1
+%define %%regB %2
+%define %%regC %3
+%define %%regD %4
+%define %%regT %5
+ ;vmovdqa %%regF,%%regC
+ vpxor %%regF, %%regC,%%regD
+ vpand %%regF, %%regF,%%regB
+ vpxor %%regF, %%regF,%%regD
+%endmacro
+
+;MAGIC_F1 MACRO regF:REQ,regB:REQ,regC:REQ,regD:REQ,regT:REQ ;; (B ^ C ^ D)
+%macro MAGIC_F1 5
+%define %%regF %1
+%define %%regB %2
+%define %%regC %3
+%define %%regD %4
+%define %%regT %5
+ ;vmovdqa %%regF,%%regD
+ vpxor %%regF,%%regD,%%regC
+ vpxor %%regF,%%regF,%%regB
+%endmacro
+
+
+
+;MAGIC_F2 MACRO regF:REQ,regB:REQ,regC:REQ,regD:REQ,regT:REQ ;; ((B & C) | (B & D) | (C & D))
+%macro MAGIC_F2 5
+%define %%regF %1
+%define %%regB %2
+%define %%regC %3
+%define %%regD %4
+%define %%regT %5
+ ;vmovdqa %%regF,%%regB
+ ;vmovdqa %%regT,%%regB
+ vpor %%regF,%%regB,%%regC
+ vpand %%regT,%%regB,%%regC
+ vpand %%regF,%%regF,%%regD
+ vpor %%regF,%%regF,%%regT
+%endmacro
+
+;MAGIC_F3 MACRO regF:REQ,regB:REQ,regC:REQ,regD:REQ,regT:REQ ;; (B ^ C ^ D) - same as MAGIC_F1
+%macro MAGIC_F3 5
+%define %%regF %1
+%define %%regB %2
+%define %%regC %3
+%define %%regD %4
+%define %%regT %5
+ MAGIC_F1 %%regF,%%regB,%%regC,%%regD,%%regT
+%endmacro
+
+; PROLD reg, imm, tmp
+%macro PROLD 3
+%define %%reg %1
+%define %%imm %2
+%define %%tmp %3
+ ;vmovdqa %%tmp, %%reg
+ vpsrld %%tmp, %%reg, (32-%%imm)
+ vpslld %%reg, %%reg, %%imm
+ vpor %%reg, %%reg, %%tmp
+%endmacro
+
+; PROLD_nd reg, imm, tmp, src
+%macro PROLD_nd 4
+%define %%reg %1
+%define %%imm %2
+%define %%tmp %3
+%define %%src %4
+ ;vmovdqa %%tmp, %%reg
+ vpsrld %%tmp, %%src, (32-%%imm)
+ vpslld %%reg, %%src, %%imm
+ vpor %%reg, %%reg, %%tmp
+%endmacro
+
+%macro SHA1_STEP_00_15 10
+%define %%regA %1
+%define %%regB %2
+%define %%regC %3
+%define %%regD %4
+%define %%regE %5
+%define %%regT %6
+%define %%regF %7
+%define %%memW %8
+%define %%immCNT %9
+%define %%MAGIC %10
+ vpaddd %%regE, %%regE,%%immCNT
+ vpaddd %%regE, %%regE,[rsp + (%%memW * 32)]
+ ;vmovdqa %%regT,%%regA
+ PROLD_nd %%regT,5, %%regF,%%regA
+ vpaddd %%regE, %%regE,%%regT
+ %%MAGIC %%regF,%%regB,%%regC,%%regD,%%regT ;; FUN = MAGIC_Fi(B,C,D)
+ PROLD %%regB,30, %%regT
+ vpaddd %%regE, %%regE,%%regF
+%endmacro
+
+%macro SHA1_STEP_16_79 10
+%define %%regA %1
+%define %%regB %2
+%define %%regC %3
+%define %%regD %4
+%define %%regE %5
+%define %%regT %6
+%define %%regF %7
+%define %%memW %8
+%define %%immCNT %9
+%define %%MAGIC %10
+ vpaddd %%regE, %%regE,%%immCNT
+
+ vmovdqa W14, [rsp + ((%%memW - 14) & 15) * 32]
+ vpxor W16, W16, W14
+ vpxor W16, W16, [rsp + ((%%memW - 8) & 15) * 32]
+ vpxor W16, W16, [rsp + ((%%memW - 3) & 15) * 32]
+
+ ;vmovdqa %%regF, W16
+ vpsrld %%regF, W16, (32-1)
+ vpslld W16, W16, 1
+ vpor %%regF, %%regF, W16
+ ROTATE_W
+
+ vmovdqa [rsp + ((%%memW - 0) & 15) * 32],%%regF
+ vpaddd %%regE, %%regE,%%regF
+
+ ;vmovdqa %%regT,%%regA
+ PROLD_nd %%regT,5, %%regF, %%regA
+ vpaddd %%regE, %%regE,%%regT
+ %%MAGIC %%regF,%%regB,%%regC,%%regD,%%regT ;; FUN = MAGIC_Fi(B,C,D)
+ PROLD %%regB,30, %%regT
+ vpaddd %%regE,%%regE,%%regF
+%endmacro
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+%macro ROTATE_ARGS 0
+%xdefine TMP_ E
+%xdefine E D
+%xdefine D C
+%xdefine C B
+%xdefine B A
+%xdefine A TMP_
+%endm
+
+%macro ROTATE_W 0
+%xdefine TMP_ W16
+%xdefine W16 W15
+%xdefine W15 W14
+%xdefine W14 TMP_
+%endm
+
+align 32
+
+; void sha1_x8_avx2(void *state, int num_blks)
+; arg 1 : rcx : pointer to SHA1 args structure (8 input data pointers)
+; arg 2 : rdx : size (in blocks) ;; assumed to be >= 1
+MKGLOBAL(sha1_x8_avx2,function,internal)
+sha1_x8_avx2:
+ sub rsp, FRAMESZ
+
+ ;; Initialize digests
+ vmovdqu A, [state + 0*SHA1_DIGEST_ROW_SIZE]
+ vmovdqu B, [state + 1*SHA1_DIGEST_ROW_SIZE]
+ vmovdqu C, [state + 2*SHA1_DIGEST_ROW_SIZE]
+ vmovdqu D, [state + 3*SHA1_DIGEST_ROW_SIZE]
+ vmovdqu E, [state + 4*SHA1_DIGEST_ROW_SIZE]
+ DBGPRINTL_YMM "Sha1-AVX2 incoming transposed digest", A, B, C, D, E
+
+ ;; transpose input onto stack
+ mov inp0,[state+_data_ptr_sha1+0*PTR_SZ]
+ mov inp1,[state+_data_ptr_sha1+1*PTR_SZ]
+ mov inp2,[state+_data_ptr_sha1+2*PTR_SZ]
+ mov inp3,[state+_data_ptr_sha1+3*PTR_SZ]
+ mov inp4,[state+_data_ptr_sha1+4*PTR_SZ]
+ mov inp5,[state+_data_ptr_sha1+5*PTR_SZ]
+ mov inp6,[state+_data_ptr_sha1+6*PTR_SZ]
+ mov inp7,[state+_data_ptr_sha1+7*PTR_SZ]
+
+ xor IDX, IDX
+lloop:
+ vmovdqa F, [rel PSHUFFLE_BYTE_FLIP_MASK]
+%assign I 0
+%rep 2
+ TRANSPOSE8_U32_LOAD8 T0, T1, T2, T3, T4, T5, T6, T7, \
+ inp0, inp1, inp2, inp3, inp4, inp5, \
+ inp6, inp7, IDX
+
+ TRANSPOSE8_U32 T0, T1, T2, T3, T4, T5, T6, T7, T8, T9
+ DBGPRINTL_YMM "Sha1-AVX2 incoming transposed input", T0, T1, T2, T3, T4, T5, T6, T7, T8, T9
+ vpshufb T0, T0, F
+ vmovdqa [rsp+(I*8+0)*32],T0
+ vpshufb T1, T1, F
+ vmovdqa [rsp+(I*8+1)*32],T1
+ vpshufb T2, T2, F
+ vmovdqa [rsp+(I*8+2)*32],T2
+ vpshufb T3, T3, F
+ vmovdqa [rsp+(I*8+3)*32],T3
+ vpshufb T4, T4, F
+ vmovdqa [rsp+(I*8+4)*32],T4
+ vpshufb T5, T5, F
+ vmovdqa [rsp+(I*8+5)*32],T5
+ vpshufb T6, T6, F
+ vmovdqa [rsp+(I*8+6)*32],T6
+ vpshufb T7, T7, F
+ vmovdqa [rsp+(I*8+7)*32],T7
+ add IDX, 32
+%assign I (I+1)
+%endrep
+
+
+ ; save old digests
+ vmovdqa AA, A
+ vmovdqa BB, B
+ vmovdqa CC, C
+ vmovdqa DD, D
+ vmovdqa EE, E
+
+;;
+;; perform 0-79 steps
+;;
+ vmovdqa K, [rel K00_19]
+;; do rounds 0...15
+%assign I 0
+%rep 16
+ SHA1_STEP_00_15 A,B,C,D,E, TMP,FUN, I, K, MAGIC_F0
+ ROTATE_ARGS
+%assign I (I+1)
+%endrep
+
+;; do rounds 16...19
+ vmovdqa W16, [rsp + ((16 - 16) & 15) * 32]
+ vmovdqa W15, [rsp + ((16 - 15) & 15) * 32]
+%rep 4
+ SHA1_STEP_16_79 A,B,C,D,E, TMP,FUN, I, K, MAGIC_F0
+ ROTATE_ARGS
+%assign I (I+1)
+%endrep
+
+;; do rounds 20...39
+ vmovdqa K, [rel K20_39]
+%rep 20
+ SHA1_STEP_16_79 A,B,C,D,E, TMP,FUN, I, K, MAGIC_F1
+ ROTATE_ARGS
+%assign I (I+1)
+%endrep
+
+;; do rounds 40...59
+ vmovdqa K, [rel K40_59]
+%rep 20
+ SHA1_STEP_16_79 A,B,C,D,E, TMP,FUN, I, K, MAGIC_F2
+ ROTATE_ARGS
+%assign I (I+1)
+%endrep
+
+;; do rounds 60...79
+ vmovdqa K, [rel K60_79]
+%rep 20
+ SHA1_STEP_16_79 A,B,C,D,E, TMP,FUN, I, K, MAGIC_F3
+ ROTATE_ARGS
+%assign I (I+1)
+%endrep
+
+ vpaddd A,A,AA
+ vpaddd B,B,BB
+ vpaddd C,C,CC
+ vpaddd D,D,DD
+ vpaddd E,E,EE
+
+ sub num_blks, 1
+ jne lloop
+
+ ; write out digests
+ vmovdqu [state + 0*SHA1_DIGEST_ROW_SIZE], A
+ vmovdqu [state + 1*SHA1_DIGEST_ROW_SIZE], B
+ vmovdqu [state + 2*SHA1_DIGEST_ROW_SIZE], C
+ vmovdqu [state + 3*SHA1_DIGEST_ROW_SIZE], D
+ vmovdqu [state + 4*SHA1_DIGEST_ROW_SIZE], E
+ DBGPRINTL_YMM "Sha1-AVX2 outgoing transposed digest", A, B, C, D, E
+ ;; update input pointers
+ add inp0, IDX
+ add inp1, IDX
+ add inp2, IDX
+ add inp3, IDX
+ add inp4, IDX
+ add inp5, IDX
+ add inp6, IDX
+ add inp7, IDX
+ mov [state+_data_ptr_sha1+0*PTR_SZ], inp0
+ mov [state+_data_ptr_sha1+1*PTR_SZ], inp1
+ mov [state+_data_ptr_sha1+2*PTR_SZ], inp2
+ mov [state+_data_ptr_sha1+3*PTR_SZ], inp3
+ mov [state+_data_ptr_sha1+4*PTR_SZ], inp4
+ mov [state+_data_ptr_sha1+5*PTR_SZ], inp5
+ mov [state+_data_ptr_sha1+6*PTR_SZ], inp6
+ mov [state+_data_ptr_sha1+7*PTR_SZ], inp7
+
+ ;;;;;;;;;;;;;;;;
+ ;; Postamble
+
+ ;; Clear stack frame (16*32 bytes)
+%ifdef SAFE_DATA
+ vpxor ymm0, ymm0
+%assign i 0
+%rep 16
+ vmovdqa [rsp + i*32], ymm0
+%assign i (i+1)
+%endrep
+%endif
+
+ add rsp, FRAMESZ
+
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
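
Likewise, SHA1_STEP_00_15 / SHA1_STEP_16_79 and MAGIC_F0..F3 above implement the FIPS 180-1 round and message schedule across eight lanes, using ROTATE_ARGS to rename the working variables rather than move data. A scalar C sketch of the same arithmetic (illustrative only; the helper names are ours):

    #include <stdint.h>

    static inline uint32_t rol32(uint32_t x, unsigned n)
    {
            return (x << n) | (x >> (32 - n));
    }

    /* Round functions: MAGIC_F0 (t=0..19), MAGIC_F1 (t=20..39 and, via
     * MAGIC_F3, t=60..79), MAGIC_F2 (t=40..59) */
    static inline uint32_t sha1_f0(uint32_t b, uint32_t c, uint32_t d)
    {
            return d ^ (b & (c ^ d));               /* Ch */
    }

    static inline uint32_t sha1_f1(uint32_t b, uint32_t c, uint32_t d)
    {
            return b ^ c ^ d;                       /* Parity */
    }

    static inline uint32_t sha1_f2(uint32_t b, uint32_t c, uint32_t d)
    {
            return (b & c) | (d & (b | c));         /* Maj, same OR/AND mix as the asm */
    }

    /* Message schedule kept as a 16-entry circular buffer, as on the stack frame:
     * W[t] = ROL1(W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16]) */
    static inline uint32_t sha1_schedule(uint32_t w[16], unsigned t)
    {
            uint32_t x = w[(t - 3) & 15] ^ w[(t - 8) & 15] ^
                         w[(t - 14) & 15] ^ w[t & 15];

            w[t & 15] = rol32(x, 1);
            return w[t & 15];
    }

    /* One round; s[] = {A,B,C,D,E} */
    static inline void sha1_round(uint32_t s[5], uint32_t w, uint32_t k,
                                  uint32_t (*f)(uint32_t, uint32_t, uint32_t))
    {
            uint32_t t = rol32(s[0], 5) + f(s[1], s[2], s[3]) + s[4] + k + w;

            s[4] = s[3];
            s[3] = s[2];
            s[2] = rol32(s[1], 30);
            s[1] = s[0];
            s[0] = t;
    }
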
diff --git a/src/spdk/intel-ipsec-mb/avx2/sha256_oct_avx2.asm b/src/spdk/intel-ipsec-mb/avx2/sha256_oct_avx2.asm
new file mode 100644
index 000000000..08361609d
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx2/sha256_oct_avx2.asm
@@ -0,0 +1,587 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+;; code to compute oct SHA256 using AVX2 (eight lanes per 256-bit register)
+;; outer calling routine takes care of save and restore of XMM registers
+;; Logic designed/laid out by JDG
+
+;; Function clobbers: rax, rcx, rdx, rbx, rsi, rdi, r9-r15; ymm0-15
+;; Stack must be aligned to 32 bytes before call
+;; Windows clobbers: rax rbx rdx rsi rdi r8 r9 r10 r11 r12 r13 r14
+;; Windows preserves: rcx rbp r15
+;;
+;; Linux clobbers: rax rbx rcx rdx rsi r8 r9 r10 r11 r12 r13 r14
+;; Linux preserves: rdi rbp r15
+;;
+;; clobbers ymm0-15
+
+%include "include/os.asm"
+;%define DO_DBGPRINT
+%include "include/dbgprint.asm"
+
+%include "mb_mgr_datastruct.asm"
+%include "include/transpose_avx2.asm"
+
+section .data
+default rel
+align 64
+;global K256_8
+K256_8:
+ dq 0x428a2f98428a2f98, 0x428a2f98428a2f98
+ dq 0x428a2f98428a2f98, 0x428a2f98428a2f98
+ dq 0x7137449171374491, 0x7137449171374491
+ dq 0x7137449171374491, 0x7137449171374491
+ dq 0xb5c0fbcfb5c0fbcf, 0xb5c0fbcfb5c0fbcf
+ dq 0xb5c0fbcfb5c0fbcf, 0xb5c0fbcfb5c0fbcf
+ dq 0xe9b5dba5e9b5dba5, 0xe9b5dba5e9b5dba5
+ dq 0xe9b5dba5e9b5dba5, 0xe9b5dba5e9b5dba5
+ dq 0x3956c25b3956c25b, 0x3956c25b3956c25b
+ dq 0x3956c25b3956c25b, 0x3956c25b3956c25b
+ dq 0x59f111f159f111f1, 0x59f111f159f111f1
+ dq 0x59f111f159f111f1, 0x59f111f159f111f1
+ dq 0x923f82a4923f82a4, 0x923f82a4923f82a4
+ dq 0x923f82a4923f82a4, 0x923f82a4923f82a4
+ dq 0xab1c5ed5ab1c5ed5, 0xab1c5ed5ab1c5ed5
+ dq 0xab1c5ed5ab1c5ed5, 0xab1c5ed5ab1c5ed5
+ dq 0xd807aa98d807aa98, 0xd807aa98d807aa98
+ dq 0xd807aa98d807aa98, 0xd807aa98d807aa98
+ dq 0x12835b0112835b01, 0x12835b0112835b01
+ dq 0x12835b0112835b01, 0x12835b0112835b01
+ dq 0x243185be243185be, 0x243185be243185be
+ dq 0x243185be243185be, 0x243185be243185be
+ dq 0x550c7dc3550c7dc3, 0x550c7dc3550c7dc3
+ dq 0x550c7dc3550c7dc3, 0x550c7dc3550c7dc3
+ dq 0x72be5d7472be5d74, 0x72be5d7472be5d74
+ dq 0x72be5d7472be5d74, 0x72be5d7472be5d74
+ dq 0x80deb1fe80deb1fe, 0x80deb1fe80deb1fe
+ dq 0x80deb1fe80deb1fe, 0x80deb1fe80deb1fe
+ dq 0x9bdc06a79bdc06a7, 0x9bdc06a79bdc06a7
+ dq 0x9bdc06a79bdc06a7, 0x9bdc06a79bdc06a7
+ dq 0xc19bf174c19bf174, 0xc19bf174c19bf174
+ dq 0xc19bf174c19bf174, 0xc19bf174c19bf174
+ dq 0xe49b69c1e49b69c1, 0xe49b69c1e49b69c1
+ dq 0xe49b69c1e49b69c1, 0xe49b69c1e49b69c1
+ dq 0xefbe4786efbe4786, 0xefbe4786efbe4786
+ dq 0xefbe4786efbe4786, 0xefbe4786efbe4786
+ dq 0x0fc19dc60fc19dc6, 0x0fc19dc60fc19dc6
+ dq 0x0fc19dc60fc19dc6, 0x0fc19dc60fc19dc6
+ dq 0x240ca1cc240ca1cc, 0x240ca1cc240ca1cc
+ dq 0x240ca1cc240ca1cc, 0x240ca1cc240ca1cc
+ dq 0x2de92c6f2de92c6f, 0x2de92c6f2de92c6f
+ dq 0x2de92c6f2de92c6f, 0x2de92c6f2de92c6f
+ dq 0x4a7484aa4a7484aa, 0x4a7484aa4a7484aa
+ dq 0x4a7484aa4a7484aa, 0x4a7484aa4a7484aa
+ dq 0x5cb0a9dc5cb0a9dc, 0x5cb0a9dc5cb0a9dc
+ dq 0x5cb0a9dc5cb0a9dc, 0x5cb0a9dc5cb0a9dc
+ dq 0x76f988da76f988da, 0x76f988da76f988da
+ dq 0x76f988da76f988da, 0x76f988da76f988da
+ dq 0x983e5152983e5152, 0x983e5152983e5152
+ dq 0x983e5152983e5152, 0x983e5152983e5152
+ dq 0xa831c66da831c66d, 0xa831c66da831c66d
+ dq 0xa831c66da831c66d, 0xa831c66da831c66d
+ dq 0xb00327c8b00327c8, 0xb00327c8b00327c8
+ dq 0xb00327c8b00327c8, 0xb00327c8b00327c8
+ dq 0xbf597fc7bf597fc7, 0xbf597fc7bf597fc7
+ dq 0xbf597fc7bf597fc7, 0xbf597fc7bf597fc7
+ dq 0xc6e00bf3c6e00bf3, 0xc6e00bf3c6e00bf3
+ dq 0xc6e00bf3c6e00bf3, 0xc6e00bf3c6e00bf3
+ dq 0xd5a79147d5a79147, 0xd5a79147d5a79147
+ dq 0xd5a79147d5a79147, 0xd5a79147d5a79147
+ dq 0x06ca635106ca6351, 0x06ca635106ca6351
+ dq 0x06ca635106ca6351, 0x06ca635106ca6351
+ dq 0x1429296714292967, 0x1429296714292967
+ dq 0x1429296714292967, 0x1429296714292967
+ dq 0x27b70a8527b70a85, 0x27b70a8527b70a85
+ dq 0x27b70a8527b70a85, 0x27b70a8527b70a85
+ dq 0x2e1b21382e1b2138, 0x2e1b21382e1b2138
+ dq 0x2e1b21382e1b2138, 0x2e1b21382e1b2138
+ dq 0x4d2c6dfc4d2c6dfc, 0x4d2c6dfc4d2c6dfc
+ dq 0x4d2c6dfc4d2c6dfc, 0x4d2c6dfc4d2c6dfc
+ dq 0x53380d1353380d13, 0x53380d1353380d13
+ dq 0x53380d1353380d13, 0x53380d1353380d13
+ dq 0x650a7354650a7354, 0x650a7354650a7354
+ dq 0x650a7354650a7354, 0x650a7354650a7354
+ dq 0x766a0abb766a0abb, 0x766a0abb766a0abb
+ dq 0x766a0abb766a0abb, 0x766a0abb766a0abb
+ dq 0x81c2c92e81c2c92e, 0x81c2c92e81c2c92e
+ dq 0x81c2c92e81c2c92e, 0x81c2c92e81c2c92e
+ dq 0x92722c8592722c85, 0x92722c8592722c85
+ dq 0x92722c8592722c85, 0x92722c8592722c85
+ dq 0xa2bfe8a1a2bfe8a1, 0xa2bfe8a1a2bfe8a1
+ dq 0xa2bfe8a1a2bfe8a1, 0xa2bfe8a1a2bfe8a1
+ dq 0xa81a664ba81a664b, 0xa81a664ba81a664b
+ dq 0xa81a664ba81a664b, 0xa81a664ba81a664b
+ dq 0xc24b8b70c24b8b70, 0xc24b8b70c24b8b70
+ dq 0xc24b8b70c24b8b70, 0xc24b8b70c24b8b70
+ dq 0xc76c51a3c76c51a3, 0xc76c51a3c76c51a3
+ dq 0xc76c51a3c76c51a3, 0xc76c51a3c76c51a3
+ dq 0xd192e819d192e819, 0xd192e819d192e819
+ dq 0xd192e819d192e819, 0xd192e819d192e819
+ dq 0xd6990624d6990624, 0xd6990624d6990624
+ dq 0xd6990624d6990624, 0xd6990624d6990624
+ dq 0xf40e3585f40e3585, 0xf40e3585f40e3585
+ dq 0xf40e3585f40e3585, 0xf40e3585f40e3585
+ dq 0x106aa070106aa070, 0x106aa070106aa070
+ dq 0x106aa070106aa070, 0x106aa070106aa070
+ dq 0x19a4c11619a4c116, 0x19a4c11619a4c116
+ dq 0x19a4c11619a4c116, 0x19a4c11619a4c116
+ dq 0x1e376c081e376c08, 0x1e376c081e376c08
+ dq 0x1e376c081e376c08, 0x1e376c081e376c08
+ dq 0x2748774c2748774c, 0x2748774c2748774c
+ dq 0x2748774c2748774c, 0x2748774c2748774c
+ dq 0x34b0bcb534b0bcb5, 0x34b0bcb534b0bcb5
+ dq 0x34b0bcb534b0bcb5, 0x34b0bcb534b0bcb5
+ dq 0x391c0cb3391c0cb3, 0x391c0cb3391c0cb3
+ dq 0x391c0cb3391c0cb3, 0x391c0cb3391c0cb3
+ dq 0x4ed8aa4a4ed8aa4a, 0x4ed8aa4a4ed8aa4a
+ dq 0x4ed8aa4a4ed8aa4a, 0x4ed8aa4a4ed8aa4a
+ dq 0x5b9cca4f5b9cca4f, 0x5b9cca4f5b9cca4f
+ dq 0x5b9cca4f5b9cca4f, 0x5b9cca4f5b9cca4f
+ dq 0x682e6ff3682e6ff3, 0x682e6ff3682e6ff3
+ dq 0x682e6ff3682e6ff3, 0x682e6ff3682e6ff3
+ dq 0x748f82ee748f82ee, 0x748f82ee748f82ee
+ dq 0x748f82ee748f82ee, 0x748f82ee748f82ee
+ dq 0x78a5636f78a5636f, 0x78a5636f78a5636f
+ dq 0x78a5636f78a5636f, 0x78a5636f78a5636f
+ dq 0x84c8781484c87814, 0x84c8781484c87814
+ dq 0x84c8781484c87814, 0x84c8781484c87814
+ dq 0x8cc702088cc70208, 0x8cc702088cc70208
+ dq 0x8cc702088cc70208, 0x8cc702088cc70208
+ dq 0x90befffa90befffa, 0x90befffa90befffa
+ dq 0x90befffa90befffa, 0x90befffa90befffa
+ dq 0xa4506ceba4506ceb, 0xa4506ceba4506ceb
+ dq 0xa4506ceba4506ceb, 0xa4506ceba4506ceb
+ dq 0xbef9a3f7bef9a3f7, 0xbef9a3f7bef9a3f7
+ dq 0xbef9a3f7bef9a3f7, 0xbef9a3f7bef9a3f7
+ dq 0xc67178f2c67178f2, 0xc67178f2c67178f2
+ dq 0xc67178f2c67178f2, 0xc67178f2c67178f2
+
+PSHUFFLE_BYTE_FLIP_MASK: ;ddq 0x0c0d0e0f08090a0b0405060700010203
+ ;ddq 0x0c0d0e0f08090a0b0405060700010203
+ dq 0x0405060700010203, 0x0c0d0e0f08090a0b
+ dq 0x0405060700010203, 0x0c0d0e0f08090a0b
+
+align 64
+MKGLOBAL(K256,data,internal)
+K256:
+ dd 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
+ dd 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
+ dd 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
+ dd 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
+ dd 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
+ dd 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
+ dd 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
+ dd 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
+ dd 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
+ dd 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
+ dd 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
+ dd 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
+ dd 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
+ dd 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
+ dd 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
+ dd 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
+
+section .text
+
+%ifdef LINUX
+ %define arg1 rdi
+ %define arg2 rsi
+ %define reg3 rcx
+ %define reg4 rdx
+%else
+ ; Windows definitions
+ %define arg1 rcx
+ %define arg2 rdx
+ %define reg3 rsi
+ %define reg4 rdi
+%endif
+
+; Common definitions
+%define STATE arg1
+%define INP_SIZE arg2
+
+%define IDX rax
+%define ROUND rbx
+%define TBL reg3
+
+%define inp0 r9
+%define inp1 r10
+%define inp2 r11
+%define inp3 r12
+%define inp4 r13
+%define inp5 r14
+%define inp6 r8
+%define inp7 reg4
+
+; ymm0 a
+; ymm1 b
+; ymm2 c
+; ymm3 d
+; ymm4 e
+; ymm5 f
+; ymm6 g TMP0
+; ymm7 h TMP1
+; ymm8 T1 TT0
+; ymm9 TT1
+; ymm10 TT2
+; ymm11 TT3
+; ymm12 a0 TT4
+; ymm13 a1 TT5
+; ymm14 a2 TT6
+; ymm15 TMP TT7
+
+%define a ymm0
+%define b ymm1
+%define c ymm2
+%define d ymm3
+%define e ymm4
+%define f ymm5
+%define g ymm6
+%define h ymm7
+
+%define T1 ymm8
+
+%define a0 ymm12
+%define a1 ymm13
+%define a2 ymm14
+%define TMP ymm15
+
+%define TMP0 ymm6
+%define TMP1 ymm7
+
+%define TT0 ymm8
+%define TT1 ymm9
+%define TT2 ymm10
+%define TT3 ymm11
+%define TT4 ymm12
+%define TT5 ymm13
+%define TT6 ymm14
+%define TT7 ymm15
+
+%define SZ8 8*SHA256_DIGEST_WORD_SIZE ; Size of one vector register
+%define ROUNDS 64*SZ8
+
+; Define stack usage
+
+;; Assume stack aligned to 32 bytes before call
+;; Therefore FRAMESZ mod 32 must be 32-8 = 24
+struc stack_frame
+ .data resb 16*SZ8
+ .digest resb 8*SZ8
+ .ytmp resb 4*SZ8
+ .align resb 24
+endstruc
+%define FRAMESZ stack_frame_size
+%define _DIGEST stack_frame.digest
+%define _YTMP stack_frame.ytmp
+
+%define YTMP0 rsp + _YTMP + 0*SZ8
+%define YTMP1 rsp + _YTMP + 1*SZ8
+%define YTMP2 rsp + _YTMP + 2*SZ8
+%define YTMP3 rsp + _YTMP + 3*SZ8
+
+%define VMOVPS vmovups
+
+
+%macro ROTATE_ARGS 0
+%xdefine TMP_ h
+%xdefine h g
+%xdefine g f
+%xdefine f e
+%xdefine e d
+%xdefine d c
+%xdefine c b
+%xdefine b a
+%xdefine a TMP_
+%endm
+
+; PRORD reg, imm, tmp
+%macro PRORD 3
+%define %%reg %1
+%define %%imm %2
+%define %%tmp %3
+ vpslld %%tmp, %%reg, (32-(%%imm))
+ vpsrld %%reg, %%reg, %%imm
+ vpor %%reg, %%reg, %%tmp
+%endmacro
+
+; non-destructive
+; PRORD_nd reg, imm, tmp, src
+%macro PRORD_nd 4
+%define %%reg %1
+%define %%imm %2
+%define %%tmp %3
+%define %%src %4
+ ;vmovdqa %%tmp, %%reg
+ vpslld %%tmp, %%src, (32-(%%imm))
+ vpsrld %%reg, %%src, %%imm
+ vpor %%reg, %%reg, %%tmp
+%endmacro
+
+; PRORD dst/src, amt
+%macro PRORD 2
+ PRORD %1, %2, TMP
+%endmacro
+
+; PRORD_nd dst, src, amt
+%macro PRORD_nd 3
+ PRORD_nd %1, %3, TMP, %2
+%endmacro
+
+;; arguments passed implicitly in preprocessor symbols i, a...h
+%macro ROUND_00_15 2
+%define %%T1 %1
+%define %%i %2
+ PRORD_nd a0, e, (11-6) ; sig1: a0 = (e >> 5)
+
+ vpxor a2, f, g ; ch: a2 = f^g
+ vpand a2, a2, e ; ch: a2 = (f^g)&e
+ vpxor a2, a2, g ; a2 = ch
+
+ PRORD_nd a1, e, 25 ; sig1: a1 = (e >> 25)
+ vmovdqa [SZ8*(%%i&0xf) + rsp], %%T1
+ vpaddd %%T1, %%T1, [TBL + ROUND] ; T1 = W + K
+ vpxor a0, a0, e ; sig1: a0 = e ^ (e >> 5)
+ PRORD a0, 6 ; sig1: a0 = (e >> 6) ^ (e >> 11)
+ vpaddd h, h, a2 ; h = h + ch
+ PRORD_nd a2, a, (13-2) ; sig0: a2 = (a >> 11)
+ vpaddd h, h, %%T1 ; h = h + ch + W + K
+ vpxor a0, a0, a1 ; a0 = sigma1
+ PRORD_nd a1, a, 22 ; sig0: a1 = (a >> 22)
+ vpxor %%T1, a, c ; maj: T1 = a^c
+ add ROUND, SZ8 ; ROUND++
+ vpand %%T1, %%T1, b ; maj: T1 = (a^c)&b
+ vpaddd h, h, a0
+
+ vpaddd d, d, h
+
+ vpxor a2, a2, a ; sig0: a2 = a ^ (a >> 11)
+ PRORD a2, 2 ; sig0: a2 = (a >> 2) ^ (a >> 13)
+ vpxor a2, a2, a1 ; a2 = sig0
+ vpand a1, a, c ; maj: a1 = a&c
+ vpor a1, a1, %%T1 ; a1 = maj
+ vpaddd h, h, a1 ; h = h + ch + W + K + maj
+ vpaddd h, h, a2 ; h = h + ch + W + K + maj + sigma0
+
+ ROTATE_ARGS
+%endm
+
+
+;; arguments passed implicitly in preprocessor symbols i, a...h
+%macro ROUND_16_XX 2
+%define %%T1 %1
+%define %%i %2
+ vmovdqa %%T1, [SZ8*((%%i-15)&0xf) + rsp]
+ vmovdqa a1, [SZ8*((%%i-2)&0xf) + rsp]
+ vmovdqa a0, %%T1
+ PRORD %%T1, 18-7
+ vmovdqa a2, a1
+ PRORD a1, 19-17
+ vpxor %%T1, %%T1, a0
+ PRORD %%T1, 7
+ vpxor a1, a1, a2
+ PRORD a1, 17
+ vpsrld a0, a0, 3
+ vpxor %%T1, %%T1, a0
+ vpsrld a2, a2, 10
+ vpxor a1, a1, a2
+ vpaddd %%T1, %%T1, [SZ8*((%%i-16)&0xf) + rsp]
+ vpaddd a1, a1, [SZ8*((%%i-7)&0xf) + rsp]
+ vpaddd %%T1, %%T1, a1
+
+ ROUND_00_15 %%T1, %%i
+
+%endm
+
+
+;; SHA256_ARGS:
+;; UINT128 digest[8]; // transposed digests
+;;  UINT8 *data_ptr[8];
+;;
+
+;; void sha256_oct_avx2(SHA256_ARGS *args, UINT64 num_blks);
+;; arg 1 : STATE : pointer to SHA256_ARGS structure
+;; arg 2 : INP_SIZE : size of input in blocks
+MKGLOBAL(sha256_oct_avx2,function,internal)
+align 16
+sha256_oct_avx2:
+ ; general registers preserved in outer calling routine
+ ; outer calling routine saves all the XMM registers
+ sub rsp, FRAMESZ
+
+ ;; Load the pre-transposed incoming digest.
+ vmovdqu a,[STATE + 0*SHA256_DIGEST_ROW_SIZE]
+ vmovdqu b,[STATE + 1*SHA256_DIGEST_ROW_SIZE]
+ vmovdqu c,[STATE + 2*SHA256_DIGEST_ROW_SIZE]
+ vmovdqu d,[STATE + 3*SHA256_DIGEST_ROW_SIZE]
+ vmovdqu e,[STATE + 4*SHA256_DIGEST_ROW_SIZE]
+ vmovdqu f,[STATE + 5*SHA256_DIGEST_ROW_SIZE]
+ vmovdqu g,[STATE + 6*SHA256_DIGEST_ROW_SIZE]
+ vmovdqu h,[STATE + 7*SHA256_DIGEST_ROW_SIZE]
+
+ lea TBL,[rel K256_8]
+
+ ;; load the address of each of the 4 message lanes
+ ;; getting ready to transpose input onto stack
+ mov inp0,[STATE + _data_ptr_sha256 + 0*PTR_SZ]
+ mov inp1,[STATE + _data_ptr_sha256 + 1*PTR_SZ]
+ mov inp2,[STATE + _data_ptr_sha256 + 2*PTR_SZ]
+ mov inp3,[STATE + _data_ptr_sha256 + 3*PTR_SZ]
+ mov inp4,[STATE + _data_ptr_sha256 + 4*PTR_SZ]
+ mov inp5,[STATE + _data_ptr_sha256 + 5*PTR_SZ]
+ mov inp6,[STATE + _data_ptr_sha256 + 6*PTR_SZ]
+ mov inp7,[STATE + _data_ptr_sha256 + 7*PTR_SZ]
+
+ xor IDX, IDX
+lloop:
+ xor ROUND, ROUND
+
+ ;; save old digest
+ vmovdqa [rsp + _DIGEST + 0*SZ8], a
+ vmovdqa [rsp + _DIGEST + 1*SZ8], b
+ vmovdqa [rsp + _DIGEST + 2*SZ8], c
+ vmovdqa [rsp + _DIGEST + 3*SZ8], d
+ vmovdqa [rsp + _DIGEST + 4*SZ8], e
+ vmovdqa [rsp + _DIGEST + 5*SZ8], f
+ vmovdqa [rsp + _DIGEST + 6*SZ8], g
+ vmovdqa [rsp + _DIGEST + 7*SZ8], h
+ DBGPRINTL_YMM "transposed digest ", a,b,c,d,e,f,g,h
+%assign i 0
+%rep 2
+ TRANSPOSE8_U32_LOAD8 TT0, TT1, TT2, TT3, TT4, TT5, TT6, TT7, \
+ inp0, inp1, inp2, inp3, inp4, inp5, \
+ inp6, inp7, IDX+i*32
+
+ vmovdqa [YTMP0], g
+ vmovdqa [YTMP1], h
+ TRANSPOSE8_U32 TT0, TT1, TT2, TT3, TT4, TT5, TT6, TT7, TMP0, TMP1
+ DBGPRINTL_YMM "transposed input ", TT0, TT1, TT2, TT3, TT4, TT5, TT6, TT7
+ vmovdqa TMP1, [rel PSHUFFLE_BYTE_FLIP_MASK]
+ vmovdqa g, [YTMP0]
+ vpshufb TT0, TT0, TMP1
+ vpshufb TT1, TT1, TMP1
+ vpshufb TT2, TT2, TMP1
+ vpshufb TT3, TT3, TMP1
+ vpshufb TT4, TT4, TMP1
+ vpshufb TT5, TT5, TMP1
+ vpshufb TT6, TT6, TMP1
+ vpshufb TT7, TT7, TMP1
+ vmovdqa h, [YTMP1]
+ vmovdqa [YTMP0], TT4
+ vmovdqa [YTMP1], TT5
+ vmovdqa [YTMP2], TT6
+ vmovdqa [YTMP3], TT7
+ ROUND_00_15 TT0,(i*8+0)
+ vmovdqa TT0, [YTMP0]
+ ROUND_00_15 TT1,(i*8+1)
+ vmovdqa TT1, [YTMP1]
+ ROUND_00_15 TT2,(i*8+2)
+ vmovdqa TT2, [YTMP2]
+ ROUND_00_15 TT3,(i*8+3)
+ vmovdqa TT3, [YTMP3]
+ ROUND_00_15 TT0,(i*8+4)
+ ROUND_00_15 TT1,(i*8+5)
+ ROUND_00_15 TT2,(i*8+6)
+ ROUND_00_15 TT3,(i*8+7)
+%assign i (i+1)
+%endrep
+ add IDX, 4*4*4
+
+%assign i (i*8)
+
+ jmp Lrounds_16_xx
+align 16
+Lrounds_16_xx:
+%rep 16
+ ROUND_16_XX T1, i
+%assign i (i+1)
+%endrep
+
+ cmp ROUND,ROUNDS
+ jb Lrounds_16_xx
+
+ ;; add old digest
+ vpaddd a, a, [rsp + _DIGEST + 0*SZ8]
+ vpaddd b, b, [rsp + _DIGEST + 1*SZ8]
+ vpaddd c, c, [rsp + _DIGEST + 2*SZ8]
+ vpaddd d, d, [rsp + _DIGEST + 3*SZ8]
+ vpaddd e, e, [rsp + _DIGEST + 4*SZ8]
+ vpaddd f, f, [rsp + _DIGEST + 5*SZ8]
+ vpaddd g, g, [rsp + _DIGEST + 6*SZ8]
+ vpaddd h, h, [rsp + _DIGEST + 7*SZ8]
+
+ sub INP_SIZE, 1 ;; unit is blocks
+ jne lloop
+
+ ; write back to memory (state object) the transposed digest
+ vmovdqu [STATE + 0*SHA256_DIGEST_ROW_SIZE],a
+ vmovdqu [STATE + 1*SHA256_DIGEST_ROW_SIZE],b
+ vmovdqu [STATE + 2*SHA256_DIGEST_ROW_SIZE],c
+ vmovdqu [STATE + 3*SHA256_DIGEST_ROW_SIZE],d
+ vmovdqu [STATE + 4*SHA256_DIGEST_ROW_SIZE],e
+ vmovdqu [STATE + 5*SHA256_DIGEST_ROW_SIZE],f
+ vmovdqu [STATE + 6*SHA256_DIGEST_ROW_SIZE],g
+ vmovdqu [STATE + 7*SHA256_DIGEST_ROW_SIZE],h
+ DBGPRINTL_YMM "sha256 digest on exit ", a,b,c,d,e,f,g,h
+
+ ; update input pointers
+ add inp0, IDX
+ mov [STATE + _data_ptr_sha256 + 0*8], inp0
+ add inp1, IDX
+ mov [STATE + _data_ptr_sha256 + 1*8], inp1
+ add inp2, IDX
+ mov [STATE + _data_ptr_sha256 + 2*8], inp2
+ add inp3, IDX
+ mov [STATE + _data_ptr_sha256 + 3*8], inp3
+ add inp4, IDX
+ mov [STATE + _data_ptr_sha256 + 4*8], inp4
+ add inp5, IDX
+ mov [STATE + _data_ptr_sha256 + 5*8], inp5
+ add inp6, IDX
+ mov [STATE + _data_ptr_sha256 + 6*8], inp6
+ add inp7, IDX
+ mov [STATE + _data_ptr_sha256 + 7*8], inp7
+
+ ;;;;;;;;;;;;;;;;
+ ;; Postamble
+
+%ifdef SAFE_DATA
+ ;; Clear stack frame ((16+8+4)*32 bytes)
+ vpxor ymm0, ymm0
+%assign i 0
+%rep (16+8+4)
+ vmovdqa [rsp + i*SZ8], ymm0
+%assign i (i+1)
+%endrep
+%endif
+
+ add rsp, FRAMESZ
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/avx2/sha512_x4_avx2.asm b/src/spdk/intel-ipsec-mb/avx2/sha512_x4_avx2.asm
new file mode 100644
index 000000000..80e8c8c57
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx2/sha512_x4_avx2.asm
@@ -0,0 +1,452 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+;; code to compute quad SHA512 using AVX2
+;; use YMMs to tackle the larger digest size
+;; outer calling routine takes care of save and restore of XMM registers
+;; Logic designed/laid out by JDG
+
+;; Function clobbers: rax, rbx, r8-r12, the length argument register (rdx/rsi); ymm0-15
+;; Stack must be aligned to 32 bytes before call
+;; Windows clobbers: rax rbx rdx r8 r9 r10 r11 r12
+;; Windows preserves: rcx rsi rdi rbp r13 r14 r15
+;;
+;; Linux clobbers: rax rbx rsi r8 r9 r10 r11 r12
+;; Linux preserves: rcx rdx rdi rbp r13 r14 r15
+;;
+;; clobbers ymm0-15
+
+%include "include/os.asm"
+;%define DO_DBGPRINT
+%include "include/dbgprint.asm"
+%include "include/transpose_avx2.asm"
+%include "include/dbgprint.asm"
+%include "mb_mgr_datastruct.asm"
+
+section .data
+default rel
+align 64
+K512_4:
+ dq 0x428a2f98d728ae22, 0x428a2f98d728ae22, 0x428a2f98d728ae22, 0x428a2f98d728ae22
+ dq 0x7137449123ef65cd, 0x7137449123ef65cd, 0x7137449123ef65cd, 0x7137449123ef65cd
+ dq 0xb5c0fbcfec4d3b2f, 0xb5c0fbcfec4d3b2f, 0xb5c0fbcfec4d3b2f, 0xb5c0fbcfec4d3b2f
+ dq 0xe9b5dba58189dbbc, 0xe9b5dba58189dbbc, 0xe9b5dba58189dbbc, 0xe9b5dba58189dbbc
+ dq 0x3956c25bf348b538, 0x3956c25bf348b538, 0x3956c25bf348b538, 0x3956c25bf348b538
+ dq 0x59f111f1b605d019, 0x59f111f1b605d019, 0x59f111f1b605d019, 0x59f111f1b605d019
+ dq 0x923f82a4af194f9b, 0x923f82a4af194f9b, 0x923f82a4af194f9b, 0x923f82a4af194f9b
+ dq 0xab1c5ed5da6d8118, 0xab1c5ed5da6d8118, 0xab1c5ed5da6d8118, 0xab1c5ed5da6d8118
+ dq 0xd807aa98a3030242, 0xd807aa98a3030242, 0xd807aa98a3030242, 0xd807aa98a3030242
+ dq 0x12835b0145706fbe, 0x12835b0145706fbe, 0x12835b0145706fbe, 0x12835b0145706fbe
+ dq 0x243185be4ee4b28c, 0x243185be4ee4b28c, 0x243185be4ee4b28c, 0x243185be4ee4b28c
+ dq 0x550c7dc3d5ffb4e2, 0x550c7dc3d5ffb4e2, 0x550c7dc3d5ffb4e2, 0x550c7dc3d5ffb4e2
+ dq 0x72be5d74f27b896f, 0x72be5d74f27b896f, 0x72be5d74f27b896f, 0x72be5d74f27b896f
+ dq 0x80deb1fe3b1696b1, 0x80deb1fe3b1696b1, 0x80deb1fe3b1696b1, 0x80deb1fe3b1696b1
+ dq 0x9bdc06a725c71235, 0x9bdc06a725c71235, 0x9bdc06a725c71235, 0x9bdc06a725c71235
+ dq 0xc19bf174cf692694, 0xc19bf174cf692694, 0xc19bf174cf692694, 0xc19bf174cf692694
+ dq 0xe49b69c19ef14ad2, 0xe49b69c19ef14ad2, 0xe49b69c19ef14ad2, 0xe49b69c19ef14ad2
+ dq 0xefbe4786384f25e3, 0xefbe4786384f25e3, 0xefbe4786384f25e3, 0xefbe4786384f25e3
+ dq 0x0fc19dc68b8cd5b5, 0x0fc19dc68b8cd5b5, 0x0fc19dc68b8cd5b5, 0x0fc19dc68b8cd5b5
+ dq 0x240ca1cc77ac9c65, 0x240ca1cc77ac9c65, 0x240ca1cc77ac9c65, 0x240ca1cc77ac9c65
+ dq 0x2de92c6f592b0275, 0x2de92c6f592b0275, 0x2de92c6f592b0275, 0x2de92c6f592b0275
+ dq 0x4a7484aa6ea6e483, 0x4a7484aa6ea6e483, 0x4a7484aa6ea6e483, 0x4a7484aa6ea6e483
+ dq 0x5cb0a9dcbd41fbd4, 0x5cb0a9dcbd41fbd4, 0x5cb0a9dcbd41fbd4, 0x5cb0a9dcbd41fbd4
+ dq 0x76f988da831153b5, 0x76f988da831153b5, 0x76f988da831153b5, 0x76f988da831153b5
+ dq 0x983e5152ee66dfab, 0x983e5152ee66dfab, 0x983e5152ee66dfab, 0x983e5152ee66dfab
+ dq 0xa831c66d2db43210, 0xa831c66d2db43210, 0xa831c66d2db43210, 0xa831c66d2db43210
+ dq 0xb00327c898fb213f, 0xb00327c898fb213f, 0xb00327c898fb213f, 0xb00327c898fb213f
+ dq 0xbf597fc7beef0ee4, 0xbf597fc7beef0ee4, 0xbf597fc7beef0ee4, 0xbf597fc7beef0ee4
+ dq 0xc6e00bf33da88fc2, 0xc6e00bf33da88fc2, 0xc6e00bf33da88fc2, 0xc6e00bf33da88fc2
+ dq 0xd5a79147930aa725, 0xd5a79147930aa725, 0xd5a79147930aa725, 0xd5a79147930aa725
+ dq 0x06ca6351e003826f, 0x06ca6351e003826f, 0x06ca6351e003826f, 0x06ca6351e003826f
+ dq 0x142929670a0e6e70, 0x142929670a0e6e70, 0x142929670a0e6e70, 0x142929670a0e6e70
+ dq 0x27b70a8546d22ffc, 0x27b70a8546d22ffc, 0x27b70a8546d22ffc, 0x27b70a8546d22ffc
+ dq 0x2e1b21385c26c926, 0x2e1b21385c26c926, 0x2e1b21385c26c926, 0x2e1b21385c26c926
+ dq 0x4d2c6dfc5ac42aed, 0x4d2c6dfc5ac42aed, 0x4d2c6dfc5ac42aed, 0x4d2c6dfc5ac42aed
+ dq 0x53380d139d95b3df, 0x53380d139d95b3df, 0x53380d139d95b3df, 0x53380d139d95b3df
+ dq 0x650a73548baf63de, 0x650a73548baf63de, 0x650a73548baf63de, 0x650a73548baf63de
+ dq 0x766a0abb3c77b2a8, 0x766a0abb3c77b2a8, 0x766a0abb3c77b2a8, 0x766a0abb3c77b2a8
+ dq 0x81c2c92e47edaee6, 0x81c2c92e47edaee6, 0x81c2c92e47edaee6, 0x81c2c92e47edaee6
+ dq 0x92722c851482353b, 0x92722c851482353b, 0x92722c851482353b, 0x92722c851482353b
+ dq 0xa2bfe8a14cf10364, 0xa2bfe8a14cf10364, 0xa2bfe8a14cf10364, 0xa2bfe8a14cf10364
+ dq 0xa81a664bbc423001, 0xa81a664bbc423001, 0xa81a664bbc423001, 0xa81a664bbc423001
+ dq 0xc24b8b70d0f89791, 0xc24b8b70d0f89791, 0xc24b8b70d0f89791, 0xc24b8b70d0f89791
+ dq 0xc76c51a30654be30, 0xc76c51a30654be30, 0xc76c51a30654be30, 0xc76c51a30654be30
+ dq 0xd192e819d6ef5218, 0xd192e819d6ef5218, 0xd192e819d6ef5218, 0xd192e819d6ef5218
+ dq 0xd69906245565a910, 0xd69906245565a910, 0xd69906245565a910, 0xd69906245565a910
+ dq 0xf40e35855771202a, 0xf40e35855771202a, 0xf40e35855771202a, 0xf40e35855771202a
+ dq 0x106aa07032bbd1b8, 0x106aa07032bbd1b8, 0x106aa07032bbd1b8, 0x106aa07032bbd1b8
+ dq 0x19a4c116b8d2d0c8, 0x19a4c116b8d2d0c8, 0x19a4c116b8d2d0c8, 0x19a4c116b8d2d0c8
+ dq 0x1e376c085141ab53, 0x1e376c085141ab53, 0x1e376c085141ab53, 0x1e376c085141ab53
+ dq 0x2748774cdf8eeb99, 0x2748774cdf8eeb99, 0x2748774cdf8eeb99, 0x2748774cdf8eeb99
+ dq 0x34b0bcb5e19b48a8, 0x34b0bcb5e19b48a8, 0x34b0bcb5e19b48a8, 0x34b0bcb5e19b48a8
+ dq 0x391c0cb3c5c95a63, 0x391c0cb3c5c95a63, 0x391c0cb3c5c95a63, 0x391c0cb3c5c95a63
+ dq 0x4ed8aa4ae3418acb, 0x4ed8aa4ae3418acb, 0x4ed8aa4ae3418acb, 0x4ed8aa4ae3418acb
+ dq 0x5b9cca4f7763e373, 0x5b9cca4f7763e373, 0x5b9cca4f7763e373, 0x5b9cca4f7763e373
+ dq 0x682e6ff3d6b2b8a3, 0x682e6ff3d6b2b8a3, 0x682e6ff3d6b2b8a3, 0x682e6ff3d6b2b8a3
+ dq 0x748f82ee5defb2fc, 0x748f82ee5defb2fc, 0x748f82ee5defb2fc, 0x748f82ee5defb2fc
+ dq 0x78a5636f43172f60, 0x78a5636f43172f60, 0x78a5636f43172f60, 0x78a5636f43172f60
+ dq 0x84c87814a1f0ab72, 0x84c87814a1f0ab72, 0x84c87814a1f0ab72, 0x84c87814a1f0ab72
+ dq 0x8cc702081a6439ec, 0x8cc702081a6439ec, 0x8cc702081a6439ec, 0x8cc702081a6439ec
+ dq 0x90befffa23631e28, 0x90befffa23631e28, 0x90befffa23631e28, 0x90befffa23631e28
+ dq 0xa4506cebde82bde9, 0xa4506cebde82bde9, 0xa4506cebde82bde9, 0xa4506cebde82bde9
+ dq 0xbef9a3f7b2c67915, 0xbef9a3f7b2c67915, 0xbef9a3f7b2c67915, 0xbef9a3f7b2c67915
+ dq 0xc67178f2e372532b, 0xc67178f2e372532b, 0xc67178f2e372532b, 0xc67178f2e372532b
+ dq 0xca273eceea26619c, 0xca273eceea26619c, 0xca273eceea26619c, 0xca273eceea26619c
+ dq 0xd186b8c721c0c207, 0xd186b8c721c0c207, 0xd186b8c721c0c207, 0xd186b8c721c0c207
+ dq 0xeada7dd6cde0eb1e, 0xeada7dd6cde0eb1e, 0xeada7dd6cde0eb1e, 0xeada7dd6cde0eb1e
+ dq 0xf57d4f7fee6ed178, 0xf57d4f7fee6ed178, 0xf57d4f7fee6ed178, 0xf57d4f7fee6ed178
+ dq 0x06f067aa72176fba, 0x06f067aa72176fba, 0x06f067aa72176fba, 0x06f067aa72176fba
+ dq 0x0a637dc5a2c898a6, 0x0a637dc5a2c898a6, 0x0a637dc5a2c898a6, 0x0a637dc5a2c898a6
+ dq 0x113f9804bef90dae, 0x113f9804bef90dae, 0x113f9804bef90dae, 0x113f9804bef90dae
+ dq 0x1b710b35131c471b, 0x1b710b35131c471b, 0x1b710b35131c471b, 0x1b710b35131c471b
+ dq 0x28db77f523047d84, 0x28db77f523047d84, 0x28db77f523047d84, 0x28db77f523047d84
+ dq 0x32caab7b40c72493, 0x32caab7b40c72493, 0x32caab7b40c72493, 0x32caab7b40c72493
+ dq 0x3c9ebe0a15c9bebc, 0x3c9ebe0a15c9bebc, 0x3c9ebe0a15c9bebc, 0x3c9ebe0a15c9bebc
+ dq 0x431d67c49c100d4c, 0x431d67c49c100d4c, 0x431d67c49c100d4c, 0x431d67c49c100d4c
+ dq 0x4cc5d4becb3e42b6, 0x4cc5d4becb3e42b6, 0x4cc5d4becb3e42b6, 0x4cc5d4becb3e42b6
+ dq 0x597f299cfc657e2a, 0x597f299cfc657e2a, 0x597f299cfc657e2a, 0x597f299cfc657e2a
+ dq 0x5fcb6fab3ad6faec, 0x5fcb6fab3ad6faec, 0x5fcb6fab3ad6faec, 0x5fcb6fab3ad6faec
+ dq 0x6c44198c4a475817, 0x6c44198c4a475817, 0x6c44198c4a475817, 0x6c44198c4a475817
+
+align 32
+PSHUFFLE_BYTE_FLIP_MASK: ;ddq 0x08090a0b0c0d0e0f0001020304050607
+ dq 0x0001020304050607, 0x08090a0b0c0d0e0f
+ ;ddq 0x18191a1b1c1d1e1f1011121314151617
+ dq 0x1011121314151617, 0x18191a1b1c1d1e1f
+
+section .text
+
+%ifdef LINUX
+%define arg1 rdi
+%define arg2 rsi
+%else
+%define arg1 rcx
+%define arg2 rdx
+%endif
+
+; Common definitions
+%define STATE arg1
+%define INP_SIZE arg2
+
+%define IDX rax
+%define ROUND rbx
+%define TBL r8
+
+%define inp0 r9
+%define inp1 r10
+%define inp2 r11
+%define inp3 r12
+
+%define a ymm0
+%define b ymm1
+%define c ymm2
+%define d ymm3
+%define e ymm4
+%define f ymm5
+%define g ymm6
+%define h ymm7
+
+%define a0 ymm8
+%define a1 ymm9
+%define a2 ymm10
+
+%define TT0 ymm14
+%define TT1 ymm13
+%define TT2 ymm12
+%define TT3 ymm11
+%define TT4 ymm10
+%define TT5 ymm9
+
+%define T1 ymm14
+%define TMP ymm15
+
+
+
+%define SZ4 4*SHA512_DIGEST_WORD_SIZE ; Size of one vector register
+%define ROUNDS 80*SZ4
+
+; Define stack usage
+
+;; Assume stack aligned to 32 bytes before call
+;; Therefore stack_frame_size mod 32 must be 32-8 = 24
+struc stack_frame
+ .data resb 16*SZ4
+ .digest resb NUM_SHA512_DIGEST_WORDS*SZ4
+ .align resb 24
+endstruc
+
+%define _DIGEST stack_frame.digest
+
+%macro ROTATE_ARGS 0
+%xdefine TMP_ h
+%xdefine h g
+%xdefine g f
+%xdefine f e
+%xdefine e d
+%xdefine d c
+%xdefine c b
+%xdefine b a
+%xdefine a TMP_
+%endm
+
+; PRORQ reg, imm, tmp
+; packed-rotate-right-double
+; does a rotate by doing two shifts and an or
+%macro PRORQ 3
+%define %%reg %1
+%define %%imm %2
+%define %%tmp %3
+ vpsllq %%tmp, %%reg, (64-(%%imm))
+ vpsrlq %%reg, %%reg, %%imm
+ vpor %%reg, %%reg, %%tmp
+%endmacro
+
+; non-destructive
+; PRORQ_nd reg, imm, tmp, src
+%macro PRORQ_nd 4
+%define %%reg %1
+%define %%imm %2
+%define %%tmp %3
+%define %%src %4
+ vpsllq %%tmp, %%src, (64-(%%imm))
+ vpsrlq %%reg, %%src, %%imm
+ vpor %%reg, %%reg, %%tmp
+%endmacro
+
+; PRORQ dst/src, amt
+%macro PRORQ 2
+ PRORQ %1, %2, TMP
+%endmacro
+
+; PRORQ_nd dst, src, amt
+%macro PRORQ_nd 3
+ PRORQ_nd %1, %3, TMP, %2
+%endmacro
+
+
+
+;; arguments passed implicitly in preprocessor symbols i, a...h
+%macro ROUND_00_15 2
+%define %%T1 %1
+%define %%i %2
+ PRORQ_nd a0, e, (18-14) ; sig1: a0 = (e >> 4)
+
+ vpxor a2, f, g ; ch: a2 = f^g
+ vpand a2, a2, e ; ch: a2 = (f^g)&e
+ vpxor a2, a2, g ; a2 = ch
+
+ PRORQ_nd a1, e, 41 ; sig1: a1 = (e >> 41)
+ vmovdqa [SZ4*(%%i&0xf) + rsp],%%T1
+ vpaddq %%T1,%%T1,[TBL + ROUND] ; T1 = W + K
+ vpxor a0, a0, e ; sig1: a0 = e ^ (e >> 4)
+ PRORQ a0, 14 ; sig1: a0 = (e >> 14) ^ (e >> 18)
+ vpaddq h, h, a2 ; h = h + ch
+ PRORQ_nd a2, a, (34-28) ; sig0: a2 = (a >> 6)
+ vpaddq h, h, %%T1 ; h = h + ch + W + K
+ vpxor a0, a0, a1 ; a0 = sigma1
+ vmovdqa %%T1, a ; maj: T1 = a
+ PRORQ_nd a1, a, 39 ; sig0: a1 = (a >> 39)
+ vpxor %%T1, %%T1, c ; maj: T1 = a^c
+ add ROUND, SZ4 ; ROUND++
+ vpand %%T1, %%T1, b ; maj: T1 = (a^c)&b
+ vpaddq h, h, a0
+
+ vpaddq d, d, h
+
+ vpxor a2, a2, a ; sig0: a2 = a ^ (a >> 6)
+ PRORQ a2, 28 ; sig0: a2 = (a >> 28) ^ (a >> 34)
+ vpxor a2, a2, a1 ; a2 = sig0
+ vpand a1, a, c ; maj: a1 = a&c
+ vpor a1, a1, %%T1 ; a1 = maj
+ vpaddq h, h, a1 ; h = h + ch + W + K + maj
+ vpaddq h, h, a2 ; h = h + ch + W + K + maj + sigma0
+ ROTATE_ARGS
+%endm
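+
+;; For reference, one SHA-512 round as computed above, in illustrative
+;; pseudo-C (per 64-bit lane; ROR is a 64-bit rotate right):
+;;
+;;     T1 = h + (ROR(e,14) ^ ROR(e,18) ^ ROR(e,41)) + ((e & f) ^ (~e & g)) + K[i] + W[i];
+;;     T2 = (ROR(a,28) ^ ROR(a,34) ^ ROR(a,39)) + ((a & b) ^ (a & c) ^ (b & c));
+;;     d += T1;  h = T1 + T2;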
+
+
+;; arguments passed implicitly in preprocessor symbols i, a...h
+%macro ROUND_16_XX 2
+%define %%T1 %1
+%define %%i %2
+ vmovdqa %%T1, [SZ4*((%%i-15)&0xf) + rsp]
+ vmovdqa a1, [SZ4*((%%i-2)&0xf) + rsp]
+ vmovdqa a0, %%T1
+ PRORQ %%T1, 8-1
+ vmovdqa a2, a1
+ PRORQ a1, 61-19
+ vpxor %%T1, %%T1, a0
+ PRORQ %%T1, 1
+ vpxor a1, a1, a2
+ PRORQ a1, 19
+ vpsrlq a0, a0, 7
+ vpxor %%T1, %%T1, a0
+ vpsrlq a2, a2, 6
+ vpxor a1, a1, a2
+ vpaddq %%T1, %%T1, [SZ4*((%%i-16)&0xf) + rsp]
+ vpaddq a1, a1, [SZ4*((%%i-7)&0xf) + rsp]
+ vpaddq %%T1, %%T1, a1
+
+ ROUND_00_15 %%T1, %%i
+
+%endm
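+
+;; For reference, the SHA-512 message schedule computed above, in
+;; illustrative pseudo-C (64-bit words):
+;;
+;;     s0   = ROR(W[i-15], 1)  ^ ROR(W[i-15], 8)  ^ (W[i-15] >> 7);
+;;     s1   = ROR(W[i-2], 19)  ^ ROR(W[i-2], 61)  ^ (W[i-2]  >> 6);
+;;     W[i] = W[i-16] + s0 + W[i-7] + s1;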
+
+
+;; void sha512_x4_avx2(void *STATE, const int INP_SIZE)
+;; arg 1 : STATE : pointer to SHA512_ARGS structure (transposed digests and data pointers)
+;; arg 2 : INP_SIZE : size of data in blocks (assumed >= 1)
+MKGLOBAL(sha512_x4_avx2,function,internal)
+align 32
+sha512_x4_avx2:
+ ; general registers preserved in outer calling routine
+ ; outer calling routine saves all the XMM registers
+
+ sub rsp, stack_frame_size
+
+ ;; Load the pre-transposed incoming digest.
+ vmovdqu a, [STATE+ 0*SHA512_DIGEST_ROW_SIZE]
+ vmovdqu b, [STATE+ 1*SHA512_DIGEST_ROW_SIZE]
+ vmovdqu c, [STATE+ 2*SHA512_DIGEST_ROW_SIZE]
+ vmovdqu d, [STATE+ 3*SHA512_DIGEST_ROW_SIZE]
+ vmovdqu e, [STATE+ 4*SHA512_DIGEST_ROW_SIZE]
+ vmovdqu f, [STATE+ 5*SHA512_DIGEST_ROW_SIZE]
+ vmovdqu g, [STATE+ 6*SHA512_DIGEST_ROW_SIZE]
+ vmovdqu h, [STATE+ 7*SHA512_DIGEST_ROW_SIZE]
+
+ DBGPRINTL_YMM "sha512-avx2 Incoming digest", a, b, c, d, e, f, g, h
+ lea TBL,[K512_4]
+
+ ;; load the address of each of the MAX_LANES (4) message lanes
+ ;; getting ready to transpose input onto stack
+ mov inp0,[STATE + _data_ptr_sha512 + 0*PTR_SZ]
+ mov inp1,[STATE + _data_ptr_sha512 + 1*PTR_SZ]
+ mov inp2,[STATE + _data_ptr_sha512 + 2*PTR_SZ]
+ mov inp3,[STATE + _data_ptr_sha512 + 3*PTR_SZ]
+
+ xor IDX, IDX
+lloop:
+ xor ROUND, ROUND
+
+ ;; save old digest
+ vmovdqa [rsp + _DIGEST + 0*SZ4], a
+ vmovdqa [rsp + _DIGEST + 1*SZ4], b
+ vmovdqa [rsp + _DIGEST + 2*SZ4], c
+ vmovdqa [rsp + _DIGEST + 3*SZ4], d
+ vmovdqa [rsp + _DIGEST + 4*SZ4], e
+ vmovdqa [rsp + _DIGEST + 5*SZ4], f
+ vmovdqa [rsp + _DIGEST + 6*SZ4], g
+ vmovdqa [rsp + _DIGEST + 7*SZ4], h
+
+%assign i 0
+%rep 4
+ ;; load up the shuffler for little-endian to big-endian format
+ vmovdqa TMP, [PSHUFFLE_BYTE_FLIP_MASK]
+
+ TRANSPOSE4_U64_LOAD4 TT4, TT1, TT5, TT3, inp0, inp1, inp2, inp3, IDX+i*32
+
+ TRANSPOSE4_U64 TT4, TT1, TT5, TT3, TT0, TT2
+ DBGPRINTL_YMM "sha512-avx2 Incoming data", TT0, TT1, TT2, TT3
+ vpshufb TT0, TT0, TMP
+ vpshufb TT1, TT1, TMP
+ vpshufb TT2, TT2, TMP
+ vpshufb TT3, TT3, TMP
+ ROUND_00_15 TT0,(i*4+0)
+ ROUND_00_15 TT1,(i*4+1)
+ ROUND_00_15 TT2,(i*4+2)
+ ROUND_00_15 TT3,(i*4+3)
+%assign i (i+1)
+%endrep
+;; Increment IDX by message block size == 4 (loop iterations) * 32 (YMM width in bytes) = 128 bytes
+ add IDX, 4 * 32
+
+%assign i (i*4)
+
+ jmp Lrounds_16_xx
+align 16
+Lrounds_16_xx:
+%rep 16
+ ROUND_16_XX T1, i
+%assign i (i+1)
+%endrep
+
+ cmp ROUND,ROUNDS
+ jb Lrounds_16_xx
+
+ ;; add old digest
+ vpaddq a, a, [rsp + _DIGEST + 0*SZ4]
+ vpaddq b, b, [rsp + _DIGEST + 1*SZ4]
+ vpaddq c, c, [rsp + _DIGEST + 2*SZ4]
+ vpaddq d, d, [rsp + _DIGEST + 3*SZ4]
+ vpaddq e, e, [rsp + _DIGEST + 4*SZ4]
+ vpaddq f, f, [rsp + _DIGEST + 5*SZ4]
+ vpaddq g, g, [rsp + _DIGEST + 6*SZ4]
+ vpaddq h, h, [rsp + _DIGEST + 7*SZ4]
+
+ sub INP_SIZE, 1 ;; consumed one message block
+ jne lloop
+
+ ; write back to memory (state object) the transposed digest
+ vmovdqu [STATE+ 0*SHA512_DIGEST_ROW_SIZE ],a
+ vmovdqu [STATE+ 1*SHA512_DIGEST_ROW_SIZE ],b
+ vmovdqu [STATE+ 2*SHA512_DIGEST_ROW_SIZE ],c
+ vmovdqu [STATE+ 3*SHA512_DIGEST_ROW_SIZE ],d
+ vmovdqu [STATE+ 4*SHA512_DIGEST_ROW_SIZE ],e
+ vmovdqu [STATE+ 5*SHA512_DIGEST_ROW_SIZE ],f
+ vmovdqu [STATE+ 6*SHA512_DIGEST_ROW_SIZE ],g
+ vmovdqu [STATE+ 7*SHA512_DIGEST_ROW_SIZE ],h
+ DBGPRINTL_YMM "sha512-avx2 Outgoing digest", a, b, c, d, e, f, g, h
+
+ ;; update input data pointers
+ add inp0, IDX
+ mov [STATE + _data_ptr_sha512 + 0*PTR_SZ], inp0
+ add inp1, IDX
+ mov [STATE + _data_ptr_sha512 + 1*PTR_SZ], inp1
+ add inp2, IDX
+ mov [STATE + _data_ptr_sha512 + 2*PTR_SZ], inp2
+ add inp3, IDX
+ mov [STATE + _data_ptr_sha512 + 3*PTR_SZ], inp3
+
+ ;;;;;;;;;;;;;;;;
+ ;; Postamble
+
+ ;; Clear stack frame ((16 + 8)*32 bytes)
+%ifdef SAFE_DATA
+ vpxor ymm0, ymm0
+%assign i 0
+%rep (16+NUM_SHA512_DIGEST_WORDS)
+ vmovdqa [rsp + i*SZ4], ymm0
+%assign i (i+1)
+%endrep
+%endif
+
+ add rsp, stack_frame_size
+
+ ; outer calling routine restores XMM and other GP registers
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/avx2/snow3g_avx2.c b/src/spdk/intel-ipsec-mb/avx2/snow3g_avx2.c
new file mode 100644
index 000000000..7945d026a
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx2/snow3g_avx2.c
@@ -0,0 +1,49 @@
+/*******************************************************************************
+ Copyright (c) 2019, Intel Corporation
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*******************************************************************************/
+
+
+#if defined (_WIN32) || defined (SAFE_LOOKUP)
+/* use AVX implementation on Windows for now or when SAFE_LOOKUP flag is set */
+#define AVX
+#define CLEAR_SCRATCH_SIMD_REGS clear_scratch_xmms_avx
+#else
+#define AVX2
+#define CLEAR_SCRATCH_SIMD_REGS clear_scratch_ymms
+#endif
+#define SNOW3G_F8_1_BUFFER_BIT snow3g_f8_1_buffer_bit_avx2
+#define SNOW3G_F8_1_BUFFER snow3g_f8_1_buffer_avx2
+#define SNOW3G_F8_2_BUFFER snow3g_f8_2_buffer_avx2
+#define SNOW3G_F8_4_BUFFER snow3g_f8_4_buffer_avx2
+#define SNOW3G_F8_8_BUFFER snow3g_f8_8_buffer_avx2
+#define SNOW3G_F8_N_BUFFER snow3g_f8_n_buffer_avx2
+#define SNOW3G_F8_8_BUFFER_MULTIKEY snow3g_f8_8_buffer_multikey_avx2
+#define SNOW3G_F8_N_BUFFER_MULTIKEY snow3g_f8_n_buffer_multikey_avx2
+#define SNOW3G_F9_1_BUFFER snow3g_f9_1_buffer_avx2
+#define SNOW3G_INIT_KEY_SCHED snow3g_init_key_sched_avx2
+#define SNOW3G_KEY_SCHED_SIZE snow3g_key_sched_size_avx2
+
+#include "include/snow3g_common.h"
diff --git a/src/spdk/intel-ipsec-mb/avx512/aes_cbc_dec_vaes_avx512.asm b/src/spdk/intel-ipsec-mb/avx512/aes_cbc_dec_vaes_avx512.asm
new file mode 100644
index 000000000..ce33caa92
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx512/aes_cbc_dec_vaes_avx512.asm
@@ -0,0 +1,477 @@
+;;
+;; Copyright (c) 2019, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%include "include/os.asm"
+%include "include/reg_sizes.asm"
+%include "include/aes_common.asm"
+
+%define zIV zmm0
+%define zBLK_0_3 zmm1
+%define zBLK_4_7 zmm2
+%define zBLK_8_11 zmm3
+%define zBLK_12_15 zmm4
+%define zTMP0 zmm5
+%define zTMP1 zmm6
+%define zTMP2 zmm7
+%define zTMP3 zmm8
+
+%define ZKEY0 zmm17
+%define ZKEY1 zmm18
+%define ZKEY2 zmm19
+%define ZKEY3 zmm20
+%define ZKEY4 zmm21
+%define ZKEY5 zmm22
+%define ZKEY6 zmm23
+%define ZKEY7 zmm24
+%define ZKEY8 zmm25
+%define ZKEY9 zmm26
+%define ZKEY10 zmm27
+%define ZKEY11 zmm28
+%define ZKEY12 zmm29
+%define ZKEY13 zmm30
+%define ZKEY14 zmm31
+
+%ifdef LINUX
+%define p_in rdi
+%define p_IV rsi
+%define p_keys rdx
+%define p_out rcx
+%define num_bytes r8
+%else
+%define p_in rcx
+%define p_IV rdx
+%define p_keys r8
+%define p_out r9
+%define num_bytes rax
+%endif
+
+%define tmp r10
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;; macro to preload keys
+;;; - uses ZKEY[0-14] registers (ZMM)
+%macro LOAD_KEYS 2
+%define %%KEYS %1 ; [in] key pointer
+%define %%NROUNDS %2 ; [in] numerical value, number of AES rounds
+ ; excluding 1st and last rounds.
+ ; Example: AES-128 -> value 9
+
+%assign i 0
+%rep (%%NROUNDS + 2)
+ vbroadcastf64x2 ZKEY %+ i, [%%KEYS + 16*i]
+%assign i (i + 1)
+%endrep
+
+%endmacro
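+
+;; Example: for AES-128 (%%NROUNDS = 9) the loop above issues 11 broadcasts,
+;; leaving ZKEY0..ZKEY10 each holding one 16-byte round key replicated into
+;; all four 128-bit lanes of the ZMM register, e.g.
+;;
+;;     vbroadcastf64x2 ZKEY0, [keys + 16*0]   ; ZKEY0 = {rk0, rk0, rk0, rk0}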
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;; This macro is used to "cool down" pipeline after DECRYPT_16_PARALLEL macro
+;;; code as the number of final blocks is variable.
+;;; Processes the last %%num_final_blocks blocks (1 to 15, can't be 0)
+
+%macro FINAL_BLOCKS 14
+%define %%PLAIN_OUT %1 ; [in] output buffer
+%define %%CIPH_IN %2 ; [in] input buffer
+%define %%LAST_CIPH_BLK %3 ; [in/out] ZMM with IV/last cipher blk (in idx 3)
+%define %%num_final_blocks %4 ; [in] numerical value (1 - 15)
+%define %%CIPHER_PLAIN_0_3 %5 ; [out] ZMM next 0-3 cipher blocks
+%define %%CIPHER_PLAIN_4_7 %6 ; [out] ZMM next 4-7 cipher blocks
+%define %%CIPHER_PLAIN_8_11 %7 ; [out] ZMM next 8-11 cipher blocks
+%define %%CIPHER_PLAIN_12_15 %8 ; [out] ZMM next 12-15 cipher blocks
+%define %%ZT1 %9 ; [clobbered] ZMM temporary
+%define %%ZT2 %10 ; [clobbered] ZMM temporary
+%define %%ZT3 %11 ; [clobbered] ZMM temporary
+%define %%ZT4 %12 ; [clobbered] ZMM temporary
+%define %%IA0 %13 ; [clobbered] GP temporary
+%define %%NROUNDS %14 ; [in] number of rounds; numerical value
+
+ ;; load plain/cipher text
+ ZMM_LOAD_BLOCKS_0_16 %%num_final_blocks, %%CIPH_IN, 0, \
+ %%CIPHER_PLAIN_0_3, %%CIPHER_PLAIN_4_7, \
+ %%CIPHER_PLAIN_8_11, %%CIPHER_PLAIN_12_15
+
+ ;; Prepare final cipher text blocks to
+ ;; be XOR'd later after AESDEC
+ valignq %%ZT1, %%CIPHER_PLAIN_0_3, %%LAST_CIPH_BLK, 6
+%if %%num_final_blocks > 4
+ valignq %%ZT2, %%CIPHER_PLAIN_4_7, %%CIPHER_PLAIN_0_3, 6
+%endif
+%if %%num_final_blocks > 8
+ valignq %%ZT3, %%CIPHER_PLAIN_8_11, %%CIPHER_PLAIN_4_7, 6
+%endif
+%if %%num_final_blocks > 12
+ valignq %%ZT4, %%CIPHER_PLAIN_12_15, %%CIPHER_PLAIN_8_11, 6
+%endif
+
+ ;; Update IV with last cipher block
+ ;; to be used later in DECRYPT_16_PARALLEL
+%if %%num_final_blocks == 1
+ valignq %%LAST_CIPH_BLK, %%CIPHER_PLAIN_0_3, %%CIPHER_PLAIN_0_3, 2
+%elif %%num_final_blocks == 2
+ valignq %%LAST_CIPH_BLK, %%CIPHER_PLAIN_0_3, %%CIPHER_PLAIN_0_3, 4
+%elif %%num_final_blocks == 3
+ valignq %%LAST_CIPH_BLK, %%CIPHER_PLAIN_0_3, %%CIPHER_PLAIN_0_3, 6
+%elif %%num_final_blocks == 4
+ vmovdqa64 %%LAST_CIPH_BLK, %%CIPHER_PLAIN_0_3
+%elif %%num_final_blocks == 5
+ valignq %%LAST_CIPH_BLK, %%CIPHER_PLAIN_4_7, %%CIPHER_PLAIN_4_7, 2
+%elif %%num_final_blocks == 6
+ valignq %%LAST_CIPH_BLK, %%CIPHER_PLAIN_4_7, %%CIPHER_PLAIN_4_7, 4
+%elif %%num_final_blocks == 7
+ valignq %%LAST_CIPH_BLK, %%CIPHER_PLAIN_4_7, %%CIPHER_PLAIN_4_7, 6
+%elif %%num_final_blocks == 8
+ vmovdqa64 %%LAST_CIPH_BLK, %%CIPHER_PLAIN_4_7
+%elif %%num_final_blocks == 9
+ valignq %%LAST_CIPH_BLK, %%CIPHER_PLAIN_8_11, %%CIPHER_PLAIN_8_11, 2
+%elif %%num_final_blocks == 10
+ valignq %%LAST_CIPH_BLK, %%CIPHER_PLAIN_8_11, %%CIPHER_PLAIN_8_11, 4
+%elif %%num_final_blocks == 11
+ valignq %%LAST_CIPH_BLK, %%CIPHER_PLAIN_8_11, %%CIPHER_PLAIN_8_11, 6
+%elif %%num_final_blocks == 12
+ vmovdqa64 %%LAST_CIPH_BLK, %%CIPHER_PLAIN_8_11
+%elif %%num_final_blocks == 13
+ valignq %%LAST_CIPH_BLK, %%CIPHER_PLAIN_12_15, %%CIPHER_PLAIN_12_15, 2
+%elif %%num_final_blocks == 14
+ valignq %%LAST_CIPH_BLK, %%CIPHER_PLAIN_12_15, %%CIPHER_PLAIN_12_15, 4
+%elif %%num_final_blocks == 15
+ valignq %%LAST_CIPH_BLK, %%CIPHER_PLAIN_12_15, %%CIPHER_PLAIN_12_15, 6
+%endif
+
+ ;; AES rounds
+%assign j 0
+%rep (%%NROUNDS + 2)
+ ZMM_AESDEC_ROUND_BLOCKS_0_16 %%CIPHER_PLAIN_0_3, %%CIPHER_PLAIN_4_7, \
+ %%CIPHER_PLAIN_8_11, %%CIPHER_PLAIN_12_15, \
+ ZKEY %+ j, j, no_data, no_data, no_data, no_data, \
+ %%num_final_blocks, %%NROUNDS
+%assign j (j + 1)
+%endrep
+
+ ;; XOR with decrypted blocks to get plain text
+ vpxorq %%CIPHER_PLAIN_0_3, %%CIPHER_PLAIN_0_3, %%ZT1
+%if %%num_final_blocks > 4
+ vpxorq %%CIPHER_PLAIN_4_7, %%CIPHER_PLAIN_4_7, %%ZT2
+%endif
+%if %%num_final_blocks > 8
+ vpxorq %%CIPHER_PLAIN_8_11, %%CIPHER_PLAIN_8_11, %%ZT3
+%endif
+%if %%num_final_blocks > 12
+ vpxorq %%CIPHER_PLAIN_12_15, %%CIPHER_PLAIN_12_15, %%ZT4
+%endif
+
+ ;; write plain text back to output
+ ZMM_STORE_BLOCKS_0_16 %%num_final_blocks, %%PLAIN_OUT, 0, \
+ %%CIPHER_PLAIN_0_3, %%CIPHER_PLAIN_4_7, \
+ %%CIPHER_PLAIN_8_11, %%CIPHER_PLAIN_12_15
+
+%endmacro ; FINAL_BLOCKS
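+
+;; For reference, the CBC decryption relation implemented above and in
+;; DECRYPT_16_PARALLEL below (illustrative pseudo-C):
+;;
+;;     P[i] = AES_decrypt(C[i], keys) ^ C[i-1];   /* C[-1] is the IV */
+;;
+;; This is why the valignq instructions shift the loaded cipher text by one
+;; 16-byte block position: they line up C[i-1] with C[i] for the XOR that
+;; follows the AESDEC rounds.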
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;; Main AES-CBC decrypt macro
+;;; - operates on single stream
+;;; - decrypts 16 blocks at a time
+%macro DECRYPT_16_PARALLEL 14
+%define %%PLAIN_OUT %1 ; [in] output buffer
+%define %%CIPH_IN %2 ; [in] input buffer
+%define %%LENGTH %3 ; [in/out] number of bytes to process
+%define %%LAST_CIPH_BLK %4 ; [in/out] ZMM with IV (first block) or last cipher block (idx 3)
+%define %%CIPHER_PLAIN_0_3 %5 ; [out] ZMM next 0-3 cipher blocks
+%define %%CIPHER_PLAIN_4_7 %6 ; [out] ZMM next 4-7 cipher blocks
+%define %%CIPHER_PLAIN_8_11 %7 ; [out] ZMM next 8-11 cipher blocks
+%define %%CIPHER_PLAIN_12_15 %8 ; [out] ZMM next 12-15 cipher blocks
+%define %%ZT1 %9 ; [clobbered] ZMM temporary
+%define %%ZT2 %10 ; [clobbered] ZMM temporary
+%define %%ZT3 %11 ; [clobbered] ZMM temporary
+%define %%ZT4 %12 ; [clobbered] ZMM temporary
+%define %%NROUNDS %13 ; [in] number of rounds; numerical value
+%define %%IA0 %14 ; [clobbered] GP temporary
+
+ vmovdqu8 %%CIPHER_PLAIN_0_3, [%%CIPH_IN]
+ vmovdqu8 %%CIPHER_PLAIN_4_7, [%%CIPH_IN + 64]
+ vmovdqu8 %%CIPHER_PLAIN_8_11, [%%CIPH_IN + 128]
+ vmovdqu8 %%CIPHER_PLAIN_12_15, [%%CIPH_IN + 192]
+
+ ;; prepare first set of cipher blocks for later XOR'ing
+ valignq %%ZT1, %%CIPHER_PLAIN_0_3, %%LAST_CIPH_BLK, 6
+ valignq %%ZT2, %%CIPHER_PLAIN_4_7, %%CIPHER_PLAIN_0_3, 6
+ valignq %%ZT3, %%CIPHER_PLAIN_8_11, %%CIPHER_PLAIN_4_7, 6
+ valignq %%ZT4, %%CIPHER_PLAIN_12_15, %%CIPHER_PLAIN_8_11, 6
+
+ ;; store last cipher text block to be used for next 16 blocks
+ vmovdqa64 %%LAST_CIPH_BLK, %%CIPHER_PLAIN_12_15
+
+ ;; AES rounds
+%assign j 0
+%rep (%%NROUNDS + 2)
+ ZMM_AESDEC_ROUND_BLOCKS_0_16 %%CIPHER_PLAIN_0_3, %%CIPHER_PLAIN_4_7, \
+ %%CIPHER_PLAIN_8_11, %%CIPHER_PLAIN_12_15, \
+ ZKEY %+ j, j, no_data, no_data, no_data, no_data, \
+ 16, %%NROUNDS
+%assign j (j + 1)
+%endrep
+
+ ;; XOR with decrypted blocks to get plain text
+ vpxorq %%CIPHER_PLAIN_0_3, %%CIPHER_PLAIN_0_3, %%ZT1
+ vpxorq %%CIPHER_PLAIN_4_7, %%CIPHER_PLAIN_4_7, %%ZT2
+ vpxorq %%CIPHER_PLAIN_8_11, %%CIPHER_PLAIN_8_11, %%ZT3
+ vpxorq %%CIPHER_PLAIN_12_15, %%CIPHER_PLAIN_12_15, %%ZT4
+
+ ;; write plain text back to output
+ vmovdqu8 [%%PLAIN_OUT], %%CIPHER_PLAIN_0_3
+ vmovdqu8 [%%PLAIN_OUT + 64], %%CIPHER_PLAIN_4_7
+ vmovdqu8 [%%PLAIN_OUT + 128], %%CIPHER_PLAIN_8_11
+ vmovdqu8 [%%PLAIN_OUT + 192], %%CIPHER_PLAIN_12_15
+
+ ;; adjust input pointer and length
+ sub %%LENGTH, (16 * 16)
+ add %%CIPH_IN, (16 * 16)
+ add %%PLAIN_OUT, (16 * 16)
+
+%endmacro ; DECRYPT_16_PARALLEL
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;; AES_CBC_DEC macro decrypts given data.
+;;; Flow:
+;;; - Decrypt all blocks (multiple of 16) up to final 1-15 blocks
+;;; - Decrypt final blocks (1-15 blocks)
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+%macro AES_CBC_DEC 7
+%define %%CIPH_IN %1 ;; [in] pointer to input buffer
+%define %%PLAIN_OUT %2 ;; [in] pointer to output buffer
+%define %%KEYS %3 ;; [in] pointer to expanded keys
+%define %%IV %4 ;; [in] pointer to IV
+%define %%LENGTH %5 ;; [in/out] GP register with length in bytes
+%define %%NROUNDS %6 ;; [in] Number of AES rounds; numerical value
+%define %%TMP %7 ;; [clobbered] GP register
+
+ cmp %%LENGTH, 0
+ je %%cbc_dec_done
+
+ vinserti64x2 zIV, zIV, [%%IV], 3
+
+ ;; preload keys
+ LOAD_KEYS %%KEYS, %%NROUNDS
+
+%%decrypt_16_parallel:
+ cmp %%LENGTH, 256
+ jb %%final_blocks
+
+ DECRYPT_16_PARALLEL %%PLAIN_OUT, %%CIPH_IN, %%LENGTH, zIV, \
+ zBLK_0_3, zBLK_4_7, zBLK_8_11, zBLK_12_15, \
+ zTMP0, zTMP1, zTMP2, zTMP3, %%NROUNDS, %%TMP
+ jmp %%decrypt_16_parallel
+
+%%final_blocks:
+ ;; get num final blocks
+ shr %%LENGTH, 4
+ and %%LENGTH, 0xf
+ je %%cbc_dec_done
+
+ cmp %%LENGTH, 8
+ je %%final_num_blocks_is_8
+ jl %%final_blocks_is_1_7
+
+ ; Final blocks 9-15
+ cmp %%LENGTH, 12
+ je %%final_num_blocks_is_12
+ jl %%final_blocks_is_9_11
+
+ ; Final blocks 13-15
+ cmp %%LENGTH, 15
+ je %%final_num_blocks_is_15
+ cmp %%LENGTH, 14
+ je %%final_num_blocks_is_14
+ cmp %%LENGTH, 13
+ je %%final_num_blocks_is_13
+
+%%final_blocks_is_9_11:
+ cmp %%LENGTH, 11
+ je %%final_num_blocks_is_11
+ cmp %%LENGTH, 10
+ je %%final_num_blocks_is_10
+ cmp %%LENGTH, 9
+ je %%final_num_blocks_is_9
+
+%%final_blocks_is_1_7:
+ cmp %%LENGTH, 4
+ je %%final_num_blocks_is_4
+ jl %%final_blocks_is_1_3
+
+ ; Final blocks 5-7
+ cmp %%LENGTH, 7
+ je %%final_num_blocks_is_7
+ cmp %%LENGTH, 6
+ je %%final_num_blocks_is_6
+ cmp %%LENGTH, 5
+ je %%final_num_blocks_is_5
+
+%%final_blocks_is_1_3:
+ cmp %%LENGTH, 3
+ je %%final_num_blocks_is_3
+ cmp %%LENGTH, 2
+ je %%final_num_blocks_is_2
+ jmp %%final_num_blocks_is_1
+
+
+%%final_num_blocks_is_15:
+ FINAL_BLOCKS %%PLAIN_OUT, %%CIPH_IN, zIV, 15, zBLK_0_3, zBLK_4_7, \
+ zBLK_8_11, zBLK_12_15, zTMP0, zTMP1, zTMP2, zTMP3, \
+ %%TMP, %%NROUNDS
+ jmp %%cbc_dec_done
+
+%%final_num_blocks_is_14:
+ FINAL_BLOCKS %%PLAIN_OUT, %%CIPH_IN, zIV, 14, zBLK_0_3, zBLK_4_7, \
+ zBLK_8_11, zBLK_12_15, zTMP0, zTMP1, zTMP2, zTMP3, \
+ %%TMP, %%NROUNDS
+ jmp %%cbc_dec_done
+
+%%final_num_blocks_is_13:
+ FINAL_BLOCKS %%PLAIN_OUT, %%CIPH_IN, zIV, 13, zBLK_0_3, zBLK_4_7, \
+ zBLK_8_11, zBLK_12_15, zTMP0, zTMP1, zTMP2, zTMP3, \
+ %%TMP, %%NROUNDS
+ jmp %%cbc_dec_done
+
+%%final_num_blocks_is_12:
+ FINAL_BLOCKS %%PLAIN_OUT, %%CIPH_IN, zIV, 12, zBLK_0_3, zBLK_4_7, \
+ zBLK_8_11, zBLK_12_15, zTMP0, zTMP1, zTMP2, zTMP3, \
+ %%TMP, %%NROUNDS
+ jmp %%cbc_dec_done
+
+%%final_num_blocks_is_11:
+ FINAL_BLOCKS %%PLAIN_OUT, %%CIPH_IN, zIV, 11, zBLK_0_3, zBLK_4_7, \
+ zBLK_8_11, zBLK_12_15, zTMP0, zTMP1, zTMP2, zTMP3, \
+ %%TMP, %%NROUNDS
+ jmp %%cbc_dec_done
+
+%%final_num_blocks_is_10:
+ FINAL_BLOCKS %%PLAIN_OUT, %%CIPH_IN, zIV, 10, zBLK_0_3, zBLK_4_7, \
+ zBLK_8_11, zBLK_12_15, zTMP0, zTMP1, zTMP2, zTMP3, \
+ %%TMP, %%NROUNDS
+ jmp %%cbc_dec_done
+
+%%final_num_blocks_is_9:
+ FINAL_BLOCKS %%PLAIN_OUT, %%CIPH_IN, zIV, 9, zBLK_0_3, zBLK_4_7, \
+ zBLK_8_11, zBLK_12_15, zTMP0, zTMP1, zTMP2, zTMP3, \
+ %%TMP, %%NROUNDS
+ jmp %%cbc_dec_done
+
+%%final_num_blocks_is_8:
+ FINAL_BLOCKS %%PLAIN_OUT, %%CIPH_IN, zIV, 8, zBLK_0_3, zBLK_4_7, \
+ zBLK_8_11, zBLK_12_15, zTMP0, zTMP1, zTMP2, zTMP3, \
+ %%TMP, %%NROUNDS
+ jmp %%cbc_dec_done
+
+%%final_num_blocks_is_7:
+ FINAL_BLOCKS %%PLAIN_OUT, %%CIPH_IN, zIV, 7, zBLK_0_3, zBLK_4_7, \
+ zBLK_8_11, zBLK_12_15, zTMP0, zTMP1, zTMP2, zTMP3, \
+ %%TMP, %%NROUNDS
+ jmp %%cbc_dec_done
+
+%%final_num_blocks_is_6:
+ FINAL_BLOCKS %%PLAIN_OUT, %%CIPH_IN, zIV, 6, zBLK_0_3, zBLK_4_7, \
+ zBLK_8_11, zBLK_12_15, zTMP0, zTMP1, zTMP2, zTMP3, \
+ %%TMP, %%NROUNDS
+ jmp %%cbc_dec_done
+
+%%final_num_blocks_is_5:
+ FINAL_BLOCKS %%PLAIN_OUT, %%CIPH_IN, zIV, 5, zBLK_0_3, zBLK_4_7, \
+ zBLK_8_11, zBLK_12_15, zTMP0, zTMP1, zTMP2, zTMP3, \
+ %%TMP, %%NROUNDS
+ jmp %%cbc_dec_done
+
+%%final_num_blocks_is_4:
+ FINAL_BLOCKS %%PLAIN_OUT, %%CIPH_IN, zIV, 4, zBLK_0_3, zBLK_4_7, \
+ zBLK_8_11, zBLK_12_15, zTMP0, zTMP1, zTMP2, zTMP3, \
+ %%TMP, %%NROUNDS
+ jmp %%cbc_dec_done
+
+%%final_num_blocks_is_3:
+ FINAL_BLOCKS %%PLAIN_OUT, %%CIPH_IN, zIV, 3, zBLK_0_3, zBLK_4_7, \
+ zBLK_8_11, zBLK_12_15, zTMP0, zTMP1, zTMP2, zTMP3, \
+ %%TMP, %%NROUNDS
+ jmp %%cbc_dec_done
+
+%%final_num_blocks_is_2:
+ FINAL_BLOCKS %%PLAIN_OUT, %%CIPH_IN, zIV, 2, zBLK_0_3, zBLK_4_7, \
+ zBLK_8_11, zBLK_12_15, zTMP0, zTMP1, zTMP2, zTMP3, \
+ %%TMP, %%NROUNDS
+ jmp %%cbc_dec_done
+
+%%final_num_blocks_is_1:
+ FINAL_BLOCKS %%PLAIN_OUT, %%CIPH_IN, zIV, 1, zBLK_0_3, zBLK_4_7, \
+ zBLK_8_11, zBLK_12_15, zTMP0, zTMP1, zTMP2, zTMP3, \
+ %%TMP, %%NROUNDS
+
+%%cbc_dec_done:
+%endmacro
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+section .text
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; aes_cbc_dec_128_vaes_avx512(void *in, void *IV, void *keys, void *out, UINT64 num_bytes)
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+MKGLOBAL(aes_cbc_dec_128_vaes_avx512,function,internal)
+aes_cbc_dec_128_vaes_avx512:
+%ifndef LINUX
+ mov num_bytes, [rsp + 8*5]
+%endif
+ AES_CBC_DEC p_in, p_out, p_keys, p_IV, num_bytes, 9, tmp
+
+ ret
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; aes_cbc_dec_192_vaes_avx512(void *in, void *IV, void *keys, void *out, UINT64 num_bytes)
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+MKGLOBAL(aes_cbc_dec_192_vaes_avx512,function,internal)
+aes_cbc_dec_192_vaes_avx512:
+%ifndef LINUX
+ mov num_bytes, [rsp + 8*5]
+%endif
+ AES_CBC_DEC p_in, p_out, p_keys, p_IV, num_bytes, 11, tmp
+
+ ret
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; aes_cbc_dec_256_vaes_avx512(void *in, void *IV, void *keys, void *out, UINT64 num_bytes)
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+MKGLOBAL(aes_cbc_dec_256_vaes_avx512,function,internal)
+aes_cbc_dec_256_vaes_avx512:
+%ifndef LINUX
+ mov num_bytes, [rsp + 8*5]
+%endif
+ AES_CBC_DEC p_in, p_out, p_keys, p_IV, num_bytes, 13, tmp
+
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
+
diff --git a/src/spdk/intel-ipsec-mb/avx512/aes_cbc_enc_vaes_avx512.asm b/src/spdk/intel-ipsec-mb/avx512/aes_cbc_enc_vaes_avx512.asm
new file mode 100644
index 000000000..c4b1dd561
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx512/aes_cbc_enc_vaes_avx512.asm
@@ -0,0 +1,727 @@
+;;
+;; Copyright (c) 2019, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+;;; routines to do 128/192/256 bit CBC AES encrypt
+
+%include "include/os.asm"
+%include "mb_mgr_datastruct.asm"
+%include "include/reg_sizes.asm"
+
+struc STACK
+_gpr_save: resq 3
+endstruc
+
+%define GPR_SAVE_AREA rsp + _gpr_save
+
+%ifdef LINUX
+%define arg1 rdi
+%define arg2 rsi
+%define arg3 rcx
+%define arg4 rdx
+%else
+%define arg1 rcx
+%define arg2 rdx
+%define arg3 rdi
+%define arg4 rsi
+%endif
+
+%define ARG arg1
+%define LEN arg2
+
+%define IA0 rax
+%define IA1 rbx
+%define IA2 arg3
+%define IA3 arg4
+%define IA4 rbp
+%define IA5 r8
+%define IA6 r9
+%define IA7 r10
+%define IA8 r11
+%define IA9 r13
+%define IA10 r14
+%define IA11 r15
+%define IA12 r12
+
+%define ZIV00_03 zmm8
+%define ZIV04_07 zmm9
+%define ZIV08_11 zmm10
+%define ZIV12_15 zmm11
+
+%define ZT0 zmm16
+%define ZT1 zmm17
+%define ZT2 zmm18
+%define ZT3 zmm19
+%define ZT4 zmm20
+%define ZT5 zmm21
+%define ZT6 zmm22
+%define ZT7 zmm23
+%define ZT8 zmm24
+%define ZT9 zmm25
+%define ZT10 zmm26
+%define ZT11 zmm27
+%define ZT12 zmm28
+%define ZT13 zmm29
+%define ZT14 zmm30
+%define ZT15 zmm31
+
+%define ZT16 zmm12
+%define ZT17 zmm13
+%define ZT18 zmm14
+%define ZT19 zmm15
+
+%define TAB_A0B0A1B1 zmm6
+%define TAB_A2B2A3B3 zmm7
+
+;; Save register state
+%macro FUNC_SAVE 0
+ sub rsp, STACK_size
+ mov [GPR_SAVE_AREA + 8*0], rbp
+%ifndef LINUX
+ mov [GPR_SAVE_AREA + 8*1], rsi
+ mov [GPR_SAVE_AREA + 8*2], rdi
+%endif
+%endmacro
+
+;; Restore register state
+%macro FUNC_RESTORE 0
+ ;; XMMs are saved at a higher level
+ mov rbp, [GPR_SAVE_AREA + 8*0]
+%ifndef LINUX
+ mov rsi, [GPR_SAVE_AREA + 8*1]
+ mov rdi, [GPR_SAVE_AREA + 8*2]
+%endif
+ add rsp, STACK_size
+ vzeroupper
+%endmacro
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; Transpose macro - executes 4x4 transpose of 4 ZMM registers
+; in: L0B0-3 out: B0L0-3
+; L1B0-3 B1L0-3
+; L2B0-3 B2L0-3
+; L3B0-3 B3L0-3
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+%macro TRANSPOSE_4x4 8
+%define %%IN_OUT_0 %1
+%define %%IN_OUT_1 %2
+%define %%IN_OUT_2 %3
+%define %%IN_OUT_3 %4
+%define %%ZTMP_0 %5
+%define %%ZTMP_1 %6
+%define %%ZTMP_2 %7
+%define %%ZTMP_3 %8
+
+ vmovdqa64 %%ZTMP_0, TAB_A0B0A1B1
+ vmovdqa64 %%ZTMP_1, %%ZTMP_0
+ vmovdqa64 %%ZTMP_2, TAB_A2B2A3B3
+ vmovdqa64 %%ZTMP_3, %%ZTMP_2
+
+ vpermi2q %%ZTMP_0, %%IN_OUT_0, %%IN_OUT_1
+ vpermi2q %%ZTMP_1, %%IN_OUT_2, %%IN_OUT_3
+ vpermi2q %%ZTMP_2, %%IN_OUT_0, %%IN_OUT_1
+ vpermi2q %%ZTMP_3, %%IN_OUT_2, %%IN_OUT_3
+
+ vshufi64x2 %%IN_OUT_0, %%ZTMP_0, %%ZTMP_1, 0x44
+ vshufi64x2 %%IN_OUT_2, %%ZTMP_2, %%ZTMP_3, 0x44
+ vshufi64x2 %%IN_OUT_1, %%ZTMP_0, %%ZTMP_1, 0xee
+ vshufi64x2 %%IN_OUT_3, %%ZTMP_2, %%ZTMP_3, 0xee
+%endmacro
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; LOAD_STORE - loads/stores 1-4 blocks (16 bytes each) for 4 lanes into ZMM registers
+; - Loads 4 blocks by default
+; - Pass %%MASK_REG argument to load/store 1-3 blocks (optional)
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+%macro LOAD_STORE_x4 15-16
+%define %%LANE_A %1 ; [in] lane index to load/store (numerical)
+%define %%LANE_B %2 ; [in] lane index to load/store (numerical)
+%define %%LANE_C %3 ; [in] lane index to load/store (numerical)
+%define %%LANE_D %4 ; [in] lane index to load/store (numerical)
+%define %%DATA_PTR %5 ; [in] GP reg with ptr to lane input table
+%define %%OFFSET %6 ; [in] GP reg input/output buffer offset
+%define %%ZDATA0 %7 ; [in/out] ZMM reg to load/store data
+%define %%ZDATA1 %8 ; [in/out] ZMM reg to load/store data
+%define %%ZDATA2 %9 ; [in/out] ZMM reg to load/store data
+%define %%ZDATA3 %10 ; [in/out] ZMM reg to load/store data
+%define %%GP0 %11 ; [clobbered] tmp GP reg
+%define %%GP1 %12 ; [clobbered] tmp GP reg
+%define %%GP2 %13 ; [clobbered] tmp GP reg
+%define %%GP3 %14 ; [clobbered] tmp GP reg
+%define %%LOAD_STORE %15 ; [in] string value to select LOAD or STORE
+%define %%MASK_REG %16 ; [in] mask reg used for load/store mask
+%define %%NUM_ARGS %0
+
+ mov %%GP0, [%%DATA_PTR + 8*(%%LANE_A)]
+ mov %%GP1, [%%DATA_PTR + 8*(%%LANE_B)]
+ mov %%GP2, [%%DATA_PTR + 8*(%%LANE_C)]
+ mov %%GP3, [%%DATA_PTR + 8*(%%LANE_D)]
+
+%if %%NUM_ARGS <= 15 ;; %%MASK_REG not set, assume 4 block load/store
+%ifidn %%LOAD_STORE, LOAD
+ vmovdqu8 %%ZDATA0, [%%GP0 + %%OFFSET]
+ vmovdqu8 %%ZDATA1, [%%GP1 + %%OFFSET]
+ vmovdqu8 %%ZDATA2, [%%GP2 + %%OFFSET]
+ vmovdqu8 %%ZDATA3, [%%GP3 + %%OFFSET]
+%else ; STORE
+ vmovdqu8 [%%GP0 + %%OFFSET], %%ZDATA0
+ vmovdqu8 [%%GP1 + %%OFFSET], %%ZDATA1
+ vmovdqu8 [%%GP2 + %%OFFSET], %%ZDATA2
+ vmovdqu8 [%%GP3 + %%OFFSET], %%ZDATA3
+%endif
+%else ;; %%MASK_REG argument passed - 1, 2, or 3 block load/store
+%ifidn %%LOAD_STORE, LOAD
+ vmovdqu8 %%ZDATA0{%%MASK_REG}{z}, [%%GP0 + %%OFFSET]
+ vmovdqu8 %%ZDATA1{%%MASK_REG}{z}, [%%GP1 + %%OFFSET]
+ vmovdqu8 %%ZDATA2{%%MASK_REG}{z}, [%%GP2 + %%OFFSET]
+ vmovdqu8 %%ZDATA3{%%MASK_REG}{z}, [%%GP3 + %%OFFSET]
+%else ; STORE
+ vmovdqu8 [%%GP0 + %%OFFSET]{%%MASK_REG}, %%ZDATA0
+ vmovdqu8 [%%GP1 + %%OFFSET]{%%MASK_REG}, %%ZDATA1
+ vmovdqu8 [%%GP2 + %%OFFSET]{%%MASK_REG}, %%ZDATA2
+ vmovdqu8 [%%GP3 + %%OFFSET]{%%MASK_REG}, %%ZDATA3
+%endif
+%endif ;; %%NUM_ARGS
+%endmacro
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; AESENC_ROUNDS_x16 macro
+; - 16 lanes, 1 block per lane
+; - it handles special cases: the last and zero rounds
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+%macro AESENC_ROUNDS_x16 5
+%define %%L00_03 %1 ; [in/out] ZMM with lane 0-3 blocks
+%define %%L04_07 %2 ; [in/out] ZMM with lane 4-7 blocks
+%define %%L08_11 %3 ; [in/out] ZMM with lane 8-11 blocks
+%define %%L12_15 %4 ; [in/out] ZMM with lane 12-15 blocks
+%define %%NROUNDS %5 ; [in] number of aes rounds
+
+%define %%KP ARG + _aesarg_key_tab
+%define K00_03_OFFSET 0
+%define K04_07_OFFSET 64
+%define K08_11_OFFSET 128
+%define K12_15_OFFSET 192
+
+%assign ROUND 0
+%rep (%%NROUNDS + 2)
+
+%if ROUND < 1
+ ;; XOR with key 0 before doing aesenc
+ vpxorq %%L00_03, [%%KP + K00_03_OFFSET + ROUND * (16*16)]
+ vpxorq %%L04_07, [%%KP + K04_07_OFFSET + ROUND * (16*16)]
+ vpxorq %%L08_11, [%%KP + K08_11_OFFSET + ROUND * (16*16)]
+ vpxorq %%L12_15, [%%KP + K12_15_OFFSET + ROUND * (16*16)]
+%else
+%if ROUND <= %%NROUNDS
+
+ ;; rounds 1 to 9/11/13
+ vaesenc %%L00_03, %%L00_03, [%%KP + K00_03_OFFSET + ROUND * (16*16)]
+ vaesenc %%L04_07, %%L04_07, [%%KP + K04_07_OFFSET + ROUND * (16*16)]
+ vaesenc %%L08_11, %%L08_11, [%%KP + K08_11_OFFSET + ROUND * (16*16)]
+ vaesenc %%L12_15, %%L12_15, [%%KP + K12_15_OFFSET + ROUND * (16*16)]
+%else
+ ;; the last round
+ vaesenclast %%L00_03, %%L00_03, [%%KP + K00_03_OFFSET + ROUND * (16*16)]
+ vaesenclast %%L04_07, %%L04_07, [%%KP + K04_07_OFFSET + ROUND * (16*16)]
+ vaesenclast %%L08_11, %%L08_11, [%%KP + K08_11_OFFSET + ROUND * (16*16)]
+ vaesenclast %%L12_15, %%L12_15, [%%KP + K12_15_OFFSET + ROUND * (16*16)]
+%endif
+%endif
+
+%assign ROUND (ROUND + 1)
+%endrep
+
+%endmacro ; AESENC_ROUNDS_x16
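+
+;; For reference, each lane above follows the standard AES encryption flow
+;; (illustrative pseudo-C):
+;;
+;;     state ^= round_key[0];
+;;     for (r = 1; r <= nrounds; r++)
+;;             state = AESENC(state, round_key[r]);
+;;     state = AESENCLAST(state, round_key[nrounds + 1]);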
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; ENCRYPT_16_PARALLEL - Encrypts blocks up to the largest multiple of 4
+; - Operation
+; - loop encrypting %%LENGTH bytes of input data
+; - each loop iteration encrypts 4 blocks across all 16 lanes
+; - stop when %%LENGTH is less than 64 bytes (4 blocks)
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+%macro ENCRYPT_16_PARALLEL 31
+%define %%ZIV00_03 %1 ;; [in] lane 0-3 IVs
+%define %%ZIV04_07 %2 ;; [in] lane 4-7 IVs
+%define %%ZIV08_11 %3 ;; [in] lane 8-11 IVs
+%define %%ZIV12_15 %4 ;; [in] lane 12-15 IVs
+%define %%LENGTH %5 ;; [in/out] GP register with length in bytes
+%define %%NROUNDS %6 ;; [in] Number of AES rounds; numerical value
+%define %%IDX %7 ;; [clobbered] GP reg to maintain idx
+%define %%B0L00_03 %8 ;; [clobbered] tmp ZMM register
+%define %%B0L04_07 %9 ;; [clobbered] tmp ZMM register
+%define %%B0L08_11 %10 ;; [clobbered] tmp ZMM register
+%define %%B0L12_15 %11 ;; [clobbered] tmp ZMM register
+%define %%B1L00_03 %12 ;; [clobbered] tmp ZMM register
+%define %%B1L04_07 %13 ;; [clobbered] tmp ZMM register
+%define %%B1L08_11 %14 ;; [clobbered] tmp ZMM register
+%define %%B1L12_15 %15 ;; [clobbered] tmp ZMM register
+%define %%B2L00_03 %16 ;; [clobbered] tmp ZMM register
+%define %%B2L04_07 %17 ;; [clobbered] tmp ZMM register
+%define %%B2L08_11 %18 ;; [clobbered] tmp ZMM register
+%define %%B2L12_15 %19 ;; [clobbered] tmp ZMM register
+%define %%B3L00_03 %20 ;; [clobbered] tmp ZMM register
+%define %%B3L04_07 %21 ;; [clobbered] tmp ZMM register
+%define %%B3L08_11 %22 ;; [clobbered] tmp ZMM register
+%define %%B3L12_15 %23 ;; [clobbered] tmp ZMM register
+%define %%ZTMP0 %24 ;; [clobbered] tmp ZMM register
+%define %%ZTMP1 %25 ;; [clobbered] tmp ZMM register
+%define %%ZTMP2 %26 ;; [clobbered] tmp ZMM register
+%define %%ZTMP3 %27 ;; [clobbered] tmp ZMM register
+%define %%TMP0 %28 ;; [clobbered] tmp GP register
+%define %%TMP1 %29 ;; [clobbered] tmp GP register
+%define %%TMP2 %30 ;; [clobbered] tmp GP register
+%define %%TMP3 %31 ;; [clobbered] tmp GP register
+
+%define %%IN ARG + _aesarg_in
+%define %%OUT ARG + _aesarg_out
+
+ ;; check for at least 4 blocks
+ cmp %%LENGTH, 64
+ jl %%encrypt_16_done
+
+ xor %%IDX, %%IDX
+ ;; skip length check on first loop
+ jmp %%encrypt_16_first
+
+%%encrypt_16_start:
+ cmp %%LENGTH, 64
+ jl %%encrypt_16_end
+
+%%encrypt_16_first:
+ ;; load 4 plaintext blocks for lanes 0-3
+ LOAD_STORE_x4 0, 1, 2, 3, %%IN, %%IDX, %%B0L00_03, %%B1L00_03, \
+ %%B2L00_03, %%B3L00_03, %%TMP0, %%TMP1, %%TMP2, %%TMP3, LOAD
+
+ TRANSPOSE_4x4 %%B0L00_03, %%B1L00_03, %%B2L00_03, %%B3L00_03, \
+ %%ZTMP0, %%ZTMP1, %%ZTMP2, %%ZTMP3
+
+ ;; load 4 plaintext blocks for lanes 4-7
+ LOAD_STORE_x4 4, 5, 6, 7, %%IN, %%IDX, %%B0L04_07, %%B1L04_07, \
+ %%B2L04_07, %%B3L04_07, %%TMP0, %%TMP1, %%TMP2, %%TMP3, LOAD
+
+ TRANSPOSE_4x4 %%B0L04_07, %%B1L04_07, %%B2L04_07, %%B3L04_07, \
+ %%ZTMP0, %%ZTMP1, %%ZTMP2, %%ZTMP3
+
+ ;; load 4 plaintext blocks for lanes 8-11
+ LOAD_STORE_x4 8, 9, 10, 11, %%IN, %%IDX, %%B0L08_11, %%B1L08_11, \
+ %%B2L08_11, %%B3L08_11, %%TMP0, %%TMP1, %%TMP2, %%TMP3, LOAD
+
+ TRANSPOSE_4x4 %%B0L08_11, %%B1L08_11, %%B2L08_11, %%B3L08_11, \
+ %%ZTMP0, %%ZTMP1, %%ZTMP2, %%ZTMP3
+
+ ;; load 4 plaintext blocks for lanes 12-15
+ LOAD_STORE_x4 12, 13, 14, 15, %%IN, %%IDX, %%B0L12_15, %%B1L12_15, \
+ %%B2L12_15, %%B3L12_15, %%TMP0, %%TMP1, %%TMP2, %%TMP3, LOAD
+
+ TRANSPOSE_4x4 %%B0L12_15, %%B1L12_15, %%B2L12_15, %%B3L12_15, \
+ %%ZTMP0, %%ZTMP1, %%ZTMP2, %%ZTMP3
+
+ ;; xor first plaintext block with IV
+ vpxorq %%B0L00_03, %%ZIV00_03
+ vpxorq %%B0L04_07, %%ZIV04_07
+ vpxorq %%B0L08_11, %%ZIV08_11
+ vpxorq %%B0L12_15, %%ZIV12_15
+
+ ;; encrypt block 0 lanes
+ AESENC_ROUNDS_x16 %%B0L00_03, %%B0L04_07, %%B0L08_11, %%B0L12_15, %%NROUNDS
+
+ ;; xor plaintext block with last cipher block
+ vpxorq %%B1L00_03, %%B0L00_03
+ vpxorq %%B1L04_07, %%B0L04_07
+ vpxorq %%B1L08_11, %%B0L08_11
+ vpxorq %%B1L12_15, %%B0L12_15
+
+ ;; encrypt block 1 lanes
+ AESENC_ROUNDS_x16 %%B1L00_03, %%B1L04_07, %%B1L08_11, %%B1L12_15, %%NROUNDS
+
+ ;; xor plaintext block with last cipher block
+ vpxorq %%B2L00_03, %%B1L00_03
+ vpxorq %%B2L04_07, %%B1L04_07
+ vpxorq %%B2L08_11, %%B1L08_11
+ vpxorq %%B2L12_15, %%B1L12_15
+
+ ;; encrypt block 2 lanes
+ AESENC_ROUNDS_x16 %%B2L00_03, %%B2L04_07, %%B2L08_11, %%B2L12_15, %%NROUNDS
+
+ ;; xor plaintext block with last cipher block
+ vpxorq %%B3L00_03, %%B2L00_03
+ vpxorq %%B3L04_07, %%B2L04_07
+ vpxorq %%B3L08_11, %%B2L08_11
+ vpxorq %%B3L12_15, %%B2L12_15
+
+ ;; encrypt block 3 lanes
+ AESENC_ROUNDS_x16 %%B3L00_03, %%B3L04_07, %%B3L08_11, %%B3L12_15, %%NROUNDS
+
+ ;; store last cipher block
+ vmovdqa64 %%ZIV00_03, %%B3L00_03
+ vmovdqa64 %%ZIV04_07, %%B3L04_07
+ vmovdqa64 %%ZIV08_11, %%B3L08_11
+ vmovdqa64 %%ZIV12_15, %%B3L12_15
+
+ ;; write back cipher text for lanes 0-3
+ TRANSPOSE_4x4 %%B0L00_03, %%B1L00_03, %%B2L00_03, %%B3L00_03, \
+ %%ZTMP0, %%ZTMP1, %%ZTMP2, %%ZTMP3
+
+ LOAD_STORE_x4 0, 1, 2, 3, %%OUT, %%IDX, %%B0L00_03, %%B1L00_03, \
+ %%B2L00_03, %%B3L00_03, %%TMP0, %%TMP1, %%TMP2, %%TMP3, STORE
+
+ ;; write back cipher text for lanes 4-7
+ TRANSPOSE_4x4 %%B0L04_07, %%B1L04_07, %%B2L04_07, %%B3L04_07, \
+ %%ZTMP0, %%ZTMP1, %%ZTMP2, %%ZTMP3
+
+ LOAD_STORE_x4 4, 5, 6, 7, %%OUT, %%IDX, %%B0L04_07, %%B1L04_07, \
+ %%B2L04_07, %%B3L04_07, %%TMP0, %%TMP1, %%TMP2, %%TMP3, STORE
+
+ ;; write back cipher text for lanes 8-11
+ TRANSPOSE_4x4 %%B0L08_11, %%B1L08_11, %%B2L08_11, %%B3L08_11, \
+ %%ZTMP0, %%ZTMP1, %%ZTMP2, %%ZTMP3
+
+ LOAD_STORE_x4 8, 9, 10, 11, %%OUT, %%IDX, %%B0L08_11, %%B1L08_11, \
+ %%B2L08_11, %%B3L08_11, %%TMP0, %%TMP1, %%TMP2, %%TMP3, STORE
+
+ ;; write back cipher text for lanes 12-15
+ TRANSPOSE_4x4 %%B0L12_15, %%B1L12_15, %%B2L12_15, %%B3L12_15, \
+ %%ZTMP0, %%ZTMP1, %%ZTMP2, %%ZTMP3
+
+ LOAD_STORE_x4 12, 13, 14, 15, %%OUT, %%IDX, %%B0L12_15, %%B1L12_15, \
+ %%B2L12_15, %%B3L12_15, %%TMP0, %%TMP1, %%TMP2, %%TMP3, STORE
+
+ sub %%LENGTH, 64
+ add %%IDX, 64
+ jmp %%encrypt_16_start
+
+%%encrypt_16_end:
+ ;; update in/out pointers
+ vpbroadcastq %%ZTMP2, %%IDX
+ vpaddq %%ZTMP0, %%ZTMP2, [%%IN]
+ vpaddq %%ZTMP1, %%ZTMP2, [%%IN + 64]
+ vmovdqa64 [%%IN], %%ZTMP0
+ vmovdqa64 [%%IN + 64], %%ZTMP1
+
+ vpaddq %%ZTMP0, %%ZTMP2, [%%OUT]
+ vpaddq %%ZTMP1, %%ZTMP2, [%%OUT + 64]
+ vmovdqa64 [%%OUT], %%ZTMP0
+ vmovdqa64 [%%OUT + 64], %%ZTMP1
+
+%%encrypt_16_done:
+%endmacro
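+
+;; For reference, the CBC encryption chaining applied above (illustrative
+;; pseudo-C, per lane):
+;;
+;;     C[i] = AES_encrypt(P[i] ^ C[i-1], keys);   /* C[-1] is the IV */
+;;
+;; Within a lane each block depends on the previous cipher text block, so a
+;; single lane cannot be parallelized; throughput comes from processing 16
+;; independent lanes at once.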
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; ENCRYPT_16_FINAL - Encrypts the final blocks (fewer than 4) across 16 lanes
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+%macro ENCRYPT_16_FINAL 31
+%define %%ZIV00_03 %1 ;; [in] lane 0-3 IVs
+%define %%ZIV04_07 %2 ;; [in] lane 4-7 IVs
+%define %%ZIV08_11 %3 ;; [in] lane 8-11 IVs
+%define %%ZIV12_15 %4 ;; [in] lane 12-15 IVs
+%define %%NROUNDS %5 ;; [in] Number of AES rounds; numerical value
+%define %%IDX %6 ;; [clobbered] GP reg to maintain idx
+%define %%B0L00_03 %7 ;; [clobbered] tmp ZMM register
+%define %%B0L04_07 %8 ;; [clobbered] tmp ZMM register
+%define %%B0L08_11 %9 ;; [clobbered] tmp ZMM register
+%define %%B0L12_15 %10 ;; [clobbered] tmp ZMM register
+%define %%B1L00_03 %11 ;; [clobbered] tmp ZMM register
+%define %%B1L04_07 %12 ;; [clobbered] tmp ZMM register
+%define %%B1L08_11 %13 ;; [clobbered] tmp ZMM register
+%define %%B1L12_15 %14 ;; [clobbered] tmp ZMM register
+%define %%B2L00_03 %15 ;; [clobbered] tmp ZMM register
+%define %%B2L04_07 %16 ;; [clobbered] tmp ZMM register
+%define %%B2L08_11 %17 ;; [clobbered] tmp ZMM register
+%define %%B2L12_15 %18 ;; [clobbered] tmp ZMM register
+%define %%B3L00_03 %19 ;; [clobbered] tmp ZMM register
+%define %%B3L04_07 %20 ;; [clobbered] tmp ZMM register
+%define %%B3L08_11 %21 ;; [clobbered] tmp ZMM register
+%define %%B3L12_15 %22 ;; [clobbered] tmp ZMM register
+%define %%ZTMP0 %23 ;; [clobbered] tmp ZMM register
+%define %%ZTMP1 %24 ;; [clobbered] tmp ZMM register
+%define %%ZTMP2 %25 ;; [clobbered] tmp ZMM register
+%define %%ZTMP3 %26 ;; [clobbered] tmp ZMM register
+%define %%TMP0 %27 ;; [clobbered] tmp GP register
+%define %%TMP1 %28 ;; [clobbered] tmp GP register
+%define %%TMP2 %29 ;; [clobbered] tmp GP register
+%define %%TMP3 %30 ;; [clobbered] tmp GP register
+%define %%NUM_BLKS %31 ;; [in] number of blocks (numerical value)
+
+%define %%IN ARG + _aesarg_in
+%define %%OUT ARG + _aesarg_out
+
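+        ;; Build the byte-mask used for the masked (partial) per-lane loads and
+        ;; stores below: each block is 16 bytes, so NUM_BLKS blocks correspond
+        ;; to the low (NUM_BLKS * 16) bits of the 64-bit mask.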
+%if %%NUM_BLKS == 1
+ mov %%TMP0, 0x0000_0000_0000_ffff
+ kmovq k1, %%TMP0
+%elif %%NUM_BLKS == 2
+ mov %%TMP0, 0x0000_0000_ffff_ffff
+ kmovq k1, %%TMP0
+%elif %%NUM_BLKS == 3
+ mov %%TMP0, 0x0000_ffff_ffff_ffff
+ kmovq k1, %%TMP0
+%endif
+ xor %%IDX, %%IDX
+
+ ;; load 4 plaintext blocks for lanes 0-3
+ LOAD_STORE_x4 0, 1, 2, 3, %%IN, %%IDX, %%B0L00_03, %%B1L00_03, \
+ %%B2L00_03, %%B3L00_03, %%TMP0, %%TMP1, %%TMP2, \
+ %%TMP3, LOAD, k1
+
+ TRANSPOSE_4x4 %%B0L00_03, %%B1L00_03, %%B2L00_03, %%B3L00_03, \
+ %%ZTMP0, %%ZTMP1, %%ZTMP2, %%ZTMP3
+
+ ;; load 4 plaintext blocks for lanes 4-7
+ LOAD_STORE_x4 4, 5, 6, 7, %%IN, %%IDX, %%B0L04_07, %%B1L04_07, \
+ %%B2L04_07, %%B3L04_07, %%TMP0, %%TMP1, %%TMP2, \
+ %%TMP3, LOAD, k1
+
+ TRANSPOSE_4x4 %%B0L04_07, %%B1L04_07, %%B2L04_07, %%B3L04_07, \
+ %%ZTMP0, %%ZTMP1, %%ZTMP2, %%ZTMP3
+
+ ;; load 4 plaintext blocks for lanes 8-11
+ LOAD_STORE_x4 8, 9, 10, 11, %%IN, %%IDX, %%B0L08_11, %%B1L08_11, \
+ %%B2L08_11, %%B3L08_11, %%TMP0, %%TMP1, %%TMP2, \
+ %%TMP3, LOAD, k1
+
+ TRANSPOSE_4x4 %%B0L08_11, %%B1L08_11, %%B2L08_11, %%B3L08_11, \
+ %%ZTMP0, %%ZTMP1, %%ZTMP2, %%ZTMP3
+
+ ;; load 4 plaintext blocks for lanes 12-15
+ LOAD_STORE_x4 12, 13, 14, 15, %%IN, %%IDX, %%B0L12_15, %%B1L12_15, \
+ %%B2L12_15, %%B3L12_15, %%TMP0, %%TMP1, %%TMP2, \
+ %%TMP3, LOAD, k1
+
+ TRANSPOSE_4x4 %%B0L12_15, %%B1L12_15, %%B2L12_15, %%B3L12_15, \
+ %%ZTMP0, %%ZTMP1, %%ZTMP2, %%ZTMP3
+
+ ;; xor plaintext block with IV
+ vpxorq %%B0L00_03, %%ZIV00_03
+ vpxorq %%B0L04_07, %%ZIV04_07
+ vpxorq %%B0L08_11, %%ZIV08_11
+ vpxorq %%B0L12_15, %%ZIV12_15
+
+ ;; encrypt block 0 lanes
+ AESENC_ROUNDS_x16 %%B0L00_03, %%B0L04_07, %%B0L08_11, %%B0L12_15, %%NROUNDS
+
+%if %%NUM_BLKS == 1
+ ;; store last cipher block
+ vmovdqa64 %%ZIV00_03, %%B0L00_03
+ vmovdqa64 %%ZIV04_07, %%B0L04_07
+ vmovdqa64 %%ZIV08_11, %%B0L08_11
+ vmovdqa64 %%ZIV12_15, %%B0L12_15
+%endif
+
+%if %%NUM_BLKS >= 2
+ ;; xor plaintext block with last cipher block
+ vpxorq %%B1L00_03, %%B0L00_03
+ vpxorq %%B1L04_07, %%B0L04_07
+ vpxorq %%B1L08_11, %%B0L08_11
+ vpxorq %%B1L12_15, %%B0L12_15
+
+ ;; encrypt block 1 lanes
+ AESENC_ROUNDS_x16 %%B1L00_03, %%B1L04_07, %%B1L08_11, %%B1L12_15, %%NROUNDS
+%endif
+%if %%NUM_BLKS == 2
+ ;; store last cipher block
+ vmovdqa64 %%ZIV00_03, %%B1L00_03
+ vmovdqa64 %%ZIV04_07, %%B1L04_07
+ vmovdqa64 %%ZIV08_11, %%B1L08_11
+ vmovdqa64 %%ZIV12_15, %%B1L12_15
+%endif
+
+%if %%NUM_BLKS >= 3
+ ;; xor plaintext block with last cipher block
+ vpxorq %%B2L00_03, %%B1L00_03
+ vpxorq %%B2L04_07, %%B1L04_07
+ vpxorq %%B2L08_11, %%B1L08_11
+ vpxorq %%B2L12_15, %%B1L12_15
+
+ ;; encrypt block 2 lanes
+ AESENC_ROUNDS_x16 %%B2L00_03, %%B2L04_07, %%B2L08_11, %%B2L12_15, %%NROUNDS
+%endif
+%if %%NUM_BLKS == 3
+ ;; store last cipher block
+ vmovdqa64 %%ZIV00_03, %%B2L00_03
+ vmovdqa64 %%ZIV04_07, %%B2L04_07
+ vmovdqa64 %%ZIV08_11, %%B2L08_11
+ vmovdqa64 %%ZIV12_15, %%B2L12_15
+%endif
+ ;; write back cipher text for lanes 0-3
+ TRANSPOSE_4x4 %%B0L00_03, %%B1L00_03, %%B2L00_03, %%B3L00_03, \
+ %%ZTMP0, %%ZTMP1, %%ZTMP2, %%ZTMP3
+
+ LOAD_STORE_x4 0, 1, 2, 3, %%OUT, %%IDX, %%B0L00_03, %%B1L00_03, \
+ %%B2L00_03, %%B3L00_03, %%TMP0, %%TMP1, %%TMP2, \
+ %%TMP3, STORE, k1
+
+ ;; write back cipher text for lanes 4-7
+ TRANSPOSE_4x4 %%B0L04_07, %%B1L04_07, %%B2L04_07, %%B3L04_07, \
+ %%ZTMP0, %%ZTMP1, %%ZTMP2, %%ZTMP3
+
+ LOAD_STORE_x4 4, 5, 6, 7, %%OUT, %%IDX, %%B0L04_07, %%B1L04_07, \
+ %%B2L04_07, %%B3L04_07, %%TMP0, %%TMP1, %%TMP2, \
+ %%TMP3, STORE, k1
+
+ ;; write back cipher text for lanes 8-11
+ TRANSPOSE_4x4 %%B0L08_11, %%B1L08_11, %%B2L08_11, %%B3L08_11, \
+ %%ZTMP0, %%ZTMP1, %%ZTMP2, %%ZTMP3
+
+ LOAD_STORE_x4 8, 9, 10, 11, %%OUT, %%IDX, %%B0L08_11, %%B1L08_11, \
+ %%B2L08_11, %%B3L08_11, %%TMP0, %%TMP1, %%TMP2, \
+ %%TMP3, STORE, k1
+
+ ;; write back cipher text for lanes 12-15
+ TRANSPOSE_4x4 %%B0L12_15, %%B1L12_15, %%B2L12_15, %%B3L12_15, \
+ %%ZTMP0, %%ZTMP1, %%ZTMP2, %%ZTMP3
+
+ LOAD_STORE_x4 12, 13, 14, 15, %%OUT, %%IDX, %%B0L12_15, %%B1L12_15, \
+ %%B2L12_15, %%B3L12_15, %%TMP0, %%TMP1, %%TMP2, \
+ %%TMP3, STORE, k1
+
+ ;; update in/out pointers
+ mov %%IDX, %%NUM_BLKS
+ shl %%IDX, 4
+ vpbroadcastq %%ZTMP2, %%IDX
+ vpaddq %%ZTMP0, %%ZTMP2, [%%IN]
+ vpaddq %%ZTMP1, %%ZTMP2, [%%IN + 64]
+ vmovdqa64 [%%IN], %%ZTMP0
+ vmovdqa64 [%%IN + 64], %%ZTMP1
+
+ vpaddq %%ZTMP0, %%ZTMP2, [%%OUT]
+ vpaddq %%ZTMP1, %%ZTMP2, [%%OUT + 64]
+ vmovdqa64 [%%OUT], %%ZTMP0
+ vmovdqa64 [%%OUT + 64], %%ZTMP1
+
+%endmacro
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; CBC_ENC Encrypts given data.
+; Requires the input data to be at least 1 block (16 bytes) long
+; Input: Number of AES rounds
+;
+; First encrypts blocks up to a multiple of 4,
+; then encrypts the final blocks (fewer than 4)
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+%macro CBC_ENC 1
+%define %%ROUNDS %1
+
+ ;; load transpose tables
+ vmovdqa64 TAB_A0B0A1B1, [rel A0B0A1B1]
+ vmovdqa64 TAB_A2B2A3B3, [rel A2B2A3B3]
+
+ ;; load IV's per lane
+ vmovdqa64 ZIV00_03, [ARG + _aesarg_IV + 16*0]
+ vmovdqa64 ZIV04_07, [ARG + _aesarg_IV + 16*4]
+ vmovdqa64 ZIV08_11, [ARG + _aesarg_IV + 16*8]
+ vmovdqa64 ZIV12_15, [ARG + _aesarg_IV + 16*12]
+
+ ENCRYPT_16_PARALLEL ZIV00_03, ZIV04_07, ZIV08_11, ZIV12_15, \
+ LEN, %%ROUNDS, IA12, ZT0, ZT1, ZT2, ZT3, ZT4, ZT5, \
+ ZT6, ZT7, ZT8, ZT9, ZT10, ZT11, ZT12, ZT13, ZT14, \
+ ZT15, ZT16, ZT17, ZT18, ZT19, IA2, IA3, IA4, IA5
+
+        ;; get number of remaining blocks (0 to 3) from the leftover byte count
+ shr LEN, 4
+ and LEN, 3
+ je %%_cbc_enc_done
+ cmp LEN, 1
+ je %%_final_blocks_1
+ cmp LEN, 2
+ je %%_final_blocks_2
+
+%%_final_blocks_3:
+ ENCRYPT_16_FINAL ZIV00_03, ZIV04_07, ZIV08_11, ZIV12_15, \
+ %%ROUNDS, IA12, ZT0, ZT1, ZT2, ZT3, ZT4, ZT5, ZT6, ZT7, \
+ ZT8, ZT9, ZT10, ZT11, ZT12, ZT13, ZT14, ZT15, ZT16, ZT17, \
+ ZT18, ZT19, IA2, IA3, IA4, IA5, 3
+ jmp %%_cbc_enc_done
+%%_final_blocks_1:
+ ENCRYPT_16_FINAL ZIV00_03, ZIV04_07, ZIV08_11, ZIV12_15, \
+ %%ROUNDS, IA12, ZT0, ZT1, ZT2, ZT3, ZT4, ZT5, ZT6, ZT7, \
+ ZT8, ZT9, ZT10, ZT11, ZT12, ZT13, ZT14, ZT15, ZT16, ZT17, \
+ ZT18, ZT19, IA2, IA3, IA4, IA5, 1
+ jmp %%_cbc_enc_done
+%%_final_blocks_2:
+ ENCRYPT_16_FINAL ZIV00_03, ZIV04_07, ZIV08_11, ZIV12_15, \
+ %%ROUNDS, IA12, ZT0, ZT1, ZT2, ZT3, ZT4, ZT5, ZT6, ZT7, \
+ ZT8, ZT9, ZT10, ZT11, ZT12, ZT13, ZT14, ZT15, ZT16, ZT17, \
+ ZT18, ZT19, IA2, IA3, IA4, IA5, 2
+%%_cbc_enc_done:
+ ;; store IV's per lane
+ vmovdqa64 [ARG + _aesarg_IV + 16*0], ZIV00_03
+ vmovdqa64 [ARG + _aesarg_IV + 16*4], ZIV04_07
+ vmovdqa64 [ARG + _aesarg_IV + 16*8], ZIV08_11
+ vmovdqa64 [ARG + _aesarg_IV + 16*12], ZIV12_15
+%endmacro
+
+
+section .data
+;;;;;;;;;;;;;;;;;;
+; Transpose tables
+;;;;;;;;;;;;;;;;;;
+default rel
+
+align 64
+A0B0A1B1:
+ dq 0x0, 0x1, 0x8, 0x9, 0x2, 0x3, 0xa, 0xb
+
+align 64
+A2B2A3B3:
+ dq 0x4, 0x5, 0xc, 0xd, 0x6, 0x7, 0xe, 0xf
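+;; Transpose helper tables: qword permute indices for two-source permutes
+;; (index values 8-15 select qwords from the second source register), giving
+;; the A0B0A1B1 / A2B2A3B3 128-bit block interleavings used by TRANSPOSE_4x4.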
+
+
+section .text
+
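+;; Note: the numeric argument passed to CBC_ENC below is the AES round count
+;; parameter: 9 for AES-128, 11 for AES-192 and 13 for AES-256.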
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; void aes_cbc_enc_128_vaes_avx512(AES_ARGS *args, uint64_t len_in_bytes);
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+MKGLOBAL(aes_cbc_enc_128_vaes_avx512,function,internal)
+aes_cbc_enc_128_vaes_avx512:
+ FUNC_SAVE
+ CBC_ENC 9
+ FUNC_RESTORE
+ ret
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; void aes_cbc_enc_192_vaes_avx512(AES_ARGS *args, uint64_t len_in_bytes);
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+MKGLOBAL(aes_cbc_enc_192_vaes_avx512,function,internal)
+aes_cbc_enc_192_vaes_avx512:
+ FUNC_SAVE
+ CBC_ENC 11
+ FUNC_RESTORE
+ ret
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; void aes_cbc_enc_256_vaes_avx512(AES_ARGS *args, uint64_t len_in_bytes);
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+MKGLOBAL(aes_cbc_enc_256_vaes_avx512,function,internal)
+aes_cbc_enc_256_vaes_avx512:
+ FUNC_SAVE
+ CBC_ENC 13
+ FUNC_RESTORE
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/avx512/cntr_vaes_avx512.asm b/src/spdk/intel-ipsec-mb/avx512/cntr_vaes_avx512.asm
new file mode 100644
index 000000000..50ff86b6e
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx512/cntr_vaes_avx512.asm
@@ -0,0 +1,1524 @@
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; Copyright(c) 2019, Intel Corporation All rights reserved.
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions
+; are met:
+; * Redistributions of source code must retain the above copyright
+; notice, this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright
+; notice, this list of conditions and the following disclaimer in
+; the documentation and/or other materials provided with the
+; distribution.
+; * Neither the name of Intel Corporation nor the names of its
+; contributors may be used to endorse or promote products derived
+; from this software without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+%include "include/os.asm"
+%include "include/reg_sizes.asm"
+%include "mb_mgr_datastruct.asm"
+%include "job_aes_hmac.asm"
+%include "include/memcpy.asm"
+
+%include "include/aes_common.asm"
+%include "include/const.inc"
+
+section .data
+default rel
+
+align 16
+ONE:
+ dq 0x0000000000000001, 0x0000000000000000
+
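+;; byte-swap mask applied with vpshufb to convert counter blocks between the
+;; little-endian form used for counter arithmetic and the byte order fed to
+;; the AES rounds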
+align 64
+SHUF_MASK:
+ dq 0x08090A0B0C0D0E0F, 0x0001020304050607
+ dq 0x08090A0B0C0D0E0F, 0x0001020304050607
+ dq 0x08090A0B0C0D0E0F, 0x0001020304050607
+ dq 0x08090A0B0C0D0E0F, 0x0001020304050607
+
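+;; The ddq_add_* tables below hold per-block 128-bit counter increments used
+;; to derive four consecutive counter blocks per ZMM register; ddq_add_16
+;; advances all four blocks by 16.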
+align 64
+ddq_add_13_16:
+ dq 0x000000000000000d, 0x0000000000000000
+ dq 0x000000000000000e, 0x0000000000000000
+ dq 0x000000000000000f, 0x0000000000000000
+ dq 0x0000000000000010, 0x0000000000000000
+
+align 64
+ddq_add_9_12:
+ dq 0x0000000000000009, 0x0000000000000000
+ dq 0x000000000000000a, 0x0000000000000000
+ dq 0x000000000000000b, 0x0000000000000000
+ dq 0x000000000000000c, 0x0000000000000000
+
+align 64
+ddq_add_5_8:
+ dq 0x0000000000000005, 0x0000000000000000
+ dq 0x0000000000000006, 0x0000000000000000
+ dq 0x0000000000000007, 0x0000000000000000
+ dq 0x0000000000000008, 0x0000000000000000
+
+align 64
+ddq_add_1_4:
+ dq 0x0000000000000001, 0x0000000000000000
+ dq 0x0000000000000002, 0x0000000000000000
+ dq 0x0000000000000003, 0x0000000000000000
+ dq 0x0000000000000004, 0x0000000000000000
+
+align 64
+ddq_add_12_15:
+ dq 0x000000000000000c, 0x0000000000000000
+ dq 0x000000000000000d, 0x0000000000000000
+ dq 0x000000000000000e, 0x0000000000000000
+ dq 0x000000000000000f, 0x0000000000000000
+
+align 64
+ddq_add_8_11:
+ dq 0x0000000000000008, 0x0000000000000000
+ dq 0x0000000000000009, 0x0000000000000000
+ dq 0x000000000000000a, 0x0000000000000000
+ dq 0x000000000000000b, 0x0000000000000000
+
+align 64
+ddq_add_4_7:
+ dq 0x0000000000000004, 0x0000000000000000
+ dq 0x0000000000000005, 0x0000000000000000
+ dq 0x0000000000000006, 0x0000000000000000
+ dq 0x0000000000000007, 0x0000000000000000
+
+align 64
+ddq_add_0_3:
+ dq 0x0000000000000000, 0x0000000000000000
+ dq 0x0000000000000001, 0x0000000000000000
+ dq 0x0000000000000002, 0x0000000000000000
+ dq 0x0000000000000003, 0x0000000000000000
+
+align 64
+ddq_add_16:
+ dq 0x0000000000000010, 0x0000000000000000
+ dq 0x0000000000000010, 0x0000000000000000
+ dq 0x0000000000000010, 0x0000000000000000
+ dq 0x0000000000000010, 0x0000000000000000
+
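+;; byte64_len_to_mask_table[n] = (1 << n) - 1 for n = 0..64; indexed by the
+;; number of remaining bytes to build a k-mask for masked loads/stores of a
+;; partial 64-byte (4 block) chunk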
+align 64
+byte64_len_to_mask_table:
+ dq 0x0000000000000000, 0x0000000000000001
+ dq 0x0000000000000003, 0x0000000000000007
+ dq 0x000000000000000f, 0x000000000000001f
+ dq 0x000000000000003f, 0x000000000000007f
+ dq 0x00000000000000ff, 0x00000000000001ff
+ dq 0x00000000000003ff, 0x00000000000007ff
+ dq 0x0000000000000fff, 0x0000000000001fff
+ dq 0x0000000000003fff, 0x0000000000007fff
+ dq 0x000000000000ffff, 0x000000000001ffff
+ dq 0x000000000003ffff, 0x000000000007ffff
+ dq 0x00000000000fffff, 0x00000000001fffff
+ dq 0x00000000003fffff, 0x00000000007fffff
+ dq 0x0000000000ffffff, 0x0000000001ffffff
+ dq 0x0000000003ffffff, 0x0000000007ffffff
+ dq 0x000000000fffffff, 0x000000001fffffff
+ dq 0x000000003fffffff, 0x000000007fffffff
+ dq 0x00000000ffffffff, 0x00000001ffffffff
+ dq 0x00000003ffffffff, 0x00000007ffffffff
+ dq 0x0000000fffffffff, 0x0000001fffffffff
+ dq 0x0000003fffffffff, 0x0000007fffffffff
+ dq 0x000000ffffffffff, 0x000001ffffffffff
+ dq 0x000003ffffffffff, 0x000007ffffffffff
+ dq 0x00000fffffffffff, 0x00001fffffffffff
+ dq 0x00003fffffffffff, 0x00007fffffffffff
+ dq 0x0000ffffffffffff, 0x0001ffffffffffff
+ dq 0x0003ffffffffffff, 0x0007ffffffffffff
+ dq 0x000fffffffffffff, 0x001fffffffffffff
+ dq 0x003fffffffffffff, 0x007fffffffffffff
+ dq 0x00ffffffffffffff, 0x01ffffffffffffff
+ dq 0x03ffffffffffffff, 0x07ffffffffffffff
+ dq 0x0fffffffffffffff, 0x1fffffffffffffff
+ dq 0x3fffffffffffffff, 0x7fffffffffffffff
+ dq 0xffffffffffffffff
+
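+;; counter block template for 12-byte IVs: the IV is loaded into bytes 0-11
+;; (using the 0x0fff byte-mask) and the last four bytes hold the initial
+;; block counter value of 1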
+align 16
+initial_12_IV_counter:
+ dq 0x0000000000000000, 0x0100000000000000
+
+mask_16_bytes:
+ dq 0x000000000000ffff
+
+section .text
+default rel
+
+%ifdef LINUX
+%define arg1 rdi
+%else
+%define arg1 rcx
+%endif
+
+%define ZKEY0 zmm17
+%define ZKEY1 zmm18
+%define ZKEY2 zmm19
+%define ZKEY3 zmm20
+%define ZKEY4 zmm21
+%define ZKEY5 zmm22
+%define ZKEY6 zmm23
+%define ZKEY7 zmm24
+%define ZKEY8 zmm25
+%define ZKEY9 zmm26
+%define ZKEY10 zmm27
+%define ZKEY11 zmm28
+%define ZKEY12 zmm29
+%define ZKEY13 zmm30
+%define ZKEY14 zmm31
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;; Stack frame definition
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+%ifidn __OUTPUT_FORMAT__, win64
+ %define GP_STORAGE (7*8) ; space for 7 GP registers
+%else
+ %define GP_STORAGE (5*8) ; space for 5 GP registers
+%endif
+
+%define STACK_FRAME_SIZE GP_STORAGE
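+;; Stack slot layout (qwords): 0 = r12, 1 = r13, 2 = r14 (CNTR_BIT only),
+;; 3 = original rsp, 4 = rdi and 5 = rsi (win64 only)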
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;; Utility Macros
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;; This macro is used to maintain the bits from the output text
+;;; when writing out the output blocks, in case there are some bits
+;;; that do not require encryption
+%macro PRESERVE_BITS 12-13
+%define %%RBITS %1 ; [in] Remaining bits in last byte
+%define %%LENGTH %2 ; [in] Length of the last set of blocks
+%define %%CYPH_PLAIN_OUT %3 ; [in] Pointer to output buffer
+%define %%ZIN_OUT %4 ; [in/out] ZMM with last set of output blocks
+%define %%ZTMP0 %5 ; [clobbered] ZMM temporary
+%define %%ZTMP1 %6 ; [clobbered] ZMM temporary
+%define %%ZTMP2 %7 ; [clobbered] ZMM temporary
+%define %%IA0 %8 ; [clobbered] GP temporary
+%define %%IA1 %9 ; [clobbered] GP temporary
+%define %%blocks_to_skip %10 ; [in] Number of blocks to skip from output
+%define %%FULL_PARTIAL %11 ; [in] Last block type selection "full" or "partial"
+%define %%MASKREG %12 ; [clobbered] Mask register
+%define %%DATA_OFFSET %13 ; [in/out] Data offset
+%define %%NUM_ARGS %0
+
+;; offset = number of sets of 4 blocks to skip
+%assign offset (((%%blocks_to_skip) / 4) * 64)
+;; num_left_blocks = number of blocks in the last set
+%assign num_left_blocks (((%%blocks_to_skip) & 3) + 1) ;; Range 1-4 blocks
+
+%if %%NUM_ARGS == 13
+ ;; Load output to get last partial byte
+%ifidn %%FULL_PARTIAL, partial
+ vmovdqu8 %%ZTMP0{%%MASKREG}, [%%CYPH_PLAIN_OUT + %%DATA_OFFSET + offset]
+%else
+ vmovdqu8 %%ZTMP0, [%%CYPH_PLAIN_OUT + %%DATA_OFFSET + offset]
+%endif ; %%FULL_PARTIAL == partial
+%else
+ ;; Load output to get last partial byte (loading up to the last 4 blocks)
+ ZMM_LOAD_MASKED_BLOCKS_0_16 num_left_blocks, %%CYPH_PLAIN_OUT, offset, \
+ %%ZTMP0, no_zmm, no_zmm, no_zmm, %%MASKREG
+%endif ;; %%NUM_ARGS == 13
+
+ ;; Save RCX in temporary GP register
+ mov %%IA0, rcx
+ mov DWORD(%%IA1), 0xff
+ mov cl, BYTE(%%RBITS)
+ shr DWORD(%%IA1), cl ;; e.g. 3 remaining bits -> mask = 00011111
+ mov rcx, %%IA0
+
+ vmovq XWORD(%%ZTMP1), %%IA1
+
+ ;; Get number of full bytes in last block.
+ ;; Subtracting the bytes in the blocks to skip to the length of whole
+ ;; set of blocks gives us the number of bytes in the last block,
+ ;; but the last block has a partial byte at the end, so an extra byte
+ ;; needs to be subtracted
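+        ;; (e.g. blocks_to_skip = 15 and LENGTH = 246 gives
+        ;;  246 - (15 * 16 + 1) = 5 full bytes ahead of the partial byte)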
+ mov %%IA1, %%LENGTH
+ sub %%IA1, (%%blocks_to_skip * 16 + 1)
+ XVPSLLB XWORD(%%ZTMP1), %%IA1, XWORD(%%ZTMP2), %%IA0
+%if num_left_blocks == 4
+ vshufi64x2 %%ZTMP1, %%ZTMP1, %%ZTMP1, 0x15
+%elif num_left_blocks == 3
+ vshufi64x2 %%ZTMP1, %%ZTMP1, %%ZTMP1, 0x45
+%elif num_left_blocks == 2
+ vshufi64x2 %%ZTMP1, %%ZTMP1, %%ZTMP1, 0x51
+%endif ;; No need to shift if there is only one block
+
+ ;; At this point, ZTMP1 contains a mask with all 0s, but with some ones
+ ;; in the partial byte
+
+ ;; First, clear the last bits (not to be ciphered) of the last output block
+ ;; %%ZIN_OUT = %%ZIN_OUT AND NOT %%ZTMP1 (0x50 = andA!C)
+ vpternlogq %%ZIN_OUT, %%ZTMP1, %%ZTMP1, 0x50
+
+ ;; Then, set these last bits to the last bits coming from the output
+ ;; %%ZIN_OUT = %%ZIN_OUT OR (%%ZTMP0 AND %%ZTMP1) (0xF8 = orAandBC)
+ vpternlogq %%ZIN_OUT, %%ZTMP0, %%ZTMP1, 0xF8
+
+%endmacro
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;; This macro is used to "warm up" the pipeline for the ENCRYPT_16_PARALLEL
+;;; macro code. It is called only for data lengths of 256 bytes and above.
+;;; The flow is as follows:
+;;; - encrypt the initial %%num_initial_blocks blocks (can be 0)
+;;; - encrypt the next 16 blocks
+;;; - the last 16th block can be partial (lengths between 257 and 367)
+;;; - partial block ciphering is handled within this macro
+
+%macro INITIAL_BLOCKS 26
+%define %%KEY %1 ; [in] pointer to key
+%define %%CYPH_PLAIN_OUT %2 ; [in] output buffer
+%define %%PLAIN_CYPH_IN %3 ; [in] input buffer
+%define %%LENGTH %4 ; [in/out] number of bytes to process
+%define %%DATA_OFFSET %5 ; [in/out] data offset
+%define %%num_initial_blocks %6 ; [in] can be between 0 and 15
+%define %%CTR %7 ; [in] XMM first counter block
+%define %%CTR_1_4 %8 ; [out] ZMM next 1-4 counter blocks
+%define %%CTR_5_8 %9 ; [out] ZMM next 5-8 counter blocks
+%define %%CTR_9_12 %10 ; [out] ZMM next 9-12 counter blocks
+%define %%CTR_13_16 %11 ; [out] ZMM next 13-16 counter blocks
+%define %%ZT1 %12 ; [clobbered] ZMM temporary
+%define %%ZT2 %13 ; [clobbered] ZMM temporary
+%define %%ZT3 %14 ; [clobbered] ZMM temporary
+%define %%ZT4 %15 ; [clobbered] ZMM temporary
+%define %%ZT5 %16 ; [clobbered] ZMM temporary
+%define %%ZT6 %17 ; [clobbered] ZMM temporary
+%define %%ZT7 %18 ; [clobbered] ZMM temporary
+%define %%ZT8 %19 ; [clobbered] ZMM temporary
+%define %%IA0 %20 ; [clobbered] GP temporary
+%define %%IA1 %21 ; [clobbered] GP temporary
+%define %%MASKREG %22 ; [clobbered] mask register
+%define %%SHUFREG %23 ; [in] ZMM register with shuffle mask
+%define %%NROUNDS %24 ; [in] number of rounds; numerical value
+%define %%CNTR_TYPE %25 ; [in] Type of CNTR operation to do (CNTR/CNTR_BIT)
+%define %%RBITS %26 ; [in] Number of remaining bits in last byte
+
+%define %%T1 XWORD(%%ZT1)
+%define %%T2 XWORD(%%ZT2)
+%define %%T3 XWORD(%%ZT3)
+%define %%T4 XWORD(%%ZT4)
+%define %%T5 XWORD(%%ZT5)
+%define %%T6 XWORD(%%ZT6)
+%define %%T7 XWORD(%%ZT7)
+%define %%T8 XWORD(%%ZT8)
+
+%ifidn %%CNTR_TYPE, CNTR
+%define %%VPADD vpaddd
+%else
+%define %%VPADD vpaddq
+%endif
+
+%if %%num_initial_blocks > 0
+ ;; load plain/cipher text
+ ZMM_LOAD_BLOCKS_0_16 %%num_initial_blocks, %%PLAIN_CYPH_IN, 0, \
+ %%ZT5, %%ZT6, %%ZT7, %%ZT8, load_4_instead_of_3
+
+ ;; prepare AES counter blocks
+%if %%num_initial_blocks > 1
+%if %%num_initial_blocks == 2
+ vshufi64x2 YWORD(%%ZT1), YWORD(%%CTR), YWORD(%%CTR), 0
+ %%VPADD YWORD(%%ZT1), YWORD(%%ZT1), [rel ddq_add_0_3]
+%elif %%num_initial_blocks <= 4
+ vshufi64x2 ZWORD(%%CTR), ZWORD(%%CTR), ZWORD(%%CTR), 0
+ %%VPADD %%ZT1, ZWORD(%%CTR), [rel ddq_add_0_3]
+%elif %%num_initial_blocks <= 8
+ vshufi64x2 ZWORD(%%CTR), ZWORD(%%CTR), ZWORD(%%CTR), 0
+ %%VPADD %%ZT1, ZWORD(%%CTR), [rel ddq_add_0_3]
+ %%VPADD %%ZT2, ZWORD(%%CTR), [rel ddq_add_4_7]
+%elif %%num_initial_blocks <= 12
+ vshufi64x2 ZWORD(%%CTR), ZWORD(%%CTR), ZWORD(%%CTR), 0
+ %%VPADD %%ZT1, ZWORD(%%CTR), [rel ddq_add_0_3]
+ %%VPADD %%ZT2, ZWORD(%%CTR), [rel ddq_add_4_7]
+ %%VPADD %%ZT3, ZWORD(%%CTR), [rel ddq_add_8_11]
+%else
+ vshufi64x2 ZWORD(%%CTR), ZWORD(%%CTR), ZWORD(%%CTR), 0
+ %%VPADD %%ZT1, ZWORD(%%CTR), [rel ddq_add_0_3]
+ %%VPADD %%ZT2, ZWORD(%%CTR), [rel ddq_add_4_7]
+ %%VPADD %%ZT3, ZWORD(%%CTR), [rel ddq_add_8_11]
+ %%VPADD %%ZT4, ZWORD(%%CTR), [rel ddq_add_12_15]
+%endif
+%endif
+
+ ;; extract new counter value (%%T1)
+ ;; shuffle the counters for AES rounds
+%if %%num_initial_blocks == 1
+ vpshufb %%T1, %%CTR, XWORD(%%SHUFREG)
+%elif %%num_initial_blocks == 2
+ vextracti32x4 %%CTR, YWORD(%%ZT1), 1
+ vpshufb YWORD(%%ZT1), YWORD(%%SHUFREG)
+%elif %%num_initial_blocks <= 4
+ vextracti32x4 %%CTR, %%ZT1, (%%num_initial_blocks - 1)
+ vpshufb %%ZT1, %%SHUFREG
+%elif %%num_initial_blocks == 5
+ vmovdqa64 %%CTR, %%T2
+ vpshufb %%ZT1, %%SHUFREG
+ vpshufb %%T2, XWORD(%%SHUFREG)
+%elif %%num_initial_blocks == 6
+ vextracti32x4 %%CTR, YWORD(%%ZT2), 1
+ vpshufb %%ZT1, %%SHUFREG
+ vpshufb YWORD(%%ZT2), YWORD(%%SHUFREG)
+%elif %%num_initial_blocks == 7
+ vextracti32x4 %%CTR, %%ZT2, 2
+ vpshufb %%ZT1, %%SHUFREG
+ vpshufb %%ZT2, %%SHUFREG
+%elif %%num_initial_blocks == 8
+ vextracti32x4 %%CTR, %%ZT2, 3
+ vpshufb %%ZT1, %%SHUFREG
+ vpshufb %%ZT2, %%SHUFREG
+%elif %%num_initial_blocks == 9
+ vmovdqa64 %%CTR, %%T3
+ vpshufb %%ZT1, %%SHUFREG
+ vpshufb %%ZT2, %%SHUFREG
+ vpshufb %%T3, XWORD(%%SHUFREG)
+%elif %%num_initial_blocks == 10
+ vextracti32x4 %%CTR, YWORD(%%ZT3), 1
+ vpshufb %%ZT1, %%SHUFREG
+ vpshufb %%ZT2, %%SHUFREG
+ vpshufb YWORD(%%ZT3), YWORD(%%SHUFREG)
+%elif %%num_initial_blocks == 11
+ vextracti32x4 %%CTR, %%ZT3, 2
+ vpshufb %%ZT1, %%SHUFREG
+ vpshufb %%ZT2, %%SHUFREG
+ vpshufb %%ZT3, %%SHUFREG
+%elif %%num_initial_blocks == 12
+ vextracti32x4 %%CTR, %%ZT3, 3
+ vpshufb %%ZT1, %%SHUFREG
+ vpshufb %%ZT2, %%SHUFREG
+ vpshufb %%ZT3, %%SHUFREG
+%elif %%num_initial_blocks == 13
+ vmovdqa64 %%CTR, %%T4
+ vpshufb %%ZT1, %%SHUFREG
+ vpshufb %%ZT2, %%SHUFREG
+ vpshufb %%ZT3, %%SHUFREG
+ vpshufb %%T4, XWORD(%%SHUFREG)
+%elif %%num_initial_blocks == 14
+ vextracti32x4 %%CTR, YWORD(%%ZT4), 1
+ vpshufb %%ZT1, %%SHUFREG
+ vpshufb %%ZT2, %%SHUFREG
+ vpshufb %%ZT3, %%SHUFREG
+ vpshufb YWORD(%%ZT4), YWORD(%%SHUFREG)
+%elif %%num_initial_blocks == 15
+ vextracti32x4 %%CTR, %%ZT4, 2
+ vpshufb %%ZT1, %%SHUFREG
+ vpshufb %%ZT2, %%SHUFREG
+ vpshufb %%ZT3, %%SHUFREG
+ vpshufb %%ZT4, %%SHUFREG
+%endif
+
+ ;; AES rounds and XOR with plain/cipher text
+%assign j 0
+%rep (%%NROUNDS + 2)
+ ZMM_AESENC_ROUND_BLOCKS_0_16 \
+ %%ZT1, %%ZT2, %%ZT3, %%ZT4, ZKEY %+ j, j, \
+ %%ZT5, %%ZT6, %%ZT7, %%ZT8, %%num_initial_blocks, \
+ %%NROUNDS
+%assign j (j + 1)
+%endrep
+
+ ;; write cipher/plain text back to output
+ ZMM_STORE_BLOCKS_0_16 %%num_initial_blocks, %%CYPH_PLAIN_OUT, 0, \
+ %%ZT1, %%ZT2, %%ZT3, %%ZT4
+
+ ;; adjust data offset and length
+ sub %%LENGTH, (%%num_initial_blocks * 16)
+ add %%DATA_OFFSET, (%%num_initial_blocks * 16)
+%endif ; %%num_initial_blocks > 0
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;; - cipher of %%num_initial_blocks is done
+ ;; - prepare counter blocks for the next 16 blocks (ZT5-ZT8)
+ ;; - shuffle the blocks for AES
+ ;; - encrypt the next 16 blocks
+
+ ;; get text load/store mask (assume full mask by default)
+ mov %%IA0, 0xffff_ffff_ffff_ffff
+%if %%num_initial_blocks > 0
+ ;; NOTE: 'jge' is always taken for %%num_initial_blocks = 0
+ ;; This macro is executed for length 256 and up,
+ ;; zero length is checked in CNTR_ENC_DEC.
+ ;; We know there is partial block if:
+ ;; LENGTH - 16*num_initial_blocks < 256
+ cmp %%LENGTH, 256
+ jge %%_initial_partial_block_continue
+ mov %%IA1, rcx
+ mov rcx, 256
+ sub rcx, %%LENGTH
+ shr %%IA0, cl
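+        ;; IA0 now has the low (LENGTH - 192) bits set - the byte-mask
+        ;; for the final, possibly partial, 64-byte chunk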
+ mov rcx, %%IA1
+%%_initial_partial_block_continue:
+%endif
+ kmovq %%MASKREG, %%IA0
+ ;; load plain or cipher text
+ vmovdqu8 %%ZT5, [%%PLAIN_CYPH_IN + %%DATA_OFFSET]
+ vmovdqu8 %%ZT6, [%%PLAIN_CYPH_IN + %%DATA_OFFSET + 64]
+ vmovdqu8 %%ZT7, [%%PLAIN_CYPH_IN + %%DATA_OFFSET + 128]
+ vmovdqu8 %%ZT8{%%MASKREG}{z}, [%%PLAIN_CYPH_IN + %%DATA_OFFSET + 192]
+
+ ;; prepare next counter blocks
+ vshufi64x2 ZWORD(%%CTR), ZWORD(%%CTR), ZWORD(%%CTR), 0
+%if %%num_initial_blocks > 0
+ vpaddd %%CTR_1_4, ZWORD(%%CTR), [rel ddq_add_1_4]
+ vpaddd %%CTR_5_8, ZWORD(%%CTR), [rel ddq_add_5_8]
+ vpaddd %%CTR_9_12, ZWORD(%%CTR), [rel ddq_add_9_12]
+ vpaddd %%CTR_13_16, ZWORD(%%CTR), [rel ddq_add_13_16]
+%else
+ vpaddd %%CTR_1_4, ZWORD(%%CTR), [rel ddq_add_0_3]
+ vpaddd %%CTR_5_8, ZWORD(%%CTR), [rel ddq_add_4_7]
+ vpaddd %%CTR_9_12, ZWORD(%%CTR), [rel ddq_add_8_11]
+ vpaddd %%CTR_13_16, ZWORD(%%CTR), [rel ddq_add_12_15]
+%endif
+
+ vpshufb %%ZT1, %%CTR_1_4, %%SHUFREG
+ vpshufb %%ZT2, %%CTR_5_8, %%SHUFREG
+ vpshufb %%ZT3, %%CTR_9_12, %%SHUFREG
+ vpshufb %%ZT4, %%CTR_13_16, %%SHUFREG
+
+ ;; AES rounds and XOR with plain/cipher text
+%assign j 0
+%rep (%%NROUNDS + 2)
+ ZMM_AESENC_ROUND_BLOCKS_0_16 \
+ %%ZT1, %%ZT2, %%ZT3, %%ZT4, ZKEY %+ j, j, \
+ %%ZT5, %%ZT6, %%ZT7, %%ZT8, 16, %%NROUNDS
+%assign j (j + 1)
+%endrep
+
+%ifidn %%CNTR_TYPE, CNTR_BIT
+ ;; check if this is the end of the message
+ cmp %%LENGTH, 256
+ jg %%store_output
+ ;; Check if there is a partial byte
+ or %%RBITS, %%RBITS
+ jz %%store_output
+
+ ;; Copy the bits that are not ciphered from the output text,
+ ;; into the last bits of the output block, before writing it out
+ PRESERVE_BITS %%RBITS, %%LENGTH, %%CYPH_PLAIN_OUT, %%ZT4, %%ZT5, %%ZT6, %%ZT7, \
+ %%IA0, %%IA1, 15, partial, %%MASKREG, %%DATA_OFFSET
+
+%endif
+
+%%store_output:
+ ;; write cipher/plain text back to output
+ vmovdqu8 [%%CYPH_PLAIN_OUT + %%DATA_OFFSET], %%ZT1
+ vmovdqu8 [%%CYPH_PLAIN_OUT + %%DATA_OFFSET + 64], %%ZT2
+ vmovdqu8 [%%CYPH_PLAIN_OUT + %%DATA_OFFSET + 128], %%ZT3
+ vmovdqu8 [%%CYPH_PLAIN_OUT + %%DATA_OFFSET + 192]{%%MASKREG}, %%ZT4
+
+ ;; check if there is partial block
+ cmp %%LENGTH, 256
+ jl %%_initial_partial_done
+ ;; adjust offset and length
+ add %%DATA_OFFSET, 256
+ sub %%LENGTH, 256
+ jmp %%_initial_blocks_done
+%%_initial_partial_done:
+ ;; zero the length (all encryption is complete)
+ xor %%LENGTH, %%LENGTH
+%%_initial_blocks_done:
+
+%endmacro ; INITIAL_BLOCKS
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;; INITIAL_BLOCKS_PARTIAL macro with support for a partial final block.
+;;; It may look similar to INITIAL_BLOCKS but its usage is different:
+;;; - It is not meant to cipher counter blocks for the main by-16 loop;
+;;;   it just ciphers the given number of blocks.
+;;; - It is used for small packets (<256 bytes)
+;;;
+;;; num_initial_blocks is expected to include the partial final block
+;;; in the count.
+%macro INITIAL_BLOCKS_PARTIAL 21
+%define %%KEY %1 ; [in] key pointer
+%define %%CYPH_PLAIN_OUT %2 ; [in] text out pointer
+%define %%PLAIN_CYPH_IN         %3  ; [in] text in pointer
+%define %%LENGTH %4 ; [in/clobbered] length in bytes
+%define %%num_initial_blocks %5 ; [in] can be from 1 to 16 (not 0)
+%define %%CTR %6 ; [in/out] current counter value
+%define %%ZT1 %7 ; [clobbered] ZMM temporary
+%define %%ZT2 %8 ; [clobbered] ZMM temporary
+%define %%ZT3 %9 ; [clobbered] ZMM temporary
+%define %%ZT4 %10 ; [clobbered] ZMM temporary
+%define %%ZT5 %11 ; [clobbered] ZMM temporary
+%define %%ZT6 %12 ; [clobbered] ZMM temporary
+%define %%ZT7 %13 ; [clobbered] ZMM temporary
+%define %%ZT8 %14 ; [clobbered] ZMM temporary
+%define %%IA0 %15 ; [clobbered] GP temporary
+%define %%IA1 %16 ; [clobbered] GP temporary
+%define %%MASKREG %17 ; [clobbered] mask register
+%define %%SHUFREG %18 ; [in] ZMM register with shuffle mask
+%define %%NROUNDS %19 ; [in] number of rounds; numerical value
+%define %%CNTR_TYPE %20 ; [in] Type of CNTR operation to do (CNTR/CNTR_BIT)
+%define %%RBITS %21 ; [in] Number of remaining bits in last byte
+
+%define %%T1 XWORD(%%ZT1)
+%define %%T2 XWORD(%%ZT2)
+%define %%T3 XWORD(%%ZT3)
+%define %%T4 XWORD(%%ZT4)
+%define %%T5 XWORD(%%ZT5)
+%define %%T6 XWORD(%%ZT6)
+%define %%T7 XWORD(%%ZT7)
+%define %%T8 XWORD(%%ZT8)
+
+ ;; get load/store mask
+ lea %%IA0, [rel byte64_len_to_mask_table]
+ mov %%IA1, %%LENGTH
+%if %%num_initial_blocks > 12
+ sub %%IA1, 192
+%elif %%num_initial_blocks > 8
+ sub %%IA1, 128
+%elif %%num_initial_blocks > 4
+ sub %%IA1, 64
+%endif
+ kmovq %%MASKREG, [%%IA0 + %%IA1*8]
+
+ ;; load plain/cipher text
+ ZMM_LOAD_MASKED_BLOCKS_0_16 %%num_initial_blocks, %%PLAIN_CYPH_IN, 0, \
+ %%ZT5, %%ZT6, %%ZT7, %%ZT8, %%MASKREG
+
+ ;; prepare AES counter blocks
+%if %%num_initial_blocks == 1
+ vmovdqa64 XWORD(%%ZT1), XWORD(%%CTR)
+%elif %%num_initial_blocks == 2
+ vshufi64x2 YWORD(%%ZT1), YWORD(%%CTR), YWORD(%%CTR), 0
+ vpaddd YWORD(%%ZT1), YWORD(%%ZT1), [rel ddq_add_0_3]
+%elif %%num_initial_blocks <= 4
+ vshufi64x2 ZWORD(%%CTR), ZWORD(%%CTR), ZWORD(%%CTR), 0
+ vpaddd %%ZT1, ZWORD(%%CTR), [rel ddq_add_0_3]
+%elif %%num_initial_blocks <= 8
+ vshufi64x2 ZWORD(%%CTR), ZWORD(%%CTR), ZWORD(%%CTR), 0
+ vpaddd %%ZT1, ZWORD(%%CTR), [rel ddq_add_0_3]
+ vpaddd %%ZT2, ZWORD(%%CTR), [rel ddq_add_4_7]
+%elif %%num_initial_blocks <= 12
+ vshufi64x2 ZWORD(%%CTR), ZWORD(%%CTR), ZWORD(%%CTR), 0
+ vpaddd %%ZT1, ZWORD(%%CTR), [rel ddq_add_0_3]
+ vpaddd %%ZT2, ZWORD(%%CTR), [rel ddq_add_4_7]
+ vpaddd %%ZT3, ZWORD(%%CTR), [rel ddq_add_8_11]
+%else
+ vshufi64x2 ZWORD(%%CTR), ZWORD(%%CTR), ZWORD(%%CTR), 0
+ vpaddd %%ZT1, ZWORD(%%CTR), [rel ddq_add_0_3]
+ vpaddd %%ZT2, ZWORD(%%CTR), [rel ddq_add_4_7]
+ vpaddd %%ZT3, ZWORD(%%CTR), [rel ddq_add_8_11]
+ vpaddd %%ZT4, ZWORD(%%CTR), [rel ddq_add_12_15]
+%endif
+
+ ;; shuffle the counters for AES rounds
+ ZMM_OPCODE3_DSTR_SRC1R_SRC2R_BLOCKS_0_16 %%num_initial_blocks, vpshufb, \
+ %%ZT1, %%ZT2, %%ZT3, %%ZT4, \
+ %%ZT1, %%ZT2, %%ZT3, %%ZT4, \
+ %%SHUFREG, %%SHUFREG, %%SHUFREG, %%SHUFREG
+
+ ;; AES rounds and XOR with plain/cipher text
+%assign j 0
+%rep (%%NROUNDS + 2)
+ ZMM_AESENC_ROUND_BLOCKS_0_16 \
+ %%ZT1, %%ZT2, %%ZT3, %%ZT4, ZKEY %+ j, j, \
+ %%ZT5, %%ZT6, %%ZT7, %%ZT8, %%num_initial_blocks, \
+ %%NROUNDS
+%assign j (j + 1)
+%endrep
+
+%ifidn %%CNTR_TYPE, CNTR_BIT
+ ;; Check if there is a partial byte
+ or %%RBITS, %%RBITS
+ jz %%store_output
+
+ ;; Copy the bits that are not ciphered from the output text,
+ ;; into the last bits of the output block, before writing it out
+%if %%num_initial_blocks <= 4
+ PRESERVE_BITS %%RBITS, %%LENGTH, %%CYPH_PLAIN_OUT, %%ZT1, %%ZT5, %%ZT6, %%ZT7, \
+ %%IA0, %%IA1, (%%num_initial_blocks - 1), \
+ partial, %%MASKREG
+%elif %%num_initial_blocks <= 8
+ PRESERVE_BITS %%RBITS, %%LENGTH, %%CYPH_PLAIN_OUT, %%ZT2, %%ZT5, %%ZT6, %%ZT7, \
+ %%IA0, %%IA1, (%%num_initial_blocks - 1), \
+ partial, %%MASKREG
+%elif %%num_initial_blocks <= 12
+ PRESERVE_BITS %%RBITS, %%LENGTH, %%CYPH_PLAIN_OUT, %%ZT3, %%ZT5, %%ZT6, %%ZT7, \
+ %%IA0, %%IA1, (%%num_initial_blocks - 1), \
+ partial, %%MASKREG
+%else
+ PRESERVE_BITS %%RBITS, %%LENGTH, %%CYPH_PLAIN_OUT, %%ZT4, %%ZT5, %%ZT6, %%ZT7, \
+ %%IA0, %%IA1, (%%num_initial_blocks - 1), \
+ partial, %%MASKREG
+%endif
+
+%endif
+
+%%store_output:
+ ;; write cipher/plain text back to output
+ ZMM_STORE_MASKED_BLOCKS_0_16 %%num_initial_blocks, %%CYPH_PLAIN_OUT, 0, \
+ %%ZT1, %%ZT2, %%ZT3, %%ZT4, %%MASKREG
+
+%endmacro ; INITIAL_BLOCKS_PARTIAL
+
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;; Main CNTR macro
+;;; - operates on single stream
+;;; - encrypts 16 blocks at a time
+%macro ENCRYPT_16_PARALLEL 26
+%define %%KEY %1 ; [in] key pointer
+%define %%CYPH_PLAIN_OUT %2 ; [in] pointer to output buffer
+%define %%PLAIN_CYPH_IN %3 ; [in] pointer to input buffer
+%define %%DATA_OFFSET %4 ; [in] data offset
+%define %%CTR_1_4 %5 ; [in/out] ZMM next 1-4 counter blocks
+%define %%CTR_5_8 %6 ; [in/out] ZMM next 5-8 counter blocks
+%define %%CTR_9_12 %7 ; [in/out] ZMM next 9-12 counter blocks
+%define %%CTR_13_16 %8 ; [in/out] ZMM next 13-16 counter blocks
+%define %%FULL_PARTIAL %9 ; [in] last block type selection "full" or "partial"
+%define %%IA0 %10 ; [clobbered] temporary GP register
+%define %%IA1 %11 ; [clobbered] temporary GP register
+%define %%LENGTH %12 ; [in] length
+%define %%ZT1 %13 ; [clobbered] temporary ZMM (cipher)
+%define %%ZT2 %14 ; [clobbered] temporary ZMM (cipher)
+%define %%ZT3 %15 ; [clobbered] temporary ZMM (cipher)
+%define %%ZT4 %16 ; [clobbered] temporary ZMM (cipher)
+%define %%ZT5 %17 ; [clobbered] temporary ZMM (cipher)
+%define %%ZT6 %18 ; [clobbered] temporary ZMM (cipher)
+%define %%ZT7 %19 ; [clobbered] temporary ZMM (cipher)
+%define %%ZT8 %20 ; [clobbered] temporary ZMM (cipher)
+%define %%MASKREG %21 ; [clobbered] mask register for partial loads/stores
+%define %%SHUFREG %22 ; [in] ZMM register with shuffle mask
+%define %%ADD8REG %23 ; [in] ZMM register with increment by 8 mask
+%define %%NROUNDS %24 ; [in] number of rounds; numerical value
+%define %%CNTR_TYPE %25 ; [in] Type of CNTR operation to do (CNTR/CNTR_BIT)
+%define %%RBITS %26 ; [in] Number of remaining bits in last byte
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;; load/store mask (partial case) and load the text data
+%ifidn %%FULL_PARTIAL, full
+ vmovdqu8 %%ZT5, [%%PLAIN_CYPH_IN + %%DATA_OFFSET]
+ vmovdqu8 %%ZT6, [%%PLAIN_CYPH_IN + %%DATA_OFFSET + 64]
+ vmovdqu8 %%ZT7, [%%PLAIN_CYPH_IN + %%DATA_OFFSET + 128]
+ vmovdqu8 %%ZT8, [%%PLAIN_CYPH_IN + %%DATA_OFFSET + 192]
+%else
+ lea %%IA0, [rel byte64_len_to_mask_table]
+ mov %%IA1, %%LENGTH
+ sub %%IA1, (3*64)
+ kmovq %%MASKREG, [%%IA0 + 8*%%IA1]
+ vmovdqu8 %%ZT5, [%%PLAIN_CYPH_IN + %%DATA_OFFSET]
+ vmovdqu8 %%ZT6, [%%PLAIN_CYPH_IN + %%DATA_OFFSET + 64]
+ vmovdqu8 %%ZT7, [%%PLAIN_CYPH_IN + %%DATA_OFFSET + 128]
+ vmovdqu8 %%ZT8{%%MASKREG}{z}, [%%PLAIN_CYPH_IN + %%DATA_OFFSET + 192]
+%endif
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;; populate counter blocks
+ ;; %%CTR is shuffled outside the scope of this macro
+ ;; it has to be kept in unshuffled form
+ vpaddd %%CTR_1_4, %%CTR_1_4, %%ADD8REG
+ vpaddd %%CTR_5_8, %%CTR_5_8, %%ADD8REG
+ vpaddd %%CTR_9_12, %%CTR_9_12, %%ADD8REG
+ vpaddd %%CTR_13_16, %%CTR_13_16, %%ADD8REG
+ vpshufb %%ZT1, %%CTR_1_4, %%SHUFREG
+ vpshufb %%ZT2, %%CTR_5_8, %%SHUFREG
+ vpshufb %%ZT3, %%CTR_9_12, %%SHUFREG
+ vpshufb %%ZT4, %%CTR_13_16, %%SHUFREG
+
+%assign j 0
+%rep (%%NROUNDS + 2)
+ ZMM_AESENC_ROUND_BLOCKS_0_16 \
+ %%ZT1, %%ZT2, %%ZT3, %%ZT4, ZKEY %+ j, j, \
+ %%ZT5, %%ZT6, %%ZT7, %%ZT8, 16, %%NROUNDS
+%assign j (j + 1)
+%endrep
+
+%ifidn %%CNTR_TYPE, CNTR_BIT
+ ;; Check if this is the last round
+ cmp %%LENGTH, 256
+ jg %%store_output
+ ;; Check if there is a partial byte
+ or %%RBITS, %%RBITS
+ jz %%store_output
+
+ ;; Copy the bits that are not ciphered from the output text,
+ ;; into the last bits of the output block, before writing it out
+ PRESERVE_BITS %%RBITS, %%LENGTH, %%CYPH_PLAIN_OUT, %%ZT4, %%ZT5, %%ZT6, %%ZT7, \
+ %%IA0, %%IA1, 15, %%FULL_PARTIAL, %%MASKREG, %%DATA_OFFSET
+
+%endif
+
+%%store_output:
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;; store the text data
+%ifidn %%FULL_PARTIAL, full
+ vmovdqu8 [%%CYPH_PLAIN_OUT + %%DATA_OFFSET], %%ZT1
+ vmovdqu8 [%%CYPH_PLAIN_OUT + %%DATA_OFFSET + 64], %%ZT2
+ vmovdqu8 [%%CYPH_PLAIN_OUT + %%DATA_OFFSET + 128], %%ZT3
+ vmovdqu8 [%%CYPH_PLAIN_OUT + %%DATA_OFFSET + 192], %%ZT4
+%else
+ vmovdqu8 [%%CYPH_PLAIN_OUT + %%DATA_OFFSET], %%ZT1
+ vmovdqu8 [%%CYPH_PLAIN_OUT + %%DATA_OFFSET + 64], %%ZT2
+ vmovdqu8 [%%CYPH_PLAIN_OUT + %%DATA_OFFSET + 128], %%ZT3
+ vmovdqu8 [%%CYPH_PLAIN_OUT + %%DATA_OFFSET + 192]{%%MASKREG}, %%ZT4
+%endif
+
+%endmacro ; ENCRYPT_16_PARALLEL
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;; Save register content for the caller
+%macro FUNC_SAVE 1
+%define %%CNTR_TYPE %1 ; [in] Type of CNTR operation to do (CNTR/CNTR_BIT)
+ mov rax, rsp
+
+ sub rsp, STACK_FRAME_SIZE
+ and rsp, ~63
+
+ mov [rsp + 0*8], r12
+ mov [rsp + 1*8], r13
+%ifidn %%CNTR_TYPE, CNTR_BIT
+ mov [rsp + 2*8], r14
+%endif
+ mov [rsp + 3*8], rax ; stack
+%ifidn __OUTPUT_FORMAT__, win64
+ mov [rsp + 4*8], rdi
+ mov [rsp + 5*8], rsi
+%endif
+
+%endmacro
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;; Restore register content for the caller
+%macro FUNC_RESTORE 1
+%define %%CNTR_TYPE %1 ; [in] Type of CNTR operation to do (CNTR/CNTR_BIT)
+
+ vzeroupper
+%ifidn __OUTPUT_FORMAT__, win64
+ mov rdi, [rsp + 4*8]
+ mov rsi, [rsp + 5*8]
+%endif
+ mov r12, [rsp + 0*8]
+ mov r13, [rsp + 1*8]
+%ifidn %%CNTR_TYPE, CNTR_BIT
+ mov r14, [rsp + 2*8]
+%endif
+ mov rsp, [rsp + 3*8] ; stack
+%endmacro
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;; Cipher payloads shorter than 256 bytes
+;;; - number of blocks in the message comes as argument
+;;; - depending on the number of blocks an optimized variant of
+;;; INITIAL_BLOCKS_PARTIAL is invoked
+%macro CNTR_ENC_DEC_SMALL 21
+%define %%KEY %1 ; [in] key pointer
+%define %%CYPH_PLAIN_OUT %2 ; [in] output buffer
+%define %%PLAIN_CYPH_IN %3 ; [in] input buffer
+%define %%LENGTH %4 ; [in] data length
+%define %%NUM_BLOCKS            %5  ; [in] number of blocks to process (1 to 16)
+%define %%CTR %6 ; [in/out] XMM counter block
+%define %%ZTMP1 %7 ; [clobbered] ZMM register
+%define %%ZTMP2 %8 ; [clobbered] ZMM register
+%define %%ZTMP3 %9 ; [clobbered] ZMM register
+%define %%ZTMP4 %10 ; [clobbered] ZMM register
+%define %%ZTMP5 %11 ; [clobbered] ZMM register
+%define %%ZTMP6 %12 ; [clobbered] ZMM register
+%define %%ZTMP7 %13 ; [clobbered] ZMM register
+%define %%ZTMP8 %14 ; [clobbered] ZMM register
+%define %%IA0 %15 ; [clobbered] GP register
+%define %%IA1 %16 ; [clobbered] GP register
+%define %%MASKREG %17 ; [clobbered] mask register
+%define %%SHUFREG %18 ; [in] ZMM register with shuffle mask
+%define %%NROUNDS %19 ; [in] number of rounds; numerical value
+%define %%CNTR_TYPE %20 ; [in] Type of CNTR operation to do (CNTR/CNTR_BIT)
+%define %%RBITS %21 ; [in] Number of remaining bits in last byte
+
+ cmp %%NUM_BLOCKS, 8
+ je %%_small_initial_num_blocks_is_8
+ jl %%_small_initial_blocks_is_1_7
+
+ ; Initial blocks 9-16
+ cmp %%NUM_BLOCKS, 12
+ je %%_small_initial_num_blocks_is_12
+ jl %%_small_initial_blocks_is_9_11
+
+ ; Initial blocks 13-16
+ cmp %%NUM_BLOCKS, 16
+ je %%_small_initial_num_blocks_is_16
+ cmp %%NUM_BLOCKS, 15
+ je %%_small_initial_num_blocks_is_15
+ cmp %%NUM_BLOCKS, 14
+ je %%_small_initial_num_blocks_is_14
+ cmp %%NUM_BLOCKS, 13
+ je %%_small_initial_num_blocks_is_13
+
+%%_small_initial_blocks_is_9_11:
+ cmp %%NUM_BLOCKS, 11
+ je %%_small_initial_num_blocks_is_11
+ cmp %%NUM_BLOCKS, 10
+ je %%_small_initial_num_blocks_is_10
+ cmp %%NUM_BLOCKS, 9
+ je %%_small_initial_num_blocks_is_9
+
+%%_small_initial_blocks_is_1_7:
+ cmp %%NUM_BLOCKS, 4
+ je %%_small_initial_num_blocks_is_4
+ jl %%_small_initial_blocks_is_1_3
+
+ ; Initial blocks 5-7
+ cmp %%NUM_BLOCKS, 7
+ je %%_small_initial_num_blocks_is_7
+ cmp %%NUM_BLOCKS, 6
+ je %%_small_initial_num_blocks_is_6
+ cmp %%NUM_BLOCKS, 5
+ je %%_small_initial_num_blocks_is_5
+
+%%_small_initial_blocks_is_1_3:
+ cmp %%NUM_BLOCKS, 3
+ je %%_small_initial_num_blocks_is_3
+ cmp %%NUM_BLOCKS, 2
+ je %%_small_initial_num_blocks_is_2
+
+ jmp %%_small_initial_num_blocks_is_1
+
+
+%%_small_initial_num_blocks_is_16:
+ INITIAL_BLOCKS_PARTIAL %%KEY, %%CYPH_PLAIN_OUT, \
+ %%PLAIN_CYPH_IN, %%LENGTH, 16, \
+ %%CTR, \
+ %%ZTMP1, %%ZTMP2, %%ZTMP3, %%ZTMP4, %%ZTMP5, \
+ %%ZTMP6, %%ZTMP7, %%ZTMP8, \
+ %%IA0, %%IA1, %%MASKREG, %%SHUFREG, %%NROUNDS, \
+ %%CNTR_TYPE, %%RBITS
+ jmp %%_small_initial_blocks_encrypted
+
+%%_small_initial_num_blocks_is_15:
+ INITIAL_BLOCKS_PARTIAL %%KEY, %%CYPH_PLAIN_OUT, \
+ %%PLAIN_CYPH_IN, %%LENGTH, 15, \
+ %%CTR, \
+ %%ZTMP1, %%ZTMP2, %%ZTMP3, %%ZTMP4, %%ZTMP5, \
+ %%ZTMP6, %%ZTMP7, %%ZTMP8, \
+ %%IA0, %%IA1, %%MASKREG, %%SHUFREG, %%NROUNDS, \
+ %%CNTR_TYPE, %%RBITS
+ jmp %%_small_initial_blocks_encrypted
+
+%%_small_initial_num_blocks_is_14:
+ INITIAL_BLOCKS_PARTIAL %%KEY, %%CYPH_PLAIN_OUT, \
+ %%PLAIN_CYPH_IN, %%LENGTH, 14, \
+ %%CTR, \
+ %%ZTMP1, %%ZTMP2, %%ZTMP3, %%ZTMP4, %%ZTMP5, \
+ %%ZTMP6, %%ZTMP7, %%ZTMP8, \
+ %%IA0, %%IA1, %%MASKREG, %%SHUFREG, %%NROUNDS, \
+ %%CNTR_TYPE, %%RBITS
+ jmp %%_small_initial_blocks_encrypted
+
+%%_small_initial_num_blocks_is_13:
+ INITIAL_BLOCKS_PARTIAL %%KEY, %%CYPH_PLAIN_OUT, \
+ %%PLAIN_CYPH_IN, %%LENGTH, 13, \
+ %%CTR, \
+ %%ZTMP1, %%ZTMP2, %%ZTMP3, %%ZTMP4, %%ZTMP5, \
+ %%ZTMP6, %%ZTMP7, %%ZTMP8, \
+ %%IA0, %%IA1, %%MASKREG, %%SHUFREG, %%NROUNDS, \
+ %%CNTR_TYPE, %%RBITS
+ jmp %%_small_initial_blocks_encrypted
+
+%%_small_initial_num_blocks_is_12:
+ INITIAL_BLOCKS_PARTIAL %%KEY, %%CYPH_PLAIN_OUT, \
+ %%PLAIN_CYPH_IN, %%LENGTH, 12, \
+ %%CTR, \
+ %%ZTMP1, %%ZTMP2, %%ZTMP3, %%ZTMP4, %%ZTMP5, \
+ %%ZTMP6, %%ZTMP7, %%ZTMP8, \
+ %%IA0, %%IA1, %%MASKREG, %%SHUFREG, %%NROUNDS, \
+ %%CNTR_TYPE, %%RBITS
+ jmp %%_small_initial_blocks_encrypted
+
+%%_small_initial_num_blocks_is_11:
+ INITIAL_BLOCKS_PARTIAL %%KEY, %%CYPH_PLAIN_OUT, \
+ %%PLAIN_CYPH_IN, %%LENGTH, 11, \
+ %%CTR, \
+ %%ZTMP1, %%ZTMP2, %%ZTMP3, %%ZTMP4, %%ZTMP5, \
+ %%ZTMP6, %%ZTMP7, %%ZTMP8, \
+ %%IA0, %%IA1, %%MASKREG, %%SHUFREG, %%NROUNDS, \
+ %%CNTR_TYPE, %%RBITS
+ jmp %%_small_initial_blocks_encrypted
+
+%%_small_initial_num_blocks_is_10:
+ INITIAL_BLOCKS_PARTIAL %%KEY, %%CYPH_PLAIN_OUT, \
+ %%PLAIN_CYPH_IN, %%LENGTH, 10, \
+ %%CTR, \
+ %%ZTMP1, %%ZTMP2, %%ZTMP3, %%ZTMP4, %%ZTMP5, \
+ %%ZTMP6, %%ZTMP7, %%ZTMP8, \
+ %%IA0, %%IA1, %%MASKREG, %%SHUFREG, %%NROUNDS, \
+ %%CNTR_TYPE, %%RBITS
+ jmp %%_small_initial_blocks_encrypted
+
+%%_small_initial_num_blocks_is_9:
+ INITIAL_BLOCKS_PARTIAL %%KEY, %%CYPH_PLAIN_OUT, \
+ %%PLAIN_CYPH_IN, %%LENGTH, 9, \
+ %%CTR, \
+ %%ZTMP1, %%ZTMP2, %%ZTMP3, %%ZTMP4, %%ZTMP5, \
+ %%ZTMP6, %%ZTMP7, %%ZTMP8, \
+ %%IA0, %%IA1, %%MASKREG, %%SHUFREG, %%NROUNDS, \
+ %%CNTR_TYPE, %%RBITS
+ jmp %%_small_initial_blocks_encrypted
+%%_small_initial_num_blocks_is_8:
+ INITIAL_BLOCKS_PARTIAL %%KEY, %%CYPH_PLAIN_OUT, \
+ %%PLAIN_CYPH_IN, %%LENGTH, 8, \
+ %%CTR, \
+ %%ZTMP1, %%ZTMP2, %%ZTMP3, %%ZTMP4, %%ZTMP5, \
+ %%ZTMP6, %%ZTMP7, %%ZTMP8, \
+ %%IA0, %%IA1, %%MASKREG, %%SHUFREG, %%NROUNDS, \
+ %%CNTR_TYPE, %%RBITS
+ jmp %%_small_initial_blocks_encrypted
+
+%%_small_initial_num_blocks_is_7:
+ INITIAL_BLOCKS_PARTIAL %%KEY, %%CYPH_PLAIN_OUT, \
+ %%PLAIN_CYPH_IN, %%LENGTH, 7, \
+ %%CTR, \
+ %%ZTMP1, %%ZTMP2, %%ZTMP3, %%ZTMP4, %%ZTMP5, \
+ %%ZTMP6, %%ZTMP7, %%ZTMP8, \
+ %%IA0, %%IA1, %%MASKREG, %%SHUFREG, %%NROUNDS, \
+ %%CNTR_TYPE, %%RBITS
+ jmp %%_small_initial_blocks_encrypted
+
+%%_small_initial_num_blocks_is_6:
+ INITIAL_BLOCKS_PARTIAL %%KEY, %%CYPH_PLAIN_OUT, \
+ %%PLAIN_CYPH_IN, %%LENGTH, 6, \
+ %%CTR, \
+ %%ZTMP1, %%ZTMP2, %%ZTMP3, %%ZTMP4, %%ZTMP5, \
+ %%ZTMP6, %%ZTMP7, %%ZTMP8, \
+ %%IA0, %%IA1, %%MASKREG, %%SHUFREG, %%NROUNDS, \
+ %%CNTR_TYPE, %%RBITS
+ jmp %%_small_initial_blocks_encrypted
+
+%%_small_initial_num_blocks_is_5:
+ INITIAL_BLOCKS_PARTIAL %%KEY, %%CYPH_PLAIN_OUT, \
+ %%PLAIN_CYPH_IN, %%LENGTH, 5, \
+ %%CTR, \
+ %%ZTMP1, %%ZTMP2, %%ZTMP3, %%ZTMP4, %%ZTMP5, \
+ %%ZTMP6, %%ZTMP7, %%ZTMP8, \
+ %%IA0, %%IA1, %%MASKREG, %%SHUFREG, %%NROUNDS, \
+ %%CNTR_TYPE, %%RBITS
+ jmp %%_small_initial_blocks_encrypted
+
+%%_small_initial_num_blocks_is_4:
+ INITIAL_BLOCKS_PARTIAL %%KEY, %%CYPH_PLAIN_OUT, \
+ %%PLAIN_CYPH_IN, %%LENGTH, 4, \
+ %%CTR, \
+ %%ZTMP1, %%ZTMP2, %%ZTMP3, %%ZTMP4, %%ZTMP5, \
+ %%ZTMP6, %%ZTMP7, %%ZTMP8, \
+ %%IA0, %%IA1, %%MASKREG, %%SHUFREG, %%NROUNDS, \
+ %%CNTR_TYPE, %%RBITS
+ jmp %%_small_initial_blocks_encrypted
+
+%%_small_initial_num_blocks_is_3:
+ INITIAL_BLOCKS_PARTIAL %%KEY, %%CYPH_PLAIN_OUT, \
+ %%PLAIN_CYPH_IN, %%LENGTH, 3, \
+ %%CTR, \
+ %%ZTMP1, %%ZTMP2, %%ZTMP3, %%ZTMP4, %%ZTMP5, \
+ %%ZTMP6, %%ZTMP7, %%ZTMP8, \
+ %%IA0, %%IA1, %%MASKREG, %%SHUFREG, %%NROUNDS, \
+ %%CNTR_TYPE, %%RBITS
+ jmp %%_small_initial_blocks_encrypted
+
+%%_small_initial_num_blocks_is_2:
+ INITIAL_BLOCKS_PARTIAL %%KEY, %%CYPH_PLAIN_OUT, \
+ %%PLAIN_CYPH_IN, %%LENGTH, 2, \
+ %%CTR, \
+ %%ZTMP1, %%ZTMP2, %%ZTMP3, %%ZTMP4, %%ZTMP5, \
+ %%ZTMP6, %%ZTMP7, %%ZTMP8, \
+ %%IA0, %%IA1, %%MASKREG, %%SHUFREG, %%NROUNDS, \
+ %%CNTR_TYPE, %%RBITS
+ jmp %%_small_initial_blocks_encrypted
+
+%%_small_initial_num_blocks_is_1:
+ INITIAL_BLOCKS_PARTIAL %%KEY, %%CYPH_PLAIN_OUT, \
+ %%PLAIN_CYPH_IN, %%LENGTH, 1, \
+ %%CTR, \
+ %%ZTMP1, %%ZTMP2, %%ZTMP3, %%ZTMP4, %%ZTMP5, \
+ %%ZTMP6, %%ZTMP7, %%ZTMP8, \
+ %%IA0, %%IA1, %%MASKREG, %%SHUFREG, %%NROUNDS, \
+ %%CNTR_TYPE, %%RBITS
+%%_small_initial_blocks_encrypted:
+
+%endmacro ; CNTR_ENC_DEC_SMALL
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; CNTR_ENC_DEC Encodes/Decodes given data.
+; Requires the input data to be at least 1 byte long because of READ_SMALL_INPUT_DATA.
+; Input: job structure and number of AES rounds
+; Output: job structure
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+%macro CNTR_ENC_DEC 3
+%define %%JOB %1 ; [in/out] job
+%define %%NROUNDS %2 ; [in] number of rounds; numerical value
+%define %%CNTR_TYPE %3 ; [in] Type of CNTR operation to do (CNTR/CNTR_BIT)
+
+%define %%KEY rax
+%define %%CYPH_PLAIN_OUT rdx
+%define %%PLAIN_CYPH_IN r8
+%define %%LENGTH r9
+%define %%DATA_OFFSET r13
+%define %%RBITS r14
+
+%define %%IA0 r10
+%define %%IA1 r11
+%define %%IA2 r12
+
+%define %%CTR_BLOCKx xmm0
+%define %%CTR_BLOCK_1_4 zmm1
+%define %%CTR_BLOCK_5_8 zmm2
+%define %%CTR_BLOCK_9_12 zmm3
+%define %%CTR_BLOCK_13_16 zmm4
+
+%define %%ZTMP0 zmm5
+%define %%ZTMP1 zmm6
+%define %%ZTMP2 zmm7
+%define %%ZTMP3 zmm8
+%define %%ZTMP4 zmm9
+%define %%ZTMP5 zmm10
+%define %%ZTMP6 zmm11
+%define %%ZTMP7 zmm12
+%define %%SHUFREG zmm13
+%define %%ADD8REG zmm14
+
+%define %%MASKREG k1
+
+;;; Macro flow:
+;;; - calculate the number of 16-byte blocks in the message
+;;; - process (number of 16-byte blocks) mod 16 ('%%_initial_num_blocks_is_# .. %%_initial_blocks_encrypted')
+;;; - process 16 x 16-byte blocks at a time until all are done in %%_encrypt_by_16
+
+ mov %%LENGTH, [%%JOB + _msg_len_to_cipher]
+ ;; calculate len
+ ;; convert bits to bytes (message length in bits for CNTR_BIT)
+%ifidn %%CNTR_TYPE, CNTR_BIT
+ mov %%RBITS, %%LENGTH
+ add %%LENGTH, 7
+ shr %%LENGTH, 3 ; LENGTH will hold number of bytes (including partial byte)
+ and %%RBITS, 7 ; Get remainder bits in last byte (0-7)
+%endif
+
+%ifidn __OUTPUT_FORMAT__, win64
+ cmp %%LENGTH, 0
+%else
+ or %%LENGTH, %%LENGTH
+%endif
+ je %%_enc_dec_done
+
+ xor %%DATA_OFFSET, %%DATA_OFFSET
+
+ mov %%PLAIN_CYPH_IN, [%%JOB + _src]
+ add %%PLAIN_CYPH_IN, [%%JOB + _cipher_start_src_offset_in_bytes]
+ mov %%CYPH_PLAIN_OUT, [%%JOB + _dst]
+ mov %%KEY, [%%JOB + _aes_enc_key_expanded]
+
+        ;; Prepare round keys (broadcast all NROUNDS + 2 expanded round keys into the ZKEY registers)
+%assign i 0
+%rep (%%NROUNDS + 2)
+ vbroadcastf64x2 ZKEY %+ i, [%%KEY + 16*i]
+%assign i (i + 1)
+%endrep
+
+ mov %%IA1, [%%JOB + _iv]
+%ifidn %%CNTR_TYPE, CNTR
+ ;; Prepare initial mask to read 12 IV bytes
+ mov %%IA0, 0x0000_0000_0000_0fff
+ vmovdqa %%CTR_BLOCKx, [rel initial_12_IV_counter]
+ mov %%IA2, [%%JOB + _iv_len_in_bytes]
+ test %%IA2, 16
+ ;; Set mask to read 16 IV bytes if iv_len = 16
+ cmovnz %%IA0, [rel mask_16_bytes]
+
+ kmovq %%MASKREG, %%IA0
+ vmovdqu8 %%CTR_BLOCKx{%%MASKREG}, [%%IA1]
+%else ;; CNTR_BIT
+ ;; Read the full 16 bytes of IV
+ vmovdqu8 %%CTR_BLOCKx, [%%IA1]
+%endif ;; CNTR/CNTR_BIT
+
+ vmovdqa64 %%SHUFREG, [rel SHUF_MASK]
+ ;; store IV as counter in LE format
+ vpshufb %%CTR_BLOCKx, XWORD(%%SHUFREG)
+
+ ;; Determine how many blocks to process in INITIAL
+ mov %%IA1, %%LENGTH
+ shr %%IA1, 4
+ and %%IA1, 0xf
+
+ ;; Process one additional block in INITIAL if there is a partial block
+ mov %%IA0, %%LENGTH
+ and %%IA0, 0xf
+ add %%IA0, 0xf
+ shr %%IA0, 4
+ add %%IA1, %%IA0
+ ;; %%IA1 can be in the range from 0 to 16
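+        ;; e.g. LENGTH = 250: 15 full blocks plus a partial block -> %%IA1 = 16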
+
+        ;; Lengths below 256 bytes are handled by the small message code,
+        ;; which can process up to 16 blocks (16 bytes each)
+ cmp %%LENGTH, 256
+ jge %%_large_message_path
+
+ CNTR_ENC_DEC_SMALL \
+ %%KEY, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, \
+ %%LENGTH, \
+ %%IA1, %%CTR_BLOCKx, \
+ %%ZTMP0, %%ZTMP1, %%ZTMP2, %%ZTMP3, %%ZTMP4, \
+ %%ZTMP5, %%ZTMP6, %%ZTMP7, \
+ %%IA0, %%IA2, %%MASKREG, %%SHUFREG, %%NROUNDS, \
+ %%CNTR_TYPE, %%RBITS
+
+ jmp %%_enc_dec_done
+
+%%_large_message_path:
+        ;; Still, don't allow 16 INITIAL blocks since these
+        ;; can be handled by the x16 partial loop.
+ and %%IA1, 0xf
+ je %%_initial_num_blocks_is_0
+ cmp %%IA1, 15
+ je %%_initial_num_blocks_is_15
+ cmp %%IA1, 14
+ je %%_initial_num_blocks_is_14
+ cmp %%IA1, 13
+ je %%_initial_num_blocks_is_13
+ cmp %%IA1, 12
+ je %%_initial_num_blocks_is_12
+ cmp %%IA1, 11
+ je %%_initial_num_blocks_is_11
+ cmp %%IA1, 10
+ je %%_initial_num_blocks_is_10
+ cmp %%IA1, 9
+ je %%_initial_num_blocks_is_9
+ cmp %%IA1, 8
+ je %%_initial_num_blocks_is_8
+ cmp %%IA1, 7
+ je %%_initial_num_blocks_is_7
+ cmp %%IA1, 6
+ je %%_initial_num_blocks_is_6
+ cmp %%IA1, 5
+ je %%_initial_num_blocks_is_5
+ cmp %%IA1, 4
+ je %%_initial_num_blocks_is_4
+ cmp %%IA1, 3
+ je %%_initial_num_blocks_is_3
+ cmp %%IA1, 2
+ je %%_initial_num_blocks_is_2
+ jmp %%_initial_num_blocks_is_1
+
+%%_initial_num_blocks_is_15:
+ INITIAL_BLOCKS %%KEY, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, \
+ %%LENGTH, %%DATA_OFFSET, 15, %%CTR_BLOCKx, \
+ %%CTR_BLOCK_1_4, %%CTR_BLOCK_5_8, %%CTR_BLOCK_9_12, \
+ %%CTR_BLOCK_13_16, %%ZTMP0, %%ZTMP1, %%ZTMP2, %%ZTMP3, \
+ %%ZTMP4, %%ZTMP5, %%ZTMP6, %%ZTMP7, %%IA0, %%IA1, %%MASKREG, \
+ %%SHUFREG, %%NROUNDS, %%CNTR_TYPE, %%RBITS
+ jmp %%_initial_blocks_encrypted
+%%_initial_num_blocks_is_14:
+ INITIAL_BLOCKS %%KEY, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, \
+ %%LENGTH, %%DATA_OFFSET, 14, %%CTR_BLOCKx, \
+ %%CTR_BLOCK_1_4, %%CTR_BLOCK_5_8, %%CTR_BLOCK_9_12, \
+ %%CTR_BLOCK_13_16, %%ZTMP0, %%ZTMP1, %%ZTMP2, %%ZTMP3, \
+ %%ZTMP4, %%ZTMP5, %%ZTMP6, %%ZTMP7, %%IA0, %%IA1, %%MASKREG, \
+ %%SHUFREG, %%NROUNDS, %%CNTR_TYPE, %%RBITS
+ jmp %%_initial_blocks_encrypted
+%%_initial_num_blocks_is_13:
+ INITIAL_BLOCKS %%KEY, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, \
+ %%LENGTH, %%DATA_OFFSET, 13, %%CTR_BLOCKx, \
+ %%CTR_BLOCK_1_4, %%CTR_BLOCK_5_8, %%CTR_BLOCK_9_12, \
+ %%CTR_BLOCK_13_16, %%ZTMP0, %%ZTMP1, %%ZTMP2, %%ZTMP3, \
+ %%ZTMP4, %%ZTMP5, %%ZTMP6, %%ZTMP7, %%IA0, %%IA1, %%MASKREG, \
+ %%SHUFREG, %%NROUNDS, %%CNTR_TYPE, %%RBITS
+ jmp %%_initial_blocks_encrypted
+%%_initial_num_blocks_is_12:
+ INITIAL_BLOCKS %%KEY, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, \
+ %%LENGTH, %%DATA_OFFSET, 12, %%CTR_BLOCKx, \
+ %%CTR_BLOCK_1_4, %%CTR_BLOCK_5_8, %%CTR_BLOCK_9_12, \
+ %%CTR_BLOCK_13_16, %%ZTMP0, %%ZTMP1, %%ZTMP2, %%ZTMP3, \
+ %%ZTMP4, %%ZTMP5, %%ZTMP6, %%ZTMP7, %%IA0, %%IA1, %%MASKREG, \
+ %%SHUFREG, %%NROUNDS, %%CNTR_TYPE, %%RBITS
+ jmp %%_initial_blocks_encrypted
+%%_initial_num_blocks_is_11:
+ INITIAL_BLOCKS %%KEY, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, \
+ %%LENGTH, %%DATA_OFFSET, 11, %%CTR_BLOCKx, \
+ %%CTR_BLOCK_1_4, %%CTR_BLOCK_5_8, %%CTR_BLOCK_9_12, \
+ %%CTR_BLOCK_13_16, %%ZTMP0, %%ZTMP1, %%ZTMP2, %%ZTMP3, \
+ %%ZTMP4, %%ZTMP5, %%ZTMP6, %%ZTMP7, %%IA0, %%IA1, %%MASKREG, \
+ %%SHUFREG, %%NROUNDS, %%CNTR_TYPE, %%RBITS
+ jmp %%_initial_blocks_encrypted
+%%_initial_num_blocks_is_10:
+ INITIAL_BLOCKS %%KEY, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, \
+ %%LENGTH, %%DATA_OFFSET, 10, %%CTR_BLOCKx, \
+ %%CTR_BLOCK_1_4, %%CTR_BLOCK_5_8, %%CTR_BLOCK_9_12, \
+ %%CTR_BLOCK_13_16, %%ZTMP0, %%ZTMP1, %%ZTMP2, %%ZTMP3, \
+ %%ZTMP4, %%ZTMP5, %%ZTMP6, %%ZTMP7, %%IA0, %%IA1, %%MASKREG, \
+ %%SHUFREG, %%NROUNDS, %%CNTR_TYPE, %%RBITS
+ jmp %%_initial_blocks_encrypted
+%%_initial_num_blocks_is_9:
+ INITIAL_BLOCKS %%KEY, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, \
+ %%LENGTH, %%DATA_OFFSET, 9, %%CTR_BLOCKx, \
+ %%CTR_BLOCK_1_4, %%CTR_BLOCK_5_8, %%CTR_BLOCK_9_12, \
+ %%CTR_BLOCK_13_16, %%ZTMP0, %%ZTMP1, %%ZTMP2, %%ZTMP3, \
+ %%ZTMP4, %%ZTMP5, %%ZTMP6, %%ZTMP7, %%IA0, %%IA1, %%MASKREG, \
+ %%SHUFREG, %%NROUNDS, %%CNTR_TYPE, %%RBITS
+ jmp %%_initial_blocks_encrypted
+%%_initial_num_blocks_is_8:
+ INITIAL_BLOCKS %%KEY, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, \
+ %%LENGTH, %%DATA_OFFSET, 8, %%CTR_BLOCKx, \
+ %%CTR_BLOCK_1_4, %%CTR_BLOCK_5_8, %%CTR_BLOCK_9_12, \
+ %%CTR_BLOCK_13_16, %%ZTMP0, %%ZTMP1, %%ZTMP2, %%ZTMP3, \
+ %%ZTMP4, %%ZTMP5, %%ZTMP6, %%ZTMP7, %%IA0, %%IA1, %%MASKREG, \
+ %%SHUFREG, %%NROUNDS, %%CNTR_TYPE, %%RBITS
+ jmp %%_initial_blocks_encrypted
+%%_initial_num_blocks_is_7:
+ INITIAL_BLOCKS %%KEY, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, \
+ %%LENGTH, %%DATA_OFFSET, 7, %%CTR_BLOCKx, \
+ %%CTR_BLOCK_1_4, %%CTR_BLOCK_5_8, %%CTR_BLOCK_9_12, \
+ %%CTR_BLOCK_13_16, %%ZTMP0, %%ZTMP1, %%ZTMP2, %%ZTMP3, \
+ %%ZTMP4, %%ZTMP5, %%ZTMP6, %%ZTMP7, %%IA0, %%IA1, %%MASKREG, \
+ %%SHUFREG, %%NROUNDS, %%CNTR_TYPE, %%RBITS
+ jmp %%_initial_blocks_encrypted
+%%_initial_num_blocks_is_6:
+ INITIAL_BLOCKS %%KEY, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, \
+ %%LENGTH, %%DATA_OFFSET, 6, %%CTR_BLOCKx, \
+ %%CTR_BLOCK_1_4, %%CTR_BLOCK_5_8, %%CTR_BLOCK_9_12, \
+ %%CTR_BLOCK_13_16, %%ZTMP0, %%ZTMP1, %%ZTMP2, %%ZTMP3, \
+ %%ZTMP4, %%ZTMP5, %%ZTMP6, %%ZTMP7, %%IA0, %%IA1, %%MASKREG, \
+ %%SHUFREG, %%NROUNDS, %%CNTR_TYPE, %%RBITS
+ jmp %%_initial_blocks_encrypted
+%%_initial_num_blocks_is_5:
+ INITIAL_BLOCKS %%KEY, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, \
+ %%LENGTH, %%DATA_OFFSET, 5, %%CTR_BLOCKx, \
+ %%CTR_BLOCK_1_4, %%CTR_BLOCK_5_8, %%CTR_BLOCK_9_12, \
+ %%CTR_BLOCK_13_16, %%ZTMP0, %%ZTMP1, %%ZTMP2, %%ZTMP3, \
+ %%ZTMP4, %%ZTMP5, %%ZTMP6, %%ZTMP7, %%IA0, %%IA1, %%MASKREG, \
+ %%SHUFREG, %%NROUNDS, %%CNTR_TYPE, %%RBITS
+ jmp %%_initial_blocks_encrypted
+%%_initial_num_blocks_is_4:
+ INITIAL_BLOCKS %%KEY, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, \
+ %%LENGTH, %%DATA_OFFSET, 4, %%CTR_BLOCKx, \
+ %%CTR_BLOCK_1_4, %%CTR_BLOCK_5_8, %%CTR_BLOCK_9_12, \
+ %%CTR_BLOCK_13_16, %%ZTMP0, %%ZTMP1, %%ZTMP2, %%ZTMP3, \
+ %%ZTMP4, %%ZTMP5, %%ZTMP6, %%ZTMP7, %%IA0, %%IA1, %%MASKREG, \
+ %%SHUFREG, %%NROUNDS, %%CNTR_TYPE, %%RBITS
+ jmp %%_initial_blocks_encrypted
+%%_initial_num_blocks_is_3:
+ INITIAL_BLOCKS %%KEY, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, \
+ %%LENGTH, %%DATA_OFFSET, 3, %%CTR_BLOCKx, \
+ %%CTR_BLOCK_1_4, %%CTR_BLOCK_5_8, %%CTR_BLOCK_9_12, \
+ %%CTR_BLOCK_13_16, %%ZTMP0, %%ZTMP1, %%ZTMP2, %%ZTMP3, \
+ %%ZTMP4, %%ZTMP5, %%ZTMP6, %%ZTMP7, %%IA0, %%IA1, %%MASKREG, \
+ %%SHUFREG, %%NROUNDS, %%CNTR_TYPE, %%RBITS
+ jmp %%_initial_blocks_encrypted
+%%_initial_num_blocks_is_2:
+ INITIAL_BLOCKS %%KEY, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, \
+ %%LENGTH, %%DATA_OFFSET, 2, %%CTR_BLOCKx, \
+ %%CTR_BLOCK_1_4, %%CTR_BLOCK_5_8, %%CTR_BLOCK_9_12, \
+ %%CTR_BLOCK_13_16, %%ZTMP0, %%ZTMP1, %%ZTMP2, %%ZTMP3, \
+ %%ZTMP4, %%ZTMP5, %%ZTMP6, %%ZTMP7, %%IA0, %%IA1, %%MASKREG, \
+ %%SHUFREG, %%NROUNDS, %%CNTR_TYPE, %%RBITS
+ jmp %%_initial_blocks_encrypted
+%%_initial_num_blocks_is_1:
+ INITIAL_BLOCKS %%KEY, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, \
+ %%LENGTH, %%DATA_OFFSET, 1, %%CTR_BLOCKx, \
+ %%CTR_BLOCK_1_4, %%CTR_BLOCK_5_8, %%CTR_BLOCK_9_12, \
+ %%CTR_BLOCK_13_16, %%ZTMP0, %%ZTMP1, %%ZTMP2, %%ZTMP3, \
+ %%ZTMP4, %%ZTMP5, %%ZTMP6, %%ZTMP7, %%IA0, %%IA1, %%MASKREG, \
+ %%SHUFREG, %%NROUNDS, %%CNTR_TYPE, %%RBITS
+ jmp %%_initial_blocks_encrypted
+%%_initial_num_blocks_is_0:
+ INITIAL_BLOCKS %%KEY, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, \
+ %%LENGTH, %%DATA_OFFSET, 0, %%CTR_BLOCKx, \
+ %%CTR_BLOCK_1_4, %%CTR_BLOCK_5_8, %%CTR_BLOCK_9_12, \
+ %%CTR_BLOCK_13_16, %%ZTMP0, %%ZTMP1, %%ZTMP2, %%ZTMP3, \
+ %%ZTMP4, %%ZTMP5, %%ZTMP6, %%ZTMP7, %%IA0, %%IA1, %%MASKREG, \
+ %%SHUFREG, %%NROUNDS, %%CNTR_TYPE, %%RBITS
+
+%%_initial_blocks_encrypted:
+ or %%LENGTH, %%LENGTH
+ je %%_enc_dec_done
+
+ vmovdqa64 %%ADD8REG, [rel ddq_add_16]
+ ;; Process 15 full blocks plus a partial block
+ cmp %%LENGTH, 256
+ jl %%_encrypt_by_16_partial
+
+%%_encrypt_by_16:
+ ENCRYPT_16_PARALLEL %%KEY, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, \
+ %%DATA_OFFSET, %%CTR_BLOCK_1_4, %%CTR_BLOCK_5_8, \
+ %%CTR_BLOCK_9_12, %%CTR_BLOCK_13_16, \
+ full, %%IA0, %%IA1, %%LENGTH, \
+ %%ZTMP0, %%ZTMP1, %%ZTMP2, %%ZTMP3, %%ZTMP4, \
+ %%ZTMP5, %%ZTMP6, %%ZTMP7, \
+ %%MASKREG, %%SHUFREG, %%ADD8REG, %%NROUNDS, %%CNTR_TYPE, \
+ %%RBITS
+ add %%DATA_OFFSET, 256
+ sub %%LENGTH, 256
+ cmp %%LENGTH, 256
+ jge %%_encrypt_by_16
+
+%%_encrypt_by_16_done:
+ ;; Test to see if we need a by 16 with partial block. At this point
+ ;; bytes remaining should be either zero or between 241-255.
+ or %%LENGTH, %%LENGTH
+ je %%_enc_dec_done
+
+%%_encrypt_by_16_partial:
+
+ ENCRYPT_16_PARALLEL %%KEY, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, \
+ %%DATA_OFFSET, %%CTR_BLOCK_1_4, %%CTR_BLOCK_5_8, \
+ %%CTR_BLOCK_9_12, %%CTR_BLOCK_13_16, \
+ partial, %%IA0, %%IA1, %%LENGTH, \
+ %%ZTMP0, %%ZTMP1, %%ZTMP2, %%ZTMP3, %%ZTMP4, \
+ %%ZTMP5, %%ZTMP6, %%ZTMP7, \
+ %%MASKREG, %%SHUFREG, %%ADD8REG, %%NROUNDS, %%CNTR_TYPE, \
+ %%RBITS
+
+%%_enc_dec_done:
+
+%endmacro ; CNTR_ENC_DEC
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;void aes_cntr_128_submit_vaes_avx512 (JOB_AES_HMAC *job)
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+MKGLOBAL(aes_cntr_128_submit_vaes_avx512,function,internal)
+aes_cntr_128_submit_vaes_avx512:
+ FUNC_SAVE CNTR
+ ;; arg1 - [in] job
+ ;; arg2 - [in] NROUNDS
+ ;; arg3 - [in] Type of CNTR operation to do (CNTR/CNTR_BIT)
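+ ;; note: NROUNDS = 9/11/13 selects AES-128/192/256 (round count minus one;
+ ;; the last AES round is applied separately by the AES round code)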
+ CNTR_ENC_DEC arg1, 9, CNTR
+ FUNC_RESTORE CNTR
+
+ ret
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;void aes_cntr_192_submit_vaes_avx512 (JOB_AES_HMAC *job)
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+MKGLOBAL(aes_cntr_192_submit_vaes_avx512,function,internal)
+aes_cntr_192_submit_vaes_avx512:
+ FUNC_SAVE CNTR
+ ;; arg1 - [in] job
+ ;; arg2 - [in] NROUNDS
+ ;; arg3 - [in] Type of CNTR operation to do (CNTR/CNTR_BIT)
+ CNTR_ENC_DEC arg1, 11, CNTR
+ FUNC_RESTORE CNTR
+
+ ret
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;void aes_cntr_256_submit_vaes_avx512 (JOB_AES_HMAC *job)
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+MKGLOBAL(aes_cntr_256_submit_vaes_avx512,function,internal)
+aes_cntr_256_submit_vaes_avx512:
+ FUNC_SAVE CNTR
+ ;; arg1 - [in] job
+ ;; arg2 - [in] NROUNDS
+ ;; arg3 - [in] Type of CNTR operation to do (CNTR/CNTR_BIT)
+ CNTR_ENC_DEC arg1, 13, CNTR
+ FUNC_RESTORE CNTR
+
+ ret
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;void aes_cntr_bit_128_submit_vaes_avx512 (JOB_AES_HMAC *job)
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+MKGLOBAL(aes_cntr_bit_128_submit_vaes_avx512,function,internal)
+aes_cntr_bit_128_submit_vaes_avx512:
+ FUNC_SAVE CNTR_BIT
+ ;; arg1 - [in] job
+ ;; arg2 - [in] NROUNDS
+ ;; arg3 - [in] Type of CNTR operation to do (CNTR/CNTR_BIT)
+ CNTR_ENC_DEC arg1, 9, CNTR_BIT
+ FUNC_RESTORE CNTR_BIT
+
+ ret
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;void aes_cntr_bit_192_submit_vaes_avx512 (JOB_AES_HMAC *job)
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+MKGLOBAL(aes_cntr_bit_192_submit_vaes_avx512,function,internal)
+aes_cntr_bit_192_submit_vaes_avx512:
+ FUNC_SAVE CNTR_BIT
+ ;; arg1 - [in] job
+ ;; arg2 - [in] NROUNDS
+ ;; arg3 - [in] Type of CNTR operation to do (CNTR/CNTR_BIT)
+ CNTR_ENC_DEC arg1, 11, CNTR_BIT
+ FUNC_RESTORE CNTR_BIT
+
+ ret
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;void aes_cntr_bit_256_submit_vaes_avx512 (JOB_AES_HMAC *job)
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+MKGLOBAL(aes_cntr_bit_256_submit_vaes_avx512,function,internal)
+aes_cntr_bit_256_submit_vaes_avx512:
+ FUNC_SAVE CNTR_BIT
+ ;; arg1 - [in] job
+ ;; arg2 - [in] NROUNDS
+ ;; arg3 - [in] Type of CNTR operation to do (CNTR/CNTR_BIT)
+ CNTR_ENC_DEC arg1, 13, CNTR_BIT
+ FUNC_RESTORE CNTR_BIT
+
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/avx512/des_x16_avx512.asm b/src/spdk/intel-ipsec-mb/avx512/des_x16_avx512.asm
new file mode 100644
index 000000000..656752941
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx512/des_x16_avx512.asm
@@ -0,0 +1,2382 @@
+;;
+;; Copyright (c) 2017-2019, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+;; Authors:
+;; Shay Gueron (1, 2), Regev Shemy (2), Tomasz Kantecki (2)
+;; (1) University of Haifa, Israel
+;; (2) Intel Corporation
+
+;; In System V AMD64 ABI
+;; callee saves: RBX, RBP, R12-R15
+;; Windows x64 ABI
+;; callee saves: RBX, RBP, RDI, RSI, RSP, R12-R15
+
+;;
+;; Registers: RAX RBX RCX RDX RBP RSI RDI R8 R9 R10 R11 R12 R13 R14 R15
+;; -----------------------------------------------------------
+;; Windows clobbers: RAX R8 R9 R10 R11
+;; Windows preserves: RBX RCX RDX RBP RSI RDI R12 R13 R14 R15
+;; -----------------------------------------------------------
+;; Linux clobbers: RAX RCX RDX R10 R11
+;; Linux preserves: RBX RBP RSI RDI R8 R9 R12 R13 R14 R15
+;; -----------------------------------------------------------
+;; Clobbers ZMM0-31 and K1 to K7
+
+%include "include/os.asm"
+%include "include/reg_sizes.asm"
+%include "mb_mgr_datastruct.asm"
+%include "constants.asm"
+;%define DO_DBGPRINT
+%include "include/dbgprint.asm"
+
+%ifdef LINUX
+%define arg1 rdi
+%define arg2 rsi
+%define arg3 rdx
+%define arg4 rcx
+%else
+%define arg1 rcx
+%define arg2 rdx
+%define arg3 r8
+%define arg4 r9
+%endif
+
+%define STATE arg1
+%define SIZE arg2
+
+%define OFFSET rax
+
+%define IA0 arg3
+%define IA1 arg4
+%define IA2 r10
+
+%define INP0 r11
+%define INP1 r12
+%define INP2 r13
+%define INP3 r14
+%define INP4 r15
+
+%define KSOFFSET r11
+
+%define ZW0 zmm0
+%define ZW1 zmm1
+%define ZW2 zmm2
+%define ZW3 zmm3
+%define ZW4 zmm4
+%define ZW5 zmm5
+%define ZW6 zmm6
+%define ZW7 zmm7
+%define ZW8 zmm8
+%define ZW9 zmm9
+%define ZW10 zmm10
+%define ZW11 zmm11
+%define ZW12 zmm12
+%define ZW13 zmm13
+%define ZW14 zmm14
+%define ZW15 zmm15
+
+%define ZIV0 zmm16
+%define ZIV1 zmm17
+
+%define ZTMP0 zmm18
+%define ZTMP1 zmm19
+%define ZTMP2 zmm20
+%define ZTMP3 zmm21
+%define ZTMP4 zmm22
+%define ZTMP5 zmm23
+%define ZTMP6 zmm24
+%define ZTMP7 zmm25
+%define ZTMP8 zmm26
+%define ZTMP9 zmm27
+%define ZTMP10 zmm28
+%define ZTMP11 zmm29
+%define ZTMP12 zmm30
+%define ZTMP13 zmm31
+
+struc STACKFRAME
+_key_sched: resq 16*16 ; 16 lanes x 16 qwords; 16 x 128 bytes = 2048
+_key_sched2: resq 16*16 ; 16 lanes x 16 qwords; 16 x 128 bytes = 2048
+_key_sched3: resq 16*16 ; 16 lanes x 16 qwords; 16 x 128 bytes = 2048
+_tmp_iv: resq 16 ; 2 x 64 bytes
+_tmp_in: resq 16 ; 2 x 64 bytes
+_tmp_out: resq 16 ; 2 x 64 bytes
+_tmp_mask: resd 16 ; 1 x 64 bytes
+_gpr_save: resq 4 ; r12 to r15
+_rsp_save: resq 1
+_mask_save: resq 1
+_size_save: resq 1
+endstruc
+
+;;; ===========================================================================
+;;; ===========================================================================
+;;; MACROS
+;;; ===========================================================================
+;;; ===========================================================================
+
+;;; ===========================================================================
+;;; CLEAR TRANSPOSED KEY SCHEDULE (if SAFE_DATA is selected)
+;;; ===========================================================================
+%macro CLEAR_KEY_SCHEDULE 2
+%define %%ALG %1 ; [in] DES or 3DES
+%define %%ZT %2 ; [clobbered] temporary ZMM register
+
+%ifdef SAFE_DATA
+ vpxorq %%ZT, %%ZT
+%assign rep_num (2048 / 64)
+%ifidn %%ALG, 3DES
+%assign rep_num (rep_num * 3)
+%endif
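+;; rep_num = 32 zmm (64-byte) stores per 2048-byte key schedule,
+;; tripled for 3DES to cover all three schedules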
+
+%assign offset 0
+%rep rep_num
+ vmovdqa64 [rsp + _key_sched + offset], %%ZT
+%assign offset (offset + 64)
+%endrep
+
+%endif ; SAFE_DATA
+
+%endmacro
+
+;;; ===========================================================================
+;;; PERMUTE
+;;; ===========================================================================
+;;; A [in/out] - zmm register
+;;; B [in/out] - zmm register
+;;; NSHIFT [in] - constant to shift words by
+;;; MASK [in] - zmm or m512 with mask
+;;; T0 [clobbered] - temporary zmm register
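+;;; This is a delta-swap; per 32-bit element it is equivalent to:
+;;;   T0 = ((A >> NSHIFT) ^ B) & MASK
+;;;   B ^= T0
+;;;   A ^= T0 << NSHIFT
+;;; i.e. the MASK-selected bits of B are exchanged with the bits of A that
+;;; sit NSHIFT positions higher.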
+%macro PERMUTE 5
+%define %%A %1
+%define %%B %2
+%define %%NSHIFT %3
+%define %%MASK %4
+%define %%T0 %5
+
+ vpsrld %%T0, %%A, %%NSHIFT
+ vpxord %%T0, %%T0, %%B
+ vpandd %%T0, %%T0, %%MASK
+ vpxord %%B, %%B, %%T0
+ vpslld %%T0, %%T0, %%NSHIFT
+ vpxord %%A, %%A, %%T0
+%endmacro
+
+;;; ===========================================================================
+;;; INITIAL PERMUTATION
+;;; ===========================================================================
+;;; L [in/out] - zmm register
+;;; R [in/out] - zmm register
+;;; T0 [clobbered] - temporary zmm register
+%macro IP_Z 3
+%define %%L %1
+%define %%R %2
+%define %%T0 %3
+ PERMUTE %%R, %%L, 4, [rel init_perm_consts + 0*64], %%T0
+ PERMUTE %%L, %%R, 16, [rel init_perm_consts + 1*64], %%T0
+ PERMUTE %%R, %%L, 2, [rel init_perm_consts + 2*64], %%T0
+ PERMUTE %%L, %%R, 8, [rel init_perm_consts + 3*64], %%T0
+ PERMUTE %%R, %%L, 1, [rel init_perm_consts + 4*64], %%T0
+%endmacro
+
+;;; ===========================================================================
+;;; FINAL PERMUTATION
+;;; ===========================================================================
+;;; L [in/out] - zmm register
+;;; R [in/out] - zmm register
+;;; T0 [clobbered] - temporary zmm register
+%macro FP_Z 3
+%define %%L %1
+%define %%R %2
+%define %%T0 %3
+ PERMUTE %%L, %%R, 1, [rel init_perm_consts + 4*64], %%T0
+ PERMUTE %%R, %%L, 8, [rel init_perm_consts + 3*64], %%T0
+ PERMUTE %%L, %%R, 2, [rel init_perm_consts + 2*64], %%T0
+ PERMUTE %%R, %%L, 16, [rel init_perm_consts + 1*64], %%T0
+ PERMUTE %%L, %%R, 4, [rel init_perm_consts + 0*64], %%T0
+%endmacro
+
+;;; ===========================================================================
+;;; P PHASE
+;;; ===========================================================================
+;;; W0 [in/out] - zmm register
+;;; in: vector of 16 x 32bits from S phase
+;;; out: permuted in vector
+;;; T0-T3 [clobbered] - temporary zmm register
+%macro P_PHASE 5
+%define %%W0 %1
+%define %%T0 %2
+%define %%T1 %3
+%define %%T2 %4
+%define %%T3 %5
+
+ vprord %%T0, %%W0, 3
+ vpandd %%T0, %%T0, [rel mask_values + 0*64]
+ vprord %%T1, %%W0, 5
+ vpandd %%T1, %%T1, [rel mask_values + 1*64]
+ vpord %%T0, %%T0, %%T1
+
+ vprord %%T1, %%W0, 24
+ vpandd %%T1, %%T1, [rel mask_values + 2*64]
+ vprord %%T2, %%W0, 26
+ vpandd %%T2, %%T2, [rel mask_values + 3*64]
+ vpord %%T1, %%T1, %%T2
+ vpord %%T0, %%T0, %%T1
+
+ vprord %%T1, %%W0, 15
+ vpandd %%T1, %%T1, [rel mask_values + 4*64]
+ vprord %%T2, %%W0, 17
+ vpandd %%T2, %%T2, [rel mask_values + 5*64]
+ vpord %%T1, %%T1, %%T2
+
+ vprord %%T2, %%W0, 6
+ vpandd %%T2, %%T2, [rel mask_values + 6*64]
+ vprord %%T3, %%W0, 21
+ vpandd %%T3, %%T3, [rel mask_values + 7*64]
+ vpord %%T2, %%T2, %%T3
+ vpord %%T1, %%T1, %%T2
+ vpord %%T0, %%T0, %%T1
+
+ vprord %%T1, %%W0, 12
+ vpandd %%T1, %%T1, [rel mask_values + 8*64]
+ vprord %%T2, %%W0, 14
+ vpandd %%T2, %%T2, [rel mask_values + 9*64]
+ vpord %%T1, %%T1, %%T2
+
+ vprord %%T2, %%W0, 4
+ vpandd %%T2, %%T2, [rel mask_values + 10*64]
+ vprord %%T3, %%W0, 11
+ vpandd %%T3, %%T3, [rel mask_values + 11*64]
+ vpord %%T2, %%T2, %%T3
+ vpord %%T1, %%T1, %%T2
+ vpord %%T0, %%T0, %%T1
+
+ vprord %%T1, %%W0, 16
+ vpandd %%T1, %%T1, [rel mask_values + 12*64]
+ vprord %%T2, %%W0, 22
+ vpandd %%T2, %%T2, [rel mask_values + 13*64]
+ vpord %%T1, %%T1, %%T2
+
+ vprord %%T2, %%W0, 19
+ vpandd %%T2, %%T2, [rel mask_values + 14*64]
+ vprord %%T3, %%W0, 10
+ vpandd %%T3, %%T3, [rel mask_values + 15*64]
+ vpord %%T2, %%T2, %%T3
+ vpord %%T1, %%T1, %%T2
+ vpord %%T0, %%T0, %%T1
+
+ vprord %%T1, %%W0, 9
+ vpandd %%T1, %%T1, [rel mask_values + 16*64]
+ vprord %%T2, %%W0, 13
+ vpandd %%T2, %%T2, [rel mask_values + 17*64]
+ vpord %%T1, %%T1, %%T2
+
+ vprord %%T2, %%W0, 25
+ vpandd %%T2, %%T2, [rel mask_values + 18*64]
+ vpord %%T1, %%T1, %%T2
+ vpord %%W0, %%T0, %%T1
+%endmacro
+
+;;; ===========================================================================
+;;; E PHASE
+;;; ===========================================================================
+;;;
+;;; Expands 16x32-bit words into 16x48-bit words
+;;; and XORs the result with the key schedule.
+;;; The output is arranged so it can be used directly as S phase input.
+;;;
+;;; in [in] - zmm register
+;;; out0a [out] - zmm register
+;;; out0b [out] - zmm register
+;;; out1a [out] - zmm register
+;;; out1b [out] - zmm register
+;;; k0 [in] - key schedule; zmm or m512
+;;; k1 [in] - key schedule; zmm or m512
+;;; t0-t1 [clobbered] - temporary zmm register
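+;;; Note: the round-key XOR is folded into the expansion; each expanded half
+;;; is then split into a low-byte part (OUT*A) and a high-byte part (OUT*B)
+;;; so the 16-bit lanes can be used directly as S-box indices in S_PHASE.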
+%macro E_PHASE 9
+%define %%IN %1
+%define %%OUT0A %2
+%define %%OUT0B %3
+%define %%OUT1A %4
+%define %%OUT1B %5
+%define %%K0 %6
+%define %%K1 %7
+%define %%T0 %8
+%define %%T1 %9
+
+ vprord %%T0, %%IN, 31
+ vprord %%T1, %%IN, 3
+ vpshufb %%T0, %%T0, [rel idx_e]
+ vpshufb %%T1, %%T1, [rel idx_e]
+ vpunpcklbw %%OUT0A, %%T0, %%T1
+ vpunpckhbw %%OUT1A, %%T0, %%T1
+ vpxord %%OUT0A, %%OUT0A, %%K0
+ vpxord %%OUT1A, %%OUT1A, %%K1
+ vpandd %%OUT0B, %%OUT0A, [rel and_eu]
+ vpsrlw %%OUT0B, %%OUT0B, 8
+ vpandd %%OUT0A, %%OUT0A, [rel and_ed]
+ vpandd %%OUT1B, %%OUT1A, [rel and_eu]
+ vpsrlw %%OUT1B, %%OUT1B, 8
+ vpandd %%OUT1A, %%OUT1A, [rel and_ed]
+%endmacro
+
+;;; ===========================================================================
+;;; S-BOX
+;;; ===========================================================================
+;;;
+;;; NOTE: clobbers k1-k6 OpMask registers
+;;;
+;;; IN0A [in] - zmm register; output from E-phase
+;;; IN0B [in] - zmm register; output from E-phase
+;;; IN1A [in] - zmm register; output from E-phase
+;;; IN1B [in] - zmm register; output from E-phase
+;;; OUT [out] - zmm register; S-box phase output
+;;; T0-T5 [clobbered] - temporary zmm register
+%macro S_PHASE 11
+%define %%IN0A %1
+%define %%IN0B %2
+%define %%IN1A %3
+%define %%IN1B %4
+%define %%OUT %5
+%define %%T0 %6
+%define %%T1 %7
+%define %%T2 %8
+%define %%T3 %9
+%define %%T4 %10
+%define %%T5 %11
+
+ vmovdqa64 %%T0, [rel reg_values16bit_7]
+ vpcmpuw k3, %%IN0A, %%T0, 2 ; 2 -> LE
+ vpcmpuw k4, %%IN0B, %%T0, 2 ; 2 -> LE
+ vpcmpuw k5, %%IN1A, %%T0, 2 ; 2 -> LE
+ vpcmpuw k6, %%IN1B, %%T0, 2 ; 2 -> LE
+
+ mov DWORD(IA0), 0x55555555
+ kmovd k1, DWORD(IA0)
+ mov DWORD(IA0), 0xaaaaaaaa
+ kmovd k2, DWORD(IA0)
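+ ;; k1 selects the even 16-bit lanes (0x5555...), k2 the odd ones
+ ;; (0xaaaa...); they pick which S-box table halves each vpermw below reads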
+
+ vpermw %%T0{k1}{z}, %%IN0A, [rel S_box_flipped + 0*64]
+ vpermw %%T1{k1}{z}, %%IN0A, [rel S_box_flipped + 1*64]
+ vpermw %%T2{k2}{z}, %%IN0A, [rel S_box_flipped + 4*64]
+ vpermw %%T3{k2}{z}, %%IN0A, [rel S_box_flipped + 5*64]
+ vpxord %%T0, %%T0, %%T2
+ vpxord %%OUT, %%T1, %%T3
+ vmovdqu16 %%OUT{k3}, %%T0
+
+ vpermw %%T0{k1}{z}, %%IN0B, [rel S_box_flipped + 2*64]
+ vpermw %%T1{k1}{z}, %%IN0B, [rel S_box_flipped + 3*64]
+ vpermw %%T2{k2}{z}, %%IN0B, [rel S_box_flipped + 6*64]
+ vpermw %%T3{k2}{z}, %%IN0B, [rel S_box_flipped + 7*64]
+ vpxord %%T0, %%T0, %%T2
+ vpxord %%T3, %%T1, %%T3
+ vmovdqu16 %%T3{k4}, %%T0
+ vpsllw %%T3, %%T3, 4
+ vpxord %%OUT, %%OUT, %%T3
+
+ vpermw %%T0{k1}{z}, %%IN1A, [rel S_box_flipped + 8*64]
+ vpermw %%T1{k1}{z}, %%IN1A, [rel S_box_flipped + 9*64]
+ vpermw %%T2{k2}{z}, %%IN1A, [rel S_box_flipped + 12*64]
+ vpermw %%T3{k2}{z}, %%IN1A, [rel S_box_flipped + 13*64]
+ vpxord %%T0, %%T0, %%T2
+ vpxord %%T4, %%T1, %%T3
+ vmovdqu16 %%T4{k5}, %%T0
+
+ vpermw %%T0{k1}{z}, %%IN1B, [rel S_box_flipped + 10*64]
+ vpermw %%T1{k1}{z}, %%IN1B, [rel S_box_flipped + 11*64]
+ vpermw %%T2{k2}{z}, %%IN1B, [rel S_box_flipped + 14*64]
+ vpermw %%T3{k2}{z}, %%IN1B, [rel S_box_flipped + 15*64]
+ vpxord %%T0, %%T0, %%T2
+ vpxord %%T5, %%T1, %%T3
+ vmovdqu16 %%T5{k6}, %%T0
+ vpsllw %%T5, %%T5, 4
+
+ vpxord %%T4, %%T4, %%T5
+ vpsllw %%T4, %%T4, 8
+ vpxord %%OUT, %%OUT, %%T4
+ vpshufb %%OUT, %%OUT, [rel shuffle_reg]
+%endmacro
+
+;;; ===========================================================================
+;;; DES encryption/decryption round
+;;; ===========================================================================
+;;;
+;;; Clobbers k1-k6 OpMask registers
+;;;
+;;; ENC_DEC [in] - ENC for encryption, DEC for decryption
+;;; R [in/out] - zmm register; plain text in & cipher text out
+;;; L [in/out] - zmm register; plain text in & cipher text out
+;;; KS [in] - pointer to the key schedule
+;;; T0-T11 [clobbered] - temporary zmm register
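+;;; Note: each loop iteration performs two Feistel rounds (one updating L,
+;;; one updating R), so 8 iterations cover all 16 DES rounds; KSOFFSET moves
+;;; by 4*64 bytes per iteration (two rounds x two 64-byte key halves).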
+%macro DES_ENC_DEC 16
+%define %%ENC_DEC %1
+%define %%R %2
+%define %%L %3
+%define %%KS %4
+%define %%T0 %5
+%define %%T1 %6
+%define %%T2 %7
+%define %%T3 %8
+%define %%T4 %9
+%define %%T5 %10
+%define %%T6 %11
+%define %%T7 %12
+%define %%T8 %13
+%define %%T9 %14
+%define %%T10 %15
+%define %%T11 %16
+
+ IP_Z %%R, %%L, %%T0
+
+%ifidn %%ENC_DEC, ENC
+ ;; ENCRYPTION
+ xor KSOFFSET, KSOFFSET
+%%_des_enc_loop:
+ E_PHASE %%R, %%T1, %%T2, %%T3, %%T4, [%%KS + KSOFFSET + (0*64)], [%%KS + KSOFFSET + (1*64)], %%T6, %%T7
+ S_PHASE %%T1, %%T2, %%T3, %%T4, %%T0, %%T6, %%T7, %%T8, %%T9, %%T10, %%T11
+ P_PHASE %%T0, %%T1, %%T2, %%T3, %%T4
+ vpxord %%L, %%L, %%T0
+
+ E_PHASE %%L, %%T1, %%T2, %%T3, %%T4, [%%KS + KSOFFSET + (2*64)], [%%KS + KSOFFSET + (3*64)], %%T6, %%T7
+ S_PHASE %%T1, %%T2, %%T3, %%T4, %%T0, %%T6, %%T7, %%T8, %%T9, %%T10, %%T11
+ P_PHASE %%T0, %%T1, %%T2, %%T3, %%T4
+ vpxord %%R, %%R, %%T0
+
+ add KSOFFSET, (4*64)
+ cmp KSOFFSET, (8*(4*64))
+ jb %%_des_enc_loop
+
+%else
+ ;; DECRYPTION
+ mov KSOFFSET, (8*(4*64))
+%%_des_dec_loop:
+ E_PHASE %%R, %%T1, %%T2, %%T3, %%T4, [%%KS + KSOFFSET - (2*64)], [%%KS + KSOFFSET - (1*64)], %%T6, %%T7
+ S_PHASE %%T1, %%T2, %%T3, %%T4, %%T0, %%T6, %%T7, %%T8, %%T9, %%T10, %%T11
+ P_PHASE %%T0, %%T1, %%T2, %%T3, %%T4
+ vpxord %%L, %%L, %%T0
+
+ E_PHASE %%L, %%T1, %%T2, %%T3, %%T4, [%%KS + KSOFFSET - (4*64)], [%%KS + KSOFFSET - (3*64)], %%T6, %%T7
+ S_PHASE %%T1, %%T2, %%T3, %%T4, %%T0, %%T6, %%T7, %%T8, %%T9, %%T10, %%T11
+ P_PHASE %%T0, %%T1, %%T2, %%T3, %%T4
+ vpxord %%R, %%R, %%T0
+ sub KSOFFSET, (4*64)
+ jnz %%_des_dec_loop
+%endif ; DECRYPTION
+
+ FP_Z %%R, %%L, %%T0
+%endmacro
+
+;;; ===========================================================================
+;;; DATA TRANSPOSITION AT DATA INPUT
+;;; ===========================================================================
+;;;
+;;; IN00 - IN15 [in/out]:
+;;; in: IN00 - lane 0 data, IN01 - lane 1 data, ... IN15 - lane 15 data
+;;; out: R0 - 16 x word0, L0 - 16 x word1, ... L7 - 16 x word15
+;;; T0-T3 [clobbered] - temporary zmm registers
+;;; K0-K5 [clobbered] - temporary zmm registers
+;;; H0-H3 [clobbered] - temporary zmm registers
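+;;; The 16x16 transpose of 32-bit words is done in three stages:
+;;; dword unpacks (vpunpckl/hdq), qword unpacks (vpunpckl/hqdq) and
+;;; 128-bit lane shuffles (vshufi64x2).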
+%macro TRANSPOSE_IN 30
+%define %%IN00 %1 ; R0
+%define %%IN01 %2 ; L0
+%define %%IN02 %3 ; R1
+%define %%IN03 %4 ; L1
+%define %%IN04 %5 ; R2
+%define %%IN05 %6 ; L2
+%define %%IN06 %7 ; R3
+%define %%IN07 %8 ; L3
+%define %%IN08 %9 ; R4
+%define %%IN09 %10 ; L4
+%define %%IN10 %11 ; R5
+%define %%IN11 %12 ; L5
+%define %%IN12 %13 ; R6
+%define %%IN13 %14 ; L6
+%define %%IN14 %15 ; R7
+%define %%IN15 %16 ; L7
+%define %%T0 %17
+%define %%T1 %18
+%define %%T2 %19
+%define %%T3 %20
+%define %%K0 %21
+%define %%K1 %22
+%define %%K2 %23
+%define %%K3 %24
+%define %%K4 %25
+%define %%K5 %26
+%define %%H0 %27
+%define %%H1 %28
+%define %%H2 %29
+%define %%H3 %30
+
+ vpunpckldq %%K0, %%IN00, %%IN01
+ vpunpckhdq %%K1, %%IN00, %%IN01
+ vpunpckldq %%T0, %%IN02, %%IN03
+ vpunpckhdq %%T1, %%IN02, %%IN03
+
+ vpunpckldq %%IN00, %%IN04, %%IN05
+ vpunpckhdq %%IN01, %%IN04, %%IN05
+ vpunpckldq %%IN02, %%IN06, %%IN07
+ vpunpckhdq %%IN03, %%IN06, %%IN07
+
+ vpunpcklqdq %%K2, %%K0, %%T0
+ vpunpckhqdq %%T2, %%K0, %%T0
+ vpunpcklqdq %%K3, %%K1, %%T1
+ vpunpckhqdq %%T3, %%K1, %%T1
+
+ vpunpcklqdq %%K0, %%IN00, %%IN02
+ vpunpckhqdq %%K1, %%IN00, %%IN02
+ vpunpcklqdq %%T0, %%IN01, %%IN03
+ vpunpckhqdq %%T1, %%IN01, %%IN03
+
+ vpunpckldq %%K4, %%IN08, %%IN09
+ vpunpckhdq %%K5, %%IN08, %%IN09
+ vpunpckldq %%IN04, %%IN10, %%IN11
+ vpunpckhdq %%IN05, %%IN10, %%IN11
+ vpunpckldq %%IN06, %%IN12, %%IN13
+ vpunpckhdq %%IN07, %%IN12, %%IN13
+ vpunpckldq %%IN10, %%IN14, %%IN15
+ vpunpckhdq %%IN11, %%IN14, %%IN15
+
+ vpunpcklqdq %%IN12, %%K4, %%IN04
+ vpunpckhqdq %%IN13, %%K4, %%IN04
+ vpunpcklqdq %%IN14, %%K5, %%IN05
+ vpunpckhqdq %%IN15, %%K5, %%IN05
+ vpunpcklqdq %%IN00, %%IN06, %%IN10
+ vpunpckhqdq %%IN01, %%IN06, %%IN10
+ vpunpcklqdq %%IN02, %%IN07, %%IN11
+ vpunpckhqdq %%IN03, %%IN07, %%IN11
+
+ vshufi64x2 %%H0, %%K2, %%K0, 0x44
+ vshufi64x2 %%H1, %%K2, %%K0, 0xee
+ vshufi64x2 %%H2, %%IN12, %%IN00, 0x44
+ vshufi64x2 %%H3, %%IN12, %%IN00, 0xee
+ vshufi64x2 %%IN00, %%H0, %%H2, 0x88 ; R0
+ vshufi64x2 %%IN04, %%H0, %%H2, 0xdd ; R2
+ vshufi64x2 %%IN08, %%H1, %%H3, 0x88 ; R4
+ vshufi64x2 %%IN12, %%H1, %%H3, 0xdd ; R6
+
+ vshufi64x2 %%H0, %%T2, %%K1, 0x44
+ vshufi64x2 %%H1, %%T2, %%K1, 0xee
+ vshufi64x2 %%H2, %%IN13, %%IN01, 0x44
+ vshufi64x2 %%H3, %%IN13, %%IN01, 0xee
+ vshufi64x2 %%IN01, %%H0, %%H2, 0x88 ; L0
+ vshufi64x2 %%IN05, %%H0, %%H2, 0xdd ; L2
+ vshufi64x2 %%IN09, %%H1, %%H3, 0x88 ; L4
+ vshufi64x2 %%IN13, %%H1, %%H3, 0xdd ; L6
+
+ vshufi64x2 %%H0, %%K3, %%T0, 0x44
+ vshufi64x2 %%H1, %%K3, %%T0, 0xee
+ vshufi64x2 %%H2, %%IN14, %%IN02, 0x44
+ vshufi64x2 %%H3, %%IN14, %%IN02, 0xee
+ vshufi64x2 %%IN02, %%H0, %%H2, 0x88 ; R1
+ vshufi64x2 %%IN06, %%H0, %%H2, 0xdd ; R3
+ vshufi64x2 %%IN10, %%H1, %%H3, 0x88 ; R5
+ vshufi64x2 %%IN14, %%H1, %%H3, 0xdd ; R7
+
+ vshufi64x2 %%H0, %%T3, %%T1, 0x44
+ vshufi64x2 %%H1, %%T3, %%T1, 0xee
+ vshufi64x2 %%H2, %%IN15, %%IN03, 0x44
+ vshufi64x2 %%H3, %%IN15, %%IN03, 0xee
+ vshufi64x2 %%IN03, %%H0, %%H2, 0x88 ; L1
+ vshufi64x2 %%IN07, %%H0, %%H2, 0xdd ; L3
+ vshufi64x2 %%IN11, %%H1, %%H3, 0x88 ; L5
+ vshufi64x2 %%IN15, %%H1, %%H3, 0xdd ; L7
+%endmacro
+
+;;; ===========================================================================
+;;; DATA TRANSPOSITION AT DATA OUTPUT
+;;; ===========================================================================
+;;;
+;;; IN00-IN15 aka R0/L0 - R7/L7 [in/out]:
+;;; in: R0 - 16 x word0, L0 - 16 x word1, ... L7 - 16 x word15
+;;; out: R0 - lane 0 data, L0 - lane 1 data, ... L7 - lane 15 data
+;;; T0-T3 [clobbered] - temporary zmm registers
+;;; K0-K5 [clobbered] - temporary zmm registers
+;;; H0-H3 [clobbered] - temporary zmm registers
+%macro TRANSPOSE_OUT 30
+%define %%IN00 %1 ; R0
+%define %%IN01 %2 ; L0
+%define %%IN02 %3 ; R1
+%define %%IN03 %4 ; L1
+%define %%IN04 %5 ; R2
+%define %%IN05 %6 ; L2
+%define %%IN06 %7 ; R3
+%define %%IN07 %8 ; L3
+%define %%IN08 %9 ; R4
+%define %%IN09 %10 ; L4
+%define %%IN10 %11 ; R5
+%define %%IN11 %12 ; L5
+%define %%IN12 %13 ; R6
+%define %%IN13 %14 ; L6
+%define %%IN14 %15 ; R7
+%define %%IN15 %16 ; L7
+%define %%T0 %17
+%define %%T1 %18
+%define %%T2 %19
+%define %%T3 %20
+%define %%K0 %21
+%define %%K1 %22
+%define %%K2 %23
+%define %%K3 %24
+%define %%K4 %25
+%define %%K5 %26
+%define %%H0 %27
+%define %%H1 %28
+%define %%H2 %29
+%define %%H3 %30
+
+ vpunpckldq %%K0, %%IN01, %%IN00
+ vpunpckhdq %%K1, %%IN01, %%IN00
+ vpunpckldq %%T0, %%IN03, %%IN02
+ vpunpckhdq %%T1, %%IN03, %%IN02
+
+ vpunpckldq %%IN00, %%IN05, %%IN04
+ vpunpckhdq %%IN01, %%IN05, %%IN04
+ vpunpckldq %%IN02, %%IN07, %%IN06
+ vpunpckhdq %%IN03, %%IN07, %%IN06
+
+ vpunpcklqdq %%K2, %%K0, %%T0
+ vpunpckhqdq %%T2, %%K0, %%T0
+ vpunpcklqdq %%K3, %%K1, %%T1
+ vpunpckhqdq %%T3, %%K1, %%T1
+
+ vpunpcklqdq %%K0, %%IN00, %%IN02
+ vpunpckhqdq %%K1, %%IN00, %%IN02
+ vpunpcklqdq %%T0, %%IN01, %%IN03
+ vpunpckhqdq %%T1, %%IN01, %%IN03
+
+ vpunpckldq %%K4, %%IN09, %%IN08
+ vpunpckhdq %%K5, %%IN09, %%IN08
+ vpunpckldq %%IN04, %%IN11, %%IN10
+ vpunpckhdq %%IN05, %%IN11, %%IN10
+ vpunpckldq %%IN06, %%IN13, %%IN12
+ vpunpckhdq %%IN07, %%IN13, %%IN12
+ vpunpckldq %%IN10, %%IN15, %%IN14
+ vpunpckhdq %%IN11, %%IN15, %%IN14
+
+ vpunpcklqdq %%IN12, %%K4, %%IN04
+ vpunpckhqdq %%IN13, %%K4, %%IN04
+ vpunpcklqdq %%IN14, %%K5, %%IN05
+ vpunpckhqdq %%IN15, %%K5, %%IN05
+ vpunpcklqdq %%IN00, %%IN06, %%IN10
+ vpunpckhqdq %%IN01, %%IN06, %%IN10
+ vpunpcklqdq %%IN02, %%IN07, %%IN11
+ vpunpckhqdq %%IN03, %%IN07, %%IN11
+
+ vshufi64x2 %%H0, %%K2, %%K0, 0x44
+ vshufi64x2 %%H1, %%K2, %%K0, 0xee
+ vshufi64x2 %%H2, %%IN12, %%IN00, 0x44
+ vshufi64x2 %%H3, %%IN12, %%IN00, 0xee
+ vshufi64x2 %%IN00, %%H0, %%H2, 0x88 ; R0
+ vshufi64x2 %%IN04, %%H0, %%H2, 0xdd ; R2
+ vshufi64x2 %%IN08, %%H1, %%H3, 0x88 ; R4
+ vshufi64x2 %%IN12, %%H1, %%H3, 0xdd ; R6
+
+ vshufi64x2 %%H0, %%T2, %%K1, 0x44
+ vshufi64x2 %%H1, %%T2, %%K1, 0xee
+ vshufi64x2 %%H2, %%IN13, %%IN01, 0x44
+ vshufi64x2 %%H3, %%IN13, %%IN01, 0xee
+ vshufi64x2 %%IN01, %%H0, %%H2, 0x88 ; L0
+ vshufi64x2 %%IN05, %%H0, %%H2, 0xdd ; L2
+ vshufi64x2 %%IN09, %%H1, %%H3, 0x88 ; L4
+ vshufi64x2 %%IN13, %%H1, %%H3, 0xdd ; L6
+
+ vshufi64x2 %%H0, %%K3, %%T0, 0x44
+ vshufi64x2 %%H1, %%K3, %%T0, 0xee
+ vshufi64x2 %%H2, %%IN14, %%IN02, 0x44
+ vshufi64x2 %%H3, %%IN14, %%IN02, 0xee
+ vshufi64x2 %%IN02, %%H0, %%H2, 0x88 ; R1
+ vshufi64x2 %%IN06, %%H0, %%H2, 0xdd ; R3
+ vshufi64x2 %%IN10, %%H1, %%H3, 0x88 ; R5
+ vshufi64x2 %%IN14, %%H1, %%H3, 0xdd ; R7
+
+ vshufi64x2 %%H0, %%T3, %%T1, 0x44
+ vshufi64x2 %%H1, %%T3, %%T1, 0xee
+ vshufi64x2 %%H2, %%IN15, %%IN03, 0x44
+ vshufi64x2 %%H3, %%IN15, %%IN03, 0xee
+ vshufi64x2 %%IN03, %%H0, %%H2, 0x88 ; L1
+ vshufi64x2 %%IN07, %%H0, %%H2, 0xdd ; L3
+ vshufi64x2 %%IN11, %%H1, %%H3, 0x88 ; L5
+ vshufi64x2 %%IN15, %%H1, %%H3, 0xdd ; L7
+%endmacro
+
+;;; ===========================================================================
+;;; DATA TRANSPOSITION OF ONE DES BLOCK AT DATA INPUT
+;;; ===========================================================================
+;;;
+;;; IN00-IN15 / R0/L0-R7/L7 [in/out]:
+;;; in: IN00 - lane 0 data, IN01 - lane 1 data, ... IN15 - lane 15 data
+;;; out: R0 - 16 x word0, L0 - 16 x word1
+;;; T0,T2 [clobbered] - temporary zmm registers
+;;; K0-K2, K4 [clobbered] - temporary zmm registers
+;;; H0,H2 [clobbered] - temporary zmm registers
+%macro TRANSPOSE_IN_ONE 24
+%define %%IN00 %1 ; R0
+%define %%IN01 %2 ; L0
+%define %%IN02 %3 ; R1
+%define %%IN03 %4 ; L1
+%define %%IN04 %5 ; R2
+%define %%IN05 %6 ; L2
+%define %%IN06 %7 ; R3
+%define %%IN07 %8 ; L3
+%define %%IN08 %9 ; R4
+%define %%IN09 %10 ; L4
+%define %%IN10 %11 ; R5
+%define %%IN11 %12 ; L5
+%define %%IN12 %13 ; R6
+%define %%IN13 %14 ; L6
+%define %%IN14 %15 ; R7
+%define %%IN15 %16 ; L7
+%define %%T0 %17
+%define %%T2 %18
+%define %%K0 %19
+%define %%K1 %20
+%define %%K2 %21
+%define %%K4 %22
+%define %%H0 %23
+%define %%H2 %24
+
+ vpunpckldq %%K0, %%IN00, %%IN01
+ vpunpckhdq %%K1, %%IN00, %%IN01
+ vpunpckldq %%T0, %%IN02, %%IN03
+
+ vpunpckldq %%IN00, %%IN04, %%IN05
+ vpunpckhdq %%IN01, %%IN04, %%IN05
+ vpunpckldq %%IN02, %%IN06, %%IN07
+
+ vpunpcklqdq %%K2, %%K0, %%T0
+ vpunpckhqdq %%T2, %%K0, %%T0
+
+ vpunpcklqdq %%K0, %%IN00, %%IN02
+ vpunpckhqdq %%K1, %%IN00, %%IN02
+
+ vpunpckldq %%K4, %%IN08, %%IN09
+ vpunpckldq %%IN04, %%IN10, %%IN11
+ vpunpckldq %%IN06, %%IN12, %%IN13
+ vpunpckldq %%IN10, %%IN14, %%IN15
+
+ vpunpcklqdq %%IN12, %%K4, %%IN04
+ vpunpckhqdq %%IN13, %%K4, %%IN04
+ vpunpcklqdq %%IN00, %%IN06, %%IN10
+ vpunpckhqdq %%IN01, %%IN06, %%IN10
+
+ vshufi64x2 %%H0, %%K2, %%K0, 0x44
+ vshufi64x2 %%H2, %%IN12, %%IN00, 0x44
+ vshufi64x2 %%IN00, %%H0, %%H2, 0x88 ; R0
+
+ vshufi64x2 %%H0, %%T2, %%K1, 0x44
+ vshufi64x2 %%H2, %%IN13, %%IN01, 0x44
+ vshufi64x2 %%IN01, %%H0, %%H2, 0x88 ; L0
+%endmacro
+
+;;; ===========================================================================
+;;; DATA TRANSPOSITION OF ONE DES BLOCK AT DATA OUTPUT
+;;; ===========================================================================
+;;;
+;;; IN00-IN15 aka R0/L0 - R7/L7 [in/out]:
+;;; in: R0 - 16 x word0, L0 - 16 x word1
+;;; out: R0 - lane 0 data, L0 - lane 1 data, ... L7 - lane 15 data
+;;; T0, T2, T3 [clobbered] - temporary zmm registers
+;;; K0-K3 [clobbered] - temporary zmm registers
+;;; H0,H1 [clobbered] - temporary zmm registers
+%macro TRANSPOSE_OUT_ONE 25
+%define %%IN00 %1 ; R0
+%define %%IN01 %2 ; L0
+%define %%IN02 %3 ; R1
+%define %%IN03 %4 ; L1
+%define %%IN04 %5 ; R2
+%define %%IN05 %6 ; L2
+%define %%IN06 %7 ; R3
+%define %%IN07 %8 ; L3
+%define %%IN08 %9 ; R4
+%define %%IN09 %10 ; L4
+%define %%IN10 %11 ; R5
+%define %%IN11 %12 ; L5
+%define %%IN12 %13 ; R6
+%define %%IN13 %14 ; L6
+%define %%IN14 %15 ; R7
+%define %%IN15 %16 ; L7
+%define %%T0 %17
+%define %%T2 %18
+%define %%T3 %19
+%define %%K0 %20
+%define %%K1 %21
+%define %%K2 %22
+%define %%K3 %23
+%define %%H0 %24
+%define %%H1 %25
+
+ vpxord %%T0, %%T0, %%T0
+
+ vpunpckldq %%K0, %%IN01, %%IN00
+ vpunpckhdq %%K1, %%IN01, %%IN00
+
+ vpunpcklqdq %%K2, %%K0, %%T0
+ vpunpckhqdq %%T2, %%K0, %%T0
+ vpunpcklqdq %%K3, %%K1, %%T0
+ vpunpckhqdq %%T3, %%K1, %%T0
+
+ vshufi64x2 %%H0, %%K2, %%T0, 0x44
+ vshufi64x2 %%H1, %%K2, %%T0, 0xee
+ vshufi64x2 %%IN00, %%H0, %%T0, 0x88 ; R0
+ vshufi64x2 %%IN04, %%H0, %%T0, 0xdd ; R2
+ vshufi64x2 %%IN08, %%H1, %%T0, 0x88 ; R4
+ vshufi64x2 %%IN12, %%H1, %%T0, 0xdd ; R6
+
+ vshufi64x2 %%H0, %%T2, %%T0, 0x44
+ vshufi64x2 %%H1, %%T2, %%T0, 0xee
+ vshufi64x2 %%IN01, %%H0, %%T0, 0x88 ; L0
+ vshufi64x2 %%IN05, %%H0, %%T0, 0xdd ; L2
+ vshufi64x2 %%IN09, %%H1, %%T0, 0x88 ; L4
+ vshufi64x2 %%IN13, %%H1, %%T0, 0xdd ; L6
+
+ vshufi64x2 %%H0, %%K3, %%T0, 0x44
+ vshufi64x2 %%H1, %%K3, %%T0, 0xee
+ vshufi64x2 %%IN02, %%H0, %%T0, 0x88 ; R1
+ vshufi64x2 %%IN06, %%H0, %%T0, 0xdd ; R3
+ vshufi64x2 %%IN10, %%H1, %%T0, 0x88 ; R5
+ vshufi64x2 %%IN14, %%H1, %%T0, 0xdd ; R7
+
+ vshufi64x2 %%H0, %%T3, %%T0, 0x44
+ vshufi64x2 %%H1, %%T3, %%T0, 0xee
+ vshufi64x2 %%IN03, %%H0, %%T0, 0x88 ; L1
+ vshufi64x2 %%IN07, %%H0, %%T0, 0xdd ; L3
+ vshufi64x2 %%IN11, %%H1, %%T0, 0x88 ; L5
+ vshufi64x2 %%IN15, %%H1, %%T0, 0xdd ; L7
+%endmacro
+
+;;; ===========================================================================
+;;; DES INITIALIZATION
+;;; key schedule transposition and IV set up
+;;; ===========================================================================
+;;;
+;;; STATE_KEYS [in] - KEYS in DES OOO STATE
+;;; STATE_IV [in] - IV in DES OOO STATE
+;;; KS [out] - place to store transposed key schedule or NULL
+;;; IV0 [out] - r512; initialization vector
+;;; IV1 [out] - r512; initialization vector
+;;; T0-T27 [clobbered] - temporary r512
+%macro DES_INIT 33
+%define %%STATE_KEYS %1
+%define %%STATE_IV %2
+%define %%KS %3
+%define %%IV0 %4
+%define %%IV1 %5
+%define %%T0 %6
+%define %%T1 %7
+%define %%T2 %8
+%define %%T3 %9
+%define %%T4 %10
+%define %%T5 %11
+%define %%T6 %12
+%define %%T7 %13
+%define %%T8 %14
+%define %%T9 %15
+%define %%T10 %16
+%define %%T11 %17
+%define %%T12 %18
+%define %%T13 %19
+%define %%T14 %20
+%define %%T15 %21
+%define %%T16 %22
+%define %%T17 %23
+%define %%T18 %24
+%define %%T19 %25
+%define %%T20 %26
+%define %%T21 %27
+%define %%T22 %28
+%define %%T23 %29
+%define %%T24 %30
+%define %%T25 %31
+%define %%T26 %32
+%define %%T27 %33
+
+ ;; set up the key schedule
+ ;; - load first half of the keys
+ ;; - transpose and store
+ ;; note: we can use IV registers as temporary ones here
+%assign IDX 0
+%rep 16
+ mov IA0, [%%STATE_KEYS + (IDX*PTR_SZ)]
+ vmovdqu64 %%T %+ IDX, [IA0]
+%assign IDX (IDX + 1)
+%endrep
+ TRANSPOSE_IN %%T0, %%T1, %%T2, %%T3, %%T4, %%T5, %%T6, %%T7, %%T8, %%T9, %%T10, %%T11, %%T12, %%T13, %%T14, %%T15, %%T16, %%T17, %%T18, %%T19, %%T20, %%T21, %%T22, %%T23, %%T24, %%T25, %%T26, %%T27, %%IV0, %%IV1
+%assign IDX 0
+%rep 16
+ vmovdqu64 [%%KS + (IDX * 64)], %%T %+ IDX
+%assign IDX (IDX + 1)
+%endrep
+ ;; - load second half of the keys
+ ;; - transpose and store
+ ;; note: we can use IV registers as temporary ones here
+%assign IDX 0
+%rep 16
+ mov IA0, [%%STATE_KEYS + (IDX*PTR_SZ)]
+ vmovdqu64 %%T %+ IDX, [IA0 + 64]
+%assign IDX (IDX + 1)
+%endrep
+ TRANSPOSE_IN %%T0, %%T1, %%T2, %%T3, %%T4, %%T5, %%T6, %%T7, %%T8, %%T9, %%T10, %%T11, %%T12, %%T13, %%T14, %%T15, %%T16, %%T17, %%T18, %%T19, %%T20, %%T21, %%T22, %%T23, %%T24, %%T25, %%T26, %%T27, %%IV0, %%IV1
+%assign IDX 0
+%rep 16
+ vmovdqu64 [%%KS + (16 * 64) + (IDX * 64)], %%T %+ IDX
+%assign IDX (IDX + 1)
+%endrep
+
+ ;; set up IV
+ ;; - they are already kept transposed so this is enough to load them
+ vmovdqu64 %%IV0, [%%STATE_IV + (0 * 64)]
+ vmovdqu64 %%IV1, [%%STATE_IV + (1 * 64)]
+%endmacro
+
+;;; ===========================================================================
+;;; 3DES INITIALIZATION
+;;; key schedule transposition and IV set up
+;;; ===========================================================================
+;;;
+;;; STATE_KEYS [in] - KEYS in 3DES OOO STATE
+;;; STATE_IV [in] - IV in 3DES OOO STATE
+;;; KS1 [out] - place to store transposed key schedule or NULL
+;;; KS2 [out] - place to store transposed key schedule or NULL
+;;; KS3 [out] - place to store transposed key schedule or NULL
+;;; IV0 [out] - r512; initialization vector
+;;; IV1 [out] - r512; initialization vector
+;;; T0-T27 [clobbered] - temporary r512
+;;; DIR [in] - ENC/DEC (keys arranged in different order for enc/dec)
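+;;; For ENC the per-lane key schedule pointers are taken in order 1, 2, 3
+;;; (into KS1, KS2, KS3); for DEC the order is reversed to 3, 2, 1.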
+%macro DES3_INIT 36
+%define %%STATE_KEYS %1
+%define %%STATE_IV %2
+%define %%KS1 %3
+%define %%KS2 %4
+%define %%KS3 %5
+%define %%IV0 %6
+%define %%IV1 %7
+%define %%T0 %8
+%define %%T1 %9
+%define %%T2 %10
+%define %%T3 %11
+%define %%T4 %12
+%define %%T5 %13
+%define %%T6 %14
+%define %%T7 %15
+%define %%T8 %16
+%define %%T9 %17
+%define %%T10 %18
+%define %%T11 %19
+%define %%T12 %20
+%define %%T13 %21
+%define %%T14 %22
+%define %%T15 %23
+%define %%T16 %24
+%define %%T17 %25
+%define %%T18 %26
+%define %%T19 %27
+%define %%T20 %28
+%define %%T21 %29
+%define %%T22 %30
+%define %%T23 %31
+%define %%T24 %32
+%define %%T25 %33
+%define %%T26 %34
+%define %%T27 %35
+%define %%DIR %36
+
+%ifidn %%DIR, ENC
+%assign KEY_IDX 0
+%else
+%assign KEY_IDX 2
+%endif
+%assign KS_IDX 1
+
+%rep 3
+ ;; set up the key schedule
+ ;; - load first half of the keys
+ ;; - transpose and store
+ ;; note: we can use IV registers as temporary ones here
+
+%assign IDX 0
+%rep 16
+ mov IA0, [%%STATE_KEYS + (IDX*PTR_SZ)]
+ mov IA0, [IA0 + (KEY_IDX * PTR_SZ)]
+ vmovdqu64 %%T %+ IDX, [IA0]
+%assign IDX (IDX + 1)
+%endrep
+ TRANSPOSE_IN %%T0, %%T1, %%T2, %%T3, %%T4, %%T5, %%T6, %%T7, %%T8, %%T9, %%T10, %%T11, %%T12, %%T13, %%T14, %%T15, %%T16, %%T17, %%T18, %%T19, %%T20, %%T21, %%T22, %%T23, %%T24, %%T25, %%T26, %%T27, %%IV0, %%IV1
+%assign IDX 0
+%rep 16
+ vmovdqu64 [%%KS %+ KS_IDX + (IDX * 64)], %%T %+ IDX
+%assign IDX (IDX + 1)
+%endrep
+ ;; - load second half of the keys
+ ;; - transpose and store
+ ;; note: we can use IV registers as temporary ones here
+%assign IDX 0
+%rep 16
+ mov IA0, [%%STATE_KEYS + (IDX*PTR_SZ)]
+ mov IA0, [IA0 + (KEY_IDX * PTR_SZ)]
+ vmovdqu64 %%T %+ IDX, [IA0 + 64]
+%assign IDX (IDX + 1)
+%endrep
+ TRANSPOSE_IN %%T0, %%T1, %%T2, %%T3, %%T4, %%T5, %%T6, %%T7, %%T8, %%T9, %%T10, %%T11, %%T12, %%T13, %%T14, %%T15, %%T16, %%T17, %%T18, %%T19, %%T20, %%T21, %%T22, %%T23, %%T24, %%T25, %%T26, %%T27, %%IV0, %%IV1
+%assign IDX 0
+%rep 16
+ vmovdqu64 [%%KS %+ KS_IDX + (16 * 64) + (IDX * 64)], %%T %+ IDX
+%assign IDX (IDX + 1)
+%endrep
+
+%ifidn %%DIR, ENC
+%assign KEY_IDX (KEY_IDX + 1)
+%else
+%assign KEY_IDX (KEY_IDX - 1)
+%endif
+%assign KS_IDX (KS_IDX + 1)
+%endrep ; KEY_IDX / KS_IDX
+
+ ;; set up IV
+ ;; - they are already kept transposed so this is enough to load them
+ vmovdqu64 %%IV0, [%%STATE_IV + (0 * 64)]
+ vmovdqu64 %%IV1, [%%STATE_IV + (1 * 64)]
+
+%endmacro
+
+;;; ===========================================================================
+;;; DES FINISH
+;;; Update in/out pointers and store IV
+;;; ===========================================================================
+;;;
+;;; Needs: STATE & SIZE
+;;; IV0 [in] - r512; initialization vector
+;;; IV1 [in] - r512; initialization vector
+;;; T0-T4 [clobbered] - temporary r512 registers
+%macro DES_FINISH 7
+%define %%IV0 %1
+%define %%IV1 %2
+%define %%T0 %3
+%define %%T1 %4
+%define %%T2 %5
+%define %%T3 %6
+%define %%T4 %7
+
+ vpbroadcastq %%T4, SIZE
+ vmovdqu64 %%T0, [STATE + _des_args_in + (0 * PTR_SZ)]
+ vmovdqu64 %%T1, [STATE + _des_args_in + (8 * PTR_SZ)]
+ vmovdqu64 %%T2, [STATE + _des_args_out + (0 * PTR_SZ)]
+ vmovdqu64 %%T3, [STATE + _des_args_out + (8 * PTR_SZ)]
+ vpaddq %%T0, %%T0, %%T4
+ vpaddq %%T1, %%T1, %%T4
+ vpaddq %%T2, %%T2, %%T4
+ vpaddq %%T3, %%T3, %%T4
+ vmovdqu64 [STATE + _des_args_in + (0 * PTR_SZ)], %%T0
+ vmovdqu64 [STATE + _des_args_in + (8 * PTR_SZ)], %%T1
+ vmovdqu64 [STATE + _des_args_out + (0 * PTR_SZ)], %%T2
+ vmovdqu64 [STATE + _des_args_out + (8 * PTR_SZ)], %%T3
+
+ vmovdqu64 [STATE + _des_args_IV + (0 * 64)], %%IV0
+ vmovdqu64 [STATE + _des_args_IV + (1 * 64)], %%IV1
+%endmacro
+
+;;; ===========================================================================
+;;; DES CFB ENCRYPT/DECRYPT - ONE BLOCK ONLY
+;;; ===========================================================================
+;;;
+;;; Needs: STATE, IA0-IA2
+;;; ENC_DEC [in] - encrypt (ENC) or decrypt (DEC) selection
+;;; KS [in] - key schedule
+;;; T0-T24 [clobbered] - temporary r512
+;;; T_IN [in] - 16 * 8 byte storage
+;;; T_OUT [in] - 16 * 8 byte storage
+;;; T_MASK [in] - 16 * 4 byte storage
+;;; T_IV [in] - 16 * 8 byte storage
+;;;
+;;; NOTE: clobbers OpMask registers
+%macro DES_CFB_ONE 31
+%define %%ENC_DEC %1
+%define %%KS %2
+%define %%T0 %3
+%define %%T1 %4
+%define %%T2 %5
+%define %%T3 %6
+%define %%T4 %7
+%define %%T5 %8
+%define %%T6 %9
+%define %%T7 %10
+%define %%T8 %11
+%define %%T9 %12
+%define %%T10 %13
+%define %%T11 %14
+%define %%T12 %15
+%define %%T13 %16
+%define %%T14 %17
+%define %%T15 %18
+%define %%T16 %19
+%define %%T17 %20
+%define %%T18 %21
+%define %%T19 %22
+%define %%T20 %23
+%define %%T21 %24
+%define %%T22 %25
+%define %%T23 %26
+%define %%T24 %27
+%define %%T_IN %28
+%define %%T_OUT %29
+%define %%T_IV %30
+%define %%T_MASK %31
+
+ ;; - find mask for non-zero partial lengths
+ vpxord %%T10, %%T10, %%T10
+ vmovdqu64 %%T0, [STATE + _des_args_PLen]
+ vpcmpd k3, %%T0, %%T10, 4 ; NEQ
+ kmovw DWORD(IA0), k3
+ movzx DWORD(IA0), WORD(IA0)
+ or DWORD(IA0), DWORD(IA0)
+ jz %%_des_cfb_one_end ; no non-zero partial lengths
+
+%ifidn %%ENC_DEC, ENC
+ ;; For the encryption case we need to make sure that
+ ;; all full blocks are complete before proceeding
+ ;; with CFB partial block.
+ ;; To do that current out position is compared against
+ ;; calculated last full block position.
+ vmovdqu64 %%T1, [STATE + _des_args_out + (0*8)]
+ vmovdqu64 %%T2, [STATE + _des_args_LOut + (0*8)]
+ vmovdqu64 %%T3, [STATE + _des_args_out + (8*8)]
+ vmovdqu64 %%T4, [STATE + _des_args_LOut + (8*8)]
+ vpcmpq k4, %%T1, %%T2, 0 ; EQ
+ vpcmpq k5, %%T3, %%T4, 0 ; EQ
+ kmovw DWORD(IA1), k4
+ movzx DWORD(IA1), BYTE(IA1)
+ kmovw DWORD(IA2), k5
+ movzx DWORD(IA2), BYTE(IA2)
+ shl DWORD(IA2), 8
+ or DWORD(IA2), DWORD(IA1)
+ and DWORD(IA0), DWORD(IA2)
+ jz %%_des_cfb_one_end ; no non-zero lengths left
+ kmovw k3, DWORD(IA0)
+%endif
+ ;; Calculate ((1 << partial_bytes) - 1)
+ ;; in order to get the mask for loads and stores
+ ;; k3 & IA0 - hold valid mask
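+ ;; e.g. a lane with partial length 3 yields (1 << 3) - 1 = 0x7, later used
+ ;; as a byte mask for 3-byte vmovdqu8 loads/stores on that lane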
+ vmovdqa64 %%T1, [rel vec_ones_32b]
+ vpsllvd %%T2{k3}{z}, %%T1, %%T0
+ vpsubd %%T2{k3}{z}, %%T2, %%T1
+ vmovdqu64 [%%T_MASK], %%T2
+
+ ;; clear the selected partial lengths so they are not processed twice
+ vmovdqu32 [STATE + _des_args_PLen]{k3}, %%T10
+
+ ;; copy IV, in and out pointers
+ vmovdqu64 %%T1, [STATE + _des_args_in + (0*PTR_SZ)]
+ vmovdqu64 %%T2, [STATE + _des_args_in + (8*PTR_SZ)]
+ vmovdqu64 %%T3, [STATE + _des_args_out + (0*PTR_SZ)]
+ vmovdqu64 %%T4, [STATE + _des_args_out + (8*PTR_SZ)]
+ vmovdqu64 %%T5, [STATE + _des_args_IV + (0*64)]
+ vmovdqu64 %%T6, [STATE + _des_args_IV + (1*64)]
+ vmovdqu64 [%%T_IN + (0*PTR_SZ)], %%T1
+ vmovdqu64 [%%T_IN + (8*PTR_SZ)], %%T2
+ vmovdqu64 [%%T_OUT + (0*PTR_SZ)], %%T3
+ vmovdqu64 [%%T_OUT + (8*PTR_SZ)], %%T4
+ vmovdqu64 [%%T_IV + (0*64)], %%T5
+ vmovdqu64 [%%T_IV + (1*64)], %%T6
+
+ ;; calculate last block case mask
+ ;; - first block case requires no modifications to in/out/IV
+ vmovdqu64 %%T1, [STATE + _des_args_BLen]
+ vpcmpd k2, %%T1, %%T10, 4 ; NEQ
+ kmovw DWORD(IA1), k2
+ and DWORD(IA1), DWORD(IA0)
+ jz %%_des_cfb_one_no_last_blocks
+
+ ;; set up IV, in and out for the last block case
+ ;; - Last block needs in and out to be set differently (decryption only)
+ ;; - IA1 holds the last block mask
+%ifidn %%ENC_DEC, DEC
+ mov DWORD(IA0), DWORD(IA1)
+ mov DWORD(IA2), DWORD(IA1)
+ shr DWORD(IA1), 8
+ and DWORD(IA2), 0xff
+ kmovw k4, DWORD(IA2)
+ kmovw k5, DWORD(IA1)
+ vmovdqu64 %%T1, [STATE + _des_args_LOut + (0*PTR_SZ)]
+ vmovdqu64 %%T2, [STATE + _des_args_LOut + (8*PTR_SZ)]
+ vmovdqu64 %%T3, [STATE + _des_args_LIn + (0*PTR_SZ)]
+ vmovdqu64 %%T4, [STATE + _des_args_LIn + (8*PTR_SZ)]
+ vmovdqu64 [%%T_OUT + (0*PTR_SZ)]{k4}, %%T1
+ vmovdqu64 [%%T_OUT + (8*PTR_SZ)]{k5}, %%T2
+ vmovdqu64 [%%T_IN + (0*PTR_SZ)]{k4}, %%T3
+ vmovdqu64 [%%T_IN + (8*PTR_SZ)]{k5}, %%T4
+%endif ; decryption
+ ;; - IV has to be set differently for CFB as well
+ ;; - IA0 holds the last block mask
+%assign IDX 0
+%rep 16
+ test DWORD(IA0), (1 << IDX)
+ jz %%_des_cfb_one_copy_iv_next %+ IDX
+%ifidn %%ENC_DEC, ENC
+ mov IA2, [STATE + _des_args_LOut + (IDX*PTR_SZ)]
+%else
+ mov IA2, [STATE + _des_args_LIn + (IDX*PTR_SZ)]
+%endif
+ mov IA2, [IA2 - 8]
+ mov [%%T_IV + (0*4) + (IDX*4)], DWORD(IA2)
+ shr IA2, 32
+ mov [%%T_IV + (16*4) + (IDX*4)], DWORD(IA2)
+%%_des_cfb_one_copy_iv_next %+ IDX:
+%assign IDX (IDX + 1)
+%endrep
+
+%%_des_cfb_one_no_last_blocks:
+ ;; Uffff ... finally let's do some DES CFB
+ ;; - let's use T_IN, T_OUT, T_IV and T_MASK
+
+ ;; - load data with the corresponding masks & transpose
+ ;; - T0 to T15 will hold the data
+ xor IA0, IA0
+%assign IDX 0
+%assign K_IDX 1
+%rep 16
+ mov IA1, [%%T_IN + (IDX*PTR_SZ)]
+ mov DWORD(IA0), [%%T_MASK + (IDX*4)]
+ kmovq k %+ K_IDX, IA0
+ vmovdqu8 %%T %+ IDX{k %+ K_IDX}{z}, [IA1]
+%assign IDX (IDX + 1)
+%assign K_IDX (K_IDX + 1)
+%if K_IDX > 7
+%assign K_IDX 1 ; iterate through K1 to K7
+%endif
+%endrep
+ ;; - transpose the data in T0 to T15, T16 to T23 are clobbered
+ TRANSPOSE_IN_ONE %%T0, %%T1, %%T2, %%T3, %%T4, %%T5, %%T6, %%T7, %%T8, %%T9, %%T10, %%T11, %%T12, %%T13, %%T14, %%T15, %%T16, %%T17, %%T18, %%T19, %%T20, %%T21, %%T22, %%T23
+
+ ;; - set up IV; %%T16 and %%T17 are used as IV0 and IV1
+ vmovdqu64 %%T16, [%%T_IV + (0 * 64)] ;IV0
+ vmovdqu64 %%T17, [%%T_IV + (1 * 64)] ;IV1
+ ;; DES encrypt
+ ;; - R0 - %%T0
+ ;; - L0 - %%T1
+ DES_ENC_DEC ENC, %%T16, %%T17, %%KS, %%T2, %%T3, %%T4, %%T5, %%T6, %%T7, %%T8, %%T9, %%T10, %%T11, %%T12, %%T13
+ ;; CFB style xor with R0/L0 with IV
+ ;; - IV0 - %%T16
+ ;; - IV1 - %%T17
+ vpxord %%T2, %%T17, %%T0 ; R0 ^ IV1
+ vpxord %%T0, %%T16, %%T1 ; L0 ^ IV0
+ vmovdqa64 %%T1, %%T2
+ ;; - new R0 = L0 ^ IV0 (%%T0)
+ ;; - new L0 = R0 ^ IV1 (%%T1)
+
+ ;; Transpose the data out
+ ;; - %%T2 to %%T24 clobbered
+ TRANSPOSE_OUT_ONE %%T0, %%T1, %%T2, %%T3, %%T4, %%T5, %%T6, %%T7, %%T8, %%T9, %%T10, %%T11, %%T12, %%T13, %%T14, %%T15, %%T16, %%T17, %%T18, %%T19, %%T20, %%T21, %%T22, %%T23, %%T24
+
+ ;; Store the transposed data
+ ;; - T0 to T15 will hold the data
+ xor IA0, IA0
+%assign IDX 0
+%assign K_IDX 1
+%rep 16
+ mov IA1, [%%T_OUT + (IDX*PTR_SZ)]
+ mov DWORD(IA0), [%%T_MASK + (IDX*4)]
+ kmovq k %+ K_IDX, IA0
+ vmovdqu8 [IA1]{k %+ K_IDX}, %%T %+ IDX
+%assign IDX (IDX + 1)
+%assign K_IDX (K_IDX + 1)
+%if K_IDX > 7
+%assign K_IDX 1 ; iterate through K1 to K7
+%endif
+%endrep
+
+%ifdef SAFE_DATA
+ ;; Clear copied IV's
+ vpxorq %%T5, %%T5
+ vmovdqu64 [%%T_IV + (0*64)], %%T5
+ vmovdqu64 [%%T_IV + (1*64)], %%T5
+%endif
+
+%%_des_cfb_one_end:
+
+%endmacro
+
+;;; ===========================================================================
+;;; Converts length into mask of DES blocks
+;;; ===========================================================================
+;;;
+;;; MASK [out] - 8-bit block mask (r64); one bit per 8-byte DES block,
+;;; used for masked 64-bit granularity loads and stores
+;;; USES: IA0, IA1, IA2
+;;; ASSUMES: SIZE - OFFSET < 64
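+;;; e.g. 24 bytes remaining: 24 >> 3 = 3 blocks -> MASK = (1 << 3) - 1 = 0x7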
+%macro GET_MASK8 1
+%define %%MASK %1
+
+%ifidn IA1, rcx
+%define myrcx IA1
+%else
+%define myrcx rcx
+ mov IA1, rcx
+%endif
+ mov myrcx, SIZE
+ sub myrcx, OFFSET
+ ;; - myrcx - remaining length
+ ;; - divide by 8 (DES block size)
+ ;; - create bit mask of the result
+ mov DWORD(%%MASK), 1
+ shr DWORD(myrcx), 3
+ shl DWORD(%%MASK), BYTE(myrcx)
+ sub DWORD(%%MASK), 1
+%ifnidn IA1, rcx
+ mov rcx, IA1
+%endif
+%endmacro
+
+;;; ===========================================================================
+;;; DES CBC ENCRYPT CIPHER ONLY (1 to 8 DES blocks only)
+;;; ===========================================================================
+;;;
+;;; NUM_DES_BLOCKS [in] - 1 to 8 DES blocks only
+;;; DES_KS [in] - pointer to transposed key schedule
+;;;
+;;; NOTE: clobbers OpMask registers
+;;; REQUIRES: ZTMP0 - ZTMP13, ZW0-ZW15 (depends on NUM_DES_BLOCKS), ZIV0, ZIV1
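+;;; CBC chaining: after block N is encrypted, its ciphertext halves are XORed
+;;; into the plaintext of block N+1; the last ciphertext is written to
+;;; ZIV0/ZIV1 as the new IV.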
+%macro GEN_DES_ENC_CIPHER 2
+%define %%NUM_DES_BLOCKS %1
+%define %%DES_KS %2
+
+%assign RN 0
+%assign LN 1
+%assign RNN 2
+%assign LNN 3
+%rep %%NUM_DES_BLOCKS - 1
+ DES_ENC_DEC ENC, ZW %+ RN, ZW %+ LN, %%DES_KS, ZTMP0, ZTMP1, ZTMP2, ZTMP3, ZTMP4, ZTMP5, ZTMP6, ZTMP7, ZTMP8, ZTMP9, ZTMP10, ZTMP11
+ vpxord ZW %+ RNN, ZW %+ RNN, ZW %+ LN ; R1 = R1 ^ L0
+ vpxord ZW %+ LNN, ZW %+ LNN, ZW %+ RN ; L1 = L1 ^ R0
+%assign RN (RN + 2)
+%assign LN (LN + 2)
+%assign RNN (RNN + 2)
+%assign LNN (LNN + 2)
+%endrep
+ DES_ENC_DEC ENC, ZW %+ RN, ZW %+ LN, %%DES_KS, ZTMP0, ZTMP1, ZTMP2, ZTMP3, ZTMP4, ZTMP5, ZTMP6, ZTMP7, ZTMP8, ZTMP9, ZTMP10, ZTMP11
+ vmovdqa64 ZIV0, ZW %+ LN ; IV0 = L7
+ vmovdqa64 ZIV1, ZW %+ RN ; IV1 = R7
+%endmacro
+
+;;; ===========================================================================
+;;; DES CBC DECRYPT CIPHER ONLY (1 to 8 DES blocks only)
+;;; ===========================================================================
+;;;
+;;; NUM_DES_BLOCKS [in] - 1 to 8 DES blocks only
+;;; DES_KS [in] - pointer to transposed key schedule
+;;;
+;;; NOTE: clobbers OpMask registers
+;;; REQUIRES: ZTMP0 - ZTMP13, ZW0-ZW15 (depends on NUM_DES_BLOCKS), ZIV0, ZIV1
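+;;; CBC chaining: each ciphertext block is saved (ZTMP12/ZTMP13), decrypted,
+;;; then XORed with the previous ciphertext held in ZIV0/ZIV1; the saved
+;;; ciphertext becomes the IV for the next block.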
+%macro GEN_DES_DEC_CIPHER 2
+%define %%NUM_DES_BLOCKS %1
+%define %%DES_KS %2
+
+%assign RN 0
+%assign LN 1
+%rep %%NUM_DES_BLOCKS
+ vmovdqa64 ZTMP12, ZW %+ RN ; keep R0 as IV for the next round
+ vmovdqa64 ZTMP13, ZW %+ LN ; keep L0 as IV for the next round
+ DES_ENC_DEC DEC, ZW %+ RN, ZW %+ LN, %%DES_KS, ZTMP0, ZTMP1, ZTMP2, ZTMP3, ZTMP4, ZTMP5, ZTMP6, ZTMP7, ZTMP8, ZTMP9, ZTMP10, ZTMP11
+ vpxord ZW %+ RN, ZW %+ RN, ZIV1 ; R0 = R0 ^ IV1
+ vpxord ZW %+ LN, ZW %+ LN, ZIV0 ; L0 = L0 ^ IV0
+ vmovdqa64 ZIV0, ZTMP12
+ vmovdqa64 ZIV1, ZTMP13
+%assign RN (RN + 2)
+%assign LN (LN + 2)
+%endrep
+%endmacro
+
+;;; ===========================================================================
+;;; 3DES CBC ENCRYPT CIPHER ONLY (1 to 8 DES blocks only)
+;;; ===========================================================================
+;;;
+;;; NUM_DES_BLOCKS [in] - 1 to 8 DES blocks only
+;;; DES_KS1 [in] - pointer to transposed key schedule 1
+;;; DES_KS2 [in] - pointer to transposed key schedule 2
+;;; DES_KS3 [in] - pointer to transposed key schedule 3
+;;;
+;;; NOTE: clobbers OpMask registers
+;;; REQUIRES: ZTMP0 - ZTMP13, ZW0-ZW15 (depends on NUM_DES_BLOCKS), ZIV0, ZIV1
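+;;; Each block runs the EDE sequence: encrypt with KS1, decrypt with KS2,
+;;; encrypt with KS3, with the same CBC chaining as the single-DES case.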
+%macro GEN_3DES_ENC_CIPHER 4
+%define %%NUM_DES_BLOCKS %1
+%define %%DES_KS1 %2
+%define %%DES_KS2 %3
+%define %%DES_KS3 %4
+
+%assign RN 0
+%assign LN 1
+%assign RNN 2
+%assign LNN 3
+%rep %%NUM_DES_BLOCKS
+ ;; ENC
+ DES_ENC_DEC ENC, ZW %+ RN, ZW %+ LN, %%DES_KS1, ZTMP0, ZTMP1, ZTMP2, ZTMP3, ZTMP4, ZTMP5, ZTMP6, ZTMP7, ZTMP8, ZTMP9, ZTMP10, ZTMP11
+ ;; DEC
+ DES_ENC_DEC DEC, ZW %+ LN, ZW %+ RN, %%DES_KS2, ZTMP0, ZTMP1, ZTMP2, ZTMP3, ZTMP4, ZTMP5, ZTMP6, ZTMP7, ZTMP8, ZTMP9, ZTMP10, ZTMP11
+ ;; ENC
+ DES_ENC_DEC ENC, ZW %+ RN, ZW %+ LN, %%DES_KS3, ZTMP0, ZTMP1, ZTMP2, ZTMP3, ZTMP4, ZTMP5, ZTMP6, ZTMP7, ZTMP8, ZTMP9, ZTMP10, ZTMP11
+%if (RNN < (%%NUM_DES_BLOCKS * 2))
+ vpxord ZW %+ RNN, ZW %+ RNN, ZW %+ LN ; R1 = R1 ^ L0
+ vpxord ZW %+ LNN, ZW %+ LNN, ZW %+ RN ; L1 = L1 ^ R0
+%else
+ vmovdqa64 ZIV0, ZW %+ LN ; IV0 = L7
+ vmovdqa64 ZIV1, ZW %+ RN ; IV1 = R7
+%endif
+
+%assign RN (RN + 2)
+%assign LN (LN + 2)
+%assign RNN (RNN + 2)
+%assign LNN (LNN + 2)
+%endrep
+
+%endmacro
+
+;;; ===========================================================================
+;;; 3DES CBC DECRYPT CIPHER ONLY (1 to 8 DES blocks only)
+;;; ===========================================================================
+;;;
+;;; NUM_DES_BLOCKS [in] - 1 to 8 DES blocks only
+;;; DES_KS1 [in] - pointer to transposed key schedule 1
+;;; DES_KS2 [in] - pointer to transposed key schedule 2
+;;; DES_KS3 [in] - pointer to transposed key schedule 3
+;;;
+;;; NOTE: clobbers OpMask registers
+;;; REQUIRES: ZTMP0 - ZTMP13, ZW0-ZW15 (depends on NUM_DES_BLOCKS), ZIV0, ZIV1
+%macro GEN_3DES_DEC_CIPHER 4
+%define %%NUM_DES_BLOCKS %1
+%define %%DES_KS1 %2
+%define %%DES_KS2 %3
+%define %%DES_KS3 %4
+
+%assign RN 0
+%assign LN 1
+%rep %%NUM_DES_BLOCKS
+ vmovdqa64 ZTMP12, ZW %+ RN ; keep R0 as IV for the next round
+ vmovdqa64 ZTMP13, ZW %+ LN ; keep L0 as IV for the next round
+ ;; DEC
+ DES_ENC_DEC DEC, ZW %+ RN, ZW %+ LN, %%DES_KS1, ZTMP0, ZTMP1, ZTMP2, ZTMP3, ZTMP4, ZTMP5, ZTMP6, ZTMP7, ZTMP8, ZTMP9, ZTMP10, ZTMP11
+ ;; ENC
+ DES_ENC_DEC ENC, ZW %+ LN, ZW %+ RN, %%DES_KS2, ZTMP0, ZTMP1, ZTMP2, ZTMP3, ZTMP4, ZTMP5, ZTMP6, ZTMP7, ZTMP8, ZTMP9, ZTMP10, ZTMP11
+ ;; DEC
+ DES_ENC_DEC DEC, ZW %+ RN, ZW %+ LN, %%DES_KS3, ZTMP0, ZTMP1, ZTMP2, ZTMP3, ZTMP4, ZTMP5, ZTMP6, ZTMP7, ZTMP8, ZTMP9, ZTMP10, ZTMP11
+ vpxord ZW %+ RN, ZW %+ RN, ZIV1 ; R0 = R0 ^ IV1
+ vpxord ZW %+ LN, ZW %+ LN, ZIV0 ; L0 = L0 ^ IV0
+ vmovdqa64 ZIV0, ZTMP12
+ vmovdqa64 ZIV1, ZTMP13
+
+%assign RN (RN + 2)
+%assign LN (LN + 2)
+%endrep
+
+%endmacro
+
+;;; ===========================================================================
+;;; DES CBC / DOCSIS DES ENCRYPT
+;;; ===========================================================================
+;;;
+;;; DES_DOCSIS [in] - select between DES (DES CBC), DOCSIS (DOCSIS DES) and
+;;; 3DES (3DES CBC)
+;;;
+;;; NOTE: clobbers OpMask registers
+%macro GENERIC_DES_ENC 1
+%define %%DES_DOCSIS %1
+
+ ;; push the registers and allocate the stack frame
+ mov rax, rsp
+ sub rsp, STACKFRAME_size
+ and rsp, -64
+ mov [rsp + _rsp_save], rax ; original SP
+ mov [rsp + _gpr_save + 0*8], r12
+ mov [rsp + _gpr_save + 1*8], r13
+ mov [rsp + _gpr_save + 2*8], r14
+ mov [rsp + _gpr_save + 3*8], r15
+
+%ifnidn %%DES_DOCSIS, 3DES
+ ;; DES and DOCSIS DES
+ DES_INIT STATE + _des_args_keys, STATE + _des_args_IV, rsp + _key_sched, ZIV0, ZIV1, ZW0, ZW1, ZW2, ZW3, ZW4, ZW5, ZW6, ZW7, ZW8, ZW9, ZW10, ZW11, ZW12, ZW13, ZW14, ZW15, ZTMP0, ZTMP1, ZTMP2, ZTMP3, ZTMP4, ZTMP5, ZTMP6, ZTMP7, ZTMP8, ZTMP9, ZTMP10, ZTMP11
+%else
+ ;; 3DES
+ DES3_INIT STATE + _des_args_keys, STATE + _des_args_IV, rsp + _key_sched, rsp + _key_sched2, rsp + _key_sched3, ZIV0, ZIV1, ZW0, ZW1, ZW2, ZW3, ZW4, ZW5, ZW6, ZW7, ZW8, ZW9, ZW10, ZW11, ZW12, ZW13, ZW14, ZW15, ZTMP0, ZTMP1, ZTMP2, ZTMP3, ZTMP4, ZTMP5, ZTMP6, ZTMP7, ZTMP8, ZTMP9, ZTMP10, ZTMP11, ENC
+%endif
+ mov [rsp + _size_save], SIZE
+ and SIZE, -64
+ xor OFFSET, OFFSET
+ ;; This loop processes message in blocks of 64 bytes.
+ ;; Anything smaller than 64 bytes is handled separately after the loop.
+%%_gen_des_enc_loop:
+ cmp OFFSET, SIZE
+ jz %%_gen_des_enc_loop_end
+ ;; run loads
+ mov IA0, [STATE + _des_args_in + (0*PTR_SZ)]
+ mov IA1, [STATE + _des_args_in + (1*PTR_SZ)]
+ mov IA2, [STATE + _des_args_in + (2*PTR_SZ)]
+ mov INP0, [STATE + _des_args_in + (3*PTR_SZ)]
+ mov INP1, [STATE + _des_args_in + (4*PTR_SZ)]
+ mov INP2, [STATE + _des_args_in + (5*PTR_SZ)]
+ mov INP3, [STATE + _des_args_in + (6*PTR_SZ)]
+ mov INP4, [STATE + _des_args_in + (7*PTR_SZ)]
+ vmovdqu64 ZW0, [IA0 + OFFSET]
+ vmovdqu64 ZW1, [IA1 + OFFSET]
+ vmovdqu64 ZW2, [IA2 + OFFSET]
+ vmovdqu64 ZW3, [INP0 + OFFSET]
+ vmovdqu64 ZW4, [INP1 + OFFSET]
+ vmovdqu64 ZW5, [INP2 + OFFSET]
+ vmovdqu64 ZW6, [INP3 + OFFSET]
+ vmovdqu64 ZW7, [INP4 + OFFSET]
+
+ mov IA0, [STATE + _des_args_in + (8*PTR_SZ)]
+ mov IA1, [STATE + _des_args_in + (9*PTR_SZ)]
+ mov IA2, [STATE + _des_args_in + (10*PTR_SZ)]
+ mov INP0, [STATE + _des_args_in + (11*PTR_SZ)]
+ mov INP1, [STATE + _des_args_in + (12*PTR_SZ)]
+ mov INP2, [STATE + _des_args_in + (13*PTR_SZ)]
+ mov INP3, [STATE + _des_args_in + (14*PTR_SZ)]
+ mov INP4, [STATE + _des_args_in + (15*PTR_SZ)]
+ vmovdqu64 ZW8, [IA0 + OFFSET]
+ vmovdqu64 ZW9, [IA1 + OFFSET]
+ vmovdqu64 ZW10, [IA2 + OFFSET]
+ vmovdqu64 ZW11, [INP0 + OFFSET]
+ vmovdqu64 ZW12, [INP1 + OFFSET]
+ vmovdqu64 ZW13, [INP2 + OFFSET]
+ vmovdqu64 ZW14, [INP3 + OFFSET]
+ vmovdqu64 ZW15, [INP4 + OFFSET]
+
+ ;; Transpose input
+ TRANSPOSE_IN ZW0, ZW1, ZW2, ZW3, ZW4, ZW5, ZW6, ZW7, ZW8, ZW9, ZW10, ZW11, ZW12, ZW13, ZW14, ZW15, ZTMP0, ZTMP1, ZTMP2, ZTMP3, ZTMP4, ZTMP5, ZTMP6, ZTMP7, ZTMP8, ZTMP9, ZTMP10, ZTMP11, ZTMP12, ZTMP13
+
+ ;; DES CBC ENC comes here
+ vpxord ZW0, ZW0, ZIV0 ; R0 = R0 ^ IV0
+ vpxord ZW1, ZW1, ZIV1 ; L0 = L0 ^ IV1
+
+%ifnidn %%DES_DOCSIS, 3DES
+ GEN_DES_ENC_CIPHER 8, rsp + _key_sched
+%else
+ GEN_3DES_ENC_CIPHER 8, rsp + _key_sched, rsp + _key_sched2, rsp + _key_sched3
+%endif
+
+ ;; transpose data on output
+ TRANSPOSE_OUT ZW0, ZW1, ZW2, ZW3, ZW4, ZW5, ZW6, ZW7, ZW8, ZW9, ZW10, ZW11, ZW12, ZW13, ZW14, ZW15, ZTMP0, ZTMP1, ZTMP2, ZTMP3, ZTMP4, ZTMP5, ZTMP6, ZTMP7, ZTMP8, ZTMP9, ZTMP10, ZTMP11, ZTMP12, ZTMP13
+ ;; run stores
+ mov IA0, [STATE + _des_args_out + (0*PTR_SZ)]
+ mov IA1, [STATE + _des_args_out + (1*PTR_SZ)]
+ mov IA2, [STATE + _des_args_out + (2*PTR_SZ)]
+ mov INP0, [STATE + _des_args_out + (3*PTR_SZ)]
+ mov INP1, [STATE + _des_args_out + (4*PTR_SZ)]
+ mov INP2, [STATE + _des_args_out + (5*PTR_SZ)]
+ mov INP3, [STATE + _des_args_out + (6*PTR_SZ)]
+ mov INP4, [STATE + _des_args_out + (7*PTR_SZ)]
+ vmovdqu64 [IA0 + OFFSET], ZW0
+ vmovdqu64 [IA1 + OFFSET], ZW1
+ vmovdqu64 [IA2 + OFFSET], ZW2
+ vmovdqu64 [INP0 + OFFSET], ZW3
+ vmovdqu64 [INP1 + OFFSET], ZW4
+ vmovdqu64 [INP2 + OFFSET], ZW5
+ vmovdqu64 [INP3 + OFFSET], ZW6
+ vmovdqu64 [INP4 + OFFSET], ZW7
+
+ mov IA0, [STATE + _des_args_out + (8*PTR_SZ)]
+ mov IA1, [STATE + _des_args_out + (9*PTR_SZ)]
+ mov IA2, [STATE + _des_args_out + (10*PTR_SZ)]
+ mov INP0, [STATE + _des_args_out + (11*PTR_SZ)]
+ mov INP1, [STATE + _des_args_out + (12*PTR_SZ)]
+ mov INP2, [STATE + _des_args_out + (13*PTR_SZ)]
+ mov INP3, [STATE + _des_args_out + (14*PTR_SZ)]
+ mov INP4, [STATE + _des_args_out + (15*PTR_SZ)]
+ vmovdqu64 [IA0 + OFFSET], ZW8
+ vmovdqu64 [IA1 + OFFSET], ZW9
+ vmovdqu64 [IA2 + OFFSET], ZW10
+ vmovdqu64 [INP0 + OFFSET], ZW11
+ vmovdqu64 [INP1 + OFFSET], ZW12
+ vmovdqu64 [INP2 + OFFSET], ZW13
+ vmovdqu64 [INP3 + OFFSET], ZW14
+ vmovdqu64 [INP4 + OFFSET], ZW15
+
+ add OFFSET, 64
+ jmp %%_gen_des_enc_loop
+%%_gen_des_enc_loop_end:
+ ;; This is where we check if there is anything less than 64 bytes
+ ;; of message left for processing.
+ mov SIZE, [rsp + _size_save]
+ cmp OFFSET, SIZE
+ jz %%_gen_des_enc_part_end
+ ;; calculate min of bytes_left and 64, convert to qword mask
+ GET_MASK8 IA0 ; IA0 = mask
+
+ kmovw k7, DWORD(IA0)
+ mov [rsp + _mask_save], IA0
+ ;; run masked loads
+ mov IA0, [STATE + _des_args_in + (0*PTR_SZ)]
+ mov IA1, [STATE + _des_args_in + (1*PTR_SZ)]
+ mov IA2, [STATE + _des_args_in + (2*PTR_SZ)]
+ mov INP0, [STATE + _des_args_in + (3*PTR_SZ)]
+ mov INP1, [STATE + _des_args_in + (4*PTR_SZ)]
+ mov INP2, [STATE + _des_args_in + (5*PTR_SZ)]
+ mov INP3, [STATE + _des_args_in + (6*PTR_SZ)]
+ mov INP4, [STATE + _des_args_in + (7*PTR_SZ)]
+ vmovdqu64 ZW0{k7}{z}, [IA0 + OFFSET]
+ vmovdqu64 ZW1{k7}{z}, [IA1 + OFFSET]
+ vmovdqu64 ZW2{k7}{z}, [IA2 + OFFSET]
+ vmovdqu64 ZW3{k7}{z}, [INP0 + OFFSET]
+ vmovdqu64 ZW4{k7}{z}, [INP1 + OFFSET]
+ vmovdqu64 ZW5{k7}{z}, [INP2 + OFFSET]
+ vmovdqu64 ZW6{k7}{z}, [INP3 + OFFSET]
+ vmovdqu64 ZW7{k7}{z}, [INP4 + OFFSET]
+
+ mov IA0, [STATE + _des_args_in + (8*PTR_SZ)]
+ mov IA1, [STATE + _des_args_in + (9*PTR_SZ)]
+ mov IA2, [STATE + _des_args_in + (10*PTR_SZ)]
+ mov INP0, [STATE + _des_args_in + (11*PTR_SZ)]
+ mov INP1, [STATE + _des_args_in + (12*PTR_SZ)]
+ mov INP2, [STATE + _des_args_in + (13*PTR_SZ)]
+ mov INP3, [STATE + _des_args_in + (14*PTR_SZ)]
+ mov INP4, [STATE + _des_args_in + (15*PTR_SZ)]
+ vmovdqu64 ZW8{k7}{z}, [IA0 + OFFSET]
+ vmovdqu64 ZW9{k7}{z}, [IA1 + OFFSET]
+ vmovdqu64 ZW10{k7}{z}, [IA2 + OFFSET]
+ vmovdqu64 ZW11{k7}{z}, [INP0 + OFFSET]
+ vmovdqu64 ZW12{k7}{z}, [INP1 + OFFSET]
+ vmovdqu64 ZW13{k7}{z}, [INP2 + OFFSET]
+ vmovdqu64 ZW14{k7}{z}, [INP3 + OFFSET]
+ vmovdqu64 ZW15{k7}{z}, [INP4 + OFFSET]
+
+ ;; Transpose input
+ TRANSPOSE_IN ZW0, ZW1, ZW2, ZW3, ZW4, ZW5, ZW6, ZW7, ZW8, ZW9, ZW10, ZW11, ZW12, ZW13, ZW14, ZW15, ZTMP0, ZTMP1, ZTMP2, ZTMP3, ZTMP4, ZTMP5, ZTMP6, ZTMP7, ZTMP8, ZTMP9, ZTMP10, ZTMP11, ZTMP12, ZTMP13
+
+ ;; DES CBC ENC comes here
+ vpxord ZW0, ZW0, ZIV0 ; R0 = R0 ^ IV0
+ vpxord ZW1, ZW1, ZIV1 ; L0 = L0 ^ IV1
+
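+        ;; The saved mask is expected to be of the form (1 << n) - 1, where n is the
+        ;; number of remaining 8-byte blocks per lane (e.g. 0x0f -> 4 blocks,
+        ;; 0x3f -> 6 blocks). The compare/branch chain below dispatches on that
+        ;; block count so only the required number of blocks is ciphered before
+        ;; the masked stores.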
+ mov IA0, [rsp + _mask_save]
+ cmp BYTE(IA0), 0x0f
+ ja %%_gt_4
+ jz %%_blocks_4
+
+ cmp BYTE(IA0), 0x03
+ ja %%_blocks_3
+ jz %%_blocks_2
+
+ ;; process one block and move to transpose out
+%ifnidn %%DES_DOCSIS, 3DES
+ GEN_DES_ENC_CIPHER 1, rsp + _key_sched
+%else
+ GEN_3DES_ENC_CIPHER 1, rsp + _key_sched, rsp + _key_sched2, rsp + _key_sched3
+%endif
+ jmp %%_transpose_out
+
+%%_blocks_2:
+ ;; process two blocks and move to transpose out
+%ifnidn %%DES_DOCSIS, 3DES
+ GEN_DES_ENC_CIPHER 2, rsp + _key_sched
+%else
+ GEN_3DES_ENC_CIPHER 2, rsp + _key_sched, rsp + _key_sched2, rsp + _key_sched3
+%endif
+ jmp %%_transpose_out
+
+%%_blocks_3:
+ ;; process three blocks and move to transpose out
+%ifnidn %%DES_DOCSIS, 3DES
+ GEN_DES_ENC_CIPHER 3, rsp + _key_sched
+%else
+ GEN_3DES_ENC_CIPHER 3, rsp + _key_sched, rsp + _key_sched2, rsp + _key_sched3
+%endif
+ jmp %%_transpose_out
+
+%%_blocks_4:
+ ;; process four blocks and move to transpose out
+%ifnidn %%DES_DOCSIS, 3DES
+ GEN_DES_ENC_CIPHER 4, rsp + _key_sched
+%else
+ GEN_3DES_ENC_CIPHER 4, rsp + _key_sched, rsp + _key_sched2, rsp + _key_sched3
+%endif
+ jmp %%_transpose_out
+
+%%_gt_4:
+ cmp BYTE(IA0), 0x3f
+ ja %%_blocks_7
+ jz %%_blocks_6
+%%_blocks_5:
+ ;; process five blocks and move to transpose out
+%ifnidn %%DES_DOCSIS, 3DES
+ GEN_DES_ENC_CIPHER 5, rsp + _key_sched
+%else
+ GEN_3DES_ENC_CIPHER 5, rsp + _key_sched, rsp + _key_sched2, rsp + _key_sched3
+%endif
+ jmp %%_transpose_out
+
+%%_blocks_6:
+ ;; process six blocks and move to transpose out
+%ifnidn %%DES_DOCSIS, 3DES
+ GEN_DES_ENC_CIPHER 6, rsp + _key_sched
+%else
+ GEN_3DES_ENC_CIPHER 6, rsp + _key_sched, rsp + _key_sched2, rsp + _key_sched3
+%endif
+ jmp %%_transpose_out
+
+%%_blocks_7:
+ ;; process seven blocks and move to transpose out
+%ifnidn %%DES_DOCSIS, 3DES
+ GEN_DES_ENC_CIPHER 7, rsp + _key_sched
+%else
+ GEN_3DES_ENC_CIPHER 7, rsp + _key_sched, rsp + _key_sched2, rsp + _key_sched3
+%endif
+
+%%_transpose_out:
+ ;; transpose data on output
+ TRANSPOSE_OUT ZW0, ZW1, ZW2, ZW3, ZW4, ZW5, ZW6, ZW7, ZW8, ZW9, ZW10, ZW11, ZW12, ZW13, ZW14, ZW15, ZTMP0, ZTMP1, ZTMP2, ZTMP3, ZTMP4, ZTMP5, ZTMP6, ZTMP7, ZTMP8, ZTMP9, ZTMP10, ZTMP11, ZTMP12, ZTMP13
+
+ ;; run masked stores
+ mov IA0, [STATE + _des_args_out + (0*PTR_SZ)]
+ mov IA1, [STATE + _des_args_out + (1*PTR_SZ)]
+ mov IA2, [STATE + _des_args_out + (2*PTR_SZ)]
+ mov INP0, [STATE + _des_args_out + (3*PTR_SZ)]
+ mov INP1, [STATE + _des_args_out + (4*PTR_SZ)]
+ mov INP2, [STATE + _des_args_out + (5*PTR_SZ)]
+ mov INP3, [STATE + _des_args_out + (6*PTR_SZ)]
+ mov INP4, [STATE + _des_args_out + (7*PTR_SZ)]
+ vmovdqu64 [IA0 + OFFSET]{k7}, ZW0
+ vmovdqu64 [IA1 + OFFSET]{k7}, ZW1
+ vmovdqu64 [IA2 + OFFSET]{k7}, ZW2
+ vmovdqu64 [INP0 + OFFSET]{k7}, ZW3
+ vmovdqu64 [INP1 + OFFSET]{k7}, ZW4
+ vmovdqu64 [INP2 + OFFSET]{k7}, ZW5
+ vmovdqu64 [INP3 + OFFSET]{k7}, ZW6
+ vmovdqu64 [INP4 + OFFSET]{k7}, ZW7
+
+ mov IA0, [STATE + _des_args_out + (8*PTR_SZ)]
+ mov IA1, [STATE + _des_args_out + (9*PTR_SZ)]
+ mov IA2, [STATE + _des_args_out + (10*PTR_SZ)]
+ mov INP0, [STATE + _des_args_out + (11*PTR_SZ)]
+ mov INP1, [STATE + _des_args_out + (12*PTR_SZ)]
+ mov INP2, [STATE + _des_args_out + (13*PTR_SZ)]
+ mov INP3, [STATE + _des_args_out + (14*PTR_SZ)]
+ mov INP4, [STATE + _des_args_out + (15*PTR_SZ)]
+ vmovdqu64 [IA0 + OFFSET]{k7}, ZW8
+ vmovdqu64 [IA1 + OFFSET]{k7}, ZW9
+ vmovdqu64 [IA2 + OFFSET]{k7}, ZW10
+ vmovdqu64 [INP0 + OFFSET]{k7}, ZW11
+ vmovdqu64 [INP1 + OFFSET]{k7}, ZW12
+ vmovdqu64 [INP2 + OFFSET]{k7}, ZW13
+ vmovdqu64 [INP3 + OFFSET]{k7}, ZW14
+ vmovdqu64 [INP4 + OFFSET]{k7}, ZW15
+%%_gen_des_enc_part_end:
+
+ ;; store IV and update pointers
+ DES_FINISH ZIV0, ZIV1, ZTMP0, ZTMP1, ZTMP2, ZTMP3, ZTMP4
+
+ ;; CFB part for DOCSIS
+%ifidn %%DES_DOCSIS, DOCSIS
+ DES_CFB_ONE ENC, rsp + _key_sched, ZW0, ZW1, ZW2, ZW3, ZW4, ZW5, ZW6, ZW7, ZW8, ZW9, ZW10, ZW11, ZW12, ZW13, ZW14, ZW15, ZTMP0, ZTMP1, ZTMP2, ZTMP3, ZTMP4, ZTMP5, ZTMP6, ZTMP7, ZTMP8, rsp + _tmp_in, rsp + _tmp_out, rsp + _tmp_iv, rsp + _tmp_mask
+%endif
+
+ CLEAR_KEY_SCHEDULE %%DES_DOCSIS, ZW0
+
+ ;; restore stack pointer and registers
+ mov r12, [rsp + _gpr_save + 0*8]
+ mov r13, [rsp + _gpr_save + 1*8]
+ mov r14, [rsp + _gpr_save + 2*8]
+ mov r15, [rsp + _gpr_save + 3*8]
+ mov rsp, [rsp + _rsp_save] ; original SP
+%endmacro
+
+;;; ===========================================================================
+;;; DES CBC / DOCSIS DES DECRYPT
+;;; ===========================================================================
+;;;
+;;; DES_DOCSIS [in] - select between DES (DES CBC), DOCSIS (DOCSIS DES) and
+;;; 3DES (3DES CBC)
+;;;
+;;; NOTE: clobbers OpMask registers
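+;;; NOTE: for DOCSIS, the CFB partial-block step (DES_CFB_ONE) is executed
+;;;       before the CBC decrypt loop below, whereas the encrypt macro above
+;;;       runs it after the loop.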
+%macro GENERIC_DES_DEC 1
+%define %%DES_DOCSIS %1
+
+ ;; push the registers and allocate the stack frame
+ mov rax, rsp
+ sub rsp, STACKFRAME_size
+ and rsp, -64
+ mov [rsp + _rsp_save], rax ; original SP
+ mov [rsp + _gpr_save + 0*8], r12
+ mov [rsp + _gpr_save + 1*8], r13
+ mov [rsp + _gpr_save + 2*8], r14
+ mov [rsp + _gpr_save + 3*8], r15
+
+%ifnidn %%DES_DOCSIS, 3DES
+ ;; DES and DOCSIS
+ DES_INIT STATE + _des_args_keys, STATE + _des_args_IV, rsp + _key_sched, ZIV0, ZIV1, ZW0, ZW1, ZW2, ZW3, ZW4, ZW5, ZW6, ZW7, ZW8, ZW9, ZW10, ZW11, ZW12, ZW13, ZW14, ZW15, ZTMP0, ZTMP1, ZTMP2, ZTMP3, ZTMP4, ZTMP5, ZTMP6, ZTMP7, ZTMP8, ZTMP9, ZTMP10, ZTMP11
+%else
+ ;; 3DES
+ DES3_INIT STATE + _des_args_keys, STATE + _des_args_IV, rsp + _key_sched, rsp + _key_sched2, rsp + _key_sched3, ZIV0, ZIV1, ZW0, ZW1, ZW2, ZW3, ZW4, ZW5, ZW6, ZW7, ZW8, ZW9, ZW10, ZW11, ZW12, ZW13, ZW14, ZW15, ZTMP0, ZTMP1, ZTMP2, ZTMP3, ZTMP4, ZTMP5, ZTMP6, ZTMP7, ZTMP8, ZTMP9, ZTMP10, ZTMP11, DEC
+%endif
+
+ ;; CFB part for DOCSIS
+%ifidn %%DES_DOCSIS, DOCSIS
+ DES_CFB_ONE DEC, rsp + _key_sched, ZW0, ZW1, ZW2, ZW3, ZW4, ZW5, ZW6, ZW7, ZW8, ZW9, ZW10, ZW11, ZW12, ZW13, ZW14, ZW15, ZTMP0, ZTMP1, ZTMP2, ZTMP3, ZTMP4, ZTMP5, ZTMP6, ZTMP7, ZTMP8, rsp + _tmp_in, rsp + _tmp_out, rsp + _tmp_iv, rsp + _tmp_mask
+%endif
+
+ mov [rsp + _size_save], SIZE
+ and SIZE, -64
+ xor OFFSET, OFFSET
+        ;; This loop processes the message in blocks of 64 bytes.
+ ;; Anything smaller than 64 bytes is handled separately after the loop.
+%%_gen_des_dec_loop:
+ cmp OFFSET, SIZE
+ jz %%_gen_des_dec_loop_end
+ ;; run loads
+ mov IA0, [STATE + _des_args_in + (0*PTR_SZ)]
+ mov IA1, [STATE + _des_args_in + (1*PTR_SZ)]
+ mov IA2, [STATE + _des_args_in + (2*PTR_SZ)]
+ mov INP0, [STATE + _des_args_in + (3*PTR_SZ)]
+ mov INP1, [STATE + _des_args_in + (4*PTR_SZ)]
+ mov INP2, [STATE + _des_args_in + (5*PTR_SZ)]
+ mov INP3, [STATE + _des_args_in + (6*PTR_SZ)]
+ mov INP4, [STATE + _des_args_in + (7*PTR_SZ)]
+ vmovdqu64 ZW0, [IA0 + OFFSET]
+ vmovdqu64 ZW1, [IA1 + OFFSET]
+ vmovdqu64 ZW2, [IA2 + OFFSET]
+ vmovdqu64 ZW3, [INP0 + OFFSET]
+ vmovdqu64 ZW4, [INP1 + OFFSET]
+ vmovdqu64 ZW5, [INP2 + OFFSET]
+ vmovdqu64 ZW6, [INP3 + OFFSET]
+ vmovdqu64 ZW7, [INP4 + OFFSET]
+
+ mov IA0, [STATE + _des_args_in + (8*PTR_SZ)]
+ mov IA1, [STATE + _des_args_in + (9*PTR_SZ)]
+ mov IA2, [STATE + _des_args_in + (10*PTR_SZ)]
+ mov INP0, [STATE + _des_args_in + (11*PTR_SZ)]
+ mov INP1, [STATE + _des_args_in + (12*PTR_SZ)]
+ mov INP2, [STATE + _des_args_in + (13*PTR_SZ)]
+ mov INP3, [STATE + _des_args_in + (14*PTR_SZ)]
+ mov INP4, [STATE + _des_args_in + (15*PTR_SZ)]
+ vmovdqu64 ZW8, [IA0 + OFFSET]
+ vmovdqu64 ZW9, [IA1 + OFFSET]
+ vmovdqu64 ZW10, [IA2 + OFFSET]
+ vmovdqu64 ZW11, [INP0 + OFFSET]
+ vmovdqu64 ZW12, [INP1 + OFFSET]
+ vmovdqu64 ZW13, [INP2 + OFFSET]
+ vmovdqu64 ZW14, [INP3 + OFFSET]
+ vmovdqu64 ZW15, [INP4 + OFFSET]
+
+ ;; Transpose input
+ TRANSPOSE_IN ZW0, ZW1, ZW2, ZW3, ZW4, ZW5, ZW6, ZW7, ZW8, ZW9, ZW10, ZW11, ZW12, ZW13, ZW14, ZW15, ZTMP0, ZTMP1, ZTMP2, ZTMP3, ZTMP4, ZTMP5, ZTMP6, ZTMP7, ZTMP8, ZTMP9, ZTMP10, ZTMP11, ZTMP12, ZTMP13
+
+%ifnidn %%DES_DOCSIS, 3DES
+ ;; DES CBC DEC comes here
+ GEN_DES_DEC_CIPHER 8, rsp + _key_sched
+%else
+ ;; 3DES CBC DEC comes here
+ GEN_3DES_DEC_CIPHER 8, rsp + _key_sched, rsp + _key_sched2, rsp + _key_sched3
+%endif
+
+ ;; transpose data on output
+ TRANSPOSE_OUT ZW0, ZW1, ZW2, ZW3, ZW4, ZW5, ZW6, ZW7, ZW8, ZW9, ZW10, ZW11, ZW12, ZW13, ZW14, ZW15, ZTMP0, ZTMP1, ZTMP2, ZTMP3, ZTMP4, ZTMP5, ZTMP6, ZTMP7, ZTMP8, ZTMP9, ZTMP10, ZTMP11, ZTMP12, ZTMP13
+
+ ;; run stores
+ mov IA0, [STATE + _des_args_out + (0*PTR_SZ)]
+ mov IA1, [STATE + _des_args_out + (1*PTR_SZ)]
+ mov IA2, [STATE + _des_args_out + (2*PTR_SZ)]
+ mov INP0, [STATE + _des_args_out + (3*PTR_SZ)]
+ mov INP1, [STATE + _des_args_out + (4*PTR_SZ)]
+ mov INP2, [STATE + _des_args_out + (5*PTR_SZ)]
+ mov INP3, [STATE + _des_args_out + (6*PTR_SZ)]
+ mov INP4, [STATE + _des_args_out + (7*PTR_SZ)]
+ vmovdqu64 [IA0 + OFFSET], ZW0
+ vmovdqu64 [IA1 + OFFSET], ZW1
+ vmovdqu64 [IA2 + OFFSET], ZW2
+ vmovdqu64 [INP0 + OFFSET], ZW3
+ vmovdqu64 [INP1 + OFFSET], ZW4
+ vmovdqu64 [INP2 + OFFSET], ZW5
+ vmovdqu64 [INP3 + OFFSET], ZW6
+ vmovdqu64 [INP4 + OFFSET], ZW7
+
+ mov IA0, [STATE + _des_args_out + (8*PTR_SZ)]
+ mov IA1, [STATE + _des_args_out + (9*PTR_SZ)]
+ mov IA2, [STATE + _des_args_out + (10*PTR_SZ)]
+ mov INP0, [STATE + _des_args_out + (11*PTR_SZ)]
+ mov INP1, [STATE + _des_args_out + (12*PTR_SZ)]
+ mov INP2, [STATE + _des_args_out + (13*PTR_SZ)]
+ mov INP3, [STATE + _des_args_out + (14*PTR_SZ)]
+ mov INP4, [STATE + _des_args_out + (15*PTR_SZ)]
+ vmovdqu64 [IA0 + OFFSET], ZW8
+ vmovdqu64 [IA1 + OFFSET], ZW9
+ vmovdqu64 [IA2 + OFFSET], ZW10
+ vmovdqu64 [INP0 + OFFSET], ZW11
+ vmovdqu64 [INP1 + OFFSET], ZW12
+ vmovdqu64 [INP2 + OFFSET], ZW13
+ vmovdqu64 [INP3 + OFFSET], ZW14
+ vmovdqu64 [INP4 + OFFSET], ZW15
+
+ add OFFSET, 64
+ jmp %%_gen_des_dec_loop
+%%_gen_des_dec_loop_end:
+ ;; This is where we check if there is anything less than 64 bytes
+ ;; of message left for processing.
+ mov SIZE, [rsp + _size_save]
+ cmp OFFSET, SIZE
+ jz %%_gen_des_dec_part_end
+ ;; calculate min of bytes_left and 64, convert to qword mask
+ GET_MASK8 IA0 ; IA0 = mask
+
+ kmovw k7, DWORD(IA0)
+ mov [rsp + _mask_save], IA0
+ ;; run masked loads
+ mov IA0, [STATE + _des_args_in + (0*PTR_SZ)]
+ mov IA1, [STATE + _des_args_in + (1*PTR_SZ)]
+ mov IA2, [STATE + _des_args_in + (2*PTR_SZ)]
+ mov INP0, [STATE + _des_args_in + (3*PTR_SZ)]
+ mov INP1, [STATE + _des_args_in + (4*PTR_SZ)]
+ mov INP2, [STATE + _des_args_in + (5*PTR_SZ)]
+ mov INP3, [STATE + _des_args_in + (6*PTR_SZ)]
+ mov INP4, [STATE + _des_args_in + (7*PTR_SZ)]
+ vmovdqu64 ZW0{k7}{z}, [IA0 + OFFSET]
+ vmovdqu64 ZW1{k7}{z}, [IA1 + OFFSET]
+ vmovdqu64 ZW2{k7}{z}, [IA2 + OFFSET]
+ vmovdqu64 ZW3{k7}{z}, [INP0 + OFFSET]
+ vmovdqu64 ZW4{k7}{z}, [INP1 + OFFSET]
+ vmovdqu64 ZW5{k7}{z}, [INP2 + OFFSET]
+ vmovdqu64 ZW6{k7}{z}, [INP3 + OFFSET]
+ vmovdqu64 ZW7{k7}{z}, [INP4 + OFFSET]
+
+ mov IA0, [STATE + _des_args_in + (8*PTR_SZ)]
+ mov IA1, [STATE + _des_args_in + (9*PTR_SZ)]
+ mov IA2, [STATE + _des_args_in + (10*PTR_SZ)]
+ mov INP0, [STATE + _des_args_in + (11*PTR_SZ)]
+ mov INP1, [STATE + _des_args_in + (12*PTR_SZ)]
+ mov INP2, [STATE + _des_args_in + (13*PTR_SZ)]
+ mov INP3, [STATE + _des_args_in + (14*PTR_SZ)]
+ mov INP4, [STATE + _des_args_in + (15*PTR_SZ)]
+ vmovdqu64 ZW8{k7}{z}, [IA0 + OFFSET]
+ vmovdqu64 ZW9{k7}{z}, [IA1 + OFFSET]
+ vmovdqu64 ZW10{k7}{z}, [IA2 + OFFSET]
+ vmovdqu64 ZW11{k7}{z}, [INP0 + OFFSET]
+ vmovdqu64 ZW12{k7}{z}, [INP1 + OFFSET]
+ vmovdqu64 ZW13{k7}{z}, [INP2 + OFFSET]
+ vmovdqu64 ZW14{k7}{z}, [INP3 + OFFSET]
+ vmovdqu64 ZW15{k7}{z}, [INP4 + OFFSET]
+
+ ;; Transpose input
+ TRANSPOSE_IN ZW0, ZW1, ZW2, ZW3, ZW4, ZW5, ZW6, ZW7, ZW8, ZW9, ZW10, ZW11, ZW12, ZW13, ZW14, ZW15, ZTMP0, ZTMP1, ZTMP2, ZTMP3, ZTMP4, ZTMP5, ZTMP6, ZTMP7, ZTMP8, ZTMP9, ZTMP10, ZTMP11, ZTMP12, ZTMP13
+
+ ;; DES CBC DEC comes here
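+        ;; The saved mask encodes the number of remaining 8-byte blocks and is
+        ;; dispatched on below, exactly as in the encrypt path above.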
+ mov IA0, [rsp + _mask_save]
+ cmp BYTE(IA0), 0x0f
+ ja %%_gt_4
+ jz %%_blocks_4
+
+ cmp BYTE(IA0), 0x03
+ ja %%_blocks_3
+ jz %%_blocks_2
+ ;; process one block and move to transpose out
+%ifnidn %%DES_DOCSIS, 3DES
+ GEN_DES_DEC_CIPHER 1, rsp + _key_sched
+%else
+ GEN_3DES_DEC_CIPHER 1, rsp + _key_sched, rsp + _key_sched2, rsp + _key_sched3
+%endif
+ jmp %%_transpose_out
+
+%%_blocks_2:
+ ;; process two blocks and move to transpose out
+%ifnidn %%DES_DOCSIS, 3DES
+ GEN_DES_DEC_CIPHER 2, rsp + _key_sched
+%else
+ GEN_3DES_DEC_CIPHER 2, rsp + _key_sched, rsp + _key_sched2, rsp + _key_sched3
+%endif
+ jmp %%_transpose_out
+
+%%_blocks_3:
+ ;; process three blocks and move to transpose out
+%ifnidn %%DES_DOCSIS, 3DES
+ GEN_DES_DEC_CIPHER 3, rsp + _key_sched
+%else
+ GEN_3DES_DEC_CIPHER 3, rsp + _key_sched, rsp + _key_sched2, rsp + _key_sched3
+%endif
+ jmp %%_transpose_out
+
+%%_blocks_4:
+ ;; process four blocks and move to transpose out
+%ifnidn %%DES_DOCSIS, 3DES
+ GEN_DES_DEC_CIPHER 4, rsp + _key_sched
+%else
+ GEN_3DES_DEC_CIPHER 4, rsp + _key_sched, rsp + _key_sched2, rsp + _key_sched3
+%endif
+ jmp %%_transpose_out
+
+%%_gt_4:
+ cmp BYTE(IA0), 0x3f
+ ja %%_blocks_7
+ jz %%_blocks_6
+%%_blocks_5:
+ ;; process five blocks and move to transpose out
+%ifnidn %%DES_DOCSIS, 3DES
+ GEN_DES_DEC_CIPHER 5, rsp + _key_sched
+%else
+ GEN_3DES_DEC_CIPHER 5, rsp + _key_sched, rsp + _key_sched2, rsp + _key_sched3
+%endif
+ jmp %%_transpose_out
+
+%%_blocks_6:
+ ;; process six blocks and move to transpose out
+%ifnidn %%DES_DOCSIS, 3DES
+ GEN_DES_DEC_CIPHER 6, rsp + _key_sched
+%else
+ GEN_3DES_DEC_CIPHER 6, rsp + _key_sched, rsp + _key_sched2, rsp + _key_sched3
+%endif
+ jmp %%_transpose_out
+
+%%_blocks_7:
+ ;; process seven blocks and move to transpose out
+%ifnidn %%DES_DOCSIS, 3DES
+ GEN_DES_DEC_CIPHER 7, rsp + _key_sched
+%else
+ GEN_3DES_DEC_CIPHER 7, rsp + _key_sched, rsp + _key_sched2, rsp + _key_sched3
+%endif
+
+%%_transpose_out:
+ ;; transpose data on output
+ TRANSPOSE_OUT ZW0, ZW1, ZW2, ZW3, ZW4, ZW5, ZW6, ZW7, ZW8, ZW9, ZW10, ZW11, ZW12, ZW13, ZW14, ZW15, ZTMP0, ZTMP1, ZTMP2, ZTMP3, ZTMP4, ZTMP5, ZTMP6, ZTMP7, ZTMP8, ZTMP9, ZTMP10, ZTMP11, ZTMP12, ZTMP13
+
+ ;; run masked stores
+ mov IA0, [STATE + _des_args_out + (0*PTR_SZ)]
+ mov IA1, [STATE + _des_args_out + (1*PTR_SZ)]
+ mov IA2, [STATE + _des_args_out + (2*PTR_SZ)]
+ mov INP0, [STATE + _des_args_out + (3*PTR_SZ)]
+ mov INP1, [STATE + _des_args_out + (4*PTR_SZ)]
+ mov INP2, [STATE + _des_args_out + (5*PTR_SZ)]
+ mov INP3, [STATE + _des_args_out + (6*PTR_SZ)]
+ mov INP4, [STATE + _des_args_out + (7*PTR_SZ)]
+ vmovdqu64 [IA0 + OFFSET]{k7}, ZW0
+ vmovdqu64 [IA1 + OFFSET]{k7}, ZW1
+ vmovdqu64 [IA2 + OFFSET]{k7}, ZW2
+ vmovdqu64 [INP0 + OFFSET]{k7}, ZW3
+ vmovdqu64 [INP1 + OFFSET]{k7}, ZW4
+ vmovdqu64 [INP2 + OFFSET]{k7}, ZW5
+ vmovdqu64 [INP3 + OFFSET]{k7}, ZW6
+ vmovdqu64 [INP4 + OFFSET]{k7}, ZW7
+
+ mov IA0, [STATE + _des_args_out + (8*PTR_SZ)]
+ mov IA1, [STATE + _des_args_out + (9*PTR_SZ)]
+ mov IA2, [STATE + _des_args_out + (10*PTR_SZ)]
+ mov INP0, [STATE + _des_args_out + (11*PTR_SZ)]
+ mov INP1, [STATE + _des_args_out + (12*PTR_SZ)]
+ mov INP2, [STATE + _des_args_out + (13*PTR_SZ)]
+ mov INP3, [STATE + _des_args_out + (14*PTR_SZ)]
+ mov INP4, [STATE + _des_args_out + (15*PTR_SZ)]
+ vmovdqu64 [IA0 + OFFSET]{k7}, ZW8
+ vmovdqu64 [IA1 + OFFSET]{k7}, ZW9
+ vmovdqu64 [IA2 + OFFSET]{k7}, ZW10
+ vmovdqu64 [INP0 + OFFSET]{k7}, ZW11
+ vmovdqu64 [INP1 + OFFSET]{k7}, ZW12
+ vmovdqu64 [INP2 + OFFSET]{k7}, ZW13
+ vmovdqu64 [INP3 + OFFSET]{k7}, ZW14
+ vmovdqu64 [INP4 + OFFSET]{k7}, ZW15
+%%_gen_des_dec_part_end:
+
+ ;; store IV and update pointers
+ DES_FINISH ZIV0, ZIV1, ZTMP0, ZTMP1, ZTMP2, ZTMP3, ZTMP4
+
+ CLEAR_KEY_SCHEDULE %%DES_DOCSIS, ZW0
+
+ ;; restore stack pointer and registers
+ mov r12, [rsp + _gpr_save + 0*8]
+ mov r13, [rsp + _gpr_save + 1*8]
+ mov r14, [rsp + _gpr_save + 2*8]
+ mov r15, [rsp + _gpr_save + 3*8]
+ mov rsp, [rsp + _rsp_save] ; original SP
+%endmacro
+
+
+;;; ========================================================
+;;; DATA
+
+section .data
+default rel
+align 64
+mask_values:
+ dd 0x04000000, 0x04000000, 0x04000000, 0x04000000
+ dd 0x04000000, 0x04000000, 0x04000000, 0x04000000
+ dd 0x04000000, 0x04000000, 0x04000000, 0x04000000
+ dd 0x04000000, 0x04000000, 0x04000000, 0x04000000
+ dd 0x40240202, 0x40240202, 0x40240202, 0x40240202
+ dd 0x40240202, 0x40240202, 0x40240202, 0x40240202
+ dd 0x40240202, 0x40240202, 0x40240202, 0x40240202
+ dd 0x40240202, 0x40240202, 0x40240202, 0x40240202
+ dd 0x00001110, 0x00001110, 0x00001110, 0x00001110
+ dd 0x00001110, 0x00001110, 0x00001110, 0x00001110
+ dd 0x00001110, 0x00001110, 0x00001110, 0x00001110
+ dd 0x00001110, 0x00001110, 0x00001110, 0x00001110
+ dd 0x01088000, 0x01088000, 0x01088000, 0x01088000
+ dd 0x01088000, 0x01088000, 0x01088000, 0x01088000
+ dd 0x01088000, 0x01088000, 0x01088000, 0x01088000
+ dd 0x01088000, 0x01088000, 0x01088000, 0x01088000
+ dd 0x00000001, 0x00000001, 0x00000001, 0x00000001
+ dd 0x00000001, 0x00000001, 0x00000001, 0x00000001
+ dd 0x00000001, 0x00000001, 0x00000001, 0x00000001
+ dd 0x00000001, 0x00000001, 0x00000001, 0x00000001
+ dd 0x0081000C, 0x0081000C, 0x0081000C, 0x0081000C
+ dd 0x0081000C, 0x0081000C, 0x0081000C, 0x0081000C
+ dd 0x0081000C, 0x0081000C, 0x0081000C, 0x0081000C
+ dd 0x0081000C, 0x0081000C, 0x0081000C, 0x0081000C
+ dd 0x00000020, 0x00000020, 0x00000020, 0x00000020
+ dd 0x00000020, 0x00000020, 0x00000020, 0x00000020
+ dd 0x00000020, 0x00000020, 0x00000020, 0x00000020
+ dd 0x00000020, 0x00000020, 0x00000020, 0x00000020
+ dd 0x00000040, 0x00000040, 0x00000040, 0x00000040
+ dd 0x00000040, 0x00000040, 0x00000040, 0x00000040
+ dd 0x00000040, 0x00000040, 0x00000040, 0x00000040
+ dd 0x00000040, 0x00000040, 0x00000040, 0x00000040
+ dd 0x00400400, 0x00400400, 0x00400400, 0x00400400
+ dd 0x00400400, 0x00400400, 0x00400400, 0x00400400
+ dd 0x00400400, 0x00400400, 0x00400400, 0x00400400
+ dd 0x00400400, 0x00400400, 0x00400400, 0x00400400
+ dd 0x00000800, 0x00000800, 0x00000800, 0x00000800
+ dd 0x00000800, 0x00000800, 0x00000800, 0x00000800
+ dd 0x00000800, 0x00000800, 0x00000800, 0x00000800
+ dd 0x00000800, 0x00000800, 0x00000800, 0x00000800
+ dd 0x00002000, 0x00002000, 0x00002000, 0x00002000
+ dd 0x00002000, 0x00002000, 0x00002000, 0x00002000
+ dd 0x00002000, 0x00002000, 0x00002000, 0x00002000
+ dd 0x00002000, 0x00002000, 0x00002000, 0x00002000
+ dd 0x00100000, 0x00100000, 0x00100000, 0x00100000
+ dd 0x00100000, 0x00100000, 0x00100000, 0x00100000
+ dd 0x00100000, 0x00100000, 0x00100000, 0x00100000
+ dd 0x00100000, 0x00100000, 0x00100000, 0x00100000
+ dd 0x00004000, 0x00004000, 0x00004000, 0x00004000
+ dd 0x00004000, 0x00004000, 0x00004000, 0x00004000
+ dd 0x00004000, 0x00004000, 0x00004000, 0x00004000
+ dd 0x00004000, 0x00004000, 0x00004000, 0x00004000
+ dd 0x00020000, 0x00020000, 0x00020000, 0x00020000
+ dd 0x00020000, 0x00020000, 0x00020000, 0x00020000
+ dd 0x00020000, 0x00020000, 0x00020000, 0x00020000
+ dd 0x00020000, 0x00020000, 0x00020000, 0x00020000
+ dd 0x02000000, 0x02000000, 0x02000000, 0x02000000
+ dd 0x02000000, 0x02000000, 0x02000000, 0x02000000
+ dd 0x02000000, 0x02000000, 0x02000000, 0x02000000
+ dd 0x02000000, 0x02000000, 0x02000000, 0x02000000
+ dd 0x08000000, 0x08000000, 0x08000000, 0x08000000
+ dd 0x08000000, 0x08000000, 0x08000000, 0x08000000
+ dd 0x08000000, 0x08000000, 0x08000000, 0x08000000
+ dd 0x08000000, 0x08000000, 0x08000000, 0x08000000
+ dd 0x00000080, 0x00000080, 0x00000080, 0x00000080
+ dd 0x00000080, 0x00000080, 0x00000080, 0x00000080
+ dd 0x00000080, 0x00000080, 0x00000080, 0x00000080
+ dd 0x00000080, 0x00000080, 0x00000080, 0x00000080
+ dd 0x20000000, 0x20000000, 0x20000000, 0x20000000
+ dd 0x20000000, 0x20000000, 0x20000000, 0x20000000
+ dd 0x20000000, 0x20000000, 0x20000000, 0x20000000
+ dd 0x20000000, 0x20000000, 0x20000000, 0x20000000
+ dd 0x90000000, 0x90000000, 0x90000000, 0x90000000
+ dd 0x90000000, 0x90000000, 0x90000000, 0x90000000
+ dd 0x90000000, 0x90000000, 0x90000000, 0x90000000
+ dd 0x90000000, 0x90000000, 0x90000000, 0x90000000
+
+align 64
+init_perm_consts:
+ dd 0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f
+ dd 0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f
+ dd 0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f
+ dd 0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f
+ dd 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x0000ffff
+ dd 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x0000ffff
+ dd 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x0000ffff
+ dd 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x0000ffff
+ dd 0x33333333, 0x33333333, 0x33333333, 0x33333333
+ dd 0x33333333, 0x33333333, 0x33333333, 0x33333333
+ dd 0x33333333, 0x33333333, 0x33333333, 0x33333333
+ dd 0x33333333, 0x33333333, 0x33333333, 0x33333333
+ dd 0x00ff00ff, 0x00ff00ff, 0x00ff00ff, 0x00ff00ff
+ dd 0x00ff00ff, 0x00ff00ff, 0x00ff00ff, 0x00ff00ff
+ dd 0x00ff00ff, 0x00ff00ff, 0x00ff00ff, 0x00ff00ff
+ dd 0x00ff00ff, 0x00ff00ff, 0x00ff00ff, 0x00ff00ff
+ dd 0x55555555, 0x55555555, 0x55555555, 0x55555555
+ dd 0x55555555, 0x55555555, 0x55555555, 0x55555555
+ dd 0x55555555, 0x55555555, 0x55555555, 0x55555555
+ dd 0x55555555, 0x55555555, 0x55555555, 0x55555555
+
+;;; S-Box table
+align 64
+S_box_flipped:
+ ;; SBOX0
+ dw 0x07, 0x02, 0x0c, 0x0f, 0x04, 0x0b, 0x0a, 0x0c
+ dw 0x0b, 0x07, 0x06, 0x09, 0x0d, 0x04, 0x00, 0x0a
+ dw 0x02, 0x08, 0x05, 0x03, 0x0f, 0x06, 0x09, 0x05
+ dw 0x08, 0x01, 0x03, 0x0e, 0x01, 0x0d, 0x0e, 0x00
+ dw 0x00, 0x0f, 0x05, 0x0a, 0x07, 0x02, 0x09, 0x05
+ dw 0x0e, 0x01, 0x03, 0x0c, 0x0b, 0x08, 0x0c, 0x06
+ dw 0x0f, 0x03, 0x06, 0x0d, 0x04, 0x09, 0x0a, 0x00
+ dw 0x02, 0x04, 0x0d, 0x07, 0x08, 0x0e, 0x01, 0x0b
+ ;; SBOX1
+ dw 0x0f, 0x00, 0x09, 0x0a, 0x06, 0x05, 0x03, 0x09
+ dw 0x01, 0x0e, 0x04, 0x03, 0x0c, 0x0b, 0x0a, 0x04
+ dw 0x08, 0x07, 0x0e, 0x01, 0x0d, 0x02, 0x00, 0x0c
+ dw 0x07, 0x0d, 0x0b, 0x06, 0x02, 0x08, 0x05, 0x0f
+ dw 0x0c, 0x0b, 0x03, 0x0d, 0x0f, 0x0c, 0x06, 0x00
+ dw 0x02, 0x05, 0x08, 0x0e, 0x01, 0x02, 0x0d, 0x07
+ dw 0x0b, 0x01, 0x00, 0x06, 0x04, 0x0f, 0x09, 0x0a
+ dw 0x0e, 0x08, 0x05, 0x03, 0x07, 0x04, 0x0a, 0x09
+ ;; SBOX2
+ dw 0x05, 0x0b, 0x08, 0x0d, 0x06, 0x01, 0x0d, 0x0a
+ dw 0x09, 0x02, 0x03, 0x04, 0x0f, 0x0c, 0x04, 0x07
+ dw 0x00, 0x06, 0x0b, 0x08, 0x0c, 0x0f, 0x02, 0x05
+ dw 0x07, 0x09, 0x0e, 0x03, 0x0a, 0x00, 0x01, 0x0e
+ dw 0x0b, 0x08, 0x04, 0x02, 0x0c, 0x06, 0x03, 0x0d
+ dw 0x00, 0x0b, 0x0a, 0x07, 0x06, 0x01, 0x0f, 0x04
+ dw 0x0e, 0x05, 0x01, 0x0f, 0x02, 0x09, 0x0d, 0x0a
+ dw 0x09, 0x00, 0x07, 0x0c, 0x05, 0x0e, 0x08, 0x03
+ ;; SBOX3
+ dw 0x0e, 0x05, 0x08, 0x0f, 0x00, 0x03, 0x0d, 0x0a
+ dw 0x07, 0x09, 0x01, 0x0c, 0x09, 0x0e, 0x02, 0x01
+ dw 0x0b, 0x06, 0x04, 0x08, 0x06, 0x0d, 0x03, 0x04
+ dw 0x0c, 0x00, 0x0a, 0x07, 0x05, 0x0b, 0x0f, 0x02
+ dw 0x0b, 0x0c, 0x02, 0x09, 0x06, 0x05, 0x08, 0x03
+ dw 0x0d, 0x00, 0x04, 0x0a, 0x00, 0x0b, 0x07, 0x04
+ dw 0x01, 0x0f, 0x0e, 0x02, 0x0f, 0x08, 0x05, 0x0e
+ dw 0x0a, 0x06, 0x03, 0x0d, 0x0c, 0x01, 0x09, 0x07
+ ;; SBOX4
+ dw 0x04, 0x02, 0x01, 0x0f, 0x0e, 0x05, 0x0b, 0x06
+ dw 0x02, 0x08, 0x0c, 0x03, 0x0d, 0x0e, 0x07, 0x00
+ dw 0x03, 0x04, 0x0a, 0x09, 0x05, 0x0b, 0x00, 0x0c
+ dw 0x08, 0x0d, 0x0f, 0x0a, 0x06, 0x01, 0x09, 0x07
+ dw 0x07, 0x0d, 0x0a, 0x06, 0x02, 0x08, 0x0c, 0x05
+ dw 0x04, 0x03, 0x0f, 0x00, 0x0b, 0x04, 0x01, 0x0a
+ dw 0x0d, 0x01, 0x00, 0x0f, 0x0e, 0x07, 0x09, 0x02
+ dw 0x03, 0x0e, 0x05, 0x09, 0x08, 0x0b, 0x06, 0x0c
+ ;; SBOX5
+ dw 0x03, 0x09, 0x00, 0x0e, 0x09, 0x04, 0x07, 0x08
+ dw 0x05, 0x0f, 0x0c, 0x02, 0x06, 0x03, 0x0a, 0x0d
+ dw 0x08, 0x07, 0x0b, 0x00, 0x04, 0x01, 0x0e, 0x0b
+ dw 0x0f, 0x0a, 0x02, 0x05, 0x01, 0x0c, 0x0d, 0x06
+ dw 0x05, 0x02, 0x06, 0x0d, 0x0e, 0x09, 0x00, 0x06
+ dw 0x02, 0x04, 0x0b, 0x08, 0x09, 0x0f, 0x0c, 0x01
+ dw 0x0f, 0x0c, 0x08, 0x07, 0x03, 0x0a, 0x0d, 0x00
+ dw 0x04, 0x03, 0x07, 0x0e, 0x0a, 0x05, 0x01, 0x0b
+ ;; SBOX6
+ dw 0x02, 0x08, 0x0c, 0x05, 0x0f, 0x03, 0x0a, 0x00
+ dw 0x04, 0x0d, 0x09, 0x06, 0x01, 0x0e, 0x06, 0x09
+ dw 0x0d, 0x02, 0x03, 0x0f, 0x00, 0x0c, 0x05, 0x0a
+ dw 0x07, 0x0b, 0x0e, 0x01, 0x0b, 0x07, 0x08, 0x04
+ dw 0x0b, 0x06, 0x07, 0x09, 0x02, 0x08, 0x04, 0x07
+ dw 0x0d, 0x0b, 0x0a, 0x00, 0x08, 0x05, 0x01, 0x0c
+ dw 0x00, 0x0d, 0x0c, 0x0a, 0x09, 0x02, 0x0f, 0x04
+ dw 0x0e, 0x01, 0x03, 0x0f, 0x05, 0x0e, 0x06, 0x03
+ ;; SBOX7
+ dw 0x0b, 0x0e, 0x05, 0x00, 0x06, 0x09, 0x0a, 0x0f
+ dw 0x01, 0x02, 0x0c, 0x05, 0x0d, 0x07, 0x03, 0x0a
+ dw 0x04, 0x0d, 0x09, 0x06, 0x0f, 0x03, 0x00, 0x0c
+ dw 0x02, 0x08, 0x07, 0x0b, 0x08, 0x04, 0x0e, 0x01
+ dw 0x08, 0x04, 0x03, 0x0f, 0x05, 0x02, 0x00, 0x0c
+ dw 0x0b, 0x07, 0x06, 0x09, 0x0e, 0x01, 0x09, 0x06
+ dw 0x0f, 0x08, 0x0a, 0x03, 0x0c, 0x05, 0x07, 0x0a
+ dw 0x01, 0x0e, 0x0d, 0x00, 0x02, 0x0b, 0x04, 0x0d
+
+;;; Used in DOCSIS DES partial block scheduling: 16 x 32-bit values of 1
+align 64
+vec_ones_32b:
+ dd 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+
+align 64
+and_eu:
+ dd 0x3f003f00, 0x3f003f00, 0x3f003f00, 0x3f003f00
+ dd 0x3f003f00, 0x3f003f00, 0x3f003f00, 0x3f003f00
+ dd 0x3f003f00, 0x3f003f00, 0x3f003f00, 0x3f003f00
+ dd 0x3f003f00, 0x3f003f00, 0x3f003f00, 0x3f003f00
+
+align 64
+and_ed:
+ dd 0x003f003f, 0x003f003f, 0x003f003f, 0x003f003f
+ dd 0x003f003f, 0x003f003f, 0x003f003f, 0x003f003f
+ dd 0x003f003f, 0x003f003f, 0x003f003f, 0x003f003f
+ dd 0x003f003f, 0x003f003f, 0x003f003f, 0x003f003f
+
+align 64
+idx_e:
+ dq 0x0d0c090805040100, 0x0f0e0b0a07060302
+ dq 0x1d1c191815141110, 0x1f1e1b1a17161312
+ dq 0x2d2c292825242120, 0x2f2e2b2a27262322
+ dq 0x3d3c393835343130, 0x3f3e3b3a37363332
+
+align 64
+reg_values16bit_7:
+ dq 0x001f001f001f001f, 0x001f001f001f001f
+ dq 0x001f001f001f001f, 0x001f001f001f001f
+ dq 0x001f001f001f001f, 0x001f001f001f001f
+ dq 0x001f001f001f001f, 0x001f001f001f001f
+
+align 64
+shuffle_reg:
+ dq 0x0705060403010200, 0x0f0d0e0c0b090a08
+ dq 0x1715161413111210, 0x1f1d1e1c1b191a18
+ dq 0x2725262423212220, 0x2f2d2e2c2b292a28
+ dq 0x3735363433313230, 0x3f3d3e3c3b393a38
+
+;;; ========================================================
+;;; CODE
+section .text
+
+;;; arg 1 : pointer to DES OOO structure
+;;; arg 2 : size in bytes
+align 64
+MKGLOBAL(des_x16_cbc_enc_avx512,function,internal)
+des_x16_cbc_enc_avx512:
+ GENERIC_DES_ENC DES
+ ret
+
+;;; arg 1 : pointer to DES OOO structure
+;;; arg 2 : size in bytes
+align 64
+MKGLOBAL(des_x16_cbc_dec_avx512,function,internal)
+des_x16_cbc_dec_avx512:
+ GENERIC_DES_DEC DES
+ ret
+
+;;; arg 1 : pointer to DES OOO structure
+;;; arg 2 : size in bytes
+align 64
+MKGLOBAL(des3_x16_cbc_enc_avx512,function,internal)
+des3_x16_cbc_enc_avx512:
+ GENERIC_DES_ENC 3DES
+ ret
+
+;;; arg 1 : pointer to DES OOO structure
+;;; arg 2 : size in bytes
+align 64
+MKGLOBAL(des3_x16_cbc_dec_avx512,function,internal)
+des3_x16_cbc_dec_avx512:
+ GENERIC_DES_DEC 3DES
+ ret
+
+;;; arg 1 : pointer to DES OOO structure
+;;; arg 2 : size in bytes
+align 64
+MKGLOBAL(docsis_des_x16_enc_avx512,function,internal)
+docsis_des_x16_enc_avx512:
+ GENERIC_DES_ENC DOCSIS
+ ret
+
+;;; arg 1 : pointer to DES OOO structure
+;;; arg 2 : size in bytes
+align 64
+MKGLOBAL(docsis_des_x16_dec_avx512,function,internal)
+docsis_des_x16_dec_avx512:
+ GENERIC_DES_DEC DOCSIS
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/avx512/gcm128_avx512.asm b/src/spdk/intel-ipsec-mb/avx512/gcm128_avx512.asm
new file mode 100644
index 000000000..f9f643b40
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx512/gcm128_avx512.asm
@@ -0,0 +1,31 @@
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; Copyright(c) 2018, Intel Corporation All rights reserved.
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions
+; are met:
+; * Redistributions of source code must retain the above copyright
+; notice, this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright
+; notice, this list of conditions and the following disclaimer in
+; the documentation and/or other materials provided with the
+; distribution.
+; * Neither the name of Intel Corporation nor the names of its
+; contributors may be used to endorse or promote products derived
+; from this software without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+%define GCM128_MODE 1
+%include "avx512/gcm_avx512.asm"
diff --git a/src/spdk/intel-ipsec-mb/avx512/gcm128_vaes_avx512.asm b/src/spdk/intel-ipsec-mb/avx512/gcm128_vaes_avx512.asm
new file mode 100644
index 000000000..2465b22dd
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx512/gcm128_vaes_avx512.asm
@@ -0,0 +1,32 @@
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; Copyright(c) 2018-2019, Intel Corporation All rights reserved.
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions
+; are met:
+; * Redistributions of source code must retain the above copyright
+; notice, this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright
+; notice, this list of conditions and the following disclaimer in
+; the documentation and/or other materials provided with the
+; distribution.
+; * Neither the name of Intel Corporation nor the names of its
+; contributors may be used to endorse or promote products derived
+; from this software without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+%define GCM128_MODE 1
+;; single buffer implementation
+%include "avx512/gcm_vaes_avx512.asm"
diff --git a/src/spdk/intel-ipsec-mb/avx512/gcm192_avx512.asm b/src/spdk/intel-ipsec-mb/avx512/gcm192_avx512.asm
new file mode 100644
index 000000000..403ab2f7c
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx512/gcm192_avx512.asm
@@ -0,0 +1,31 @@
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; Copyright(c) 2018, Intel Corporation All rights reserved.
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions
+; are met:
+; * Redistributions of source code must retain the above copyright
+; notice, this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright
+; notice, this list of conditions and the following disclaimer in
+; the documentation and/or other materials provided with the
+; distribution.
+; * Neither the name of Intel Corporation nor the names of its
+; contributors may be used to endorse or promote products derived
+; from this software without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+%define GCM192_MODE 1
+%include "avx512/gcm_avx512.asm"
diff --git a/src/spdk/intel-ipsec-mb/avx512/gcm192_vaes_avx512.asm b/src/spdk/intel-ipsec-mb/avx512/gcm192_vaes_avx512.asm
new file mode 100644
index 000000000..348190a2a
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx512/gcm192_vaes_avx512.asm
@@ -0,0 +1,32 @@
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; Copyright(c) 2018-2019, Intel Corporation All rights reserved.
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions
+; are met:
+; * Redistributions of source code must retain the above copyright
+; notice, this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright
+; notice, this list of conditions and the following disclaimer in
+; the documentation and/or other materials provided with the
+; distribution.
+; * Neither the name of Intel Corporation nor the names of its
+; contributors may be used to endorse or promote products derived
+; from this software without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+%define GCM192_MODE 1
+;; single buffer implementation
+%include "avx512/gcm_vaes_avx512.asm"
diff --git a/src/spdk/intel-ipsec-mb/avx512/gcm256_avx512.asm b/src/spdk/intel-ipsec-mb/avx512/gcm256_avx512.asm
new file mode 100644
index 000000000..141b4b9ca
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx512/gcm256_avx512.asm
@@ -0,0 +1,31 @@
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; Copyright(c) 2018, Intel Corporation All rights reserved.
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions
+; are met:
+; * Redistributions of source code must retain the above copyright
+; notice, this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright
+; notice, this list of conditions and the following disclaimer in
+; the documentation and/or other materials provided with the
+; distribution.
+; * Neither the name of Intel Corporation nor the names of its
+; contributors may be used to endorse or promote products derived
+; from this software without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+%define GCM256_MODE 1
+%include "avx512/gcm_avx512.asm"
diff --git a/src/spdk/intel-ipsec-mb/avx512/gcm256_vaes_avx512.asm b/src/spdk/intel-ipsec-mb/avx512/gcm256_vaes_avx512.asm
new file mode 100644
index 000000000..4daa1b361
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx512/gcm256_vaes_avx512.asm
@@ -0,0 +1,32 @@
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; Copyright(c) 2018-2019, Intel Corporation All rights reserved.
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions
+; are met:
+; * Redistributions of source code must retain the above copyright
+; notice, this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright
+; notice, this list of conditions and the following disclaimer in
+; the documentation and/or other materials provided with the
+; distribution.
+; * Neither the name of Intel Corporation nor the names of its
+; contributors may be used to endorse or promote products derived
+; from this software without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+%define GCM256_MODE 1
+;; single buffer implementation
+%include "avx512/gcm_vaes_avx512.asm"
diff --git a/src/spdk/intel-ipsec-mb/avx512/gcm_avx512.asm b/src/spdk/intel-ipsec-mb/avx512/gcm_avx512.asm
new file mode 100644
index 000000000..db940ffe9
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx512/gcm_avx512.asm
@@ -0,0 +1,3536 @@
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; Copyright(c) 2018-2019, Intel Corporation All rights reserved.
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions
+; are met:
+; * Redistributions of source code must retain the above copyright
+; notice, this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright
+; notice, this list of conditions and the following disclaimer in
+; the documentation and/or other materials provided with the
+; distribution.
+; * Neither the name of Intel Corporation nor the names of its
+; contributors may be used to endorse or promote products derived
+; from this software without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;
+; Authors:
+; Erdinc Ozturk
+; Vinodh Gopal
+; James Guilford
+; Tomasz Kantecki
+;
+;
+; References:
+; This code was derived and highly optimized from the code described in the paper:
+;               Vinodh Gopal et al. Optimized Galois-Counter-Mode Implementation on Intel Architecture Processors. August, 2010
+; The details of the implementation are explained in:
+;               Erdinc Ozturk et al. Enabling High-Performance Galois-Counter-Mode on Intel Architecture Processors. October, 2012.
+;
+;
+;
+;
+; Assumptions:
+;
+;
+;
+; iv:
+; 0 1 2 3
+; 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+; +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+; | Salt (From the SA) |
+; +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+; | Initialization Vector |
+; | (This is the sequence number from IPSec header) |
+; +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+; | 0x1 |
+; +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+;
+;
+;
+; AAD:
+; AAD will be padded with 0 to the next 16-byte multiple
+; for example, assume AAD is a u32 vector
+;
+; if AAD is 8 bytes:
+;       AAD[2] = {A0, A1};
+; padded AAD in xmm register = {A1 A0 0 0}
+;
+; 0 1 2 3
+; 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+; +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+; | SPI (A1) |
+; +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+; | 32-bit Sequence Number (A0) |
+; +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+; | 0x0 |
+; +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+;
+; AAD Format with 32-bit Sequence Number
+;
+; if AAD is 12 bytes:
+; AAD[3] = {A0, A1, A2};
+; padded AAD in xmm register = {A2 A1 A0 0}
+;
+; 0 1 2 3
+; 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+; +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+; | SPI (A2) |
+; +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+; | 64-bit Extended Sequence Number {A1,A0} |
+; | |
+; +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+; | 0x0 |
+; +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+;
+; AAD Format with 64-bit Extended Sequence Number
+;
+;
+; aadLen:
+;       From the definition of the spec, aadLen must be a multiple of 4 bytes.
+;       The code additionally supports any aadLen value.
+;
+; TLen:
+;       From the definition of the spec, TLen can only be 8, 12 or 16 bytes.
+;
+; poly = x^128 + x^127 + x^126 + x^121 + 1
+; Throughout the code, one-tab and two-tab indentations are used: one tab for the GHASH part, two tabs for the AES part.
+;
+
+%include "include/os.asm"
+%include "include/reg_sizes.asm"
+%include "include/clear_regs.asm"
+%include "include/gcm_defines.asm"
+%include "include/gcm_keys_avx2_avx512.asm"
+
+%include "mb_mgr_datastruct.asm"
+%include "job_aes_hmac.asm"
+%include "include/memcpy.asm"
+
+%ifndef GCM128_MODE
+%ifndef GCM192_MODE
+%ifndef GCM256_MODE
+%error "No GCM mode selected for gcm_avx512.asm!"
+%endif
+%endif
+%endif
+
+;; Decide on AES-GCM key size to compile for
+%ifdef GCM128_MODE
+%define NROUNDS 9
+%define FN_NAME(x,y) aes_gcm_ %+ x %+ _128 %+ y %+ avx512
+%endif
+
+%ifdef GCM192_MODE
+%define NROUNDS 11
+%define FN_NAME(x,y) aes_gcm_ %+ x %+ _192 %+ y %+ avx512
+%endif
+
+%ifdef GCM256_MODE
+%define NROUNDS 13
+%define FN_NAME(x,y) aes_gcm_ %+ x %+ _256 %+ y %+ avx512
+%endif
+
+section .text
+default rel
+
+; need to push 4 registers onto the stack; the offset below accounts for them
+%define STACK_OFFSET 8*4
+
+%ifidn __OUTPUT_FORMAT__, win64
+ %define XMM_STORAGE 16*10
+%else
+ %define XMM_STORAGE 0
+%endif
+
+%define TMP2 16*0 ; Temporary storage for AES State 2 (State 1 is stored in an XMM register)
+%define TMP3 16*1 ; Temporary storage for AES State 3
+%define TMP4 16*2 ; Temporary storage for AES State 4
+%define TMP5 16*3 ; Temporary storage for AES State 5
+%define TMP6 16*4 ; Temporary storage for AES State 6
+%define TMP7 16*5 ; Temporary storage for AES State 7
+%define TMP8 16*6 ; Temporary storage for AES State 8
+%define LOCAL_STORAGE 16*7
+%define VARIABLE_OFFSET LOCAL_STORAGE + XMM_STORAGE
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; Utility Macros
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; GHASH_MUL MACRO to implement: Data*HashKey mod (128,127,126,121,0)
+; Input: A and B (128-bits each, bit-reflected)
+; Output: C = A*B*x mod poly, (i.e. >>1 )
+; To compute GH = GH*HashKey mod poly, give HK = HashKey<<1 mod poly as input
+; GH = GH * HK * x mod poly which is equivalent to GH*HashKey mod poly.
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
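+; Note (see the Gopal et al. reference above): for bit-reflected operands, a
+; carry-less multiply of rev(A) and rev(B) yields rev(A*B) shifted right by one
+; bit. Supplying HK = HashKey<<1 mod poly compensates for that shift, so the
+; data blocks never need per-call bit reflection.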
+%macro GHASH_MUL 7
+%define %%GH %1 ; 16 Bytes
+%define %%HK %2 ; 16 Bytes
+%define %%T1 %3
+%define %%T2 %4
+%define %%T3 %5
+%define %%T4 %6
+%define %%T5 %7
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+ vpclmulqdq %%T1, %%GH, %%HK, 0x11 ; %%T1 = a1*b1
+ vpclmulqdq %%T2, %%GH, %%HK, 0x00 ; %%T2 = a0*b0
+ vpclmulqdq %%T3, %%GH, %%HK, 0x01 ; %%T3 = a1*b0
+ vpclmulqdq %%GH, %%GH, %%HK, 0x10 ; %%GH = a0*b1
+ vpxor %%GH, %%GH, %%T3
+
+
+ vpsrldq %%T3, %%GH, 8 ; shift-R %%GH 2 DWs
+ vpslldq %%GH, %%GH, 8 ; shift-L %%GH 2 DWs
+
+ vpxor %%T1, %%T1, %%T3
+ vpxor %%GH, %%GH, %%T2
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;first phase of the reduction
+ vmovdqu %%T3, [rel POLY2]
+
+ vpclmulqdq %%T2, %%T3, %%GH, 0x01
+ vpslldq %%T2, %%T2, 8 ; shift-L %%T2 2 DWs
+
+ vpxor %%GH, %%GH, %%T2 ; first phase of the reduction complete
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;second phase of the reduction
+ vpclmulqdq %%T2, %%T3, %%GH, 0x00
+ vpsrldq %%T2, %%T2, 4 ; shift-R %%T2 1 DW (Shift-R only 1-DW to obtain 2-DWs shift-R)
+
+ vpclmulqdq %%GH, %%T3, %%GH, 0x10
+ vpslldq %%GH, %%GH, 4 ; shift-L %%GH 1 DW (Shift-L 1-DW to obtain result with no shifts)
+
+ vpxor %%GH, %%GH, %%T2 ; second phase of the reduction complete
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ vpxor %%GH, %%GH, %%T1 ; the result is in %%GH
+%endmacro
+
+
+; In PRECOMPUTE, the commands filling HashKey_i_k are not required for avx512
+; functions, but are kept to allow users to switch cpu architectures between calls
+; of pre, init, update, and finalize.
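+; The stored powers HashKey^2 .. HashKey^8 allow the GHASH loops (e.g.
+; CALC_AAD_HASH below) to fold up to eight blocks per reduction, multiplying
+; block j of an 8-block group by HashKey^(8-j).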
+%macro PRECOMPUTE 8
+%define %%GDATA %1
+%define %%HK %2
+%define %%T1 %3
+%define %%T2 %4
+%define %%T3 %5
+%define %%T4 %6
+%define %%T5 %7
+%define %%T6 %8
+
+        ; HashKey_i_k holds XORed values of the low and high parts of HashKey_i
+ vmovdqa %%T5, %%HK
+
+ GHASH_MUL %%T5, %%HK, %%T1, %%T3, %%T4, %%T6, %%T2 ; %%T5 = HashKey^2<<1 mod poly
+ vmovdqu [%%GDATA + HashKey_2], %%T5 ; [HashKey_2] = HashKey^2<<1 mod poly
+
+ GHASH_MUL %%T5, %%HK, %%T1, %%T3, %%T4, %%T6, %%T2 ; %%T5 = HashKey^3<<1 mod poly
+ vmovdqu [%%GDATA + HashKey_3], %%T5
+
+ GHASH_MUL %%T5, %%HK, %%T1, %%T3, %%T4, %%T6, %%T2 ; %%T5 = HashKey^4<<1 mod poly
+ vmovdqu [%%GDATA + HashKey_4], %%T5
+
+ GHASH_MUL %%T5, %%HK, %%T1, %%T3, %%T4, %%T6, %%T2 ; %%T5 = HashKey^5<<1 mod poly
+ vmovdqu [%%GDATA + HashKey_5], %%T5
+
+ GHASH_MUL %%T5, %%HK, %%T1, %%T3, %%T4, %%T6, %%T2 ; %%T5 = HashKey^6<<1 mod poly
+ vmovdqu [%%GDATA + HashKey_6], %%T5
+
+ GHASH_MUL %%T5, %%HK, %%T1, %%T3, %%T4, %%T6, %%T2 ; %%T5 = HashKey^7<<1 mod poly
+ vmovdqu [%%GDATA + HashKey_7], %%T5
+
+ GHASH_MUL %%T5, %%HK, %%T1, %%T3, %%T4, %%T6, %%T2 ; %%T5 = HashKey^8<<1 mod poly
+ vmovdqu [%%GDATA + HashKey_8], %%T5
+%endmacro
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; READ_SMALL_DATA_INPUT: Packs xmm register with data when data input is less than 16 bytes.
+; Returns 0 if data has length 0.
+; Input: The input data (INPUT), that data's length (LENGTH).
+; Output: The packed xmm register (OUTPUT).
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
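+; Implementation note: LENGTH indexes byte_len_to_mask_table (2-byte entries,
+; hence the LENGTH*2 addressing) to obtain a byte-granularity load mask, so the
+; masked, zero-extending vmovdqu8 below reads only LENGTH bytes of the input.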
+%macro READ_SMALL_DATA_INPUT 4
+%define %%OUTPUT %1 ; %%OUTPUT is an xmm register
+%define %%INPUT %2
+%define %%LENGTH %3
+%define %%TMP1 %4
+
+ lea %%TMP1, [rel byte_len_to_mask_table]
+%ifidn __OUTPUT_FORMAT__, win64
+ add %%TMP1, %%LENGTH
+ add %%TMP1, %%LENGTH
+ kmovw k1, [%%TMP1]
+%else
+ kmovw k1, [%%TMP1 + %%LENGTH*2]
+%endif
+ vmovdqu8 XWORD(%%OUTPUT){k1}{z}, [%%INPUT]
+
+%endmacro ; READ_SMALL_DATA_INPUT
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; CALC_AAD_HASH: Calculates the hash of the data which will not be encrypted.
+; Input: The input data (A_IN), that data's length (A_LEN), and the hash key (HASH_KEY).
+; Output: The hash of the data (AAD_HASH).
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
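+; The 128-byte loop below XORs the running hash into block 0, multiplies it by
+; HashKey_8 and block i (i = 1..7) by HashKey_(8-i), then performs a single
+; reduction per 128 bytes of AAD. The tail (when 16 or more bytes remain) picks
+; its starting hash-key power based on the number of whole blocks left.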
+%macro CALC_AAD_HASH 13
+%define %%A_IN %1
+%define %%A_LEN %2
+%define %%AAD_HASH %3
+%define %%GDATA_KEY %4
+%define %%XTMP0 %5 ; xmm temp reg 5
+%define %%XTMP1 %6      ; xmm temp reg 6
+%define %%XTMP2 %7
+%define %%XTMP3 %8
+%define %%XTMP4 %9
+%define %%XTMP5 %10     ; xmm temp reg 10
+%define %%T1 %11 ; temp reg 1
+%define %%T2 %12
+%define %%T3 %13
+
+
+ mov %%T1, %%A_IN ; T1 = AAD
+ mov %%T2, %%A_LEN ; T2 = aadLen
+ vpxor %%AAD_HASH, %%AAD_HASH
+
+%%_get_AAD_loop128:
+ cmp %%T2, 128
+ jl %%_exit_AAD_loop128
+
+ vmovdqu %%XTMP0, [%%T1 + 16*0]
+ vpshufb %%XTMP0, [rel SHUF_MASK]
+
+ vpxor %%XTMP0, %%AAD_HASH
+
+ vmovdqu %%XTMP5, [%%GDATA_KEY + HashKey_8]
+ vpclmulqdq %%XTMP1, %%XTMP0, %%XTMP5, 0x11 ; %%T1 = a1*b1
+ vpclmulqdq %%XTMP2, %%XTMP0, %%XTMP5, 0x00 ; %%T2 = a0*b0
+ vpclmulqdq %%XTMP3, %%XTMP0, %%XTMP5, 0x01 ; %%T3 = a1*b0
+ vpclmulqdq %%XTMP4, %%XTMP0, %%XTMP5, 0x10 ; %%T4 = a0*b1
+ vpxor %%XTMP3, %%XTMP3, %%XTMP4 ; %%T3 = a1*b0 + a0*b1
+
+%assign i 1
+%assign j 7
+%rep 7
+ vmovdqu %%XTMP0, [%%T1 + 16*i]
+ vpshufb %%XTMP0, [rel SHUF_MASK]
+
+ vmovdqu %%XTMP5, [%%GDATA_KEY + HashKey_ %+ j]
+ vpclmulqdq %%XTMP4, %%XTMP0, %%XTMP5, 0x11 ; %%T1 = T1 + a1*b1
+ vpxor %%XTMP1, %%XTMP1, %%XTMP4
+
+ vpclmulqdq %%XTMP4, %%XTMP0, %%XTMP5, 0x00 ; %%T2 = T2 + a0*b0
+ vpxor %%XTMP2, %%XTMP2, %%XTMP4
+
+ vpclmulqdq %%XTMP4, %%XTMP0, %%XTMP5, 0x01 ; %%T3 = T3 + a1*b0 + a0*b1
+ vpxor %%XTMP3, %%XTMP3, %%XTMP4
+ vpclmulqdq %%XTMP4, %%XTMP0, %%XTMP5, 0x10
+ vpxor %%XTMP3, %%XTMP3, %%XTMP4
+%assign i (i + 1)
+%assign j (j - 1)
+%endrep
+
+ vpslldq %%XTMP4, %%XTMP3, 8 ; shift-L 2 DWs
+ vpsrldq %%XTMP3, %%XTMP3, 8 ; shift-R 2 DWs
+ vpxor %%XTMP2, %%XTMP2, %%XTMP4
+ vpxor %%XTMP1, %%XTMP1, %%XTMP3 ; accumulate the results in %%T1(M):%%T2(L)
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;first phase of the reduction
+ vmovdqa %%XTMP5, [rel POLY2]
+ vpclmulqdq %%XTMP0, %%XTMP5, %%XTMP2, 0x01
+ vpslldq %%XTMP0, %%XTMP0, 8 ; shift-L xmm2 2 DWs
+ vpxor %%XTMP2, %%XTMP2, %%XTMP0 ; first phase of the reduction complete
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;second phase of the reduction
+ vpclmulqdq %%XTMP3, %%XTMP5, %%XTMP2, 0x00
+ vpsrldq %%XTMP3, %%XTMP3, 4 ; shift-R 1 DW (Shift-R only 1-DW to obtain 2-DWs shift-R)
+
+ vpclmulqdq %%XTMP4, %%XTMP5, %%XTMP2, 0x10
+ vpslldq %%XTMP4, %%XTMP4, 4 ; shift-L 1 DW (Shift-L 1-DW to obtain result with no shifts)
+
+ vpxor %%XTMP4, %%XTMP4, %%XTMP3 ; second phase of the reduction complete
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ vpxor %%AAD_HASH, %%XTMP1, %%XTMP4 ; the result is in %%T1
+
+ sub %%T2, 128
+ je %%_CALC_AAD_done
+
+ add %%T1, 128
+ jmp %%_get_AAD_loop128
+
+%%_exit_AAD_loop128:
+ cmp %%T2, 16
+ jl %%_get_small_AAD_block
+
+ ;; calculate hash_key position to start with
+ mov %%T3, %%T2
+ and %%T3, -16 ; 1 to 7 blocks possible here
+ neg %%T3
+ add %%T3, HashKey_1 + 16
+ lea %%T3, [%%GDATA_KEY + %%T3]
+
+ vmovdqu %%XTMP0, [%%T1]
+ vpshufb %%XTMP0, [rel SHUF_MASK]
+
+ vpxor %%XTMP0, %%AAD_HASH
+
+ vmovdqu %%XTMP5, [%%T3]
+ vpclmulqdq %%XTMP1, %%XTMP0, %%XTMP5, 0x11 ; %%T1 = a1*b1
+ vpclmulqdq %%XTMP2, %%XTMP0, %%XTMP5, 0x00 ; %%T2 = a0*b0
+ vpclmulqdq %%XTMP3, %%XTMP0, %%XTMP5, 0x01 ; %%T3 = a1*b0
+ vpclmulqdq %%XTMP4, %%XTMP0, %%XTMP5, 0x10 ; %%T4 = a0*b1
+ vpxor %%XTMP3, %%XTMP3, %%XTMP4 ; %%T3 = a1*b0 + a0*b1
+
+ add %%T3, 16 ; move to next hashkey
+ add %%T1, 16 ; move to next data block
+ sub %%T2, 16
+ cmp %%T2, 16
+ jl %%_AAD_reduce
+
+%%_AAD_blocks:
+ vmovdqu %%XTMP0, [%%T1]
+ vpshufb %%XTMP0, [rel SHUF_MASK]
+
+ vmovdqu %%XTMP5, [%%T3]
+ vpclmulqdq %%XTMP4, %%XTMP0, %%XTMP5, 0x11 ; %%T1 = T1 + a1*b1
+ vpxor %%XTMP1, %%XTMP1, %%XTMP4
+
+ vpclmulqdq %%XTMP4, %%XTMP0, %%XTMP5, 0x00 ; %%T2 = T2 + a0*b0
+ vpxor %%XTMP2, %%XTMP2, %%XTMP4
+
+ vpclmulqdq %%XTMP4, %%XTMP0, %%XTMP5, 0x01 ; %%T3 = T3 + a1*b0 + a0*b1
+ vpxor %%XTMP3, %%XTMP3, %%XTMP4
+ vpclmulqdq %%XTMP4, %%XTMP0, %%XTMP5, 0x10
+ vpxor %%XTMP3, %%XTMP3, %%XTMP4
+
+ add %%T3, 16 ; move to next hashkey
+ add %%T1, 16
+ sub %%T2, 16
+ cmp %%T2, 16
+ jl %%_AAD_reduce
+ jmp %%_AAD_blocks
+
+%%_AAD_reduce:
+ vpslldq %%XTMP4, %%XTMP3, 8 ; shift-L 2 DWs
+ vpsrldq %%XTMP3, %%XTMP3, 8 ; shift-R 2 DWs
+ vpxor %%XTMP2, %%XTMP2, %%XTMP4
+ vpxor %%XTMP1, %%XTMP1, %%XTMP3 ; accumulate the results in %%T1(M):%%T2(L)
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;first phase of the reduction
+ vmovdqa %%XTMP5, [rel POLY2]
+ vpclmulqdq %%XTMP0, %%XTMP5, %%XTMP2, 0x01
+ vpslldq %%XTMP0, %%XTMP0, 8 ; shift-L xmm2 2 DWs
+ vpxor %%XTMP2, %%XTMP2, %%XTMP0 ; first phase of the reduction complete
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;second phase of the reduction
+ vpclmulqdq %%XTMP3, %%XTMP5, %%XTMP2, 0x00
+ vpsrldq %%XTMP3, %%XTMP3, 4 ; shift-R 1 DW (Shift-R only 1-DW to obtain 2-DWs shift-R)
+
+ vpclmulqdq %%XTMP4, %%XTMP5, %%XTMP2, 0x10
+ vpslldq %%XTMP4, %%XTMP4, 4 ; shift-L 1 DW (Shift-L 1-DW to obtain result with no shifts)
+
+ vpxor %%XTMP4, %%XTMP4, %%XTMP3 ; second phase of the reduction complete
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ vpxor %%AAD_HASH, %%XTMP1, %%XTMP4 ; the result is in %%T1
+
+ or %%T2, %%T2
+ je %%_CALC_AAD_done
+
+%%_get_small_AAD_block:
+ vmovdqu %%XTMP0, [%%GDATA_KEY + HashKey]
+ READ_SMALL_DATA_INPUT %%XTMP1, %%T1, %%T2, %%T3
+ ;byte-reflect the AAD data
+ vpshufb %%XTMP1, [rel SHUF_MASK]
+ vpxor %%AAD_HASH, %%XTMP1
+ GHASH_MUL %%AAD_HASH, %%XTMP0, %%XTMP1, %%XTMP2, %%XTMP3, %%XTMP4, %%XTMP5
+
+%%_CALC_AAD_done:
+
+%endmacro ; CALC_AAD_HASH
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; PARTIAL_BLOCK: Handles the encryption/decryption and GHASH update of partial blocks carried between update calls.
+; Requires the input data be at least 1 byte long.
+; Input: gcm_key_data * (GDATA_KEY), gcm_context_data *(GDATA_CTX), input text (PLAIN_CYPH_IN),
+; input text length (PLAIN_CYPH_LEN), the current data offset (DATA_OFFSET),
+; and whether encoding or decoding (ENC_DEC)
+; Output: A cypher of the first partial block (CYPH_PLAIN_OUT), and updated GDATA_CTX
+; Clobbers rax, r10, r12, r13, r15, xmm0, xmm1, xmm2, xmm3, xmm5, xmm6, xmm9, xmm10, xmm11, xmm13
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
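+; In outline: PBlockLen bytes of the previous counter block were already consumed and
+; PBlockEncKey holds E(K, Yn) for that block. The saved keystream is rotated with a
+; SHIFT_MASK based shuffle so the new input lines up behind the bytes already used,
+; XORed with the input, masked to the valid byte range and folded into AAD_HASH, then
+; the completed bytes are written out and PBlockLen is either cleared (block completed)
+; or increased by PLAIN_CYPH_LEN.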
+%macro PARTIAL_BLOCK 8
+%define %%GDATA_KEY %1
+%define %%GDATA_CTX %2
+%define %%CYPH_PLAIN_OUT %3
+%define %%PLAIN_CYPH_IN %4
+%define %%PLAIN_CYPH_LEN %5
+%define %%DATA_OFFSET %6
+%define %%AAD_HASH %7
+%define %%ENC_DEC %8
+
+ mov r13, [%%GDATA_CTX + PBlockLen]
+ cmp r13, 0
+ je %%_partial_block_done ;Leave Macro if no partial blocks
+
+ cmp %%PLAIN_CYPH_LEN, 16 ;Read in input data without over reading
+ jl %%_fewer_than_16_bytes
+        VXLDR   xmm1, [%%PLAIN_CYPH_IN]                     ;If 16 bytes or more of data, just fill the xmm register
+ jmp %%_data_read
+
+%%_fewer_than_16_bytes:
+ lea r10, [%%PLAIN_CYPH_IN]
+ READ_SMALL_DATA_INPUT xmm1, r10, %%PLAIN_CYPH_LEN, rax
+
+%%_data_read: ;Finished reading in data
+
+ vmovdqu xmm9, [%%GDATA_CTX + PBlockEncKey] ;xmm9 = my_ctx_data.partial_block_enc_key
+ vmovdqu xmm13, [%%GDATA_KEY + HashKey]
+
+ lea r12, [rel SHIFT_MASK]
+
+ add r12, r13 ; adjust the shuffle mask pointer to be able to shift r13 bytes (16-r13 is the number of bytes in plaintext mod 16)
+ vmovdqu xmm2, [r12] ; get the appropriate shuffle mask
+ vpshufb xmm9, xmm2 ;shift right r13 bytes
+
+%ifidn %%ENC_DEC, DEC
+ vmovdqa xmm3, xmm1
+%endif
+ vpxor xmm9, xmm1 ; Cyphertext XOR E(K, Yn)
+
+ mov r15, %%PLAIN_CYPH_LEN
+ add r15, r13
+ sub r15, 16 ;Set r15 to be the amount of data left in CYPH_PLAIN_IN after filling the block
+        jge     %%_no_extra_mask        ;Determine if the partial block is not being completely filled and adjust the shift mask accordingly
+ sub r12, r15
+%%_no_extra_mask:
+
+ vmovdqu xmm1, [r12 + ALL_F - SHIFT_MASK]; get the appropriate mask to mask out bottom r13 bytes of xmm9
+ vpand xmm9, xmm1 ; mask out bottom r13 bytes of xmm9
+
+%ifidn %%ENC_DEC, DEC
+ vpand xmm3, xmm1
+ vpshufb xmm3, [rel SHUF_MASK]
+ vpshufb xmm3, xmm2
+ vpxor %%AAD_HASH, xmm3
+%else
+ vpshufb xmm9, [rel SHUF_MASK]
+ vpshufb xmm9, xmm2
+ vpxor %%AAD_HASH, xmm9
+%endif
+ cmp r15,0
+ jl %%_partial_incomplete
+
+ GHASH_MUL %%AAD_HASH, xmm13, xmm0, xmm10, xmm11, xmm5, xmm6 ;GHASH computation for the last <16 Byte block
+ xor rax,rax
+ mov [%%GDATA_CTX + PBlockLen], rax
+ jmp %%_enc_dec_done
+%%_partial_incomplete:
+%ifidn __OUTPUT_FORMAT__, win64
+ mov rax, %%PLAIN_CYPH_LEN
+ add [%%GDATA_CTX + PBlockLen], rax
+%else
+ add [%%GDATA_CTX + PBlockLen], %%PLAIN_CYPH_LEN
+%endif
+%%_enc_dec_done:
+ vmovdqu [%%GDATA_CTX + AadHash], %%AAD_HASH
+
+%ifidn %%ENC_DEC, ENC
+ vpshufb xmm9, [rel SHUF_MASK] ; shuffle xmm9 back to output as ciphertext
+ vpshufb xmm9, xmm2
+%endif
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ; output encrypted Bytes
+ cmp r15,0
+ jl %%_partial_fill
+ mov r12, r13
+ mov r13, 16
+ sub r13, r12 ; Set r13 to be the number of bytes to write out
+ jmp %%_count_set
+%%_partial_fill:
+ mov r13, %%PLAIN_CYPH_LEN
+%%_count_set:
+ lea rax, [rel byte_len_to_mask_table]
+ kmovw k1, [rax + r13*2]
+ vmovdqu8 [%%CYPH_PLAIN_OUT + %%DATA_OFFSET]{k1}, xmm9
+ add %%DATA_OFFSET, r13
+%%_partial_block_done:
+%endmacro ; PARTIAL_BLOCK
+
+
+%macro GHASH_SINGLE_MUL 9
+%define %%GDATA %1
+%define %%HASHKEY %2
+%define %%CIPHER %3
+%define %%STATE_11 %4
+%define %%STATE_00 %5
+%define %%STATE_MID %6
+%define %%T1 %7
+%define %%T2 %8
+%define %%FIRST %9
+
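+        ;; Schoolbook 128x128 carry-less multiply of %%CIPHER by the hash key power at
+        ;; [%%GDATA + %%HASHKEY]: the four vpclmulqdq results are kept as three partial
+        ;; accumulators (STATE_11 = a1*b1, STATE_00 = a0*b0, STATE_MID = a1*b0 ^ a0*b1)
+        ;; so that several blocks can be summed before a single reduction.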
+ vmovdqu %%T1, [%%GDATA + %%HASHKEY]
+%ifidn %%FIRST, first
+ vpclmulqdq %%STATE_11, %%CIPHER, %%T1, 0x11 ; %%T4 = a1*b1
+ vpclmulqdq %%STATE_00, %%CIPHER, %%T1, 0x00 ; %%T4_2 = a0*b0
+ vpclmulqdq %%STATE_MID, %%CIPHER, %%T1, 0x01 ; %%T6 = a1*b0
+ vpclmulqdq %%T2, %%CIPHER, %%T1, 0x10 ; %%T5 = a0*b1
+ vpxor %%STATE_MID, %%STATE_MID, %%T2
+%else
+ vpclmulqdq %%T2, %%CIPHER, %%T1, 0x11
+ vpxor %%STATE_11, %%STATE_11, %%T2
+
+ vpclmulqdq %%T2, %%CIPHER, %%T1, 0x00
+ vpxor %%STATE_00, %%STATE_00, %%T2
+
+ vpclmulqdq %%T2, %%CIPHER, %%T1, 0x01
+ vpxor %%STATE_MID, %%STATE_MID, %%T2
+
+ vpclmulqdq %%T2, %%CIPHER, %%T1, 0x10
+ vpxor %%STATE_MID, %%STATE_MID, %%T2
+%endif
+
+%endmacro
+
+; if a = number of total plaintext bytes
+; b = floor(a/16)
+; %%num_initial_blocks = b mod 8;
+; encrypt the initial %%num_initial_blocks blocks and apply ghash on the ciphertext
+; %%GDATA_KEY, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, r14 are used as pointers only, not modified.
+; Updated AAD_HASH is returned in %%T3
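+; For example, a = 200 bytes gives b = 12 full blocks and %%num_initial_blocks = 12 mod 8 = 4:
+; the first 4 blocks are CTR-encrypted up front and their ciphertext is then GHASHed while
+; the AES rounds for the following 8 counter blocks are in flight (the GHASH_SINGLE_MUL
+; calls are interleaved between the vaesenc rounds below).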
+
+%macro INITIAL_BLOCKS 23
+%define %%GDATA_KEY %1
+%define %%CYPH_PLAIN_OUT %2
+%define %%PLAIN_CYPH_IN %3
+%define %%LENGTH %4
+%define %%DATA_OFFSET %5
+%define %%num_initial_blocks %6 ; can be 0, 1, 2, 3, 4, 5, 6 or 7
+%define %%T1 %7
+%define %%T2 %8
+%define %%T3 %9
+%define %%T4 %10
+%define %%T5 %11
+%define %%CTR %12
+%define %%XMM1 %13
+%define %%XMM2 %14
+%define %%XMM3 %15
+%define %%XMM4 %16
+%define %%XMM5 %17
+%define %%XMM6 %18
+%define %%XMM7 %19
+%define %%XMM8 %20
+%define %%T6 %21
+%define %%T_key %22
+%define %%ENC_DEC %23
+
+%assign i (8-%%num_initial_blocks)
+ ;; Move AAD_HASH to temp reg
+ vmovdqu %%T2, %%XMM8
+ ;; Start AES for %%num_initial_blocks blocks
+ ;; vmovdqu %%CTR, [%%GDATA_CTX + CurCount] ; %%CTR = Y0
+
+%assign i (9-%%num_initial_blocks)
+%rep %%num_initial_blocks
+ vpaddd %%CTR, %%CTR, [rel ONE] ; INCR Y0
+ vmovdqa reg(i), %%CTR
+ vpshufb reg(i), [rel SHUF_MASK] ; perform a 16Byte swap
+%assign i (i+1)
+%endrep
+
+%if(%%num_initial_blocks>0)
+vmovdqu %%T_key, [%%GDATA_KEY+16*0]
+%assign i (9-%%num_initial_blocks)
+%rep %%num_initial_blocks
+ vpxor reg(i),reg(i),%%T_key
+%assign i (i+1)
+%endrep
+
+%assign j 1
+%rep NROUNDS
+vmovdqu %%T_key, [%%GDATA_KEY+16*j]
+%assign i (9-%%num_initial_blocks)
+%rep %%num_initial_blocks
+ vaesenc reg(i),%%T_key
+%assign i (i+1)
+%endrep
+
+%assign j (j+1)
+%endrep
+
+
+vmovdqu %%T_key, [%%GDATA_KEY+16*j]
+%assign i (9-%%num_initial_blocks)
+%rep %%num_initial_blocks
+ vaesenclast reg(i),%%T_key
+%assign i (i+1)
+%endrep
+
+%endif ; %if(%%num_initial_blocks>0)
+
+
+
+%assign i (9-%%num_initial_blocks)
+%rep %%num_initial_blocks
+ VXLDR %%T1, [%%PLAIN_CYPH_IN + %%DATA_OFFSET]
+ vpxor reg(i), reg(i), %%T1
+ ;; Write back ciphertext for %%num_initial_blocks blocks
+ VXSTR [%%CYPH_PLAIN_OUT + %%DATA_OFFSET], reg(i)
+ add %%DATA_OFFSET, 16
+ %ifidn %%ENC_DEC, DEC
+ vmovdqa reg(i), %%T1
+ %endif
+ ;; Prepare ciphertext for GHASH computations
+ vpshufb reg(i), [rel SHUF_MASK]
+%assign i (i+1)
+%endrep
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+%assign i (9-%%num_initial_blocks)
+%if(%%num_initial_blocks>0)
+ vmovdqa %%T3, reg(i)
+%assign i (i+1)
+%endif
+%if %%num_initial_blocks>1
+%rep %%num_initial_blocks-1
+ vmovdqu [rsp + TMP %+ i], reg(i)
+%assign i (i+1)
+%endrep
+%endif
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;; Prepare 8 counter blocks and perform rounds of AES cipher on
+ ;; them, load plain/cipher text and store cipher/plain text.
+ ;; Stitch GHASH computation in between AES rounds.
+ vpaddd %%XMM1, %%CTR, [rel ONE] ; INCR Y0
+ vpaddd %%XMM2, %%CTR, [rel TWO] ; INCR Y0
+ vpaddd %%XMM3, %%XMM1, [rel TWO] ; INCR Y0
+ vpaddd %%XMM4, %%XMM2, [rel TWO] ; INCR Y0
+ vpaddd %%XMM5, %%XMM3, [rel TWO] ; INCR Y0
+ vpaddd %%XMM6, %%XMM4, [rel TWO] ; INCR Y0
+ vpaddd %%XMM7, %%XMM5, [rel TWO] ; INCR Y0
+ vpaddd %%XMM8, %%XMM6, [rel TWO] ; INCR Y0
+ vmovdqa %%CTR, %%XMM8
+
+ vpshufb %%XMM1, [rel SHUF_MASK] ; perform a 16Byte swap
+ vpshufb %%XMM2, [rel SHUF_MASK] ; perform a 16Byte swap
+ vpshufb %%XMM3, [rel SHUF_MASK] ; perform a 16Byte swap
+ vpshufb %%XMM4, [rel SHUF_MASK] ; perform a 16Byte swap
+ vpshufb %%XMM5, [rel SHUF_MASK] ; perform a 16Byte swap
+ vpshufb %%XMM6, [rel SHUF_MASK] ; perform a 16Byte swap
+ vpshufb %%XMM7, [rel SHUF_MASK] ; perform a 16Byte swap
+ vpshufb %%XMM8, [rel SHUF_MASK] ; perform a 16Byte swap
+
+ vmovdqu %%T_key, [%%GDATA_KEY+16*0]
+ vpxor %%XMM1, %%XMM1, %%T_key
+ vpxor %%XMM2, %%XMM2, %%T_key
+ vpxor %%XMM3, %%XMM3, %%T_key
+ vpxor %%XMM4, %%XMM4, %%T_key
+ vpxor %%XMM5, %%XMM5, %%T_key
+ vpxor %%XMM6, %%XMM6, %%T_key
+ vpxor %%XMM7, %%XMM7, %%T_key
+ vpxor %%XMM8, %%XMM8, %%T_key
+
+%assign i (8-%%num_initial_blocks)
+%assign j (9-%%num_initial_blocks)
+%assign k (%%num_initial_blocks)
+
+%define %%T4_2 %%T4
+%if(%%num_initial_blocks>0)
+ ;; Hash in AES state
+ ;; T2 - incoming AAD hash
+ vpxor %%T2, %%T3
+
+ ;; GDATA, HASHKEY, CIPHER,
+ ;; STATE_11, STATE_00, STATE_MID, T1, T2
+ GHASH_SINGLE_MUL %%GDATA_KEY, HashKey_ %+ k, %%T2, \
+ %%T1, %%T4, %%T6, %%T5, %%T3, first
+%endif
+
+ vmovdqu %%T_key, [%%GDATA_KEY+16*1]
+ vaesenc %%XMM1, %%T_key
+ vaesenc %%XMM2, %%T_key
+ vaesenc %%XMM3, %%T_key
+ vaesenc %%XMM4, %%T_key
+ vaesenc %%XMM5, %%T_key
+ vaesenc %%XMM6, %%T_key
+ vaesenc %%XMM7, %%T_key
+ vaesenc %%XMM8, %%T_key
+
+ vmovdqu %%T_key, [%%GDATA_KEY+16*2]
+ vaesenc %%XMM1, %%T_key
+ vaesenc %%XMM2, %%T_key
+ vaesenc %%XMM3, %%T_key
+ vaesenc %%XMM4, %%T_key
+ vaesenc %%XMM5, %%T_key
+ vaesenc %%XMM6, %%T_key
+ vaesenc %%XMM7, %%T_key
+ vaesenc %%XMM8, %%T_key
+
+%assign i (i+1)
+%assign j (j+1)
+%assign k (k-1)
+%if(%%num_initial_blocks>1)
+ ;; GDATA, HASHKEY, CIPHER,
+ ;; STATE_11, STATE_00, STATE_MID, T1, T2
+ vmovdqu %%T2, [rsp + TMP %+ j]
+ GHASH_SINGLE_MUL %%GDATA_KEY, HashKey_ %+ k, %%T2, \
+ %%T1, %%T4, %%T6, %%T5, %%T3, not_first
+%endif
+
+ vmovdqu %%T_key, [%%GDATA_KEY+16*3]
+ vaesenc %%XMM1, %%T_key
+ vaesenc %%XMM2, %%T_key
+ vaesenc %%XMM3, %%T_key
+ vaesenc %%XMM4, %%T_key
+ vaesenc %%XMM5, %%T_key
+ vaesenc %%XMM6, %%T_key
+ vaesenc %%XMM7, %%T_key
+ vaesenc %%XMM8, %%T_key
+
+ vmovdqu %%T_key, [%%GDATA_KEY+16*4]
+ vaesenc %%XMM1, %%T_key
+ vaesenc %%XMM2, %%T_key
+ vaesenc %%XMM3, %%T_key
+ vaesenc %%XMM4, %%T_key
+ vaesenc %%XMM5, %%T_key
+ vaesenc %%XMM6, %%T_key
+ vaesenc %%XMM7, %%T_key
+ vaesenc %%XMM8, %%T_key
+
+%assign i (i+1)
+%assign j (j+1)
+%assign k (k-1)
+%if(%%num_initial_blocks>2)
+ ;; GDATA, HASHKEY, CIPHER,
+ ;; STATE_11, STATE_00, STATE_MID, T1, T2
+ vmovdqu %%T2, [rsp + TMP %+ j]
+ GHASH_SINGLE_MUL %%GDATA_KEY, HashKey_ %+ k, %%T2, \
+ %%T1, %%T4, %%T6, %%T5, %%T3, not_first
+%endif
+
+%assign i (i+1)
+%assign j (j+1)
+%assign k (k-1)
+%if(%%num_initial_blocks>3)
+ ;; GDATA, HASHKEY, CIPHER,
+ ;; STATE_11, STATE_00, STATE_MID, T1, T2
+ vmovdqu %%T2, [rsp + TMP %+ j]
+ GHASH_SINGLE_MUL %%GDATA_KEY, HashKey_ %+ k, %%T2, \
+ %%T1, %%T4, %%T6, %%T5, %%T3, not_first
+%endif
+
+ vmovdqu %%T_key, [%%GDATA_KEY+16*5]
+ vaesenc %%XMM1, %%T_key
+ vaesenc %%XMM2, %%T_key
+ vaesenc %%XMM3, %%T_key
+ vaesenc %%XMM4, %%T_key
+ vaesenc %%XMM5, %%T_key
+ vaesenc %%XMM6, %%T_key
+ vaesenc %%XMM7, %%T_key
+ vaesenc %%XMM8, %%T_key
+
+ vmovdqu %%T_key, [%%GDATA_KEY+16*6]
+ vaesenc %%XMM1, %%T_key
+ vaesenc %%XMM2, %%T_key
+ vaesenc %%XMM3, %%T_key
+ vaesenc %%XMM4, %%T_key
+ vaesenc %%XMM5, %%T_key
+ vaesenc %%XMM6, %%T_key
+ vaesenc %%XMM7, %%T_key
+ vaesenc %%XMM8, %%T_key
+
+%assign i (i+1)
+%assign j (j+1)
+%assign k (k-1)
+%if(%%num_initial_blocks>4)
+ ;; GDATA, HASHKEY, CIPHER,
+ ;; STATE_11, STATE_00, STATE_MID, T1, T2
+ vmovdqu %%T2, [rsp + TMP %+ j]
+ GHASH_SINGLE_MUL %%GDATA_KEY, HashKey_ %+ k, %%T2, \
+ %%T1, %%T4, %%T6, %%T5, %%T3, not_first
+%endif
+
+ vmovdqu %%T_key, [%%GDATA_KEY+16*7]
+ vaesenc %%XMM1, %%T_key
+ vaesenc %%XMM2, %%T_key
+ vaesenc %%XMM3, %%T_key
+ vaesenc %%XMM4, %%T_key
+ vaesenc %%XMM5, %%T_key
+ vaesenc %%XMM6, %%T_key
+ vaesenc %%XMM7, %%T_key
+ vaesenc %%XMM8, %%T_key
+
+ vmovdqu %%T_key, [%%GDATA_KEY+16*8]
+ vaesenc %%XMM1, %%T_key
+ vaesenc %%XMM2, %%T_key
+ vaesenc %%XMM3, %%T_key
+ vaesenc %%XMM4, %%T_key
+ vaesenc %%XMM5, %%T_key
+ vaesenc %%XMM6, %%T_key
+ vaesenc %%XMM7, %%T_key
+ vaesenc %%XMM8, %%T_key
+
+%assign i (i+1)
+%assign j (j+1)
+%assign k (k-1)
+%if(%%num_initial_blocks>5)
+ ;; GDATA, HASHKEY, CIPHER,
+ ;; STATE_11, STATE_00, STATE_MID, T1, T2
+ vmovdqu %%T2, [rsp + TMP %+ j]
+ GHASH_SINGLE_MUL %%GDATA_KEY, HashKey_ %+ k, %%T2, \
+ %%T1, %%T4, %%T6, %%T5, %%T3, not_first
+%endif
+
+ vmovdqu %%T_key, [%%GDATA_KEY+16*9]
+ vaesenc %%XMM1, %%T_key
+ vaesenc %%XMM2, %%T_key
+ vaesenc %%XMM3, %%T_key
+ vaesenc %%XMM4, %%T_key
+ vaesenc %%XMM5, %%T_key
+ vaesenc %%XMM6, %%T_key
+ vaesenc %%XMM7, %%T_key
+ vaesenc %%XMM8, %%T_key
+
+%ifndef GCM128_MODE
+ vmovdqu %%T_key, [%%GDATA_KEY+16*10]
+ vaesenc %%XMM1, %%T_key
+ vaesenc %%XMM2, %%T_key
+ vaesenc %%XMM3, %%T_key
+ vaesenc %%XMM4, %%T_key
+ vaesenc %%XMM5, %%T_key
+ vaesenc %%XMM6, %%T_key
+ vaesenc %%XMM7, %%T_key
+ vaesenc %%XMM8, %%T_key
+%endif
+
+%assign i (i+1)
+%assign j (j+1)
+%assign k (k-1)
+%if(%%num_initial_blocks>6)
+ ;; GDATA, HASHKEY, CIPHER,
+ ;; STATE_11, STATE_00, STATE_MID, T1, T2
+ vmovdqu %%T2, [rsp + TMP %+ j]
+ GHASH_SINGLE_MUL %%GDATA_KEY, HashKey_ %+ k, %%T2, \
+ %%T1, %%T4, %%T6, %%T5, %%T3, not_first
+%endif
+
+%ifdef GCM128_MODE
+ vmovdqu %%T_key, [%%GDATA_KEY+16*10]
+ vaesenclast %%XMM1, %%T_key
+ vaesenclast %%XMM2, %%T_key
+ vaesenclast %%XMM3, %%T_key
+ vaesenclast %%XMM4, %%T_key
+ vaesenclast %%XMM5, %%T_key
+ vaesenclast %%XMM6, %%T_key
+ vaesenclast %%XMM7, %%T_key
+ vaesenclast %%XMM8, %%T_key
+%endif
+
+%ifdef GCM192_MODE
+ vmovdqu %%T_key, [%%GDATA_KEY+16*11]
+ vaesenc %%XMM1, %%T_key
+ vaesenc %%XMM2, %%T_key
+ vaesenc %%XMM3, %%T_key
+ vaesenc %%XMM4, %%T_key
+ vaesenc %%XMM5, %%T_key
+ vaesenc %%XMM6, %%T_key
+ vaesenc %%XMM7, %%T_key
+ vaesenc %%XMM8, %%T_key
+
+ vmovdqu %%T_key, [%%GDATA_KEY+16*12]
+ vaesenclast %%XMM1, %%T_key
+ vaesenclast %%XMM2, %%T_key
+ vaesenclast %%XMM3, %%T_key
+ vaesenclast %%XMM4, %%T_key
+ vaesenclast %%XMM5, %%T_key
+ vaesenclast %%XMM6, %%T_key
+ vaesenclast %%XMM7, %%T_key
+ vaesenclast %%XMM8, %%T_key
+%endif
+%ifdef GCM256_MODE
+ vmovdqu %%T_key, [%%GDATA_KEY+16*11]
+ vaesenc %%XMM1, %%T_key
+ vaesenc %%XMM2, %%T_key
+ vaesenc %%XMM3, %%T_key
+ vaesenc %%XMM4, %%T_key
+ vaesenc %%XMM5, %%T_key
+ vaesenc %%XMM6, %%T_key
+ vaesenc %%XMM7, %%T_key
+ vaesenc %%XMM8, %%T_key
+
+ vmovdqu %%T_key, [%%GDATA_KEY+16*12]
+ vaesenc %%XMM1, %%T_key
+ vaesenc %%XMM2, %%T_key
+ vaesenc %%XMM3, %%T_key
+ vaesenc %%XMM4, %%T_key
+ vaesenc %%XMM5, %%T_key
+ vaesenc %%XMM6, %%T_key
+ vaesenc %%XMM7, %%T_key
+ vaesenc %%XMM8, %%T_key
+%endif
+
+%assign i (i+1)
+%assign j (j+1)
+%assign k (k-1)
+%if(%%num_initial_blocks>7)
+ ;; GDATA, HASHKEY, CIPHER,
+ ;; STATE_11, STATE_00, STATE_MID, T1, T2
+ vmovdqu %%T2, [rsp + TMP %+ j]
+ GHASH_SINGLE_MUL %%GDATA_KEY, HashKey_ %+ k, %%T2, \
+ %%T1, %%T4, %%T6, %%T5, %%T3, not_first
+%endif
+
+%ifdef GCM256_MODE ; GCM256
+ vmovdqu %%T_key, [%%GDATA_KEY+16*13]
+ vaesenc %%XMM1, %%T_key
+ vaesenc %%XMM2, %%T_key
+ vaesenc %%XMM3, %%T_key
+ vaesenc %%XMM4, %%T_key
+ vaesenc %%XMM5, %%T_key
+ vaesenc %%XMM6, %%T_key
+ vaesenc %%XMM7, %%T_key
+ vaesenc %%XMM8, %%T_key
+
+ vmovdqu %%T_key, [%%GDATA_KEY+16*14]
+ vaesenclast %%XMM1, %%T_key
+ vaesenclast %%XMM2, %%T_key
+ vaesenclast %%XMM3, %%T_key
+ vaesenclast %%XMM4, %%T_key
+ vaesenclast %%XMM5, %%T_key
+ vaesenclast %%XMM6, %%T_key
+ vaesenclast %%XMM7, %%T_key
+ vaesenclast %%XMM8, %%T_key
+%endif ; GCM256 mode
+
+%if(%%num_initial_blocks>0)
+        vpsrldq %%T3, %%T6, 8           ; shift-R %%T6 2 DWs (into %%T3)
+        vpslldq %%T6, %%T6, 8           ; shift-L %%T6 2 DWs
+ vpxor %%T1, %%T1, %%T3 ; accumulate the results in %%T1:%%T4
+ vpxor %%T4, %%T6, %%T4
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ; First phase of the reduction
+ vmovdqu %%T3, [rel POLY2]
+
+ vpclmulqdq %%T2, %%T3, %%T4, 0x01
+ vpslldq %%T2, %%T2, 8 ; shift-L xmm2 2 DWs
+
+ ;; First phase of the reduction complete
+ vpxor %%T4, %%T4, %%T2
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ; Second phase of the reduction
+ vpclmulqdq %%T2, %%T3, %%T4, 0x00
+ ;; Shift-R xmm2 1 DW (Shift-R only 1-DW to obtain 2-DWs shift-R)
+ vpsrldq %%T2, %%T2, 4
+
+ vpclmulqdq %%T4, %%T3, %%T4, 0x10
+ ;; Shift-L xmm0 1 DW (Shift-L 1-DW to obtain result with no shifts)
+ vpslldq %%T4, %%T4, 4
+ ;; Second phase of the reduction complete
+ vpxor %%T4, %%T4, %%T2
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ; The result is in %%T3
+ vpxor %%T3, %%T1, %%T4
+%else
+ ;; The hash should end up in T3
+ vmovdqa %%T3, %%T2
+%endif
+
+ ;; Final hash is now in T3
+%if %%num_initial_blocks > 0
+ ;; NOTE: obsolete in case %%num_initial_blocks = 0
+ sub %%LENGTH, 16*%%num_initial_blocks
+%endif
+
+ VXLDR %%T1, [%%PLAIN_CYPH_IN + %%DATA_OFFSET + 16*0]
+ vpxor %%XMM1, %%XMM1, %%T1
+ VXSTR [%%CYPH_PLAIN_OUT + %%DATA_OFFSET + 16*0], %%XMM1
+ %ifidn %%ENC_DEC, DEC
+ vmovdqa %%XMM1, %%T1
+ %endif
+
+ VXLDR %%T1, [%%PLAIN_CYPH_IN + %%DATA_OFFSET + 16*1]
+ vpxor %%XMM2, %%XMM2, %%T1
+ VXSTR [%%CYPH_PLAIN_OUT + %%DATA_OFFSET + 16*1], %%XMM2
+ %ifidn %%ENC_DEC, DEC
+ vmovdqa %%XMM2, %%T1
+ %endif
+
+ VXLDR %%T1, [%%PLAIN_CYPH_IN + %%DATA_OFFSET + 16*2]
+ vpxor %%XMM3, %%XMM3, %%T1
+ VXSTR [%%CYPH_PLAIN_OUT + %%DATA_OFFSET + 16*2], %%XMM3
+ %ifidn %%ENC_DEC, DEC
+ vmovdqa %%XMM3, %%T1
+ %endif
+
+ VXLDR %%T1, [%%PLAIN_CYPH_IN + %%DATA_OFFSET + 16*3]
+ vpxor %%XMM4, %%XMM4, %%T1
+ VXSTR [%%CYPH_PLAIN_OUT + %%DATA_OFFSET + 16*3], %%XMM4
+ %ifidn %%ENC_DEC, DEC
+ vmovdqa %%XMM4, %%T1
+ %endif
+
+ VXLDR %%T1, [%%PLAIN_CYPH_IN + %%DATA_OFFSET + 16*4]
+ vpxor %%XMM5, %%XMM5, %%T1
+ VXSTR [%%CYPH_PLAIN_OUT + %%DATA_OFFSET + 16*4], %%XMM5
+ %ifidn %%ENC_DEC, DEC
+ vmovdqa %%XMM5, %%T1
+ %endif
+
+ VXLDR %%T1, [%%PLAIN_CYPH_IN + %%DATA_OFFSET + 16*5]
+ vpxor %%XMM6, %%XMM6, %%T1
+ VXSTR [%%CYPH_PLAIN_OUT + %%DATA_OFFSET + 16*5], %%XMM6
+ %ifidn %%ENC_DEC, DEC
+ vmovdqa %%XMM6, %%T1
+ %endif
+
+ VXLDR %%T1, [%%PLAIN_CYPH_IN + %%DATA_OFFSET + 16*6]
+ vpxor %%XMM7, %%XMM7, %%T1
+ VXSTR [%%CYPH_PLAIN_OUT + %%DATA_OFFSET + 16*6], %%XMM7
+ %ifidn %%ENC_DEC, DEC
+ vmovdqa %%XMM7, %%T1
+ %endif
+
+%if %%num_initial_blocks > 0
+        ;; NOTE: 'jl' is never taken for %%num_initial_blocks = 0.
+        ;; This macro is executed for lengths of 128 bytes and up;
+        ;; zero length is checked in GCM_ENC_DEC.
+        ;; If the last block is partial then the xor will be done later
+        ;; in ENCRYPT_FINAL_PARTIAL_BLOCK.
+        ;; We know it is partial if LENGTH - 16*num_initial_blocks < 128.
+ cmp %%LENGTH, 128
+ jl %%_initial_skip_last_word_write
+%endif
+ VXLDR %%T1, [%%PLAIN_CYPH_IN + %%DATA_OFFSET + 16*7]
+ vpxor %%XMM8, %%XMM8, %%T1
+ VXSTR [%%CYPH_PLAIN_OUT + %%DATA_OFFSET + 16*7], %%XMM8
+ %ifidn %%ENC_DEC, DEC
+ vmovdqa %%XMM8, %%T1
+ %endif
+
+ ;; Update %%LENGTH with the number of blocks processed
+ sub %%LENGTH, 16
+ add %%DATA_OFFSET, 16
+%%_initial_skip_last_word_write:
+ sub %%LENGTH, 128-16
+ add %%DATA_OFFSET, 128-16
+
+ vpshufb %%XMM1, [rel SHUF_MASK] ; perform a 16Byte swap
+ ;; Combine GHASHed value with the corresponding ciphertext
+ vpxor %%XMM1, %%XMM1, %%T3
+ vpshufb %%XMM2, [rel SHUF_MASK] ; perform a 16Byte swap
+ vpshufb %%XMM3, [rel SHUF_MASK] ; perform a 16Byte swap
+ vpshufb %%XMM4, [rel SHUF_MASK] ; perform a 16Byte swap
+ vpshufb %%XMM5, [rel SHUF_MASK] ; perform a 16Byte swap
+ vpshufb %%XMM6, [rel SHUF_MASK] ; perform a 16Byte swap
+ vpshufb %%XMM7, [rel SHUF_MASK] ; perform a 16Byte swap
+ vpshufb %%XMM8, [rel SHUF_MASK] ; perform a 16Byte swap
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+%%_initial_blocks_done:
+
+
+%endmacro
+
+;;; INITIAL_BLOCKS macro with support for a partial final block.
+;;; num_initial_blocks is expected to include the partial final block
+;;; in the count.
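+;;; All but the last block are encrypted and hashed in the usual way; a final block shorter
+;;; than 16 bytes goes through ENCRYPT_FINAL_PARTIAL_BLOCK and its length and encrypted
+;;; counter block are saved in PBlockLen/PBlockEncKey. For multi_call instances that
+;;; partial block is left out of the hash here and is picked up later by update/finalize.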
+%macro INITIAL_BLOCKS_PARTIAL 25
+%define %%GDATA_KEY %1
+%define %%GDATA_CTX %2
+%define %%CYPH_PLAIN_OUT %3
+%define %%PLAIN_CYPH_IN %4
+%define %%LENGTH %5
+%define %%DATA_OFFSET %6
+%define %%num_initial_blocks %7 ; can be 1, 2, 3, 4, 5, 6 or 7 (not 0)
+%define %%T1 %8
+%define %%T2 %9
+%define %%T3 %10 ; [out] hash value
+%define %%T4 %11
+%define %%T5 %12
+%define %%CTR %13
+%define %%XMM1 %14
+%define %%XMM2 %15
+%define %%XMM3 %16
+%define %%XMM4 %17
+%define %%XMM5 %18
+%define %%XMM6 %19
+%define %%XMM7 %20
+%define %%XMM8 %21 ; [in] hash value
+%define %%T6 %22
+%define %%T_key %23
+%define %%ENC_DEC %24
+%define %%INSTANCE_TYPE %25
+
+ ;; Move AAD_HASH to temp reg
+ vmovdqu %%T2, %%XMM8
+
+%assign i (9-%%num_initial_blocks)
+%rep %%num_initial_blocks
+ ;; Compute AES counters
+ vpaddd %%CTR, %%CTR, [rel ONE] ; INCR Y0
+ vmovdqa reg(i), %%CTR
+ vpshufb reg(i), [rel SHUF_MASK] ; perform a 16Byte swap
+%assign i (i+1)
+%endrep
+
+vmovdqu %%T_key, [%%GDATA_KEY+16*0]
+%assign i (9-%%num_initial_blocks)
+%rep %%num_initial_blocks
+ ; Start AES for %%num_initial_blocks blocks
+ vpxor reg(i),reg(i),%%T_key
+%assign i (i+1)
+%endrep
+
+%assign j 1
+%rep NROUNDS
+vmovdqu %%T_key, [%%GDATA_KEY+16*j]
+%assign i (9-%%num_initial_blocks)
+%rep %%num_initial_blocks
+ vaesenc reg(i),%%T_key
+%assign i (i+1)
+%endrep
+
+%assign j (j+1)
+%endrep
+
+
+vmovdqu %%T_key, [%%GDATA_KEY+16*j]
+%assign i (9-%%num_initial_blocks)
+%rep %%num_initial_blocks
+ vaesenclast reg(i),%%T_key
+%assign i (i+1)
+%endrep
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;; Hash all but the last block of data
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+%assign i (9-%%num_initial_blocks)
+%rep %%num_initial_blocks-1
+ ;; Encrypt the message for all but the last block
+ VXLDR %%T1, [%%PLAIN_CYPH_IN + %%DATA_OFFSET]
+ vpxor reg(i), reg(i), %%T1
+ ;; write back ciphertext for %%num_initial_blocks blocks
+ VXSTR [%%CYPH_PLAIN_OUT + %%DATA_OFFSET], reg(i)
+ add %%DATA_OFFSET, 16
+%ifidn %%ENC_DEC, DEC
+ vmovdqa reg(i), %%T1
+%endif
+ ;; Prepare ciphertext for GHASH computations
+ vpshufb reg(i), [rel SHUF_MASK]
+%assign i (i+1)
+%endrep
+
+%if %%num_initial_blocks > 1
+ ;; The final block of data may be <16B
+ sub %%LENGTH, 16*(%%num_initial_blocks-1)
+%endif
+
+%if %%num_initial_blocks < 8
+        ;; NOTE: for num_initial_blocks = 8 the partial block path below is always taken,
+        ;; so this check is compiled out.
+ ;; This is run in the context of GCM_ENC_DEC_SMALL for length < 128.
+ cmp %%LENGTH, 16
+ jl %%_small_initial_partial_block
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;; Handle a full length final block - encrypt and hash all blocks
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+ sub %%LENGTH, 16
+ mov [%%GDATA_CTX + PBlockLen], %%LENGTH
+
+ ;; Encrypt the message
+ VXLDR %%T1, [%%PLAIN_CYPH_IN + %%DATA_OFFSET]
+ vpxor reg(i), reg(i), %%T1
+ ;; write back ciphertext for %%num_initial_blocks blocks
+ VXSTR [%%CYPH_PLAIN_OUT + %%DATA_OFFSET], reg(i)
+ add %%DATA_OFFSET, 16
+%ifidn %%ENC_DEC, DEC
+ vmovdqa reg(i), %%T1
+%endif
+ ;; Prepare ciphertext for GHASH computations
+ vpshufb reg(i), [rel SHUF_MASK]
+
+ ;; Hash all of the data
+%assign i (8-%%num_initial_blocks)
+%assign j (9-%%num_initial_blocks)
+%assign k (%%num_initial_blocks)
+%assign last_block_to_hash 0
+
+%if(%%num_initial_blocks>last_block_to_hash)
+ ;; Hash in AES state
+ vpxor %%T2, reg(j)
+
+ ;; T2 - incoming AAD hash
+ ;; reg(i) holds ciphertext
+ ;; T5 - hash key
+ ;; T6 - updated xor
+ ;; reg(1)/xmm1 should now be available for tmp use
+ vmovdqu %%T5, [%%GDATA_KEY + HashKey_ %+ k]
+ vpclmulqdq %%T1, %%T2, %%T5, 0x11 ; %%T4 = a1*b1
+ vpclmulqdq %%T4, %%T2, %%T5, 0x00 ; %%T4 = a0*b0
+ vpclmulqdq %%T6, %%T2, %%T5, 0x01 ; %%T6 = a1*b0
+ vpclmulqdq %%T5, %%T2, %%T5, 0x10 ; %%T5 = a0*b1
+ vpxor %%T6, %%T6, %%T5
+%endif
+
+%assign i (i+1)
+%assign j (j+1)
+%assign k (k-1)
+%assign rep_count (%%num_initial_blocks-1)
+%rep rep_count
+
+ vmovdqu %%T5, [%%GDATA_KEY + HashKey_ %+ k]
+ vpclmulqdq %%T3, reg(j), %%T5, 0x11
+ vpxor %%T1, %%T1, %%T3
+
+ vpclmulqdq %%T3, reg(j), %%T5, 0x00
+ vpxor %%T4, %%T4, %%T3
+
+ vpclmulqdq %%T3, reg(j), %%T5, 0x01
+ vpxor %%T6, %%T6, %%T3
+
+ vpclmulqdq %%T3, reg(j), %%T5, 0x10
+ vpxor %%T6, %%T6, %%T3
+
+%assign i (i+1)
+%assign j (j+1)
+%assign k (k-1)
+%endrep
+
+ ;; Record that a reduction is needed
+ mov r12, 1
+
+ jmp %%_small_initial_compute_hash
+
+
+%endif ; %if %%num_initial_blocks < 8
+
+%%_small_initial_partial_block:
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;; Handle ghash for a <16B final block
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+        ;; In this case, if it is a single call to encrypt, we can
+        ;; hash all of the data, but if it is an init / update / finalize
+        ;; series of calls we need to leave the last block out of the hash
+        ;; if it is less than a full block of data.
+
+ mov [%%GDATA_CTX + PBlockLen], %%LENGTH
+ vmovdqu [%%GDATA_CTX + PBlockEncKey], reg(i)
+ ;; Handle a partial final block
+ ;; GDATA, KEY, T1, T2
+ ;; r13 - length
+ ;; LT16 - indicates type of read and that the buffer is less than 16 bytes long
+ ;; NOTE: could be replaced with %%LENGTH but at this point
+ ;; %%LENGTH is always less than 16.
+ ;; No PLAIN_CYPH_LEN argument available in this macro.
+ ENCRYPT_FINAL_PARTIAL_BLOCK reg(i), %%T1, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, LT16, %%ENC_DEC, %%DATA_OFFSET
+ vpshufb reg(i), [rel SHUF_MASK]
+
+%ifidn %%INSTANCE_TYPE, multi_call
+%assign i (8-%%num_initial_blocks)
+%assign j (9-%%num_initial_blocks)
+%assign k (%%num_initial_blocks-1)
+%assign last_block_to_hash 1
+%else
+%assign i (8-%%num_initial_blocks)
+%assign j (9-%%num_initial_blocks)
+%assign k (%%num_initial_blocks)
+%assign last_block_to_hash 0
+%endif
+
+%if(%%num_initial_blocks>last_block_to_hash)
+ ;; Record that a reduction is needed
+ mov r12, 1
+ ;; Hash in AES state
+ vpxor %%T2, reg(j)
+
+ ;; T2 - incoming AAD hash
+ ;; reg(i) holds ciphertext
+ ;; T5 - hash key
+ ;; T6 - updated xor
+ ;; reg(1)/xmm1 should now be available for tmp use
+ vmovdqu %%T5, [%%GDATA_KEY + HashKey_ %+ k]
+ vpclmulqdq %%T1, %%T2, %%T5, 0x11 ; %%T4 = a1*b1
+ vpclmulqdq %%T4, %%T2, %%T5, 0x00 ; %%T4 = a0*b0
+ vpclmulqdq %%T6, %%T2, %%T5, 0x01 ; %%T6 = a1*b0
+ vpclmulqdq %%T5, %%T2, %%T5, 0x10 ; %%T5 = a0*b1
+ vpxor %%T6, %%T6, %%T5
+%else
+ ;; Record that a reduction is not needed -
+ ;; In this case no hashes are computed because there
+ ;; is only one initial block and it is < 16B in length.
+ xor r12, r12
+%endif
+
+%assign i (i+1)
+%assign j (j+1)
+%assign k (k-1)
+%ifidn %%INSTANCE_TYPE, multi_call
+%assign rep_count (%%num_initial_blocks-2)
+%%_multi_call_hash:
+%else
+%assign rep_count (%%num_initial_blocks-1)
+%endif
+
+%if rep_count < 0
+ ;; fix for negative rep_count
+%assign rep_count 0
+%endif
+
+%rep rep_count
+
+ vmovdqu %%T5, [%%GDATA_KEY + HashKey_ %+ k]
+ vpclmulqdq %%T3, reg(j), %%T5, 0x11
+ vpxor %%T1, %%T1, %%T3
+
+ vpclmulqdq %%T3, reg(j), %%T5, 0x00
+ vpxor %%T4, %%T4, %%T3
+
+ vpclmulqdq %%T3, reg(j), %%T5, 0x01
+ vpxor %%T6, %%T6, %%T3
+
+ vpclmulqdq %%T3, reg(j), %%T5, 0x10
+ vpxor %%T6, %%T6, %%T3
+
+%assign i (i+1)
+%assign j (j+1)
+%assign k (k-1)
+%endrep
+
+%%_small_initial_compute_hash:
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;; Ghash reduction
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+%if(%%num_initial_blocks=1)
+%ifidn %%INSTANCE_TYPE, multi_call
+ ;; We only need to check if a reduction is needed if
+ ;; initial_blocks == 1 and init/update/final is being used.
+ ;; In this case we may just have a partial block, and that
+ ;; gets hashed in finalize.
+ ;; cmp r12, 0
+ or r12, r12
+ je %%_no_reduction_needed
+%endif
+%endif
+
+        vpsrldq %%T3, %%T6, 8           ; shift-R %%T6 2 DWs (into %%T3)
+        vpslldq %%T6, %%T6, 8           ; shift-L %%T6 2 DWs
+ vpxor %%T1, %%T1, %%T3 ; accumulate the results in %%T1:%%T4
+ vpxor %%T4, %%T6, %%T4
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;; First phase of the reduction
+ vmovdqu %%T3, [rel POLY2]
+
+ vpclmulqdq %%T2, %%T3, %%T4, 0x01
+ ;; shift-L xmm2 2 DWs
+ vpslldq %%T2, %%T2, 8
+ vpxor %%T4, %%T4, %%T2
+
+ ;; First phase of the reduction complete
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;; Second phase of the reduction
+
+ vpclmulqdq %%T2, %%T3, %%T4, 0x00
+ ;; Shift-R xmm2 1 DW (Shift-R only 1-DW to obtain 2-DWs shift-R)
+ vpsrldq %%T2, %%T2, 4
+
+ vpclmulqdq %%T4, %%T3, %%T4, 0x10
+ ;; Shift-L xmm0 1 DW (Shift-L 1-DW to obtain result with no shifts)
+ vpslldq %%T4, %%T4, 4
+
+ vpxor %%T4, %%T4, %%T2
+ ;; Second phase of the reduction complete
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ vpxor %%T3, %%T1, %%T4
+
+%ifidn %%INSTANCE_TYPE, multi_call
+ ;; If using init/update/finalize, we need to xor any partial block data
+ ;; into the hash.
+%if %%num_initial_blocks > 1
+ ;; NOTE: for %%num_initial_blocks = 0 the xor never takes place
+%if %%num_initial_blocks != 8
+ ;; NOTE: for %%num_initial_blocks = 8, %%LENGTH, stored in [PBlockLen] is never zero
+ cmp qword [%%GDATA_CTX + PBlockLen], 0
+ je %%_no_partial_block_xor
+%endif ; %%num_initial_blocks != 8
+ vpxor %%T3, %%T3, reg(8)
+%%_no_partial_block_xor:
+%endif ; %%num_initial_blocks > 1
+%endif ; %%INSTANCE_TYPE, multi_call
+
+%if(%%num_initial_blocks=1)
+%ifidn %%INSTANCE_TYPE, multi_call
+ ;; NOTE: %%_no_reduction_needed case only valid for
+ ;; multi_call with initial_blocks = 1.
+ ;; Look for comment above around '_no_reduction_needed'
+ ;; The jmp below is obsolete as the code will fall through.
+
+ ;; The result is in %%T3
+ jmp %%_after_reduction
+
+%%_no_reduction_needed:
+ ;; The hash should end up in T3. The only way we should get here is if
+ ;; there is a partial block of data, so xor that into the hash.
+ vpxor %%T3, %%T2, reg(8)
+%endif ; %%INSTANCE_TYPE = multi_call
+%endif ; %%num_initial_blocks=1
+
+%%_after_reduction:
+ ;; Final hash is now in T3
+
+%endmacro ; INITIAL_BLOCKS_PARTIAL
+
+
+
+; encrypt 8 blocks at a time
+; ghash the 8 previously encrypted ciphertext blocks
+; %%GDATA (KEY), %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN are used as pointers only, not modified
+; %%DATA_OFFSET is the data offset value
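+; In each iteration the previous 8 ciphertext blocks (XMM1 kept in %%T2, XMM2..XMM8 spilled
+; to the TMP2..TMP8 stack slots) are multiplied by H^8..H^1 and reduced, while 8 fresh
+; counter blocks run through the AES rounds. The reduced GHASH value is XORed into the new
+; XMM1 at the end so it is carried into the next iteration's multiplication by H^8.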
+%macro GHASH_8_ENCRYPT_8_PARALLEL 23
+%define %%GDATA %1
+%define %%CYPH_PLAIN_OUT %2
+%define %%PLAIN_CYPH_IN %3
+%define %%DATA_OFFSET %4
+%define %%T1 %5
+%define %%T2 %6
+%define %%T3 %7
+%define %%T4 %8
+%define %%T5 %9
+%define %%T6 %10
+%define %%CTR %11
+%define %%XMM1 %12
+%define %%XMM2 %13
+%define %%XMM3 %14
+%define %%XMM4 %15
+%define %%XMM5 %16
+%define %%XMM6 %17
+%define %%XMM7 %18
+%define %%XMM8 %19
+%define %%T7 %20
+%define %%loop_idx %21
+%define %%ENC_DEC %22
+%define %%FULL_PARTIAL %23
+
+ vmovdqa %%T2, %%XMM1
+ vmovdqu [rsp + TMP2], %%XMM2
+ vmovdqu [rsp + TMP3], %%XMM3
+ vmovdqu [rsp + TMP4], %%XMM4
+ vmovdqu [rsp + TMP5], %%XMM5
+ vmovdqu [rsp + TMP6], %%XMM6
+ vmovdqu [rsp + TMP7], %%XMM7
+ vmovdqu [rsp + TMP8], %%XMM8
+
+%ifidn %%loop_idx, in_order
+ vpaddd %%XMM1, %%CTR, [rel ONE] ; INCR CNT
+ vmovdqu %%T5, [rel TWO]
+ vpaddd %%XMM2, %%CTR, %%T5
+ vpaddd %%XMM3, %%XMM1, %%T5
+ vpaddd %%XMM4, %%XMM2, %%T5
+ vpaddd %%XMM5, %%XMM3, %%T5
+ vpaddd %%XMM6, %%XMM4, %%T5
+ vpaddd %%XMM7, %%XMM5, %%T5
+ vpaddd %%XMM8, %%XMM6, %%T5
+ vmovdqa %%CTR, %%XMM8
+
+ vmovdqu %%T5, [rel SHUF_MASK]
+ vpshufb %%XMM1, %%T5 ; perform a 16Byte swap
+ vpshufb %%XMM2, %%T5 ; perform a 16Byte swap
+ vpshufb %%XMM3, %%T5 ; perform a 16Byte swap
+ vpshufb %%XMM4, %%T5 ; perform a 16Byte swap
+ vpshufb %%XMM5, %%T5 ; perform a 16Byte swap
+ vpshufb %%XMM6, %%T5 ; perform a 16Byte swap
+ vpshufb %%XMM7, %%T5 ; perform a 16Byte swap
+ vpshufb %%XMM8, %%T5 ; perform a 16Byte swap
+%else
+ vpaddd %%XMM1, %%CTR, [rel ONEf] ; INCR CNT
+ vmovdqu %%T5, [rel TWOf]
+ vpaddd %%XMM2, %%CTR, %%T5
+ vpaddd %%XMM3, %%XMM1, %%T5
+ vpaddd %%XMM4, %%XMM2, %%T5
+ vpaddd %%XMM5, %%XMM3, %%T5
+ vpaddd %%XMM6, %%XMM4, %%T5
+ vpaddd %%XMM7, %%XMM5, %%T5
+ vpaddd %%XMM8, %%XMM6, %%T5
+ vmovdqa %%CTR, %%XMM8
+%endif
+
+
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+ vmovdqu %%T1, [%%GDATA + 16*0]
+ vpxor %%XMM1, %%XMM1, %%T1
+ vpxor %%XMM2, %%XMM2, %%T1
+ vpxor %%XMM3, %%XMM3, %%T1
+ vpxor %%XMM4, %%XMM4, %%T1
+ vpxor %%XMM5, %%XMM5, %%T1
+ vpxor %%XMM6, %%XMM6, %%T1
+ vpxor %%XMM7, %%XMM7, %%T1
+ vpxor %%XMM8, %%XMM8, %%T1
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+ vmovdqu %%T1, [%%GDATA + 16*1]
+ vaesenc %%XMM1, %%T1
+ vaesenc %%XMM2, %%T1
+ vaesenc %%XMM3, %%T1
+ vaesenc %%XMM4, %%T1
+ vaesenc %%XMM5, %%T1
+ vaesenc %%XMM6, %%T1
+ vaesenc %%XMM7, %%T1
+ vaesenc %%XMM8, %%T1
+
+
+ vmovdqu %%T1, [%%GDATA + 16*2]
+ vaesenc %%XMM1, %%T1
+ vaesenc %%XMM2, %%T1
+ vaesenc %%XMM3, %%T1
+ vaesenc %%XMM4, %%T1
+ vaesenc %%XMM5, %%T1
+ vaesenc %%XMM6, %%T1
+ vaesenc %%XMM7, %%T1
+ vaesenc %%XMM8, %%T1
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+ vmovdqu %%T5, [%%GDATA + HashKey_8]
+ vpclmulqdq %%T4, %%T2, %%T5, 0x11 ; %%T4 = a1*b1
+ vpclmulqdq %%T7, %%T2, %%T5, 0x00 ; %%T7 = a0*b0
+ vpclmulqdq %%T6, %%T2, %%T5, 0x01 ; %%T6 = a1*b0
+ vpclmulqdq %%T5, %%T2, %%T5, 0x10 ; %%T5 = a0*b1
+ vpxor %%T6, %%T6, %%T5
+
+ vmovdqu %%T1, [%%GDATA + 16*3]
+ vaesenc %%XMM1, %%T1
+ vaesenc %%XMM2, %%T1
+ vaesenc %%XMM3, %%T1
+ vaesenc %%XMM4, %%T1
+ vaesenc %%XMM5, %%T1
+ vaesenc %%XMM6, %%T1
+ vaesenc %%XMM7, %%T1
+ vaesenc %%XMM8, %%T1
+
+ vmovdqu %%T1, [rsp + TMP2]
+ vmovdqu %%T5, [%%GDATA + HashKey_7]
+ vpclmulqdq %%T3, %%T1, %%T5, 0x11
+ vpxor %%T4, %%T4, %%T3
+
+ vpclmulqdq %%T3, %%T1, %%T5, 0x00
+ vpxor %%T7, %%T7, %%T3
+
+ vpclmulqdq %%T3, %%T1, %%T5, 0x01
+ vpxor %%T6, %%T6, %%T3
+
+ vpclmulqdq %%T3, %%T1, %%T5, 0x10
+ vpxor %%T6, %%T6, %%T3
+
+ vmovdqu %%T1, [%%GDATA + 16*4]
+ vaesenc %%XMM1, %%T1
+ vaesenc %%XMM2, %%T1
+ vaesenc %%XMM3, %%T1
+ vaesenc %%XMM4, %%T1
+ vaesenc %%XMM5, %%T1
+ vaesenc %%XMM6, %%T1
+ vaesenc %%XMM7, %%T1
+ vaesenc %%XMM8, %%T1
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ vmovdqu %%T1, [rsp + TMP3]
+ vmovdqu %%T5, [%%GDATA + HashKey_6]
+ vpclmulqdq %%T3, %%T1, %%T5, 0x11
+ vpxor %%T4, %%T4, %%T3
+
+ vpclmulqdq %%T3, %%T1, %%T5, 0x00
+ vpxor %%T7, %%T7, %%T3
+
+ vpclmulqdq %%T3, %%T1, %%T5, 0x01
+ vpxor %%T6, %%T6, %%T3
+
+ vpclmulqdq %%T3, %%T1, %%T5, 0x10
+ vpxor %%T6, %%T6, %%T3
+
+ vmovdqu %%T1, [%%GDATA + 16*5]
+ vaesenc %%XMM1, %%T1
+ vaesenc %%XMM2, %%T1
+ vaesenc %%XMM3, %%T1
+ vaesenc %%XMM4, %%T1
+ vaesenc %%XMM5, %%T1
+ vaesenc %%XMM6, %%T1
+ vaesenc %%XMM7, %%T1
+ vaesenc %%XMM8, %%T1
+
+
+ vmovdqu %%T1, [rsp + TMP4]
+ vmovdqu %%T5, [%%GDATA + HashKey_5]
+ vpclmulqdq %%T3, %%T1, %%T5, 0x11
+ vpxor %%T4, %%T4, %%T3
+
+ vpclmulqdq %%T3, %%T1, %%T5, 0x00
+ vpxor %%T7, %%T7, %%T3
+
+ vpclmulqdq %%T3, %%T1, %%T5, 0x01
+ vpxor %%T6, %%T6, %%T3
+
+ vpclmulqdq %%T3, %%T1, %%T5, 0x10
+ vpxor %%T6, %%T6, %%T3
+
+ vmovdqu %%T1, [%%GDATA + 16*6]
+ vaesenc %%XMM1, %%T1
+ vaesenc %%XMM2, %%T1
+ vaesenc %%XMM3, %%T1
+ vaesenc %%XMM4, %%T1
+ vaesenc %%XMM5, %%T1
+ vaesenc %%XMM6, %%T1
+ vaesenc %%XMM7, %%T1
+ vaesenc %%XMM8, %%T1
+
+ vmovdqu %%T1, [rsp + TMP5]
+ vmovdqu %%T5, [%%GDATA + HashKey_4]
+ vpclmulqdq %%T3, %%T1, %%T5, 0x11
+ vpxor %%T4, %%T4, %%T3
+
+ vpclmulqdq %%T3, %%T1, %%T5, 0x00
+ vpxor %%T7, %%T7, %%T3
+
+ vpclmulqdq %%T3, %%T1, %%T5, 0x01
+ vpxor %%T6, %%T6, %%T3
+
+ vpclmulqdq %%T3, %%T1, %%T5, 0x10
+ vpxor %%T6, %%T6, %%T3
+
+ vmovdqu %%T1, [%%GDATA + 16*7]
+ vaesenc %%XMM1, %%T1
+ vaesenc %%XMM2, %%T1
+ vaesenc %%XMM3, %%T1
+ vaesenc %%XMM4, %%T1
+ vaesenc %%XMM5, %%T1
+ vaesenc %%XMM6, %%T1
+ vaesenc %%XMM7, %%T1
+ vaesenc %%XMM8, %%T1
+
+ vmovdqu %%T1, [rsp + TMP6]
+ vmovdqu %%T5, [%%GDATA + HashKey_3]
+ vpclmulqdq %%T3, %%T1, %%T5, 0x11
+ vpxor %%T4, %%T4, %%T3
+
+ vpclmulqdq %%T3, %%T1, %%T5, 0x00
+ vpxor %%T7, %%T7, %%T3
+
+ vpclmulqdq %%T3, %%T1, %%T5, 0x01
+ vpxor %%T6, %%T6, %%T3
+
+ vpclmulqdq %%T3, %%T1, %%T5, 0x10
+ vpxor %%T6, %%T6, %%T3
+
+ vmovdqu %%T1, [%%GDATA + 16*8]
+ vaesenc %%XMM1, %%T1
+ vaesenc %%XMM2, %%T1
+ vaesenc %%XMM3, %%T1
+ vaesenc %%XMM4, %%T1
+ vaesenc %%XMM5, %%T1
+ vaesenc %%XMM6, %%T1
+ vaesenc %%XMM7, %%T1
+ vaesenc %%XMM8, %%T1
+
+ vmovdqu %%T1, [rsp + TMP7]
+ vmovdqu %%T5, [%%GDATA + HashKey_2]
+ vpclmulqdq %%T3, %%T1, %%T5, 0x11
+ vpxor %%T4, %%T4, %%T3
+
+ vpclmulqdq %%T3, %%T1, %%T5, 0x00
+ vpxor %%T7, %%T7, %%T3
+
+ vpclmulqdq %%T3, %%T1, %%T5, 0x01
+ vpxor %%T6, %%T6, %%T3
+
+ vpclmulqdq %%T3, %%T1, %%T5, 0x10
+ vpxor %%T6, %%T6, %%T3
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+ vmovdqu %%T5, [%%GDATA + 16*9]
+ vaesenc %%XMM1, %%T5
+ vaesenc %%XMM2, %%T5
+ vaesenc %%XMM3, %%T5
+ vaesenc %%XMM4, %%T5
+ vaesenc %%XMM5, %%T5
+ vaesenc %%XMM6, %%T5
+ vaesenc %%XMM7, %%T5
+ vaesenc %%XMM8, %%T5
+
+ vmovdqu %%T1, [rsp + TMP8]
+ vmovdqu %%T5, [%%GDATA + HashKey]
+
+
+ vpclmulqdq %%T3, %%T1, %%T5, 0x00
+ vpxor %%T7, %%T7, %%T3
+
+ vpclmulqdq %%T3, %%T1, %%T5, 0x01
+ vpxor %%T6, %%T6, %%T3
+
+ vpclmulqdq %%T3, %%T1, %%T5, 0x10
+ vpxor %%T6, %%T6, %%T3
+
+ vpclmulqdq %%T3, %%T1, %%T5, 0x11
+ vpxor %%T1, %%T4, %%T3
+
+
+ vmovdqu %%T5, [%%GDATA + 16*10]
+ %ifndef GCM128_MODE ; GCM192 or GCM256
+ vaesenc %%XMM1, %%T5
+ vaesenc %%XMM2, %%T5
+ vaesenc %%XMM3, %%T5
+ vaesenc %%XMM4, %%T5
+ vaesenc %%XMM5, %%T5
+ vaesenc %%XMM6, %%T5
+ vaesenc %%XMM7, %%T5
+ vaesenc %%XMM8, %%T5
+
+ vmovdqu %%T5, [%%GDATA + 16*11]
+ vaesenc %%XMM1, %%T5
+ vaesenc %%XMM2, %%T5
+ vaesenc %%XMM3, %%T5
+ vaesenc %%XMM4, %%T5
+ vaesenc %%XMM5, %%T5
+ vaesenc %%XMM6, %%T5
+ vaesenc %%XMM7, %%T5
+ vaesenc %%XMM8, %%T5
+
+ vmovdqu %%T5, [%%GDATA + 16*12]
+%endif
+%ifdef GCM256_MODE
+ vaesenc %%XMM1, %%T5
+ vaesenc %%XMM2, %%T5
+ vaesenc %%XMM3, %%T5
+ vaesenc %%XMM4, %%T5
+ vaesenc %%XMM5, %%T5
+ vaesenc %%XMM6, %%T5
+ vaesenc %%XMM7, %%T5
+ vaesenc %%XMM8, %%T5
+
+ vmovdqu %%T5, [%%GDATA + 16*13]
+ vaesenc %%XMM1, %%T5
+ vaesenc %%XMM2, %%T5
+ vaesenc %%XMM3, %%T5
+ vaesenc %%XMM4, %%T5
+ vaesenc %%XMM5, %%T5
+ vaesenc %%XMM6, %%T5
+ vaesenc %%XMM7, %%T5
+ vaesenc %%XMM8, %%T5
+
+ vmovdqu %%T5, [%%GDATA + 16*14]
+%endif ; GCM256
+
+%assign i 0
+%assign j 1
+%rep 8
+
+ ;; SNP TBD: This is pretty ugly - consider whether just XORing the
+ ;; data in after vaesenclast is simpler and performant. Would
+ ;; also have to ripple it through partial block and ghash_mul_8.
+%ifidn %%FULL_PARTIAL, full
+ %ifdef NT_LD
+ VXLDR %%T2, [%%PLAIN_CYPH_IN+%%DATA_OFFSET+16*i]
+ vpxor %%T2, %%T2, %%T5
+ %else
+ vpxor %%T2, %%T5, [%%PLAIN_CYPH_IN+%%DATA_OFFSET+16*i]
+ %endif
+
+ %ifidn %%ENC_DEC, ENC
+ vaesenclast reg(j), reg(j), %%T2
+ %else
+ vaesenclast %%T3, reg(j), %%T2
+ vpxor reg(j), %%T2, %%T5
+ VXSTR [%%CYPH_PLAIN_OUT+%%DATA_OFFSET+16*i], %%T3
+ %endif
+
+%else
+ ; Don't read the final data during partial block processing
+ %ifdef NT_LD
+ %if (i<7)
+ VXLDR %%T2, [%%PLAIN_CYPH_IN+%%DATA_OFFSET+16*i]
+ vpxor %%T2, %%T2, %%T5
+ %else
+                        ;; Stage the key directly in T2 rather than XOR it with the plaintext
+ vmovdqu %%T2, %%T5
+ %endif
+ %else
+ %if (i<7)
+ vpxor %%T2, %%T5, [%%PLAIN_CYPH_IN+%%DATA_OFFSET+16*i]
+ %else
+                        ;; Stage the key directly in T2 rather than XOR it with the plaintext
+ vmovdqu %%T2, %%T5
+ %endif
+ %endif
+
+ %ifidn %%ENC_DEC, ENC
+ vaesenclast reg(j), reg(j), %%T2
+ %else
+ %if (i<7)
+ vaesenclast %%T3, reg(j), %%T2
+ vpxor reg(j), %%T2, %%T5
+ ;; Do not read the data since it could fault
+ VXSTR [%%CYPH_PLAIN_OUT+%%DATA_OFFSET+16*i], %%T3
+ %else
+ vaesenclast reg(j), reg(j), %%T2
+ %endif
+ %endif
+%endif
+
+%assign i (i+1)
+%assign j (j+1)
+%endrep
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+
+        vpslldq %%T3, %%T6, 8           ; shift-L %%T6 2 DWs (into %%T3)
+        vpsrldq %%T6, %%T6, 8           ; shift-R %%T6 2 DWs
+ vpxor %%T7, %%T7, %%T3
+ vpxor %%T1, %%T1, %%T6 ; accumulate the results in %%T1:%%T7
+
+
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;first phase of the reduction
+ vmovdqu %%T3, [rel POLY2]
+
+ vpclmulqdq %%T2, %%T3, %%T7, 0x01
+ vpslldq %%T2, %%T2, 8 ; shift-L xmm2 2 DWs
+
+ vpxor %%T7, %%T7, %%T2 ; first phase of the reduction complete
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+ %ifidn %%ENC_DEC, ENC
+ ; Write to the Ciphertext buffer
+ VXSTR [%%CYPH_PLAIN_OUT+%%DATA_OFFSET+16*0], %%XMM1
+ VXSTR [%%CYPH_PLAIN_OUT+%%DATA_OFFSET+16*1], %%XMM2
+ VXSTR [%%CYPH_PLAIN_OUT+%%DATA_OFFSET+16*2], %%XMM3
+ VXSTR [%%CYPH_PLAIN_OUT+%%DATA_OFFSET+16*3], %%XMM4
+ VXSTR [%%CYPH_PLAIN_OUT+%%DATA_OFFSET+16*4], %%XMM5
+ VXSTR [%%CYPH_PLAIN_OUT+%%DATA_OFFSET+16*5], %%XMM6
+ VXSTR [%%CYPH_PLAIN_OUT+%%DATA_OFFSET+16*6], %%XMM7
+ %ifidn %%FULL_PARTIAL, full
+ ;; Avoid writing past the buffer if handling a partial block
+ VXSTR [%%CYPH_PLAIN_OUT+%%DATA_OFFSET+16*7], %%XMM8
+ %endif
+ %endif
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;second phase of the reduction
+ vpclmulqdq %%T2, %%T3, %%T7, 0x00
+ vpsrldq %%T2, %%T2, 4 ; shift-R xmm2 1 DW (Shift-R only 1-DW to obtain 2-DWs shift-R)
+
+ vpclmulqdq %%T4, %%T3, %%T7, 0x10
+ vpslldq %%T4, %%T4, 4 ; shift-L xmm0 1 DW (Shift-L 1-DW to obtain result with no shifts)
+
+ vpxor %%T4, %%T4, %%T2 ; second phase of the reduction complete
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ vpxor %%T1, %%T1, %%T4 ; the result is in %%T1
+
+ vpshufb %%XMM1, [rel SHUF_MASK] ; perform a 16Byte swap
+ vpshufb %%XMM2, [rel SHUF_MASK] ; perform a 16Byte swap
+ vpshufb %%XMM3, [rel SHUF_MASK] ; perform a 16Byte swap
+ vpshufb %%XMM4, [rel SHUF_MASK] ; perform a 16Byte swap
+ vpshufb %%XMM5, [rel SHUF_MASK] ; perform a 16Byte swap
+ vpshufb %%XMM6, [rel SHUF_MASK] ; perform a 16Byte swap
+ vpshufb %%XMM7, [rel SHUF_MASK] ; perform a 16Byte swap
+ vpshufb %%XMM8, [rel SHUF_MASK] ; perform a 16Byte swap
+
+
+ vpxor %%XMM1, %%T1
+
+
+%endmacro ; GHASH_8_ENCRYPT_8_PARALLEL
+
+
+; GHASH the last 8 ciphertext blocks.
+%macro GHASH_LAST_8 16
+%define %%GDATA %1
+%define %%T1 %2
+%define %%T2 %3
+%define %%T3 %4
+%define %%T4 %5
+%define %%T5 %6
+%define %%T6 %7
+%define %%T7 %8
+%define %%XMM1 %9
+%define %%XMM2 %10
+%define %%XMM3 %11
+%define %%XMM4 %12
+%define %%XMM5 %13
+%define %%XMM6 %14
+%define %%XMM7 %15
+%define %%XMM8 %16
+
+ ;; Karatsuba Method
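+        ;; Per block only three vpclmulqdq are needed: hi = a1*b1, lo = a0*b0 and
+        ;; mid' = (a1 ^ a0)*(b1 ^ b0). Over GF(2) the true middle term is
+        ;; mid' ^ hi ^ lo, so the mid' products are accumulated in %%XMM1 and the
+        ;; hi/lo corrections are applied once, after all blocks, before the reduction.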
+
+ vmovdqu %%T5, [%%GDATA + HashKey_8]
+
+ vpshufd %%T2, %%XMM1, 01001110b
+ vpshufd %%T3, %%T5, 01001110b
+ vpxor %%T2, %%T2, %%XMM1
+ vpxor %%T3, %%T3, %%T5
+
+ vpclmulqdq %%T6, %%XMM1, %%T5, 0x11
+ vpclmulqdq %%T7, %%XMM1, %%T5, 0x00
+
+ vpclmulqdq %%XMM1, %%T2, %%T3, 0x00
+
+ ;;;;;;;;;;;;;;;;;;;;;;
+
+ vmovdqu %%T5, [%%GDATA + HashKey_7]
+ vpshufd %%T2, %%XMM2, 01001110b
+ vpshufd %%T3, %%T5, 01001110b
+ vpxor %%T2, %%T2, %%XMM2
+ vpxor %%T3, %%T3, %%T5
+
+ vpclmulqdq %%T4, %%XMM2, %%T5, 0x11
+ vpxor %%T6, %%T6, %%T4
+
+ vpclmulqdq %%T4, %%XMM2, %%T5, 0x00
+ vpxor %%T7, %%T7, %%T4
+
+ vpclmulqdq %%T2, %%T2, %%T3, 0x00
+
+ vpxor %%XMM1, %%XMM1, %%T2
+
+ ;;;;;;;;;;;;;;;;;;;;;;
+
+ vmovdqu %%T5, [%%GDATA + HashKey_6]
+ vpshufd %%T2, %%XMM3, 01001110b
+ vpshufd %%T3, %%T5, 01001110b
+ vpxor %%T2, %%T2, %%XMM3
+ vpxor %%T3, %%T3, %%T5
+
+ vpclmulqdq %%T4, %%XMM3, %%T5, 0x11
+ vpxor %%T6, %%T6, %%T4
+
+ vpclmulqdq %%T4, %%XMM3, %%T5, 0x00
+ vpxor %%T7, %%T7, %%T4
+
+ vpclmulqdq %%T2, %%T2, %%T3, 0x00
+
+ vpxor %%XMM1, %%XMM1, %%T2
+
+ ;;;;;;;;;;;;;;;;;;;;;;
+
+ vmovdqu %%T5, [%%GDATA + HashKey_5]
+ vpshufd %%T2, %%XMM4, 01001110b
+ vpshufd %%T3, %%T5, 01001110b
+ vpxor %%T2, %%T2, %%XMM4
+ vpxor %%T3, %%T3, %%T5
+
+ vpclmulqdq %%T4, %%XMM4, %%T5, 0x11
+ vpxor %%T6, %%T6, %%T4
+
+ vpclmulqdq %%T4, %%XMM4, %%T5, 0x00
+ vpxor %%T7, %%T7, %%T4
+
+ vpclmulqdq %%T2, %%T2, %%T3, 0x00
+
+ vpxor %%XMM1, %%XMM1, %%T2
+
+ ;;;;;;;;;;;;;;;;;;;;;;
+
+ vmovdqu %%T5, [%%GDATA + HashKey_4]
+ vpshufd %%T2, %%XMM5, 01001110b
+ vpshufd %%T3, %%T5, 01001110b
+ vpxor %%T2, %%T2, %%XMM5
+ vpxor %%T3, %%T3, %%T5
+
+ vpclmulqdq %%T4, %%XMM5, %%T5, 0x11
+ vpxor %%T6, %%T6, %%T4
+
+ vpclmulqdq %%T4, %%XMM5, %%T5, 0x00
+ vpxor %%T7, %%T7, %%T4
+
+ vpclmulqdq %%T2, %%T2, %%T3, 0x00
+
+ vpxor %%XMM1, %%XMM1, %%T2
+
+ ;;;;;;;;;;;;;;;;;;;;;;
+
+ vmovdqu %%T5, [%%GDATA + HashKey_3]
+ vpshufd %%T2, %%XMM6, 01001110b
+ vpshufd %%T3, %%T5, 01001110b
+ vpxor %%T2, %%T2, %%XMM6
+ vpxor %%T3, %%T3, %%T5
+
+ vpclmulqdq %%T4, %%XMM6, %%T5, 0x11
+ vpxor %%T6, %%T6, %%T4
+
+ vpclmulqdq %%T4, %%XMM6, %%T5, 0x00
+ vpxor %%T7, %%T7, %%T4
+
+ vpclmulqdq %%T2, %%T2, %%T3, 0x00
+
+ vpxor %%XMM1, %%XMM1, %%T2
+
+ ;;;;;;;;;;;;;;;;;;;;;;
+
+ vmovdqu %%T5, [%%GDATA + HashKey_2]
+ vpshufd %%T2, %%XMM7, 01001110b
+ vpshufd %%T3, %%T5, 01001110b
+ vpxor %%T2, %%T2, %%XMM7
+ vpxor %%T3, %%T3, %%T5
+
+ vpclmulqdq %%T4, %%XMM7, %%T5, 0x11
+ vpxor %%T6, %%T6, %%T4
+
+ vpclmulqdq %%T4, %%XMM7, %%T5, 0x00
+ vpxor %%T7, %%T7, %%T4
+
+ vpclmulqdq %%T2, %%T2, %%T3, 0x00
+
+ vpxor %%XMM1, %%XMM1, %%T2
+
+ ;;;;;;;;;;;;;;;;;;;;;;
+
+ vmovdqu %%T5, [%%GDATA + HashKey]
+ vpshufd %%T2, %%XMM8, 01001110b
+ vpshufd %%T3, %%T5, 01001110b
+ vpxor %%T2, %%T2, %%XMM8
+ vpxor %%T3, %%T3, %%T5
+
+ vpclmulqdq %%T4, %%XMM8, %%T5, 0x11
+ vpxor %%T6, %%T6, %%T4
+
+ vpclmulqdq %%T4, %%XMM8, %%T5, 0x00
+ vpxor %%T7, %%T7, %%T4
+
+ vpclmulqdq %%T2, %%T2, %%T3, 0x00
+
+ vpxor %%XMM1, %%XMM1, %%T2
+ vpxor %%XMM1, %%XMM1, %%T6
+ vpxor %%T2, %%XMM1, %%T7
+
+
+
+
+ vpslldq %%T4, %%T2, 8
+ vpsrldq %%T2, %%T2, 8
+
+ vpxor %%T7, %%T7, %%T4
+ vpxor %%T6, %%T6, %%T2 ; <%%T6:%%T7> holds the result of the accumulated carry-less multiplications
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;first phase of the reduction
+ vmovdqu %%T3, [rel POLY2]
+
+ vpclmulqdq %%T2, %%T3, %%T7, 0x01
+ vpslldq %%T2, %%T2, 8 ; shift-L xmm2 2 DWs
+
+ vpxor %%T7, %%T7, %%T2 ; first phase of the reduction complete
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+
+ ;second phase of the reduction
+ vpclmulqdq %%T2, %%T3, %%T7, 0x00
+ vpsrldq %%T2, %%T2, 4 ; shift-R %%T2 1 DW (Shift-R only 1-DW to obtain 2-DWs shift-R)
+
+ vpclmulqdq %%T4, %%T3, %%T7, 0x10
+ vpslldq %%T4, %%T4, 4 ; shift-L %%T4 1 DW (Shift-L 1-DW to obtain result with no shifts)
+
+ vpxor %%T4, %%T4, %%T2 ; second phase of the reduction complete
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ vpxor %%T6, %%T6, %%T4 ; the result is in %%T6
+%endmacro
+
+
+; GHASH the last 7 ciphertext blocks.
+%macro GHASH_LAST_7 15
+%define %%GDATA %1
+%define %%T1 %2
+%define %%T2 %3
+%define %%T3 %4
+%define %%T4 %5
+%define %%T5 %6
+%define %%T6 %7
+%define %%T7 %8
+%define %%XMM1 %9
+%define %%XMM2 %10
+%define %%XMM3 %11
+%define %%XMM4 %12
+%define %%XMM5 %13
+%define %%XMM6 %14
+%define %%XMM7 %15
+
+ ;; Karatsuba Method
+
+ vmovdqu %%T5, [%%GDATA + HashKey_7]
+
+ vpshufd %%T2, %%XMM1, 01001110b
+ vpshufd %%T3, %%T5, 01001110b
+ vpxor %%T2, %%T2, %%XMM1
+ vpxor %%T3, %%T3, %%T5
+
+ vpclmulqdq %%T6, %%XMM1, %%T5, 0x11
+ vpclmulqdq %%T7, %%XMM1, %%T5, 0x00
+
+ vpclmulqdq %%XMM1, %%T2, %%T3, 0x00
+
+ ;;;;;;;;;;;;;;;;;;;;;;
+
+ vmovdqu %%T5, [%%GDATA + HashKey_6]
+ vpshufd %%T2, %%XMM2, 01001110b
+ vpshufd %%T3, %%T5, 01001110b
+ vpxor %%T2, %%T2, %%XMM2
+ vpxor %%T3, %%T3, %%T5
+
+ vpclmulqdq %%T4, %%XMM2, %%T5, 0x11
+ vpxor %%T6, %%T6, %%T4
+
+ vpclmulqdq %%T4, %%XMM2, %%T5, 0x00
+ vpxor %%T7, %%T7, %%T4
+
+ vpclmulqdq %%T2, %%T2, %%T3, 0x00
+
+ vpxor %%XMM1, %%XMM1, %%T2
+
+ ;;;;;;;;;;;;;;;;;;;;;;
+
+ vmovdqu %%T5, [%%GDATA + HashKey_5]
+ vpshufd %%T2, %%XMM3, 01001110b
+ vpshufd %%T3, %%T5, 01001110b
+ vpxor %%T2, %%T2, %%XMM3
+ vpxor %%T3, %%T3, %%T5
+
+ vpclmulqdq %%T4, %%XMM3, %%T5, 0x11
+ vpxor %%T6, %%T6, %%T4
+
+ vpclmulqdq %%T4, %%XMM3, %%T5, 0x00
+ vpxor %%T7, %%T7, %%T4
+
+ vpclmulqdq %%T2, %%T2, %%T3, 0x00
+
+ vpxor %%XMM1, %%XMM1, %%T2
+
+ ;;;;;;;;;;;;;;;;;;;;;;
+
+ vmovdqu %%T5, [%%GDATA + HashKey_4]
+ vpshufd %%T2, %%XMM4, 01001110b
+ vpshufd %%T3, %%T5, 01001110b
+ vpxor %%T2, %%T2, %%XMM4
+ vpxor %%T3, %%T3, %%T5
+
+ vpclmulqdq %%T4, %%XMM4, %%T5, 0x11
+ vpxor %%T6, %%T6, %%T4
+
+ vpclmulqdq %%T4, %%XMM4, %%T5, 0x00
+ vpxor %%T7, %%T7, %%T4
+
+ vpclmulqdq %%T2, %%T2, %%T3, 0x00
+
+ vpxor %%XMM1, %%XMM1, %%T2
+
+ ;;;;;;;;;;;;;;;;;;;;;;
+
+ vmovdqu %%T5, [%%GDATA + HashKey_3]
+ vpshufd %%T2, %%XMM5, 01001110b
+ vpshufd %%T3, %%T5, 01001110b
+ vpxor %%T2, %%T2, %%XMM5
+ vpxor %%T3, %%T3, %%T5
+
+ vpclmulqdq %%T4, %%XMM5, %%T5, 0x11
+ vpxor %%T6, %%T6, %%T4
+
+ vpclmulqdq %%T4, %%XMM5, %%T5, 0x00
+ vpxor %%T7, %%T7, %%T4
+
+ vpclmulqdq %%T2, %%T2, %%T3, 0x00
+
+ vpxor %%XMM1, %%XMM1, %%T2
+
+ ;;;;;;;;;;;;;;;;;;;;;;
+
+ vmovdqu %%T5, [%%GDATA + HashKey_2]
+ vpshufd %%T2, %%XMM6, 01001110b
+ vpshufd %%T3, %%T5, 01001110b
+ vpxor %%T2, %%T2, %%XMM6
+ vpxor %%T3, %%T3, %%T5
+
+ vpclmulqdq %%T4, %%XMM6, %%T5, 0x11
+ vpxor %%T6, %%T6, %%T4
+
+ vpclmulqdq %%T4, %%XMM6, %%T5, 0x00
+ vpxor %%T7, %%T7, %%T4
+
+ vpclmulqdq %%T2, %%T2, %%T3, 0x00
+
+ vpxor %%XMM1, %%XMM1, %%T2
+
+ ;;;;;;;;;;;;;;;;;;;;;;
+
+ vmovdqu %%T5, [%%GDATA + HashKey_1]
+ vpshufd %%T2, %%XMM7, 01001110b
+ vpshufd %%T3, %%T5, 01001110b
+ vpxor %%T2, %%T2, %%XMM7
+ vpxor %%T3, %%T3, %%T5
+
+ vpclmulqdq %%T4, %%XMM7, %%T5, 0x11
+ vpxor %%T6, %%T6, %%T4
+
+ vpclmulqdq %%T4, %%XMM7, %%T5, 0x00
+ vpxor %%T7, %%T7, %%T4
+
+ vpclmulqdq %%T2, %%T2, %%T3, 0x00
+
+ vpxor %%XMM1, %%XMM1, %%T2
+
+ ;;;;;;;;;;;;;;;;;;;;;;
+
+ vpxor %%XMM1, %%XMM1, %%T6
+ vpxor %%T2, %%XMM1, %%T7
+
+
+
+
+ vpslldq %%T4, %%T2, 8
+ vpsrldq %%T2, %%T2, 8
+
+ vpxor %%T7, %%T7, %%T4
+ vpxor %%T6, %%T6, %%T2 ; <%%T6:%%T7> holds the result of the accumulated carry-less multiplications
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;first phase of the reduction
+ vmovdqu %%T3, [rel POLY2]
+
+ vpclmulqdq %%T2, %%T3, %%T7, 0x01
+ vpslldq %%T2, %%T2, 8 ; shift-L xmm2 2 DWs
+
+ vpxor %%T7, %%T7, %%T2 ; first phase of the reduction complete
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+
+ ;second phase of the reduction
+ vpclmulqdq %%T2, %%T3, %%T7, 0x00
+ vpsrldq %%T2, %%T2, 4 ; shift-R %%T2 1 DW (Shift-R only 1-DW to obtain 2-DWs shift-R)
+
+ vpclmulqdq %%T4, %%T3, %%T7, 0x10
+ vpslldq %%T4, %%T4, 4 ; shift-L %%T4 1 DW (Shift-L 1-DW to obtain result with no shifts)
+
+ vpxor %%T4, %%T4, %%T2 ; second phase of the reduction complete
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ vpxor %%T6, %%T6, %%T4 ; the result is in %%T6
+%endmacro
+
+
+
+;;; Handle encryption of the final partial block
+;;; IN:
+;;; r13 - Number of bytes to read
+;;; MODIFIES:
+;;; KEY - Key for encrypting the partial block
+;;; SMASHES:
+;;; rax, T1
+;;; Note:
+;;; PLAIN_CYPH_LEN is unused at this stage; previously it was
+;;; used to determine whether the buffer was big enough to do
+;;; a 16 byte read & shift.
+;;; 'LT16' is passed here only if buffer is known to be smaller
+;;; than 16 bytes.
+;;; Any other value passed here will result in 16 byte read
+;;; code path.
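+;;; READ_SMALL_DATA_INPUT leaves the r13 valid bytes in T1 and the matching byte mask in k1,
+;;; so the masked vmovdqu8 store below writes exactly r13 bytes of output. For encryption
+;;; the result kept in KEY is zeroed above r13 so GHASH later sees the ciphertext padded
+;;; with zeros; for decryption the original ciphertext is restored into KEY instead.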
+%macro ENCRYPT_FINAL_PARTIAL_BLOCK 7
+%define %%KEY %1
+%define %%T1 %2
+%define %%CYPH_PLAIN_OUT %3
+%define %%PLAIN_CYPH_IN %4
+%define %%PLAIN_CYPH_LEN %5
+%define %%ENC_DEC %6
+%define %%DATA_OFFSET %7
+
+ ;; %%PLAIN_CYPH_IN + %%DATA_OFFSET
+ ;; - input data address
+ ;; r13 - input data length
+ ;; rax - temp registers
+ ;; out:
+ ;; T1 - packed output
+ ;; k1 - valid byte mask
+ READ_SMALL_DATA_INPUT %%T1, %%PLAIN_CYPH_IN+%%DATA_OFFSET, r13, rax
+
+ ;; At this point T1 contains the partial block data
+ ;; Plaintext XOR E(K, Yn)
+ vpxorq %%KEY, %%KEY, %%T1
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;; Output r13 Bytes
+ vmovdqu8 [%%CYPH_PLAIN_OUT + %%DATA_OFFSET]{k1}, %%KEY
+
+%ifidn %%ENC_DEC, DEC
+ ;; If decrypt, restore the ciphertext into %%KEY
+ vmovdqa64 %%KEY, %%T1
+%else
+ vmovdqu8 %%KEY{k1}{z}, %%KEY
+%endif
+%endmacro ; ENCRYPT_FINAL_PARTIAL_BLOCK
+
+
+
+; Encryption of a single block
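+; NROUNDS is expected to be 9, 11 or 13 for AES-128/192/256 respectively: the block is
+; whitened with round key 0, runs through NROUNDS vaesenc rounds and the last round key
+; is applied with vaesenclast.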
+%macro ENCRYPT_SINGLE_BLOCK 2
+%define %%GDATA %1
+%define %%XMM0 %2
+
+ vpxor %%XMM0, %%XMM0, [%%GDATA+16*0]
+%assign i 1
+%rep NROUNDS
+ vaesenc %%XMM0, [%%GDATA+16*i]
+%assign i (i+1)
+%endrep
+ vaesenclast %%XMM0, [%%GDATA+16*i]
+%endmacro
+
+
+;; Start of Stack Setup
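+;; FUNC_SAVE keeps the pre-alignment stack pointer in r14 (FUNC_RESTORE puts it back),
+;; then aligns rsp down to 64 bytes so the local storage area is aligned for xmm spills.
+;; On win64 the non-volatile registers xmm6-xmm15 are saved to LOCAL_STORAGE as well.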
+
+%macro FUNC_SAVE 0
+        ;; Required for Update/GCM_ENC
+        ;the number of pushes must equal STACK_OFFSET
+ push r12
+ push r13
+ push r14
+ push r15
+ mov r14, rsp
+
+ sub rsp, VARIABLE_OFFSET
+ and rsp, ~63
+
+%ifidn __OUTPUT_FORMAT__, win64
+ ; xmm6:xmm15 need to be maintained for Windows
+ vmovdqu [rsp + LOCAL_STORAGE + 0*16],xmm6
+ vmovdqu [rsp + LOCAL_STORAGE + 1*16],xmm7
+ vmovdqu [rsp + LOCAL_STORAGE + 2*16],xmm8
+ vmovdqu [rsp + LOCAL_STORAGE + 3*16],xmm9
+ vmovdqu [rsp + LOCAL_STORAGE + 4*16],xmm10
+ vmovdqu [rsp + LOCAL_STORAGE + 5*16],xmm11
+ vmovdqu [rsp + LOCAL_STORAGE + 6*16],xmm12
+ vmovdqu [rsp + LOCAL_STORAGE + 7*16],xmm13
+ vmovdqu [rsp + LOCAL_STORAGE + 8*16],xmm14
+ vmovdqu [rsp + LOCAL_STORAGE + 9*16],xmm15
+%endif
+%endmacro
+
+
+%macro FUNC_RESTORE 0
+
+%ifdef SAFE_DATA
+ clear_scratch_gps_asm
+ clear_scratch_zmms_asm
+%endif
+%ifidn __OUTPUT_FORMAT__, win64
+ vmovdqu xmm15, [rsp + LOCAL_STORAGE + 9*16]
+ vmovdqu xmm14, [rsp + LOCAL_STORAGE + 8*16]
+ vmovdqu xmm13, [rsp + LOCAL_STORAGE + 7*16]
+ vmovdqu xmm12, [rsp + LOCAL_STORAGE + 6*16]
+ vmovdqu xmm11, [rsp + LOCAL_STORAGE + 5*16]
+ vmovdqu xmm10, [rsp + LOCAL_STORAGE + 4*16]
+ vmovdqu xmm9, [rsp + LOCAL_STORAGE + 3*16]
+ vmovdqu xmm8, [rsp + LOCAL_STORAGE + 2*16]
+ vmovdqu xmm7, [rsp + LOCAL_STORAGE + 1*16]
+ vmovdqu xmm6, [rsp + LOCAL_STORAGE + 0*16]
+%endif
+;; Required for Update/GCM_ENC
+ mov rsp, r14
+ pop r15
+ pop r14
+ pop r13
+ pop r12
+%endmacro
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; GCM_INIT initializes a gcm_context_data struct to prepare for encoding/decoding.
+; Input: gcm_key_data * (GDATA_KEY), gcm_context_data *(GDATA_CTX), IV,
+; Additional Authentication data (A_IN), Additional Data length (A_LEN).
+; Output: Updated GDATA_CTX with the hash of A_IN (AadHash) and initialized other parts of GDATA_CTX.
+; Clobbers rax, r10-r13, and xmm0-xmm6
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+%macro GCM_INIT 8
+%define %%GDATA_KEY %1 ; [in] GCM expanded keys pointer
+%define %%GDATA_CTX %2 ; [in] GCM context pointer
+%define %%IV %3 ; [in] IV pointer
+%define %%A_IN %4 ; [in] AAD pointer
+%define %%A_LEN %5 ; [in] AAD length in bytes
+%define %%GPR1 %6 ; temp GPR
+%define %%GPR2 %7 ; temp GPR
+%define %%GPR3 %8 ; temp GPR
+
+%define %%AAD_HASH xmm14
+
+ CALC_AAD_HASH %%A_IN, %%A_LEN, %%AAD_HASH, %%GDATA_KEY, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, %%GPR1, %%GPR2, %%GPR3
+
+ mov %%GPR1, %%A_LEN
+ vmovdqu [%%GDATA_CTX + AadHash], %%AAD_HASH ; ctx_data.aad hash = aad_hash
+ mov [%%GDATA_CTX + AadLen], %%GPR1 ; ctx_data.aad_length = aad_length
+
+ xor %%GPR1, %%GPR1
+ mov [%%GDATA_CTX + InLen], %%GPR1 ; ctx_data.in_length = 0
+ mov [%%GDATA_CTX + PBlockLen], %%GPR1 ; ctx_data.partial_block_length = 0
+
+ ;; read 12 IV bytes and pad with 0x00000001
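+        ;; For the 96-bit IV case handled here the pre-counter block is
+        ;; J0 = IV || 0x00000001 (ONEf supplies the padding). J0 is kept in OrigIV for the
+        ;; final tag computation and, after the byte swap below, in CurCount where the
+        ;; counter is incremented with [rel ONE] by the encryption macros.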
+ mov %%GPR2, %%IV
+ vmovd xmm3, [%%GPR2 + 8]
+ vpslldq xmm3, 8
+ vmovq xmm2, [%%GPR2]
+ vmovdqa xmm4, [rel ONEf]
+ vpternlogq xmm2, xmm3, xmm4, 0xfe ; xmm2 = xmm2 or xmm3 or xmm4
+
+ vmovdqu [%%GDATA_CTX + OrigIV], xmm2 ; ctx_data.orig_IV = iv
+
+ ;; store IV as counter in LE format
+ vpshufb xmm2, [rel SHUF_MASK]
+ vmovdqu [%%GDATA_CTX + CurCount], xmm2 ; ctx_data.current_counter = iv
+%endmacro
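+; Note: for the 96-bit IV case above, the pre-counter block J0 = IV || 0x00000001
+; (big-endian) is assembled from the two IV loads and the ONEf constant with a
+; single vpternlogq (imm8 0xfe = OR of all three operands). J0 is saved in
+; OrigIV for the final tag computation, while a byte-reflected copy goes to
+; CurCount and is used as the running counter.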
+
+%macro GCM_ENC_DEC_SMALL 12
+%define %%GDATA_KEY %1
+%define %%GDATA_CTX %2
+%define %%CYPH_PLAIN_OUT %3
+%define %%PLAIN_CYPH_IN %4
+%define %%PLAIN_CYPH_LEN %5
+%define %%ENC_DEC %6
+%define %%DATA_OFFSET %7
+%define %%LENGTH %8 ; assumed r13
+%define %%NUM_BLOCKS %9
+%define %%CTR %10 ; assumed xmm9
+%define %%HASH_OUT %11 ; assumed xmm14
+%define %%INSTANCE_TYPE %12
+
+        ;; NOTE: the check below is obsolete in the current implementation; it is already done in GCM_ENC_DEC.
+ ;; cmp %%NUM_BLOCKS, 0
+ ;; je %%_small_initial_blocks_encrypted
+ cmp %%NUM_BLOCKS, 8
+ je %%_small_initial_num_blocks_is_8
+ cmp %%NUM_BLOCKS, 7
+ je %%_small_initial_num_blocks_is_7
+ cmp %%NUM_BLOCKS, 6
+ je %%_small_initial_num_blocks_is_6
+ cmp %%NUM_BLOCKS, 5
+ je %%_small_initial_num_blocks_is_5
+ cmp %%NUM_BLOCKS, 4
+ je %%_small_initial_num_blocks_is_4
+ cmp %%NUM_BLOCKS, 3
+ je %%_small_initial_num_blocks_is_3
+ cmp %%NUM_BLOCKS, 2
+ je %%_small_initial_num_blocks_is_2
+
+ jmp %%_small_initial_num_blocks_is_1
+
+
+%%_small_initial_num_blocks_is_8:
+ ;; r13 - %%LENGTH
+ ;; xmm12 - T1
+ ;; xmm13 - T2
+ ;; xmm14 - T3 - AAD HASH OUT when not producing 8 AES keys
+ ;; xmm15 - T4
+ ;; xmm11 - T5
+ ;; xmm9 - CTR
+ ;; xmm1 - XMM1 - Cipher + Hash when producing 8 AES keys
+ ;; xmm2 - XMM2
+ ;; xmm3 - XMM3
+ ;; xmm4 - XMM4
+ ;; xmm5 - XMM5
+ ;; xmm6 - XMM6
+ ;; xmm7 - XMM7
+ ;; xmm8 - XMM8 - AAD HASH IN
+ ;; xmm10 - T6
+ ;; xmm0 - T_key
+ INITIAL_BLOCKS_PARTIAL %%GDATA_KEY, %%GDATA_CTX, %%CYPH_PLAIN_OUT, \
+ %%PLAIN_CYPH_IN, %%LENGTH, %%DATA_OFFSET, 8, \
+ xmm12, xmm13, %%HASH_OUT, xmm15, xmm11, %%CTR, \
+ xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, \
+ xmm10, xmm0, %%ENC_DEC, %%INSTANCE_TYPE
+ jmp %%_small_initial_blocks_encrypted
+
+%%_small_initial_num_blocks_is_7:
+ INITIAL_BLOCKS_PARTIAL %%GDATA_KEY, %%GDATA_CTX, %%CYPH_PLAIN_OUT, \
+ %%PLAIN_CYPH_IN, %%LENGTH, %%DATA_OFFSET, 7, \
+ xmm12, xmm13, %%HASH_OUT, xmm15, xmm11, %%CTR, \
+ xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, \
+ xmm10, xmm0, %%ENC_DEC, %%INSTANCE_TYPE
+ jmp %%_small_initial_blocks_encrypted
+
+%%_small_initial_num_blocks_is_6:
+ INITIAL_BLOCKS_PARTIAL %%GDATA_KEY, %%GDATA_CTX, %%CYPH_PLAIN_OUT, \
+ %%PLAIN_CYPH_IN, %%LENGTH, %%DATA_OFFSET, 6, \
+ xmm12, xmm13, %%HASH_OUT, xmm15, xmm11, %%CTR, \
+ xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, \
+ xmm10, xmm0, %%ENC_DEC, %%INSTANCE_TYPE
+ jmp %%_small_initial_blocks_encrypted
+
+%%_small_initial_num_blocks_is_5:
+ INITIAL_BLOCKS_PARTIAL %%GDATA_KEY, %%GDATA_CTX, %%CYPH_PLAIN_OUT, \
+ %%PLAIN_CYPH_IN, %%LENGTH, %%DATA_OFFSET, 5, \
+ xmm12, xmm13, %%HASH_OUT, xmm15, xmm11, %%CTR, \
+ xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, \
+ xmm10, xmm0, %%ENC_DEC, %%INSTANCE_TYPE
+ jmp %%_small_initial_blocks_encrypted
+
+%%_small_initial_num_blocks_is_4:
+ INITIAL_BLOCKS_PARTIAL %%GDATA_KEY, %%GDATA_CTX, %%CYPH_PLAIN_OUT, \
+ %%PLAIN_CYPH_IN, %%LENGTH, %%DATA_OFFSET, 4, \
+ xmm12, xmm13, %%HASH_OUT, xmm15, xmm11, %%CTR, \
+ xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, \
+ xmm10, xmm0, %%ENC_DEC, %%INSTANCE_TYPE
+ jmp %%_small_initial_blocks_encrypted
+
+%%_small_initial_num_blocks_is_3:
+ INITIAL_BLOCKS_PARTIAL %%GDATA_KEY, %%GDATA_CTX, %%CYPH_PLAIN_OUT, \
+ %%PLAIN_CYPH_IN, %%LENGTH, %%DATA_OFFSET, 3, \
+ xmm12, xmm13, %%HASH_OUT, xmm15, xmm11, %%CTR, \
+ xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, \
+ xmm10, xmm0, %%ENC_DEC, %%INSTANCE_TYPE
+ jmp %%_small_initial_blocks_encrypted
+
+%%_small_initial_num_blocks_is_2:
+ INITIAL_BLOCKS_PARTIAL %%GDATA_KEY, %%GDATA_CTX, %%CYPH_PLAIN_OUT, \
+ %%PLAIN_CYPH_IN, %%LENGTH, %%DATA_OFFSET, 2, \
+ xmm12, xmm13, %%HASH_OUT, xmm15, xmm11, %%CTR, \
+ xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, \
+ xmm10, xmm0, %%ENC_DEC, %%INSTANCE_TYPE
+ jmp %%_small_initial_blocks_encrypted
+
+%%_small_initial_num_blocks_is_1:
+ INITIAL_BLOCKS_PARTIAL %%GDATA_KEY, %%GDATA_CTX, %%CYPH_PLAIN_OUT, \
+ %%PLAIN_CYPH_IN, %%LENGTH, %%DATA_OFFSET, 1, \
+ xmm12, xmm13, %%HASH_OUT, xmm15, xmm11, %%CTR, \
+ xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, \
+ xmm10, xmm0, %%ENC_DEC, %%INSTANCE_TYPE
+%%_small_initial_blocks_encrypted:
+
+%endmacro ; GCM_ENC_DEC_SMALL
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; GCM_ENC_DEC Encodes/Decodes given data. Assumes that the passed gcm_context_data struct
+; has been initialized by GCM_INIT
+; Requires the input data be at least 1 byte long because of READ_SMALL_INPUT_DATA.
+; Input: gcm_key_data struct* (GDATA_KEY), gcm_context_data *(GDATA_CTX), input text (PLAIN_CYPH_IN),
+; input text length (PLAIN_CYPH_LEN) and whether encoding or decoding (ENC_DEC).
+; Output: A cypher of the given plain text (CYPH_PLAIN_OUT), and updated GDATA_CTX
+; Clobbers rax, r10-r15, and xmm0-xmm15
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+%macro GCM_ENC_DEC 7
+%define %%GDATA_KEY %1
+%define %%GDATA_CTX %2
+%define %%CYPH_PLAIN_OUT %3
+%define %%PLAIN_CYPH_IN %4
+%define %%PLAIN_CYPH_LEN %5
+%define %%ENC_DEC %6
+%define %%INSTANCE_TYPE %7
+%define %%DATA_OFFSET r11
+
+; Macro flow:
+; calculate the number of 16-byte blocks in the message
+; process (number of 16-byte blocks) mod 8 '%%_initial_num_blocks_is_# .. %%_initial_blocks_encrypted'
+; process 8 x 16-byte blocks at a time until all are done '%%_encrypt_by_8_new .. %%_eight_cipher_left'
+; if there is a block of less than 16 bytes, process it '%%_zero_cipher_left .. %%_multiple_of_16_bytes'
+
+%ifidn __OUTPUT_FORMAT__, win64
+ cmp %%PLAIN_CYPH_LEN, 0
+%else
+ or %%PLAIN_CYPH_LEN, %%PLAIN_CYPH_LEN
+%endif
+ je %%_enc_dec_done
+
+ xor %%DATA_OFFSET, %%DATA_OFFSET
+ ;; Update length of data processed
+%ifidn __OUTPUT_FORMAT__, win64
+ mov rax, %%PLAIN_CYPH_LEN
+ add [%%GDATA_CTX + InLen], rax
+%else
+ add [%%GDATA_CTX + InLen], %%PLAIN_CYPH_LEN
+%endif
+ vmovdqu xmm13, [%%GDATA_KEY + HashKey]
+ vmovdqu xmm8, [%%GDATA_CTX + AadHash]
+
+%ifidn %%INSTANCE_TYPE, multi_call
+        ;; NOTE: partial block processing only makes sense for multi_call here.
+ ;; Used for the update flow - if there was a previous partial
+ ;; block fill the remaining bytes here.
+ PARTIAL_BLOCK %%GDATA_KEY, %%GDATA_CTX, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, %%PLAIN_CYPH_LEN, %%DATA_OFFSET, xmm8, %%ENC_DEC
+%endif
+
+ ;; lift CTR set from initial_blocks to here
+%ifidn %%INSTANCE_TYPE, single_call
+ vmovdqu xmm9, xmm2
+%else
+ vmovdqu xmm9, [%%GDATA_CTX + CurCount]
+%endif
+
+        ;; Save the amount of data left to process in r13 (copied to r10 below)
+ mov r13, %%PLAIN_CYPH_LEN
+%ifidn %%INSTANCE_TYPE, multi_call
+ ;; NOTE: %%DATA_OFFSET is zero in single_call case.
+ ;; Consequently PLAIN_CYPH_LEN will never be zero after
+ ;; %%DATA_OFFSET subtraction below.
+ sub r13, %%DATA_OFFSET
+
+ ;; There may be no more data if it was consumed in the partial block.
+ cmp r13, 0
+ je %%_enc_dec_done
+%endif ; %%INSTANCE_TYPE, multi_call
+ mov r10, r13
+
+ ;; Determine how many blocks to process in INITIAL
+ mov r12, r13
+ shr r12, 4
+ and r12, 7
+
+ ;; Process one additional block in INITIAL if there is a partial block
+ and r10, 0xf
+ blsmsk r10, r10 ; Set CF if zero
+ cmc ; Flip CF
+ adc r12, 0x0 ; Process an additional INITIAL block if CF set
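+        ;; Net effect of the blsmsk/cmc/adc sequence: r12 is incremented by one
+        ;; when (length & 0xf) != 0, i.e. an extra INITIAL block is reserved for
+        ;; the trailing partial block. Illustrative example: length = 145 gives
+        ;; r12 = (145 >> 4) & 7 = 1 full block, low nibble = 1 (partial data
+        ;; present), so the adc bumps r12 to 2.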
+
+        ;; Messages of less than 128 bytes are handled by the small message code,
+        ;; which can process up to 8 16-byte blocks (the last block may be partial).
+ cmp r13, 128
+ jge %%_large_message_path
+
+ GCM_ENC_DEC_SMALL %%GDATA_KEY, %%GDATA_CTX, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, %%PLAIN_CYPH_LEN, %%ENC_DEC, %%DATA_OFFSET, r13, r12, xmm9, xmm14, %%INSTANCE_TYPE
+ jmp %%_ghash_done
+
+%%_large_message_path:
+        and     r12, 0x7 ; Still, don't allow 8 INITIAL blocks since this
+                         ; case can be handled by the x8 partial loop.
+
+ cmp r12, 0
+ je %%_initial_num_blocks_is_0
+ cmp r12, 7
+ je %%_initial_num_blocks_is_7
+ cmp r12, 6
+ je %%_initial_num_blocks_is_6
+ cmp r12, 5
+ je %%_initial_num_blocks_is_5
+ cmp r12, 4
+ je %%_initial_num_blocks_is_4
+ cmp r12, 3
+ je %%_initial_num_blocks_is_3
+ cmp r12, 2
+ je %%_initial_num_blocks_is_2
+
+ jmp %%_initial_num_blocks_is_1
+
+%%_initial_num_blocks_is_7:
+ ;; r13 - %%LENGTH
+ ;; xmm12 - T1
+ ;; xmm13 - T2
+ ;; xmm14 - T3 - AAD HASH OUT when not producing 8 AES keys
+ ;; xmm15 - T4
+ ;; xmm11 - T5
+ ;; xmm9 - CTR
+ ;; xmm1 - XMM1 - Cipher + Hash when producing 8 AES keys
+ ;; xmm2 - XMM2
+ ;; xmm3 - XMM3
+ ;; xmm4 - XMM4
+ ;; xmm5 - XMM5
+ ;; xmm6 - XMM6
+ ;; xmm7 - XMM7
+ ;; xmm8 - XMM8 - AAD HASH IN
+ ;; xmm10 - T6
+ ;; xmm0 - T_key
+ INITIAL_BLOCKS %%GDATA_KEY, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, r13, %%DATA_OFFSET, 7, xmm12, xmm13, xmm14, xmm15, xmm11, xmm9, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm10, xmm0, %%ENC_DEC
+ jmp %%_initial_blocks_encrypted
+
+%%_initial_num_blocks_is_6:
+ INITIAL_BLOCKS %%GDATA_KEY, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, r13, %%DATA_OFFSET, 6, xmm12, xmm13, xmm14, xmm15, xmm11, xmm9, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm10, xmm0, %%ENC_DEC
+ jmp %%_initial_blocks_encrypted
+
+%%_initial_num_blocks_is_5:
+ INITIAL_BLOCKS %%GDATA_KEY, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, r13, %%DATA_OFFSET, 5, xmm12, xmm13, xmm14, xmm15, xmm11, xmm9, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm10, xmm0, %%ENC_DEC
+ jmp %%_initial_blocks_encrypted
+
+%%_initial_num_blocks_is_4:
+ INITIAL_BLOCKS %%GDATA_KEY, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, r13, %%DATA_OFFSET, 4, xmm12, xmm13, xmm14, xmm15, xmm11, xmm9, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm10, xmm0, %%ENC_DEC
+ jmp %%_initial_blocks_encrypted
+
+%%_initial_num_blocks_is_3:
+ INITIAL_BLOCKS %%GDATA_KEY, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, r13, %%DATA_OFFSET, 3, xmm12, xmm13, xmm14, xmm15, xmm11, xmm9, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm10, xmm0, %%ENC_DEC
+ jmp %%_initial_blocks_encrypted
+
+%%_initial_num_blocks_is_2:
+ INITIAL_BLOCKS %%GDATA_KEY, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, r13, %%DATA_OFFSET, 2, xmm12, xmm13, xmm14, xmm15, xmm11, xmm9, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm10, xmm0, %%ENC_DEC
+ jmp %%_initial_blocks_encrypted
+
+%%_initial_num_blocks_is_1:
+ INITIAL_BLOCKS %%GDATA_KEY, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, r13, %%DATA_OFFSET, 1, xmm12, xmm13, xmm14, xmm15, xmm11, xmm9, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm10, xmm0, %%ENC_DEC
+ jmp %%_initial_blocks_encrypted
+
+%%_initial_num_blocks_is_0:
+ INITIAL_BLOCKS %%GDATA_KEY, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, r13, %%DATA_OFFSET, 0, xmm12, xmm13, xmm14, xmm15, xmm11, xmm9, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm10, xmm0, %%ENC_DEC
+
+
+%%_initial_blocks_encrypted:
+        ;; The entire message was encrypted and processed in INITIAL; it now only needs to be hashed
+ cmp r13, 0
+ je %%_encrypt_done
+
+ ;; Encrypt the final <16 byte (partial) block, then hash
+ cmp r13, 16
+ jl %%_encrypt_final_partial
+
+ ;; Process 7 full blocks plus a partial block
+ cmp r13, 128
+ jl %%_encrypt_by_8_partial
+
+
+%%_encrypt_by_8_parallel:
+ ;; in_order vs. out_order is an optimization to increment the counter without shuffling
+        ;; it back into little endian. r15d keeps track of when we need to increment in order so
+ ;; that the carry is handled correctly.
+ vmovd r15d, xmm9
+ and r15d, 255
+ vpshufb xmm9, [rel SHUF_MASK]
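+        ;; r15d caches the low byte of the 32-bit block counter. The check below
+        ;; (r15d > 255 - 8) detects when the next eight per-block increments
+        ;; would carry out of that byte; in that case the in_order path is
+        ;; taken, which re-shuffles the counter so the carry propagates
+        ;; correctly, while the common out_order path avoids the extra shuffles.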
+
+
+%%_encrypt_by_8_new:
+ cmp r15d, 255-8
+ jg %%_encrypt_by_8
+
+
+
+ ;; xmm0 - T1
+ ;; xmm10 - T2
+ ;; xmm11 - T3
+ ;; xmm12 - T4
+ ;; xmm13 - T5
+ ;; xmm14 - T6
+ ;; xmm9 - CTR
+ ;; xmm1 - XMM1
+ ;; xmm2 - XMM2
+ ;; xmm3 - XMM3
+ ;; xmm4 - XMM4
+ ;; xmm5 - XMM5
+ ;; xmm6 - XMM6
+ ;; xmm7 - XMM7
+ ;; xmm8 - XMM8
+ ;; xmm15 - T7
+ add r15b, 8
+ GHASH_8_ENCRYPT_8_PARALLEL %%GDATA_KEY, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, %%DATA_OFFSET, xmm0, xmm10, xmm11, xmm12, xmm13, xmm14, xmm9, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm15, out_order, %%ENC_DEC, full
+ add %%DATA_OFFSET, 128
+ sub r13, 128
+ cmp r13, 128
+ jge %%_encrypt_by_8_new
+
+ vpshufb xmm9, [rel SHUF_MASK]
+ jmp %%_encrypt_by_8_parallel_done
+
+%%_encrypt_by_8:
+ vpshufb xmm9, [rel SHUF_MASK]
+ add r15b, 8
+ GHASH_8_ENCRYPT_8_PARALLEL %%GDATA_KEY, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, %%DATA_OFFSET, xmm0, xmm10, xmm11, xmm12, xmm13, xmm14, xmm9, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm15, in_order, %%ENC_DEC, full
+ vpshufb xmm9, [rel SHUF_MASK]
+ add %%DATA_OFFSET, 128
+ sub r13, 128
+ cmp r13, 128
+ jge %%_encrypt_by_8_new
+ vpshufb xmm9, [rel SHUF_MASK]
+
+
+%%_encrypt_by_8_parallel_done:
+        ;; Test to see if we need a by-8 pass with a partial block. At this
+        ;; point the bytes remaining should be either zero or between 113 and 127.
+ cmp r13, 0
+ je %%_encrypt_done
+
+%%_encrypt_by_8_partial:
+ ;; Shuffle needed to align key for partial block xor. out_order
+ ;; is a little faster because it avoids extra shuffles.
+ ;; TBD: Might need to account for when we don't have room to increment the counter.
+
+
+ ;; Process parallel buffers with a final partial block.
+ GHASH_8_ENCRYPT_8_PARALLEL %%GDATA_KEY, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, %%DATA_OFFSET, xmm0, xmm10, xmm11, xmm12, xmm13, xmm14, xmm9, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm15, in_order, %%ENC_DEC, partial
+
+
+ add %%DATA_OFFSET, 128-16
+ sub r13, 128-16
+
+%%_encrypt_final_partial:
+
+ vpshufb xmm8, [rel SHUF_MASK]
+ mov [%%GDATA_CTX + PBlockLen], r13
+ vmovdqu [%%GDATA_CTX + PBlockEncKey], xmm8
+
+ ;; xmm8 - Final encrypted counter - need to hash with partial or full block ciphertext
+ ;; GDATA, KEY, T1, T2
+ ENCRYPT_FINAL_PARTIAL_BLOCK xmm8, xmm0, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, %%PLAIN_CYPH_LEN, %%ENC_DEC, %%DATA_OFFSET
+
+ vpshufb xmm8, [rel SHUF_MASK]
+
+
+%%_encrypt_done:
+
+ ;; Mapping to macro parameters
+ ;; IN:
+ ;; xmm9 contains the counter
+ ;; xmm1-xmm8 contain the xor'd ciphertext
+ ;; OUT:
+ ;; xmm14 contains the final hash
+ ;; GDATA, T1, T2, T3, T4, T5, T6, T7, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8
+%ifidn %%INSTANCE_TYPE, multi_call
+ mov r13, [%%GDATA_CTX + PBlockLen]
+ cmp r13, 0
+ jz %%_hash_last_8
+ GHASH_LAST_7 %%GDATA_KEY, xmm0, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7
+        ;; XOR the partial block into the hash
+ vpxor xmm14, xmm14, xmm8
+ jmp %%_ghash_done
+%endif
+%%_hash_last_8:
+ GHASH_LAST_8 %%GDATA_KEY, xmm0, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8
+
+%%_ghash_done:
+ vmovdqu [%%GDATA_CTX + CurCount], xmm9 ; my_ctx_data.current_counter = xmm9
+ vmovdqu [%%GDATA_CTX + AadHash], xmm14 ; my_ctx_data.aad hash = xmm14
+
+%%_enc_dec_done:
+
+
+%endmacro ; GCM_ENC_DEC
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; GCM_COMPLETE Finishes Encryption/Decryption of last partial block after GCM_UPDATE finishes.
+; Input: A gcm_key_data * (GDATA_KEY), gcm_context_data * (GDATA_CTX) and whether encoding or decoding (ENC_DEC).
+; Output: Authentication Tag (AUTH_TAG) and Authentication Tag length (AUTH_TAG_LEN)
+; Clobbers rax, r10-r12, and xmm0, xmm1, xmm5, xmm6, xmm9, xmm11, xmm14, xmm15
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+%macro GCM_COMPLETE 6
+%define %%GDATA_KEY %1
+%define %%GDATA_CTX %2
+%define %%AUTH_TAG %3
+%define %%AUTH_TAG_LEN %4
+%define %%ENC_DEC %5
+%define %%INSTANCE_TYPE %6
+%define %%PLAIN_CYPH_LEN rax
+
+ vmovdqu xmm13, [%%GDATA_KEY + HashKey]
+ ;; Start AES as early as possible
+ vmovdqu xmm9, [%%GDATA_CTX + OrigIV] ; xmm9 = Y0
+ ENCRYPT_SINGLE_BLOCK %%GDATA_KEY, xmm9 ; E(K, Y0)
+
+%ifidn %%INSTANCE_TYPE, multi_call
+        ;; In the multi_call case the current hash is re-loaded from the context.
+        ;; When GCM is invoked as a single call (rather than through the
+        ;; individual init, update, finalize parts) this load is skipped,
+        ;; removing a write to read dependency on AadHash.
+ vmovdqu xmm14, [%%GDATA_CTX + AadHash]
+
+        ;; GHASH the final partial block. If this was invoked as a single call
+        ;; the partial block was already handled in the main GCM_ENC_DEC macro.
+ mov r12, [%%GDATA_CTX + PBlockLen]
+ cmp r12, 0
+
+ je %%_partial_done
+
+ GHASH_MUL xmm14, xmm13, xmm0, xmm10, xmm11, xmm5, xmm6 ;GHASH computation for the last <16 Byte block
+ vmovdqu [%%GDATA_CTX + AadHash], xmm14
+
+%%_partial_done:
+
+%endif
+
+ mov r12, [%%GDATA_CTX + AadLen] ; r12 = aadLen (number of bytes)
+ mov %%PLAIN_CYPH_LEN, [%%GDATA_CTX + InLen]
+
+ shl r12, 3 ; convert into number of bits
+ vmovd xmm15, r12d ; len(A) in xmm15
+
+        shl     %%PLAIN_CYPH_LEN, 3     ; len(C) in bits (*8)
+ vmovq xmm1, %%PLAIN_CYPH_LEN
+ vpslldq xmm15, xmm15, 8 ; xmm15 = len(A)|| 0x0000000000000000
+ vpxor xmm15, xmm15, xmm1 ; xmm15 = len(A)||len(C)
+
+ vpxor xmm14, xmm15
+ GHASH_MUL xmm14, xmm13, xmm0, xmm10, xmm11, xmm5, xmm6
+ vpshufb xmm14, [rel SHUF_MASK] ; perform a 16Byte swap
+
+ vpxor xmm9, xmm9, xmm14
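+        ;; Tag construction follows the GCM spec: the length block
+        ;; len(A) || len(C) (both in bits) is folded into the running GHASH in
+        ;; xmm14, the result is byte-swapped and xored into E(K, J0) held in
+        ;; xmm9. Truncation to 8, 12 or 16 bytes (or an arbitrary length via
+        ;; simd_store_avx) is done below in %%_return_T.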
+
+
+%%_return_T:
+ mov r10, %%AUTH_TAG ; r10 = authTag
+ mov r11, %%AUTH_TAG_LEN ; r11 = auth_tag_len
+
+ cmp r11, 16
+ je %%_T_16
+
+ cmp r11, 12
+ je %%_T_12
+
+ cmp r11, 8
+ je %%_T_8
+
+ simd_store_avx r10, xmm9, r11, r12, rax
+ jmp %%_return_T_done
+%%_T_8:
+ vmovq rax, xmm9
+ mov [r10], rax
+ jmp %%_return_T_done
+%%_T_12:
+ vmovq rax, xmm9
+ mov [r10], rax
+ vpsrldq xmm9, xmm9, 8
+ vmovd eax, xmm9
+ mov [r10 + 8], eax
+ jmp %%_return_T_done
+%%_T_16:
+ vmovdqu [r10], xmm9
+
+%%_return_T_done:
+
+%ifdef SAFE_DATA
+ ;; Clear sensitive data from context structure
+ vpxor xmm0, xmm0
+ vmovdqu [%%GDATA_CTX + AadHash], xmm0
+ vmovdqu [%%GDATA_CTX + PBlockEncKey], xmm0
+%endif
+%endmacro ; GCM_COMPLETE
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;void aes_gcm_precomp_128_avx512 /
+; aes_gcm_precomp_192_avx512 /
+; aes_gcm_precomp_256_avx512
+; (struct gcm_key_data *key_data)
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+MKGLOBAL(FN_NAME(precomp,_),function,)
+FN_NAME(precomp,_):
+;; Parameter is passed through register
+%ifdef SAFE_PARAM
+ ;; Check key_data != NULL
+ cmp arg1, 0
+ jz exit_precomp
+%endif
+
+ push r12
+ push r13
+ push r14
+ push r15
+
+ mov r14, rsp
+
+
+
+ sub rsp, VARIABLE_OFFSET
+ and rsp, ~63 ; align rsp to 64 bytes
+
+%ifidn __OUTPUT_FORMAT__, win64
+ ; only xmm6 needs to be maintained
+ vmovdqu [rsp + LOCAL_STORAGE + 0*16],xmm6
+%endif
+
+ vpxor xmm6, xmm6
+ ENCRYPT_SINGLE_BLOCK arg1, xmm6 ; xmm6 = HashKey
+
+ vpshufb xmm6, [rel SHUF_MASK]
+ ;;;;;;;;;;;;;;; PRECOMPUTATION of HashKey<<1 mod poly from the HashKey;;;;;;;;;;;;;;;
+ vmovdqa xmm2, xmm6
+ vpsllq xmm6, xmm6, 1
+ vpsrlq xmm2, xmm2, 63
+ vmovdqa xmm1, xmm2
+ vpslldq xmm2, xmm2, 8
+ vpsrldq xmm1, xmm1, 8
+ vpor xmm6, xmm6, xmm2
+ ;reduction
+ vpshufd xmm2, xmm1, 00100100b
+ vpcmpeqd xmm2, [rel TWOONE]
+ vpand xmm2, xmm2, [rel POLY]
+ vpxor xmm6, xmm6, xmm2 ; xmm6 holds the HashKey<<1 mod poly
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ vmovdqu [arg1 + HashKey], xmm6 ; store HashKey<<1 mod poly
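+        ;; Note: the sequence above computes H = E(K, 0^128), byte-reflects it,
+        ;; and derives HashKey<<1 mod poly: the 128-bit value is shifted left by
+        ;; one bit (vpsllq/vpsrlq/vpslldq/vpor implement the cross-quadword
+        ;; shift) and, if the bit shifted out of the top was set, the POLY
+        ;; constant is xored in (the vpshufd/vpcmpeqd/vpand sequence builds that
+        ;; conditional mask from TWOONE).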
+
+
+ PRECOMPUTE arg1, xmm6, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5
+
+%ifdef SAFE_DATA
+ clear_scratch_gps_asm
+ clear_scratch_zmms_asm
+%endif
+%ifidn __OUTPUT_FORMAT__, win64
+ vmovdqu xmm6, [rsp + LOCAL_STORAGE + 0*16]
+%endif
+ mov rsp, r14
+
+ pop r15
+ pop r14
+ pop r13
+ pop r12
+
+exit_precomp:
+ ret
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;void aes_gcm_init_128_avx512 / aes_gcm_init_192_avx512 / aes_gcm_init_256_avx512
+; (const struct gcm_key_data *key_data,
+; struct gcm_context_data *context_data,
+; u8 *iv,
+; const u8 *aad,
+; u64 aad_len);
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+MKGLOBAL(FN_NAME(init,_),function,)
+FN_NAME(init,_):
+ push r12
+ push r13
+%ifidn __OUTPUT_FORMAT__, win64
+ push r14
+ push r15
+ mov r14, rsp
+ ; xmm6:xmm15 need to be maintained for Windows
+ sub rsp, 1*16
+ movdqu [rsp + 0*16], xmm6
+%endif
+
+%ifdef SAFE_PARAM
+ ;; Check key_data != NULL
+ cmp arg1, 0
+ jz exit_init
+
+ ;; Check context_data != NULL
+ cmp arg2, 0
+ jz exit_init
+
+ ;; Check IV != NULL
+ cmp arg3, 0
+ jz exit_init
+
+ ;; Check if aad_len == 0
+ cmp arg5, 0
+ jz skip_aad_check_init
+
+ ;; Check aad != NULL (aad_len != 0)
+ cmp arg4, 0
+ jz exit_init
+
+skip_aad_check_init:
+%endif
+ GCM_INIT arg1, arg2, arg3, arg4, arg5, r10, r11, r12
+
+%ifdef SAFE_DATA
+ clear_scratch_gps_asm
+ clear_scratch_zmms_asm
+%endif
+exit_init:
+%ifidn __OUTPUT_FORMAT__, win64
+ movdqu xmm6 , [rsp + 0*16]
+ mov rsp, r14
+ pop r15
+ pop r14
+%endif
+ pop r13
+ pop r12
+ ret
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;void aes_gcm_enc_128_update_avx512 / aes_gcm_enc_192_update_avx512 /
+; aes_gcm_enc_256_update_avx512
+; (const struct gcm_key_data *key_data,
+; struct gcm_context_data *context_data,
+; u8 *out,
+; const u8 *in,
+; u64 plaintext_len);
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+MKGLOBAL(FN_NAME(enc,_update_),function,)
+FN_NAME(enc,_update_):
+
+ FUNC_SAVE
+
+%ifdef SAFE_PARAM
+ ;; Check key_data != NULL
+ cmp arg1, 0
+ jz exit_update_enc
+
+ ;; Check context_data != NULL
+ cmp arg2, 0
+ jz exit_update_enc
+
+ ;; Check if plaintext_len == 0
+ cmp arg5, 0
+ jz skip_in_out_check_update_enc
+
+ ;; Check out != NULL (plaintext_len != 0)
+ cmp arg3, 0
+ jz exit_update_enc
+
+ ;; Check in != NULL (plaintext_len != 0)
+ cmp arg4, 0
+ jz exit_update_enc
+
+skip_in_out_check_update_enc:
+%endif
+ GCM_ENC_DEC arg1, arg2, arg3, arg4, arg5, ENC, multi_call
+
+exit_update_enc:
+ FUNC_RESTORE
+
+ ret
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;void aes_gcm_dec_128_update_avx512 / aes_gcm_dec_192_update_avx512 /
+; aes_gcm_dec_256_update_avx512
+; (const struct gcm_key_data *key_data,
+; struct gcm_context_data *context_data,
+; u8 *out,
+; const u8 *in,
+; u64 plaintext_len);
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+MKGLOBAL(FN_NAME(dec,_update_),function,)
+FN_NAME(dec,_update_):
+
+ FUNC_SAVE
+
+%ifdef SAFE_PARAM
+ ;; Check key_data != NULL
+ cmp arg1, 0
+ jz exit_update_dec
+
+ ;; Check context_data != NULL
+ cmp arg2, 0
+ jz exit_update_dec
+
+ ;; Check if plaintext_len == 0
+ cmp arg5, 0
+ jz skip_in_out_check_update_dec
+
+ ;; Check out != NULL (plaintext_len != 0)
+ cmp arg3, 0
+ jz exit_update_dec
+
+ ;; Check in != NULL (plaintext_len != 0)
+ cmp arg4, 0
+ jz exit_update_dec
+
+skip_in_out_check_update_dec:
+%endif
+
+ GCM_ENC_DEC arg1, arg2, arg3, arg4, arg5, DEC, multi_call
+
+exit_update_dec:
+ FUNC_RESTORE
+ ret
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;void aes_gcm_enc_128_finalize_avx512 / aes_gcm_enc_192_finalize_avx512 /
+; aes_gcm_enc_256_finalize_avx512
+; (const struct gcm_key_data *key_data,
+; struct gcm_context_data *context_data,
+; u8 *auth_tag,
+; u64 auth_tag_len);
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+MKGLOBAL(FN_NAME(enc,_finalize_),function,)
+FN_NAME(enc,_finalize_):
+
+;; All parameters are passed through registers
+%ifdef SAFE_PARAM
+ ;; Check key_data != NULL
+ cmp arg1, 0
+ jz exit_enc_fin
+
+ ;; Check context_data != NULL
+ cmp arg2, 0
+ jz exit_enc_fin
+
+ ;; Check auth_tag != NULL
+ cmp arg3, 0
+ jz exit_enc_fin
+
+ ;; Check auth_tag_len == 0 or > 16
+ cmp arg4, 0
+ jz exit_enc_fin
+
+ cmp arg4, 16
+ ja exit_enc_fin
+%endif
+
+ push r12
+
+%ifidn __OUTPUT_FORMAT__, win64
+ ; xmm6:xmm15 need to be maintained for Windows
+ sub rsp, 5*16
+ vmovdqu [rsp + 0*16], xmm6
+ vmovdqu [rsp + 1*16], xmm9
+ vmovdqu [rsp + 2*16], xmm11
+ vmovdqu [rsp + 3*16], xmm14
+ vmovdqu [rsp + 4*16], xmm15
+%endif
+ GCM_COMPLETE arg1, arg2, arg3, arg4, ENC, multi_call
+
+%ifdef SAFE_DATA
+ clear_scratch_gps_asm
+ clear_scratch_zmms_asm
+%endif
+%ifidn __OUTPUT_FORMAT__, win64
+ vmovdqu xmm15, [rsp + 4*16]
+ vmovdqu xmm14, [rsp + 3*16]
+ vmovdqu xmm11, [rsp + 2*16]
+ vmovdqu xmm9, [rsp + 1*16]
+ vmovdqu xmm6, [rsp + 0*16]
+ add rsp, 5*16
+%endif
+
+ pop r12
+
+exit_enc_fin:
+ ret
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;void aes_gcm_dec_128_finalize_avx512 / aes_gcm_dec_192_finalize_avx512
+; aes_gcm_dec_256_finalize_avx512
+; (const struct gcm_key_data *key_data,
+; struct gcm_context_data *context_data,
+; u8 *auth_tag,
+; u64 auth_tag_len);
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+MKGLOBAL(FN_NAME(dec,_finalize_),function,)
+FN_NAME(dec,_finalize_):
+
+;; All parameters are passed through registers
+%ifdef SAFE_PARAM
+ ;; Check key_data != NULL
+ cmp arg1, 0
+ jz exit_dec_fin
+
+ ;; Check context_data != NULL
+ cmp arg2, 0
+ jz exit_dec_fin
+
+ ;; Check auth_tag != NULL
+ cmp arg3, 0
+ jz exit_dec_fin
+
+ ;; Check auth_tag_len == 0 or > 16
+ cmp arg4, 0
+ jz exit_dec_fin
+
+ cmp arg4, 16
+ ja exit_dec_fin
+%endif
+
+ push r12
+
+%ifidn __OUTPUT_FORMAT__, win64
+ ; xmm6:xmm15 need to be maintained for Windows
+ sub rsp, 5*16
+ vmovdqu [rsp + 0*16], xmm6
+ vmovdqu [rsp + 1*16], xmm9
+ vmovdqu [rsp + 2*16], xmm11
+ vmovdqu [rsp + 3*16], xmm14
+ vmovdqu [rsp + 4*16], xmm15
+%endif
+ GCM_COMPLETE arg1, arg2, arg3, arg4, DEC, multi_call
+
+%ifdef SAFE_DATA
+ clear_scratch_gps_asm
+ clear_scratch_zmms_asm
+%endif
+%ifidn __OUTPUT_FORMAT__, win64
+ vmovdqu xmm15, [rsp + 4*16]
+ vmovdqu xmm14, [rsp + 3*16]
+ vmovdqu xmm11, [rsp + 2*16]
+ vmovdqu xmm9, [rsp + 1*16]
+ vmovdqu xmm6, [rsp + 0*16]
+ add rsp, 5*16
+%endif
+
+ pop r12
+exit_dec_fin:
+
+ ret
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;void aes_gcm_enc_128_avx512 / aes_gcm_enc_192_avx512 / aes_gcm_enc_256_avx512
+; (const struct gcm_key_data *key_data,
+; struct gcm_context_data *context_data,
+; u8 *out,
+; const u8 *in,
+; u64 plaintext_len,
+; u8 *iv,
+; const u8 *aad,
+; u64 aad_len,
+; u8 *auth_tag,
+; u64 auth_tag_len);
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+MKGLOBAL(FN_NAME(enc,_),function,)
+FN_NAME(enc,_):
+
+ FUNC_SAVE
+
+%ifdef SAFE_PARAM
+ ;; Check key_data != NULL
+ cmp arg1, 0
+ jz exit_enc
+
+ ;; Check context_data != NULL
+ cmp arg2, 0
+ jz exit_enc
+
+ ;; Check IV != NULL
+ cmp arg6, 0
+ jz exit_enc
+
+ ;; Check auth_tag != NULL
+ cmp arg9, 0
+ jz exit_enc
+
+ ;; Check auth_tag_len == 0 or > 16
+ cmp arg10, 0
+ jz exit_enc
+
+ cmp arg10, 16
+ ja exit_enc
+
+ ;; Check if plaintext_len == 0
+ cmp arg5, 0
+ jz skip_in_out_check_enc
+
+ ;; Check out != NULL (plaintext_len != 0)
+ cmp arg3, 0
+ jz exit_enc
+
+ ;; Check in != NULL (plaintext_len != 0)
+ cmp arg4, 0
+ jz exit_enc
+
+skip_in_out_check_enc:
+ ;; Check if aad_len == 0
+ cmp arg8, 0
+ jz skip_aad_check_enc
+
+ ;; Check aad != NULL (aad_len != 0)
+ cmp arg7, 0
+ jz exit_enc
+
+skip_aad_check_enc:
+%endif
+ GCM_INIT arg1, arg2, arg6, arg7, arg8, r10, r11, r12
+
+ GCM_ENC_DEC arg1, arg2, arg3, arg4, arg5, ENC, single_call
+
+ GCM_COMPLETE arg1, arg2, arg9, arg10, ENC, single_call
+
+exit_enc:
+ FUNC_RESTORE
+
+ ret
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;void aes_gcm_dec_128_avx512 / aes_gcm_dec_192_avx512 / aes_gcm_dec_256_avx512
+; (const struct gcm_key_data *key_data,
+; struct gcm_context_data *context_data,
+; u8 *out,
+; const u8 *in,
+; u64 plaintext_len,
+; u8 *iv,
+; const u8 *aad,
+; u64 aad_len,
+; u8 *auth_tag,
+; u64 auth_tag_len);
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+MKGLOBAL(FN_NAME(dec,_),function,)
+FN_NAME(dec,_):
+
+ FUNC_SAVE
+
+%ifdef SAFE_PARAM
+ ;; Check key_data != NULL
+ cmp arg1, 0
+ jz exit_dec
+
+ ;; Check context_data != NULL
+ cmp arg2, 0
+ jz exit_dec
+
+ ;; Check IV != NULL
+ cmp arg6, 0
+ jz exit_dec
+
+ ;; Check auth_tag != NULL
+ cmp arg9, 0
+ jz exit_dec
+
+ ;; Check auth_tag_len == 0 or > 16
+ cmp arg10, 0
+ jz exit_dec
+
+ cmp arg10, 16
+ ja exit_dec
+
+ ;; Check if plaintext_len == 0
+ cmp arg5, 0
+ jz skip_in_out_check_dec
+
+ ;; Check out != NULL (plaintext_len != 0)
+ cmp arg3, 0
+ jz exit_dec
+
+ ;; Check in != NULL (plaintext_len != 0)
+ cmp arg4, 0
+ jz exit_dec
+
+skip_in_out_check_dec:
+ ;; Check if aad_len == 0
+ cmp arg8, 0
+ jz skip_aad_check_dec
+
+ ;; Check aad != NULL (aad_len != 0)
+ cmp arg7, 0
+ jz exit_dec
+
+skip_aad_check_dec:
+%endif
+
+ GCM_INIT arg1, arg2, arg6, arg7, arg8, r10, r11, r12
+
+ GCM_ENC_DEC arg1, arg2, arg3, arg4, arg5, DEC, single_call
+
+ GCM_COMPLETE arg1, arg2, arg9, arg10, DEC, single_call
+
+exit_dec:
+ FUNC_RESTORE
+
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/avx512/gcm_vaes_avx512.asm b/src/spdk/intel-ipsec-mb/avx512/gcm_vaes_avx512.asm
new file mode 100644
index 000000000..4ef183d31
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx512/gcm_vaes_avx512.asm
@@ -0,0 +1,4272 @@
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; Copyright(c) 2018-2019, Intel Corporation All rights reserved.
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions
+; are met:
+; * Redistributions of source code must retain the above copyright
+; notice, this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright
+; notice, this list of conditions and the following disclaimer in
+; the documentation and/or other materials provided with the
+; distribution.
+; * Neither the name of Intel Corporation nor the names of its
+; contributors may be used to endorse or promote products derived
+; from this software without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;
+; Authors:
+; Erdinc Ozturk
+; Vinodh Gopal
+; James Guilford
+; Tomasz Kantecki
+;
+;
+; References:
+; This code was derived from, and highly optimized relative to, the code described in the paper:
+;       Vinodh Gopal et al. Optimized Galois-Counter-Mode Implementation on Intel Architecture Processors. August, 2010
+; The details of the implementation are explained in:
+;       Erdinc Ozturk et al. Enabling High-Performance Galois-Counter-Mode on Intel Architecture Processors. October, 2012.
+;
+;
+;
+;
+; Assumptions:
+;
+;
+;
+; iv:
+; 0 1 2 3
+; 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+; +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+; | Salt (From the SA) |
+; +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+; | Initialization Vector |
+; | (This is the sequence number from IPSec header) |
+; +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+; | 0x1 |
+; +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+;
+;
+;
+; AAD:
+; AAD will be padded with 0 to the next 16byte multiple
+; for example, assume AAD is a u32 vector
+;
+; if AAD is 8 bytes:
+; AAD[3] = {A0, A1};
+; padded AAD in xmm register = {A1 A0 0 0}
+;
+; 0 1 2 3
+; 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+; +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+; | SPI (A1) |
+; +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+; | 32-bit Sequence Number (A0) |
+; +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+; | 0x0 |
+; +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+;
+; AAD Format with 32-bit Sequence Number
+;
+; if AAD is 12 bytes:
+; AAD[3] = {A0, A1, A2};
+; padded AAD in xmm register = {A2 A1 A0 0}
+;
+; 0 1 2 3
+; 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+; +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+; | SPI (A2) |
+; +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+; | 64-bit Extended Sequence Number {A1,A0} |
+; | |
+; +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+; | 0x0 |
+; +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+;
+; AAD Format with 64-bit Extended Sequence Number
+;
+;
+; aadLen:
+;       The spec defines it as a multiple of 4 bytes.
+;       The code additionally supports any aadLen length.
+;
+; TLen:
+; from the definition of the spec, TLen can only be 8, 12 or 16 bytes.
+;
+; poly = x^128 + x^127 + x^126 + x^121 + 1
+; Throughout the code, one-tab and two-tab indentations are used: one tab for the GHASH part, two tabs for the AES part.
+;
+
+%include "include/os.asm"
+%include "include/reg_sizes.asm"
+%include "include/clear_regs.asm"
+%include "include/gcm_defines.asm"
+%include "include/gcm_keys_vaes_avx512.asm"
+%include "include/memcpy.asm"
+%include "include/aes_common.asm"
+
+%ifndef GCM128_MODE
+%ifndef GCM192_MODE
+%ifndef GCM256_MODE
+%error "No GCM mode selected for gcm_avx512.asm!"
+%endif
+%endif
+%endif
+
+;; Decide on AES-GCM key size to compile for
+%ifdef GCM128_MODE
+%define NROUNDS 9
+%define FN_NAME(x,y) aes_gcm_ %+ x %+ _128 %+ y %+ vaes_avx512
+%endif
+
+%ifdef GCM192_MODE
+%define NROUNDS 11
+%define FN_NAME(x,y) aes_gcm_ %+ x %+ _192 %+ y %+ vaes_avx512
+%endif
+
+%ifdef GCM256_MODE
+%define NROUNDS 13
+%define FN_NAME(x,y) aes_gcm_ %+ x %+ _256 %+ y %+ vaes_avx512
+%endif
+
+section .text
+default rel
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;; Stack frame definition
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+%ifidn __OUTPUT_FORMAT__, win64
+ %define XMM_STORAGE (10*16) ; space for 10 XMM registers
+ %define GP_STORAGE ((9*8) + 24) ; space for 9 GP registers + 24 bytes for 64 byte alignment
+%else
+ %define XMM_STORAGE 0
+ %define GP_STORAGE (8*8) ; space for 7 GP registers + 1 for alignment
+%endif
+%ifdef GCM_BIG_DATA
+%define LOCAL_STORAGE (128*16) ; space for up to 128 AES blocks
+%else
+%define LOCAL_STORAGE (48*16) ; space for up to 48 AES blocks
+%endif
+
+;;; sequence is (bottom-up): GP, XMM, local
+%define STACK_GP_OFFSET 0
+%define STACK_XMM_OFFSET (STACK_GP_OFFSET + GP_STORAGE)
+%define STACK_LOCAL_OFFSET (STACK_XMM_OFFSET + XMM_STORAGE)
+%define STACK_FRAME_SIZE (STACK_LOCAL_OFFSET + LOCAL_STORAGE)
+
+;; for compatibility with stack argument definitions in gcm_defines.asm
+%define STACK_OFFSET 0
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;; Utility Macros
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;;; ===========================================================================
+;;; ===========================================================================
+;;; Horizontal XOR - 4 x 128bits xored together
+%macro VHPXORI4x128 2
+%define %%REG %1 ; [in/out] ZMM with 4x128bits to xor; 128bit output
+%define %%TMP %2 ; [clobbered] ZMM temporary register
+ vextracti64x4 YWORD(%%TMP), %%REG, 1
+ vpxorq YWORD(%%REG), YWORD(%%REG), YWORD(%%TMP)
+ vextracti32x4 XWORD(%%TMP), YWORD(%%REG), 1
+ vpxorq XWORD(%%REG), XWORD(%%REG), XWORD(%%TMP)
+%endmacro ; VHPXORI4x128
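+;;; Note: the two extract/xor pairs fold the four 128-bit lanes of the ZMM
+;;; register down to one: 512 -> 256 bits (vextracti64x4 + vpxorq), then
+;;; 256 -> 128 bits (vextracti32x4 + vpxorq), leaving the xor of all four
+;;; lanes in the low 128 bits.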
+
+;;; ===========================================================================
+;;; ===========================================================================
+;;; Horizontal XOR - 2 x 128bits xored together
+%macro VHPXORI2x128 2
+%define %%REG %1 ; [in/out] YMM/ZMM with 2x128bits to xor; 128bit output
+%define %%TMP %2 ; [clobbered] XMM/YMM/ZMM temporary register
+ vextracti32x4 XWORD(%%TMP), %%REG, 1
+ vpxorq XWORD(%%REG), XWORD(%%REG), XWORD(%%TMP)
+%endmacro ; VHPXORI2x128
+
+;;; ===========================================================================
+;;; ===========================================================================
+;;; schoolbook multiply - 1st step
+%macro VCLMUL_STEP1 6-7
+%define %%KP %1 ; [in] key pointer
+%define %%HI %2 ; [in] previous blocks 4 to 7
+%define %%TMP %3 ; [clobbered] ZMM/YMM/XMM temporary
+%define %%TH %4 ; [out] high product
+%define %%TM %5 ; [out] medium product
+%define %%TL %6 ; [out] low product
+%define %%HKEY %7 ; [in/optional] hash key for multiplication
+
+%if %0 == 6
+ vmovdqu64 %%TMP, [%%KP + HashKey_4]
+%else
+ vmovdqa64 %%TMP, %%HKEY
+%endif
+ vpclmulqdq %%TH, %%HI, %%TMP, 0x11 ; %%T5 = a1*b1
+ vpclmulqdq %%TL, %%HI, %%TMP, 0x00 ; %%T7 = a0*b0
+ vpclmulqdq %%TM, %%HI, %%TMP, 0x01 ; %%T6 = a1*b0
+ vpclmulqdq %%TMP, %%HI, %%TMP, 0x10 ; %%T4 = a0*b1
+ vpxorq %%TM, %%TM, %%TMP ; [%%TH : %%TM : %%TL]
+%endmacro ; VCLMUL_STEP1
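+;;; Note: this is the first half of a "schoolbook" carry-less multiply. Each
+;;; 128-bit lane of %%HI is multiplied by the matching hash key power:
+;;; vpclmulqdq with imm8 0x11/0x00 yields the high (a1*b1) and low (a0*b0)
+;;; partial products, while 0x01 and 0x10 yield the two cross terms that are
+;;; xored together into the middle term %%TM. VCLMUL_STEP2 adds the remaining
+;;; lanes and folds [TH:TM:TL] into a 256-bit value ready for reduction.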
+
+;;; ===========================================================================
+;;; ===========================================================================
+;;; schoolbook multiply - 2nd step
+%macro VCLMUL_STEP2 9-11
+%define %%KP %1 ; [in] key pointer
+%define %%HI %2 ; [out] ghash high 128 bits
+%define %%LO %3 ; [in/out] cipher text blocks 0-3 (in); ghash low 128 bits (out)
+%define %%TMP0 %4 ; [clobbered] ZMM/YMM/XMM temporary
+%define %%TMP1 %5 ; [clobbered] ZMM/YMM/XMM temporary
+%define %%TMP2 %6 ; [clobbered] ZMM/YMM/XMM temporary
+%define %%TH %7 ; [in] high product
+%define %%TM %8 ; [in] medium product
+%define %%TL %9 ; [in] low product
+%define %%HKEY %10 ; [in/optional] hash key for multiplication
+%define %%HXOR %11 ; [in/optional] type of horizontal xor (4 - 4x128; 2 - 2x128; 1 - none)
+
+%if %0 == 9
+ vmovdqu64 %%TMP0, [%%KP + HashKey_8]
+%else
+ vmovdqa64 %%TMP0, %%HKEY
+%endif
+ vpclmulqdq %%TMP1, %%LO, %%TMP0, 0x10 ; %%TMP1 = a0*b1
+ vpclmulqdq %%TMP2, %%LO, %%TMP0, 0x11 ; %%TMP2 = a1*b1
+ vpxorq %%TH, %%TH, %%TMP2
+ vpclmulqdq %%TMP2, %%LO, %%TMP0, 0x00 ; %%TMP2 = a0*b0
+ vpxorq %%TL, %%TL, %%TMP2
+ vpclmulqdq %%TMP0, %%LO, %%TMP0, 0x01 ; %%TMP0 = a1*b0
+ vpternlogq %%TM, %%TMP1, %%TMP0, 0x96 ; %%TM = TM xor TMP1 xor TMP0
+
+ ;; finish multiplications
+ vpsrldq %%TMP2, %%TM, 8
+ vpxorq %%HI, %%TH, %%TMP2
+ vpslldq %%TMP2, %%TM, 8
+ vpxorq %%LO, %%TL, %%TMP2
+
+        ;; xor 128-bit words horizontally and compute [(X8*H1) + (X7*H2) + ... + ((X1+Y0)*H8)]
+ ;; note: (X1+Y0) handled elsewhere
+%if %0 < 11
+ VHPXORI4x128 %%HI, %%TMP2
+ VHPXORI4x128 %%LO, %%TMP1
+%else
+%if %%HXOR == 4
+ VHPXORI4x128 %%HI, %%TMP2
+ VHPXORI4x128 %%LO, %%TMP1
+%elif %%HXOR == 2
+ VHPXORI2x128 %%HI, %%TMP2
+ VHPXORI2x128 %%LO, %%TMP1
+%endif ; HXOR
+ ;; for HXOR == 1 there is nothing to be done
+%endif ; !(%0 < 11)
+ ;; HIx holds top 128 bits
+ ;; LOx holds low 128 bits
+ ;; - further reductions to follow
+%endmacro ; VCLMUL_STEP2
+
+;;; ===========================================================================
+;;; ===========================================================================
+;;; AVX512 reduction macro
+%macro VCLMUL_REDUCE 6
+%define %%OUT %1 ; [out] zmm/ymm/xmm: result (must not be %%TMP1 or %%HI128)
+%define %%POLY %2 ; [in] zmm/ymm/xmm: polynomial
+%define %%HI128 %3 ; [in] zmm/ymm/xmm: high 128b of hash to reduce
+%define %%LO128 %4 ; [in] zmm/ymm/xmm: low 128b of hash to reduce
+%define %%TMP0 %5 ; [in] zmm/ymm/xmm: temporary register
+%define %%TMP1 %6 ; [in] zmm/ymm/xmm: temporary register
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;; first phase of the reduction
+ vpclmulqdq %%TMP0, %%POLY, %%LO128, 0x01
+ vpslldq %%TMP0, %%TMP0, 8 ; shift-L 2 DWs
+ vpxorq %%TMP0, %%LO128, %%TMP0 ; first phase of the reduction complete
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;; second phase of the reduction
+ vpclmulqdq %%TMP1, %%POLY, %%TMP0, 0x00
+ vpsrldq %%TMP1, %%TMP1, 4 ; shift-R only 1-DW to obtain 2-DWs shift-R
+
+ vpclmulqdq %%OUT, %%POLY, %%TMP0, 0x10
+ vpslldq %%OUT, %%OUT, 4 ; shift-L 1-DW to obtain result with no shifts
+
+ vpternlogq %%OUT, %%TMP1, %%HI128, 0x96 ; OUT/GHASH = OUT xor TMP1 xor HI128
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+%endmacro
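+;;; Note: the macro folds the 256-bit carry-less product (HI128:LO128) back to
+;;; 128 bits modulo the GHASH polynomial using the precomputed POLY2 constant.
+;;; Phase one multiplies the low half by the polynomial and folds it up
+;;; (vpclmulqdq + vpslldq); phase two folds the partial result once more and
+;;; combines it with the high half in a single vpternlogq (imm8 0x96 =
+;;; three-way xor).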
+
+;;; ===========================================================================
+;;; ===========================================================================
+;;; schoolbook multiply (1 to 8 blocks) - 1st step
+%macro VCLMUL_1_TO_8_STEP1 8
+%define %%KP %1 ; [in] key pointer
+%define %%HI %2 ; [in] ZMM ciphered blocks 4 to 7
+%define %%TMP1 %3 ; [clobbered] ZMM temporary
+%define %%TMP2 %4 ; [clobbered] ZMM temporary
+%define %%TH %5 ; [out] ZMM high product
+%define %%TM %6 ; [out] ZMM medium product
+%define %%TL %7 ; [out] ZMM low product
+%define %%NBLOCKS %8 ; [in] number of blocks to ghash (0 to 8)
+
+%if %%NBLOCKS == 8
+ VCLMUL_STEP1 %%KP, %%HI, %%TMP1, %%TH, %%TM, %%TL
+%elif %%NBLOCKS == 7
+ vmovdqu64 %%TMP2, [%%KP + HashKey_3]
+ vmovdqa64 %%TMP1, [rel mask_out_top_block]
+ vpandq %%TMP2, %%TMP1
+ vpandq %%HI, %%TMP1
+ VCLMUL_STEP1 NULL, %%HI, %%TMP1, %%TH, %%TM, %%TL, %%TMP2
+%elif %%NBLOCKS == 6
+ vmovdqu64 YWORD(%%TMP2), [%%KP + HashKey_2]
+ VCLMUL_STEP1 NULL, YWORD(%%HI), YWORD(%%TMP1), \
+ YWORD(%%TH), YWORD(%%TM), YWORD(%%TL), YWORD(%%TMP2)
+%elif %%NBLOCKS == 5
+ vmovdqu64 XWORD(%%TMP2), [%%KP + HashKey_1]
+ VCLMUL_STEP1 NULL, XWORD(%%HI), XWORD(%%TMP1), \
+ XWORD(%%TH), XWORD(%%TM), XWORD(%%TL), XWORD(%%TMP2)
+%else
+ vpxorq %%TH, %%TH
+ vpxorq %%TM, %%TM
+ vpxorq %%TL, %%TL
+%endif
+%endmacro ; VCLMUL_1_TO_8_STEP1
+
+;;; ===========================================================================
+;;; ===========================================================================
+;;; schoolbook multiply (1 to 8 blocks) - 2nd step
+%macro VCLMUL_1_TO_8_STEP2 10
+%define %%KP %1 ; [in] key pointer
+%define %%HI %2 ; [out] ZMM ghash high 128bits
+%define %%LO %3 ; [in/out] ZMM ciphered blocks 0 to 3 (in); ghash low 128bits (out)
+%define %%TMP0 %4 ; [clobbered] ZMM temporary
+%define %%TMP1 %5 ; [clobbered] ZMM temporary
+%define %%TMP2 %6 ; [clobbered] ZMM temporary
+%define %%TH %7 ; [in/clobbered] ZMM high sum
+%define %%TM %8 ; [in/clobbered] ZMM medium sum
+%define %%TL %9 ; [in/clobbered] ZMM low sum
+%define %%NBLOCKS %10 ; [in] number of blocks to ghash (0 to 8)
+
+%if %%NBLOCKS == 8
+ VCLMUL_STEP2 %%KP, %%HI, %%LO, %%TMP0, %%TMP1, %%TMP2, %%TH, %%TM, %%TL
+%elif %%NBLOCKS == 7
+ vmovdqu64 %%TMP2, [%%KP + HashKey_7]
+ VCLMUL_STEP2 NULL, %%HI, %%LO, %%TMP0, %%TMP1, %%TMP2, %%TH, %%TM, %%TL, %%TMP2, 4
+%elif %%NBLOCKS == 6
+ vmovdqu64 %%TMP2, [%%KP + HashKey_6]
+ VCLMUL_STEP2 NULL, %%HI, %%LO, %%TMP0, %%TMP1, %%TMP2, %%TH, %%TM, %%TL, %%TMP2, 4
+%elif %%NBLOCKS == 5
+ vmovdqu64 %%TMP2, [%%KP + HashKey_5]
+ VCLMUL_STEP2 NULL, %%HI, %%LO, %%TMP0, %%TMP1, %%TMP2, %%TH, %%TM, %%TL, %%TMP2, 4
+%elif %%NBLOCKS == 4
+ vmovdqu64 %%TMP2, [%%KP + HashKey_4]
+ VCLMUL_STEP2 NULL, %%HI, %%LO, %%TMP0, %%TMP1, %%TMP2, %%TH, %%TM, %%TL, %%TMP2, 4
+%elif %%NBLOCKS == 3
+ vmovdqu64 %%TMP2, [%%KP + HashKey_3]
+ vmovdqa64 %%TMP1, [rel mask_out_top_block]
+ vpandq %%TMP2, %%TMP1
+ vpandq %%LO, %%TMP1
+ VCLMUL_STEP2 NULL, %%HI, %%LO, %%TMP0, %%TMP1, %%TMP2, %%TH, %%TM, %%TL, %%TMP2, 4
+%elif %%NBLOCKS == 2
+ vmovdqu64 YWORD(%%TMP2), [%%KP + HashKey_2]
+ VCLMUL_STEP2 NULL, YWORD(%%HI), YWORD(%%LO), \
+ YWORD(%%TMP0), YWORD(%%TMP1), YWORD(%%TMP2), \
+ YWORD(%%TH), YWORD(%%TM), YWORD(%%TL), YWORD(%%TMP2), 2
+%elif %%NBLOCKS == 1
+ vmovdqu64 XWORD(%%TMP2), [%%KP + HashKey_1]
+ VCLMUL_STEP2 NULL, XWORD(%%HI), XWORD(%%LO), \
+ XWORD(%%TMP0), XWORD(%%TMP1), XWORD(%%TMP2), \
+ XWORD(%%TH), XWORD(%%TM), XWORD(%%TL), XWORD(%%TMP2), 1
+%else
+ vpxorq %%HI, %%HI
+ vpxorq %%LO, %%LO
+%endif
+%endmacro ; VCLMUL_1_TO_8_STEP2
+
+;;; ===========================================================================
+;;; ===========================================================================
+;;; GHASH 1 to 16 blocks of cipher text
+;;; - performs reduction at the end
+;;; - can take intermediate GHASH sums as input
+%macro GHASH_1_TO_16 20
+%define %%KP %1 ; [in] pointer to expanded keys
+%define %%GHASH %2 ; [out] ghash output
+%define %%T1 %3 ; [clobbered] temporary ZMM
+%define %%T2 %4 ; [clobbered] temporary ZMM
+%define %%T3 %5 ; [clobbered] temporary ZMM
+%define %%T4 %6 ; [clobbered] temporary ZMM
+%define %%T5 %7 ; [clobbered] temporary ZMM
+%define %%T6 %8 ; [clobbered] temporary ZMM
+%define %%T7 %9 ; [clobbered] temporary ZMM
+%define %%T8 %10 ; [clobbered] temporary ZMM
+%define %%T9 %11 ; [clobbered] temporary ZMM
+%define %%GH            %12     ; [in/clobbered] ghash sum (high) or "no_zmm"
+%define %%GL            %13     ; [in/clobbered] ghash sum (low) or "no_zmm"
+%define %%GM            %14     ; [in/clobbered] ghash sum (medium) or "no_zmm"
+%define %%AAD_HASH_IN %15 ; [in] input hash value
+%define %%CIPHER_IN0 %16 ; [in] ZMM with cipher text blocks 0-3
+%define %%CIPHER_IN1 %17 ; [in] ZMM with cipher text blocks 4-7
+%define %%CIPHER_IN2 %18 ; [in] ZMM with cipher text blocks 8-11
+%define %%CIPHER_IN3 %19 ; [in] ZMM with cipher text blocks 12-15
+%define %%NUM_BLOCKS %20 ; [in] numerical value, number of blocks
+
+%define %%T0H %%T1
+%define %%T0L %%T2
+%define %%T0M1 %%T3
+%define %%T0M2 %%T4
+
+%define %%T1H %%T5
+%define %%T1L %%T6
+%define %%T1M1 %%T7
+%define %%T1M2 %%T8
+
+%define %%HK %%T9
+
+%assign hashk HashKey_ %+ %%NUM_BLOCKS
+%assign reg_idx 0
+%assign blocks_left %%NUM_BLOCKS
+
+ vpxorq %%CIPHER_IN0, %%CIPHER_IN0, %%AAD_HASH_IN
+
+%assign first_result 1
+
+%ifnidn %%GH, no_zmm
+%ifnidn %%GM, no_zmm
+%ifnidn %%GL, no_zmm
+ ;; GHASH sums passed in to be updated and
+ ;; reduced at the end
+ vmovdqa64 %%T0H, %%GH
+ vmovdqa64 %%T0L, %%GL
+ vmovdqa64 %%T0M1, %%GM
+ vpxorq %%T0M2, %%T0M2
+%assign first_result 0
+%endif
+%endif
+%endif
+
+%rep (blocks_left / 4)
+%xdefine %%REG_IN %%CIPHER_IN %+ reg_idx
+ vmovdqu64 %%HK, [%%KP + hashk]
+%if first_result == 1
+ vpclmulqdq %%T0H, %%REG_IN, %%HK, 0x11 ; H = a1*b1
+ vpclmulqdq %%T0L, %%REG_IN, %%HK, 0x00 ; L = a0*b0
+ vpclmulqdq %%T0M1, %%REG_IN, %%HK, 0x01 ; M1 = a1*b0
+ vpclmulqdq %%T0M2, %%REG_IN, %%HK, 0x10 ; TM2 = a0*b1
+%assign first_result 0
+%else
+ vpclmulqdq %%T1H, %%REG_IN, %%HK, 0x11 ; H = a1*b1
+ vpclmulqdq %%T1L, %%REG_IN, %%HK, 0x00 ; L = a0*b0
+ vpclmulqdq %%T1M1, %%REG_IN, %%HK, 0x01 ; M1 = a1*b0
+ vpclmulqdq %%T1M2, %%REG_IN, %%HK, 0x10 ; M2 = a0*b1
+ vpxorq %%T0H, %%T0H, %%T1H
+ vpxorq %%T0L, %%T0L, %%T1L
+ vpxorq %%T0M1, %%T0M1, %%T1M1
+ vpxorq %%T0M2, %%T0M2, %%T1M2
+%endif
+%undef %%REG_IN
+%assign reg_idx (reg_idx + 1)
+%assign hashk (hashk + 64)
+%assign blocks_left (blocks_left - 4)
+%endrep
+
+%if blocks_left > 0
+;; There are 1, 2 or 3 blocks left to process.
+;; It may also be that they are the only blocks to process.
+
+%xdefine %%REG_IN %%CIPHER_IN %+ reg_idx
+
+%if first_result == 1
+;; Case where %%NUM_BLOCKS = 1, 2 or 3
+%xdefine %%OUT_H %%T0H
+%xdefine %%OUT_L %%T0L
+%xdefine %%OUT_M1 %%T0M1
+%xdefine %%OUT_M2 %%T0M2
+%else
+%xdefine %%OUT_H %%T1H
+%xdefine %%OUT_L %%T1L
+%xdefine %%OUT_M1 %%T1M1
+%xdefine %%OUT_M2 %%T1M2
+%endif
+
+%if blocks_left == 1
+ vmovdqu64 XWORD(%%HK), [%%KP + hashk]
+ vpclmulqdq XWORD(%%OUT_H), XWORD(%%REG_IN), XWORD(%%HK), 0x11 ; %%TH = a1*b1
+ vpclmulqdq XWORD(%%OUT_L), XWORD(%%REG_IN), XWORD(%%HK), 0x00 ; %%TL = a0*b0
+ vpclmulqdq XWORD(%%OUT_M1), XWORD(%%REG_IN), XWORD(%%HK), 0x01 ; %%TM1 = a1*b0
+ vpclmulqdq XWORD(%%OUT_M2), XWORD(%%REG_IN), XWORD(%%HK), 0x10 ; %%TM2 = a0*b1
+%elif blocks_left == 2
+ vmovdqu64 YWORD(%%HK), [%%KP + hashk]
+ vpclmulqdq YWORD(%%OUT_H), YWORD(%%REG_IN), YWORD(%%HK), 0x11 ; %%TH = a1*b1
+ vpclmulqdq YWORD(%%OUT_L), YWORD(%%REG_IN), YWORD(%%HK), 0x00 ; %%TL = a0*b0
+ vpclmulqdq YWORD(%%OUT_M1), YWORD(%%REG_IN), YWORD(%%HK), 0x01 ; %%TM1 = a1*b0
+ vpclmulqdq YWORD(%%OUT_M2), YWORD(%%REG_IN), YWORD(%%HK), 0x10 ; %%TM2 = a0*b1
+%else ; blocks_left == 3
+ vmovdqu64 YWORD(%%HK), [%%KP + hashk]
+ vinserti64x2 %%HK, [%%KP + hashk + 32], 2
+ vpclmulqdq %%OUT_H, %%REG_IN, %%HK, 0x11 ; %%TH = a1*b1
+ vpclmulqdq %%OUT_L, %%REG_IN, %%HK, 0x00 ; %%TL = a0*b0
+ vpclmulqdq %%OUT_M1, %%REG_IN, %%HK, 0x01 ; %%TM1 = a1*b0
+ vpclmulqdq %%OUT_M2, %%REG_IN, %%HK, 0x10 ; %%TM2 = a0*b1
+%endif ; blocks_left
+
+%undef %%REG_IN
+%undef %%OUT_H
+%undef %%OUT_L
+%undef %%OUT_M1
+%undef %%OUT_M2
+
+%if first_result != 1
+ vpxorq %%T0H, %%T0H, %%T1H
+ vpxorq %%T0L, %%T0L, %%T1L
+ vpxorq %%T0M1, %%T0M1, %%T1M1
+ vpxorq %%T0M2, %%T0M2, %%T1M2
+%endif
+
+%endif ; blocks_left > 0
+
+ ;; integrate TM into TH and TL
+ vpxorq %%T0M1, %%T0M1, %%T0M2
+ vpsrldq %%T1M1, %%T0M1, 8
+ vpslldq %%T1M2, %%T0M1, 8
+ vpxorq %%T0H, %%T0H, %%T1M1
+ vpxorq %%T0L, %%T0L, %%T1M2
+
+ ;; add TH and TL 128-bit words horizontally
+ VHPXORI4x128 %%T0H, %%T1M1
+ VHPXORI4x128 %%T0L, %%T1M2
+
+ ;; reduction
+ vmovdqa64 XWORD(%%HK), [rel POLY2]
+ VCLMUL_REDUCE XWORD(%%GHASH), XWORD(%%HK), \
+ XWORD(%%T0H), XWORD(%%T0L), XWORD(%%T0M1), XWORD(%%T0M2)
+%endmacro
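+;;; Note: the %rep loop above consumes four ciphertext blocks per iteration
+;;; (one ZMM register, hash key offset advanced by 64 bytes), accumulating the
+;;; per-lane partial products in T0H/T0L/T0M1/T0M2. A remaining 1-3 block tail
+;;; is handled with XMM/YMM-width multiplies (the 3-block case uses a YMM load
+;;; plus vinserti64x2), and a single VCLMUL_REDUCE at the end performs the
+;;; modular reduction.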
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;; GHASH_MUL MACRO to implement: Data*HashKey mod (128,127,126,121,0)
+;;; Input: A and B (128-bits each, bit-reflected)
+;;; Output: C = A*B*x mod poly, (i.e. >>1 )
+;;; To compute GH = GH*HashKey mod poly, give HK = HashKey<<1 mod poly as input
+;;; GH = GH * HK * x mod poly which is equivalent to GH*HashKey mod poly.
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+%macro GHASH_MUL 7
+%define %%GH %1 ; 16 Bytes
+%define %%HK %2 ; 16 Bytes
+%define %%T1 %3
+%define %%T2 %4
+%define %%T3 %5
+%define %%T4 %6
+%define %%T5 %7
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+ vpclmulqdq %%T1, %%GH, %%HK, 0x11 ; %%T1 = a1*b1
+ vpclmulqdq %%T2, %%GH, %%HK, 0x00 ; %%T2 = a0*b0
+ vpclmulqdq %%T3, %%GH, %%HK, 0x01 ; %%T3 = a1*b0
+ vpclmulqdq %%GH, %%GH, %%HK, 0x10 ; %%GH = a0*b1
+ vpxorq %%GH, %%GH, %%T3
+
+
+ vpsrldq %%T3, %%GH, 8 ; shift-R %%GH 2 DWs
+ vpslldq %%GH, %%GH, 8 ; shift-L %%GH 2 DWs
+
+ vpxorq %%T1, %%T1, %%T3
+ vpxorq %%GH, %%GH, %%T2
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;first phase of the reduction
+ vmovdqu64 %%T3, [rel POLY2]
+
+ vpclmulqdq %%T2, %%T3, %%GH, 0x01
+ vpslldq %%T2, %%T2, 8 ; shift-L %%T2 2 DWs
+
+ vpxorq %%GH, %%GH, %%T2 ; first phase of the reduction complete
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;second phase of the reduction
+ vpclmulqdq %%T2, %%T3, %%GH, 0x00
+ vpsrldq %%T2, %%T2, 4 ; shift-R only 1-DW to obtain 2-DWs shift-R
+
+ vpclmulqdq %%GH, %%T3, %%GH, 0x10
+ vpslldq %%GH, %%GH, 4 ; Shift-L 1-DW to obtain result with no shifts
+
+ ; second phase of the reduction complete, the result is in %%GH
+ vpternlogq %%GH, %%T1, %%T2, 0x96 ; GH = GH xor T1 xor T2
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+%endmacro
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;; In PRECOMPUTE, the commands filling Hashkey_i_k are not required for avx512
+;;; functions, but are kept to allow users to switch cpu architectures between calls
+;;; of pre, init, update, and finalize.
+%macro PRECOMPUTE 8
+%define %%GDATA %1
+%define %%HK %2
+%define %%T1 %3
+%define %%T2 %4
+%define %%T3 %5
+%define %%T4 %6
+%define %%T5 %7
+%define %%T6 %8
+
+ vmovdqa %%T5, %%HK
+
+ ;; GHASH keys 2 to 48 or 128
+%ifdef GCM_BIG_DATA
+%assign max_hkey_idx 128
+%else
+%assign max_hkey_idx 48
+%endif
+
+%assign i 2
+%rep (max_hkey_idx - 1)
+ GHASH_MUL %%T5, %%HK, %%T1, %%T3, %%T4, %%T6, %%T2 ; %%T5 = HashKey^i<<1 mod poly
+ vmovdqu [%%GDATA + HashKey_ %+ i], %%T5 ; [HashKey_i] = %%T5
+%assign i (i + 1)
+%endrep
+
+%endmacro
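+;;; Note: after this macro the key structure holds consecutive powers of the
+;;; hash key (HashKey_2 = H^2<<1 mod poly up to HashKey_48, or HashKey_128
+;;; when GCM_BIG_DATA is defined). Storing the powers lets the multi-block
+;;; GHASH paths multiply many ciphertext blocks by different key powers in
+;;; parallel and defer the reduction to a single step at the end.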
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;; READ_SMALL_DATA_INPUT
+;;; Packs an xmm register with data when the input is less than or equal to 16 bytes
+;;; Returns 0 if the data has length 0
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+%macro READ_SMALL_DATA_INPUT 5
+%define %%OUTPUT %1 ; [out] xmm register
+%define %%INPUT %2 ; [in] buffer pointer to read from
+%define %%LENGTH %3 ; [in] number of bytes to read
+%define %%TMP1 %4 ; [clobbered]
+%define %%MASK %5 ; [out] k1 to k7 register to store the partial block mask
+
+ cmp %%LENGTH, 16
+ jge %%_read_small_data_ge16
+ lea %%TMP1, [rel byte_len_to_mask_table]
+%ifidn __OUTPUT_FORMAT__, win64
+ add %%TMP1, %%LENGTH
+ add %%TMP1, %%LENGTH
+ kmovw %%MASK, [%%TMP1]
+%else
+ kmovw %%MASK, [%%TMP1 + %%LENGTH*2]
+%endif
+ vmovdqu8 %%OUTPUT{%%MASK}{z}, [%%INPUT]
+ jmp %%_read_small_data_end
+%%_read_small_data_ge16:
+ VX512LDR %%OUTPUT, [%%INPUT]
+ mov %%TMP1, 0xffff
+ kmovq %%MASK, %%TMP1
+%%_read_small_data_end:
+%endmacro ; READ_SMALL_DATA_INPUT
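+;;; Note: for lengths below 16 the macro reads a 16-bit mask with the low
+;;; %%LENGTH bits set from byte_len_to_mask_table and performs one masked,
+;;; zero-filling vmovdqu8 load, so bytes beyond the buffer are never read.
+;;; For lengths of 16 or more it does a full 16-byte load and sets the mask
+;;; to 0xffff.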
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; CALC_AAD_HASH: Calculates the hash of the data which will not be encrypted.
+; Input: The input data (A_IN), that data's length (A_LEN), and the hash key (HASH_KEY).
+; Output: The hash of the data (AAD_HASH).
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+%macro CALC_AAD_HASH 18
+%define %%A_IN %1 ; [in] AAD text pointer
+%define %%A_LEN %2 ; [in] AAD length
+%define %%AAD_HASH %3 ; [out] xmm ghash value
+%define %%GDATA_KEY %4 ; [in] pointer to keys
+%define %%ZT0 %5 ; [clobbered] ZMM register
+%define %%ZT1 %6 ; [clobbered] ZMM register
+%define %%ZT2 %7 ; [clobbered] ZMM register
+%define %%ZT3 %8 ; [clobbered] ZMM register
+%define %%ZT4 %9 ; [clobbered] ZMM register
+%define %%ZT5 %10 ; [clobbered] ZMM register
+%define %%ZT6 %11 ; [clobbered] ZMM register
+%define %%ZT7 %12 ; [clobbered] ZMM register
+%define %%ZT8 %13 ; [clobbered] ZMM register
+%define %%ZT9 %14 ; [clobbered] ZMM register
+%define %%T1 %15 ; [clobbered] GP register
+%define %%T2 %16 ; [clobbered] GP register
+%define %%T3 %17 ; [clobbered] GP register
+%define %%MASKREG %18 ; [clobbered] mask register
+
+%define %%SHFMSK %%ZT9
+%define %%POLY %%ZT8
+%define %%TH %%ZT7
+%define %%TM %%ZT6
+%define %%TL %%ZT5
+
+ mov %%T1, %%A_IN ; T1 = AAD
+ mov %%T2, %%A_LEN ; T2 = aadLen
+ vpxorq %%AAD_HASH, %%AAD_HASH
+
+ vmovdqa64 %%SHFMSK, [rel SHUF_MASK]
+ vmovdqa64 %%POLY, [rel POLY2]
+
+%%_get_AAD_loop128:
+ cmp %%T2, 128
+ jl %%_exit_AAD_loop128
+
+ vmovdqu64 %%ZT2, [%%T1 + 64*0] ; LO blocks (0-3)
+ vmovdqu64 %%ZT1, [%%T1 + 64*1] ; HI blocks (4-7)
+ vpshufb %%ZT2, %%SHFMSK
+ vpshufb %%ZT1, %%SHFMSK
+
+ vpxorq %%ZT2, %%ZT2, ZWORD(%%AAD_HASH)
+
+ VCLMUL_STEP1 %%GDATA_KEY, %%ZT1, %%ZT0, %%TH, %%TM, %%TL
+ VCLMUL_STEP2 %%GDATA_KEY, %%ZT1, %%ZT2, %%ZT0, %%ZT3, %%ZT4, %%TH, %%TM, %%TL
+
+ ;; result in %%ZT1(H):%%ZT2(L)
+ ;; reduce and put the result in AAD_HASH
+ VCLMUL_REDUCE %%AAD_HASH, XWORD(%%POLY), XWORD(%%ZT1), XWORD(%%ZT2), \
+ XWORD(%%ZT0), XWORD(%%ZT3)
+
+ sub %%T2, 128
+ je %%_CALC_AAD_done
+
+ add %%T1, 128
+ jmp %%_get_AAD_loop128
+
+%%_exit_AAD_loop128:
+ or %%T2, %%T2
+ jz %%_CALC_AAD_done
+
+ ;; prep mask source address
+ lea %%T3, [rel byte64_len_to_mask_table]
+ lea %%T3, [%%T3 + %%T2*8]
+
+ ;; calculate number of blocks to ghash (including partial bytes)
+ add %%T2, 15
+ and %%T2, -16 ; 1 to 8 blocks possible here
+ shr %%T2, 4
+ cmp %%T2, 7
+ je %%_AAD_blocks_7
+ cmp %%T2, 6
+ je %%_AAD_blocks_6
+ cmp %%T2, 5
+ je %%_AAD_blocks_5
+ cmp %%T2, 4
+ je %%_AAD_blocks_4
+ cmp %%T2, 3
+ je %%_AAD_blocks_3
+ cmp %%T2, 2
+ je %%_AAD_blocks_2
+ cmp %%T2, 1
+ je %%_AAD_blocks_1
+ ;; fall through for 8 blocks
+
+ ;; The flow of each of these cases is identical:
+ ;; - load blocks plain text
+ ;; - shuffle loaded blocks
+ ;; - xor in current hash value into block 0
+        ;; - perform multiplications with the ghash keys
+ ;; - jump to reduction code
+%%_AAD_blocks_8:
+ sub %%T3, (64 * 8)
+ kmovq %%MASKREG, [%%T3]
+ vmovdqu8 %%ZT2, [%%T1 + 64*0]
+ vmovdqu8 %%ZT1{%%MASKREG}{z}, [%%T1 + 64*1]
+ vpshufb %%ZT2, %%SHFMSK
+ vpshufb %%ZT1, %%SHFMSK
+ vpxorq %%ZT2, %%ZT2, ZWORD(%%AAD_HASH) ; xor in current ghash
+ VCLMUL_1_TO_8_STEP1 %%GDATA_KEY, %%ZT1, %%ZT0, %%ZT3, %%TH, %%TM, %%TL, 8
+ VCLMUL_1_TO_8_STEP2 %%GDATA_KEY, %%ZT1, %%ZT2, \
+ %%ZT0, %%ZT3, %%ZT4, \
+ %%TH, %%TM, %%TL, 8
+ jmp %%_AAD_blocks_done
+
+%%_AAD_blocks_7:
+ sub %%T3, (64 * 8)
+ kmovq %%MASKREG, [%%T3]
+ vmovdqu8 %%ZT2, [%%T1 + 64*0]
+ vmovdqu8 %%ZT1{%%MASKREG}{z}, [%%T1 + 64*1]
+ vpshufb %%ZT2, %%SHFMSK
+ vpshufb %%ZT1, %%SHFMSK
+ vpxorq %%ZT2, %%ZT2, ZWORD(%%AAD_HASH) ; xor in current ghash
+ VCLMUL_1_TO_8_STEP1 %%GDATA_KEY, %%ZT1, %%ZT0, %%ZT3, %%TH, %%TM, %%TL, 7
+ VCLMUL_1_TO_8_STEP2 %%GDATA_KEY, %%ZT1, %%ZT2, \
+ %%ZT0, %%ZT3, %%ZT4, \
+ %%TH, %%TM, %%TL, 7
+ jmp %%_AAD_blocks_done
+
+%%_AAD_blocks_6:
+ sub %%T3, (64 * 8)
+ kmovq %%MASKREG, [%%T3]
+ vmovdqu8 %%ZT2, [%%T1 + 64*0]
+ vmovdqu8 YWORD(%%ZT1){%%MASKREG}{z}, [%%T1 + 64*1]
+ vpshufb %%ZT2, %%SHFMSK
+ vpshufb YWORD(%%ZT1), YWORD(%%SHFMSK)
+ vpxorq %%ZT2, %%ZT2, ZWORD(%%AAD_HASH)
+ VCLMUL_1_TO_8_STEP1 %%GDATA_KEY, %%ZT1, %%ZT0, %%ZT3, %%TH, %%TM, %%TL, 6
+ VCLMUL_1_TO_8_STEP2 %%GDATA_KEY, %%ZT1, %%ZT2, \
+ %%ZT0, %%ZT3, %%ZT4, \
+ %%TH, %%TM, %%TL, 6
+ jmp %%_AAD_blocks_done
+
+%%_AAD_blocks_5:
+ sub %%T3, (64 * 8)
+ kmovq %%MASKREG, [%%T3]
+ vmovdqu8 %%ZT2, [%%T1 + 64*0]
+ vmovdqu8 XWORD(%%ZT1){%%MASKREG}{z}, [%%T1 + 64*1]
+ vpshufb %%ZT2, %%SHFMSK
+ vpshufb XWORD(%%ZT1), XWORD(%%SHFMSK)
+ vpxorq %%ZT2, %%ZT2, ZWORD(%%AAD_HASH)
+ VCLMUL_1_TO_8_STEP1 %%GDATA_KEY, %%ZT1, %%ZT0, %%ZT3, %%TH, %%TM, %%TL, 5
+ VCLMUL_1_TO_8_STEP2 %%GDATA_KEY, %%ZT1, %%ZT2, \
+ %%ZT0, %%ZT3, %%ZT4, \
+ %%TH, %%TM, %%TL, 5
+ jmp %%_AAD_blocks_done
+
+%%_AAD_blocks_4:
+ kmovq %%MASKREG, [%%T3]
+ vmovdqu8 %%ZT2{%%MASKREG}{z}, [%%T1 + 64*0]
+ vpshufb %%ZT2, %%SHFMSK
+ vpxorq %%ZT2, %%ZT2, ZWORD(%%AAD_HASH)
+ VCLMUL_1_TO_8_STEP1 %%GDATA_KEY, %%ZT1, %%ZT0, %%ZT3, %%TH, %%TM, %%TL, 4
+ VCLMUL_1_TO_8_STEP2 %%GDATA_KEY, %%ZT1, %%ZT2, \
+ %%ZT0, %%ZT3, %%ZT4, \
+ %%TH, %%TM, %%TL, 4
+ jmp %%_AAD_blocks_done
+
+%%_AAD_blocks_3:
+ kmovq %%MASKREG, [%%T3]
+ vmovdqu8 %%ZT2{%%MASKREG}{z}, [%%T1 + 64*0]
+ vpshufb %%ZT2, %%SHFMSK
+ vpxorq %%ZT2, %%ZT2, ZWORD(%%AAD_HASH)
+ VCLMUL_1_TO_8_STEP1 %%GDATA_KEY, %%ZT1, %%ZT0, %%ZT3, %%TH, %%TM, %%TL, 3
+ VCLMUL_1_TO_8_STEP2 %%GDATA_KEY, %%ZT1, %%ZT2, \
+ %%ZT0, %%ZT3, %%ZT4, \
+ %%TH, %%TM, %%TL, 3
+ jmp %%_AAD_blocks_done
+
+%%_AAD_blocks_2:
+ kmovq %%MASKREG, [%%T3]
+ vmovdqu8 YWORD(%%ZT2){%%MASKREG}{z}, [%%T1 + 64*0]
+ vpshufb YWORD(%%ZT2), YWORD(%%SHFMSK)
+ vpxorq %%ZT2, %%ZT2, ZWORD(%%AAD_HASH)
+ VCLMUL_1_TO_8_STEP1 %%GDATA_KEY, %%ZT1, %%ZT0, %%ZT3, %%TH, %%TM, %%TL, 2
+ VCLMUL_1_TO_8_STEP2 %%GDATA_KEY, %%ZT1, %%ZT2, \
+ %%ZT0, %%ZT3, %%ZT4, \
+ %%TH, %%TM, %%TL, 2
+ jmp %%_AAD_blocks_done
+
+%%_AAD_blocks_1:
+ kmovq %%MASKREG, [%%T3]
+ vmovdqu8 XWORD(%%ZT2){%%MASKREG}{z}, [%%T1 + 64*0]
+ vpshufb XWORD(%%ZT2), XWORD(%%SHFMSK)
+ vpxorq %%ZT2, %%ZT2, ZWORD(%%AAD_HASH)
+ VCLMUL_1_TO_8_STEP1 %%GDATA_KEY, %%ZT1, %%ZT0, %%ZT3, %%TH, %%TM, %%TL, 1
+ VCLMUL_1_TO_8_STEP2 %%GDATA_KEY, %%ZT1, %%ZT2, \
+ %%ZT0, %%ZT3, %%ZT4, \
+ %%TH, %%TM, %%TL, 1
+
+%%_AAD_blocks_done:
+ ;; Multiplications have been done. Do the reduction now
+ VCLMUL_REDUCE %%AAD_HASH, XWORD(%%POLY), XWORD(%%ZT1), XWORD(%%ZT2), \
+ XWORD(%%ZT0), XWORD(%%ZT3)
+%%_CALC_AAD_done:
+ ;; result in AAD_HASH
+
+%endmacro ; CALC_AAD_HASH
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;; PARTIAL_BLOCK
+;;; Handles encryption/decryption and the tag partial blocks between
+;;; update calls.
+;;; Requires the input data to be at least 1 byte long.
+;;; Output:
+;;; Cipher/plain text of the partial block (written to CYPH_PLAIN_OUT),
+;;; updated AAD_HASH and updated GDATA_CTX
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
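+;; Rough outline (an informal summary, not from the original header): a
+;; previous call leaves PBlockLen buffered bytes and their encrypted counter
+;; block (PBlockEncKey) in the context. The macro below completes that block
+;; with up to (16 - PBlockLen) new bytes, XORs them against the saved counter
+;; block, folds the masked ciphertext into the current GHASH value (the GHASH
+;; multiply itself is only done once the 16-byte block is complete) and
+;; writes the newly produced bytes out through a store mask.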
+%macro PARTIAL_BLOCK 22
+%define %%GDATA_KEY %1 ; [in] key pointer
+%define %%GDATA_CTX %2 ; [in] context pointer
+%define %%CYPH_PLAIN_OUT %3 ; [in] output buffer
+%define %%PLAIN_CYPH_IN %4 ; [in] input buffer
+%define %%PLAIN_CYPH_LEN %5 ; [in] buffer length
+%define %%DATA_OFFSET %6 ; [in/out] data offset (gets updated)
+%define %%AAD_HASH %7 ; [out] updated GHASH value
+%define %%ENC_DEC %8 ; [in] cipher direction
+%define %%GPTMP0 %9 ; [clobbered] GP temporary register
+%define %%GPTMP1 %10 ; [clobbered] GP temporary register
+%define %%GPTMP2 %11 ; [clobbered] GP temporary register
+%define %%ZTMP0 %12 ; [clobbered] ZMM temporary register
+%define %%ZTMP1 %13 ; [clobbered] ZMM temporary register
+%define %%ZTMP2 %14 ; [clobbered] ZMM temporary register
+%define %%ZTMP3 %15 ; [clobbered] ZMM temporary register
+%define %%ZTMP4 %16 ; [clobbered] ZMM temporary register
+%define %%ZTMP5 %17 ; [clobbered] ZMM temporary register
+%define %%ZTMP6 %18 ; [clobbered] ZMM temporary register
+%define %%ZTMP7 %19 ; [clobbered] ZMM temporary register
+%define %%ZTMP8 %20 ; [clobbered] ZMM temporary register
+%define %%ZTMP9 %21 ; [clobbered] ZMM temporary register
+%define %%MASKREG %22 ; [clobbered] mask temporary register
+
+%define %%XTMP0 XWORD(%%ZTMP0)
+%define %%XTMP1 XWORD(%%ZTMP1)
+%define %%XTMP2 XWORD(%%ZTMP2)
+%define %%XTMP3 XWORD(%%ZTMP3)
+%define %%XTMP4 XWORD(%%ZTMP4)
+%define %%XTMP5 XWORD(%%ZTMP5)
+%define %%XTMP6 XWORD(%%ZTMP6)
+%define %%XTMP7 XWORD(%%ZTMP7)
+%define %%XTMP8 XWORD(%%ZTMP8)
+%define %%XTMP9 XWORD(%%ZTMP9)
+
+%define %%LENGTH %%GPTMP0
+%define %%IA0 %%GPTMP1
+%define %%IA1 %%GPTMP2
+
+ mov %%LENGTH, [%%GDATA_CTX + PBlockLen]
+ or %%LENGTH, %%LENGTH
+ je %%_partial_block_done ;Leave Macro if no partial blocks
+
+ READ_SMALL_DATA_INPUT %%XTMP0, %%PLAIN_CYPH_IN, %%PLAIN_CYPH_LEN, %%IA0, %%MASKREG
+
+ ;; XTMP1 = my_ctx_data.partial_block_enc_key
+ vmovdqu64 %%XTMP1, [%%GDATA_CTX + PBlockEncKey]
+ vmovdqu64 %%XTMP2, [%%GDATA_KEY + HashKey]
+
+ ;; adjust the shuffle mask pointer to be able to shift right %%LENGTH bytes
+ ;; (16 - %%LENGTH is the number of bytes in plaintext mod 16)
+ lea %%IA0, [rel SHIFT_MASK]
+ add %%IA0, %%LENGTH
+ vmovdqu64 %%XTMP3, [%%IA0] ; shift right shuffle mask
+ vpshufb %%XTMP1, %%XTMP3
+
+%ifidn %%ENC_DEC, DEC
+ ;; keep copy of cipher text in %%XTMP4
+ vmovdqa64 %%XTMP4, %%XTMP0
+%endif
+ vpxorq %%XTMP1, %%XTMP0 ; Cyphertext XOR E(K, Yn)
+
+ ;; Set %%IA1 to be the amount of data left in CYPH_PLAIN_IN after filling the block
+ ;; Determine if partial block is not being filled and shift mask accordingly
+ mov %%IA1, %%PLAIN_CYPH_LEN
+ add %%IA1, %%LENGTH
+ sub %%IA1, 16
+ jge %%_no_extra_mask
+ sub %%IA0, %%IA1
+%%_no_extra_mask:
+ ;; get the appropriate mask to mask out bottom %%LENGTH bytes of %%XTMP1
+ ;; - mask out bottom %%LENGTH bytes of %%XTMP1
+ vmovdqu64 %%XTMP0, [%%IA0 + ALL_F - SHIFT_MASK]
+ vpand %%XTMP1, %%XTMP0
+
+%ifidn %%ENC_DEC, DEC
+ vpand %%XTMP4, %%XTMP0
+ vpshufb %%XTMP4, [rel SHUF_MASK]
+ vpshufb %%XTMP4, %%XTMP3
+ vpxorq %%AAD_HASH, %%XTMP4
+%else
+ vpshufb %%XTMP1, [rel SHUF_MASK]
+ vpshufb %%XTMP1, %%XTMP3
+ vpxorq %%AAD_HASH, %%XTMP1
+%endif
+ cmp %%IA1, 0
+ jl %%_partial_incomplete
+
+ ;; GHASH computation for the last <16 Byte block
+ GHASH_MUL %%AAD_HASH, %%XTMP2, %%XTMP5, %%XTMP6, %%XTMP7, %%XTMP8, %%XTMP9
+
+ mov qword [%%GDATA_CTX + PBlockLen], 0
+
+ ;; Set %%LENGTH to be the number of bytes to write out
+ mov %%IA0, %%LENGTH
+ mov %%LENGTH, 16
+ sub %%LENGTH, %%IA0
+ jmp %%_enc_dec_done
+
+%%_partial_incomplete:
+%ifidn __OUTPUT_FORMAT__, win64
+ mov %%IA0, %%PLAIN_CYPH_LEN
+ add [%%GDATA_CTX + PBlockLen], %%IA0
+%else
+ add [%%GDATA_CTX + PBlockLen], %%PLAIN_CYPH_LEN
+%endif
+ mov %%LENGTH, %%PLAIN_CYPH_LEN
+
+%%_enc_dec_done:
+ ;; output encrypted Bytes
+
+ lea %%IA0, [rel byte_len_to_mask_table]
+ kmovw %%MASKREG, [%%IA0 + %%LENGTH*2]
+ vmovdqu64 [%%GDATA_CTX + AadHash], %%AAD_HASH
+
+%ifidn %%ENC_DEC, ENC
+ ;; shuffle XTMP1 back to output as ciphertext
+ vpshufb %%XTMP1, [rel SHUF_MASK]
+ vpshufb %%XTMP1, %%XTMP3
+%endif
+ vmovdqu8 [%%CYPH_PLAIN_OUT + %%DATA_OFFSET]{%%MASKREG}, %%XTMP1
+ add %%DATA_OFFSET, %%LENGTH
+%%_partial_block_done:
+%endmacro ; PARTIAL_BLOCK
+
+
+%macro GHASH_SINGLE_MUL 9
+%define %%GDATA %1
+%define %%HASHKEY %2
+%define %%CIPHER %3
+%define %%STATE_11 %4
+%define %%STATE_00 %5
+%define %%STATE_MID %6
+%define %%T1 %7
+%define %%T2 %8
+%define %%FIRST %9
+
+ vmovdqu %%T1, [%%GDATA + %%HASHKEY]
+%ifidn %%FIRST, first
+ vpclmulqdq %%STATE_11, %%CIPHER, %%T1, 0x11 ; %%T4 = a1*b1
+ vpclmulqdq %%STATE_00, %%CIPHER, %%T1, 0x00 ; %%T4_2 = a0*b0
+ vpclmulqdq %%STATE_MID, %%CIPHER, %%T1, 0x01 ; %%T6 = a1*b0
+ vpclmulqdq %%T2, %%CIPHER, %%T1, 0x10 ; %%T5 = a0*b1
+ vpxor %%STATE_MID, %%STATE_MID, %%T2
+%else
+ vpclmulqdq %%T2, %%CIPHER, %%T1, 0x11
+ vpxor %%STATE_11, %%STATE_11, %%T2
+
+ vpclmulqdq %%T2, %%CIPHER, %%T1, 0x00
+ vpxor %%STATE_00, %%STATE_00, %%T2
+
+ vpclmulqdq %%T2, %%CIPHER, %%T1, 0x01
+ vpxor %%STATE_MID, %%STATE_MID, %%T2
+
+ vpclmulqdq %%T2, %%CIPHER, %%T1, 0x10
+ vpxor %%STATE_MID, %%STATE_MID, %%T2
+%endif
+
+%endmacro
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;; This macro is used to "warm-up" the pipeline for GHASH_8_ENCRYPT_8_PARALLEL
+;;; macro code. It is called only for data lengths 128 and above.
+;;; The flow is as follows:
+;;; - encrypt the initial %%num_initial_blocks blocks (can be 0)
+;;; - encrypt the next 8 blocks and stitch with
+;;; GHASH for the first %%num_initial_blocks
+;;; - the last 8th block can be partial (lengths between 129 and 239)
+;;; - partial block ciphering is handled within this macro
+;;; - top bytes of such block are cleared for
+;;; the subsequent GHASH calculations
+;;; - PBlockEncKey needs to be setup in case of multi-call
+;;; - top bytes of the block need to include the encrypted counter block so
+;;;   that, when handling the partial block case, text is read and XOR'ed
+;;;   against it. This needs to be in un-shuffled format.
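+;;; Informally, the schedule used below is: AES round 0 (ARK) for the next
+;;; 8 counter blocks, GHASH step 1 over the initial ciphertext blocks, about
+;;; a third of the AES rounds, GHASH step 2, another third, the GHASH
+;;; reduction, then the remaining AES rounds; this keeps vaesenc and
+;;; vpclmulqdq work overlapped instead of serialized.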
+
+%macro INITIAL_BLOCKS 26-27
+%define %%GDATA_KEY %1 ; [in] pointer to GCM keys
+%define %%GDATA_CTX %2 ; [in] pointer to GCM context
+%define %%CYPH_PLAIN_OUT %3 ; [in] output buffer
+%define %%PLAIN_CYPH_IN %4 ; [in] input buffer
+%define %%LENGTH %5 ; [in/out] number of bytes to process
+%define %%DATA_OFFSET %6 ; [in/out] data offset
+%define %%num_initial_blocks %7 ; [in] can be 0, 1, 2, 3, 4, 5, 6 or 7
+%define %%CTR %8 ; [in/out] XMM counter block
+%define %%AAD_HASH %9 ; [in/out] ZMM with AAD hash
+%define %%ZT1 %10 ; [out] ZMM cipher blocks 0-3 for GHASH
+%define %%ZT2 %11 ; [out] ZMM cipher blocks 4-7 for GHASH
+%define %%ZT3 %12 ; [clobbered] ZMM temporary
+%define %%ZT4 %13 ; [clobbered] ZMM temporary
+%define %%ZT5 %14 ; [clobbered] ZMM temporary
+%define %%ZT6 %15 ; [clobbered] ZMM temporary
+%define %%ZT7 %16 ; [clobbered] ZMM temporary
+%define %%ZT8 %17 ; [clobbered] ZMM temporary
+%define %%ZT9 %18 ; [clobbered] ZMM temporary
+%define %%ZT10 %19 ; [clobbered] ZMM temporary
+%define %%ZT11 %20 ; [clobbered] ZMM temporary
+%define %%ZT12 %21 ; [clobbered] ZMM temporary
+%define %%IA0 %22 ; [clobbered] GP temporary
+%define %%IA1 %23 ; [clobbered] GP temporary
+%define %%ENC_DEC %24 ; [in] ENC/DEC selector
+%define %%MASKREG %25 ; [clobbered] mask register
+%define %%SHUFMASK %26 ; [in] ZMM with BE/LE shuffle mask
+%define %%PARTIAL_PRESENT %27 ; [in] "no_partial_block" option can be passed here (if length is guaranteed to be > 15*16 bytes)
+
+%define %%T1 XWORD(%%ZT1)
+%define %%T2 XWORD(%%ZT2)
+%define %%T3 XWORD(%%ZT3)
+%define %%T4 XWORD(%%ZT4)
+%define %%T5 XWORD(%%ZT5)
+%define %%T6 XWORD(%%ZT6)
+%define %%T7 XWORD(%%ZT7)
+%define %%T8 XWORD(%%ZT8)
+%define %%T9 XWORD(%%ZT9)
+
+%define %%TH %%ZT10
+%define %%TM %%ZT11
+%define %%TL %%ZT12
+
+;; determine if partial block code needs to be added
+%assign partial_block_possible 1
+%if %0 > 26
+%ifidn %%PARTIAL_PRESENT, no_partial_block
+%assign partial_block_possible 0
+%endif
+%endif
+
+%if %%num_initial_blocks > 0
+ ;; prepare AES counter blocks
+%if %%num_initial_blocks == 1
+ vpaddd %%T3, %%CTR, [rel ONE]
+%elif %%num_initial_blocks == 2
+ vshufi64x2 YWORD(%%ZT3), YWORD(%%CTR), YWORD(%%CTR), 0
+ vpaddd YWORD(%%ZT3), YWORD(%%ZT3), [rel ddq_add_1234]
+%else
+ vshufi64x2 ZWORD(%%CTR), ZWORD(%%CTR), ZWORD(%%CTR), 0
+ vpaddd %%ZT3, ZWORD(%%CTR), [rel ddq_add_1234]
+ vpaddd %%ZT4, ZWORD(%%CTR), [rel ddq_add_5678]
+%endif
+
+ ;; extract new counter value (%%T3)
+ ;; shuffle the counters for AES rounds
+%if %%num_initial_blocks <= 4
+ vextracti32x4 %%CTR, %%ZT3, (%%num_initial_blocks - 1)
+%else
+ vextracti32x4 %%CTR, %%ZT4, (%%num_initial_blocks - 5)
+%endif
+ ZMM_OPCODE3_DSTR_SRC1R_SRC2R_BLOCKS_0_16 %%num_initial_blocks, vpshufb, \
+ %%ZT3, %%ZT4, no_zmm, no_zmm, \
+ %%ZT3, %%ZT4, no_zmm, no_zmm, \
+ %%SHUFMASK, %%SHUFMASK, %%SHUFMASK, %%SHUFMASK
+
+ ;; load plain/cipher text
+ ZMM_LOAD_BLOCKS_0_16 %%num_initial_blocks, %%PLAIN_CYPH_IN, %%DATA_OFFSET, \
+ %%ZT5, %%ZT6, no_zmm, no_zmm
+
+ ;; AES rounds and XOR with plain/cipher text
+%assign j 0
+%rep (NROUNDS + 2)
+ vbroadcastf64x2 %%ZT1, [%%GDATA_KEY + (j * 16)]
+ ZMM_AESENC_ROUND_BLOCKS_0_16 %%ZT3, %%ZT4, no_zmm, no_zmm, \
+ %%ZT1, j, \
+ %%ZT5, %%ZT6, no_zmm, no_zmm, \
+ %%num_initial_blocks, NROUNDS
+%assign j (j + 1)
+%endrep
+
+ ;; write cipher/plain text back to output
+ ZMM_STORE_BLOCKS_0_16 %%num_initial_blocks, %%CYPH_PLAIN_OUT, %%DATA_OFFSET, \
+ %%ZT3, %%ZT4, no_zmm, no_zmm
+
+ ;; Shuffle the cipher text blocks for hashing part
+ ;; ZT5 and ZT6 are expected outputs with blocks for hashing
+%ifidn %%ENC_DEC, DEC
+ ;; Decrypt case
+ ;; - cipher blocks are in ZT5 & ZT6
+ ZMM_OPCODE3_DSTR_SRC1R_SRC2R_BLOCKS_0_16 %%num_initial_blocks, vpshufb, \
+ %%ZT5, %%ZT6, no_zmm, no_zmm, \
+ %%ZT5, %%ZT6, no_zmm, no_zmm, \
+ %%SHUFMASK, %%SHUFMASK, %%SHUFMASK, %%SHUFMASK
+%else
+ ;; Encrypt case
+ ;; - cipher blocks are in ZT3 & ZT4
+ ZMM_OPCODE3_DSTR_SRC1R_SRC2R_BLOCKS_0_16 %%num_initial_blocks, vpshufb, \
+ %%ZT5, %%ZT6, no_zmm, no_zmm, \
+ %%ZT3, %%ZT4, no_zmm, no_zmm, \
+ %%SHUFMASK, %%SHUFMASK, %%SHUFMASK, %%SHUFMASK
+%endif ; Encrypt
+
+ ;; adjust data offset and length
+ sub %%LENGTH, (%%num_initial_blocks * 16)
+ add %%DATA_OFFSET, (%%num_initial_blocks * 16)
+
+ ;; At this stage
+ ;; - ZT5:ZT6 include cipher blocks to be GHASH'ed
+
+%endif ; %%num_initial_blocks > 0
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;; - cipher of %%num_initial_blocks is done
+ ;; - prepare counter blocks for the next 8 blocks (ZT3 & ZT4)
+ ;; - save the last block in %%CTR
+ ;; - shuffle the blocks for AES
+ ;; - stitch encryption of the new blocks with
+ ;; GHASHING the previous blocks
+ vshufi64x2 ZWORD(%%CTR), ZWORD(%%CTR), ZWORD(%%CTR), 0
+ vpaddd %%ZT3, ZWORD(%%CTR), [rel ddq_add_1234]
+ vpaddd %%ZT4, ZWORD(%%CTR), [rel ddq_add_5678]
+ vextracti32x4 %%CTR, %%ZT4, 3
+
+ vpshufb %%ZT3, %%SHUFMASK
+ vpshufb %%ZT4, %%SHUFMASK
+
+%if partial_block_possible != 0
+ ;; get text load/store mask (assume full mask by default)
+ mov %%IA0, 0xffff_ffff_ffff_ffff
+%if %%num_initial_blocks > 0
+ ;; NOTE: 'jge' is always taken for %%num_initial_blocks = 0
+ ;; This macro is executed for lengths of 128 and above,
+ ;; zero length is checked in GCM_ENC_DEC.
+ ;; We know there is partial block if:
+ ;; LENGTH - 16*num_initial_blocks < 128
+ cmp %%LENGTH, 128
+ jge %%_initial_partial_block_continue
+ mov %%IA1, rcx
+ mov rcx, 128
+ sub rcx, %%LENGTH
+ shr %%IA0, cl
+ mov rcx, %%IA1
+%%_initial_partial_block_continue:
+%endif
+ kmovq %%MASKREG, %%IA0
+ ;; load plain or cipher text (masked)
+ ZMM_LOAD_MASKED_BLOCKS_0_16 8, %%PLAIN_CYPH_IN, %%DATA_OFFSET, \
+ %%ZT1, %%ZT2, no_zmm, no_zmm, %%MASKREG
+%else
+ ;; load plain or cipher text
+ ZMM_LOAD_BLOCKS_0_16 8, %%PLAIN_CYPH_IN, %%DATA_OFFSET, \
+ %%ZT1, %%ZT2, no_zmm, no_zmm
+%endif ;; partial_block_possible
+
+ ;; === AES ROUND 0
+%assign aes_round 0
+ vbroadcastf64x2 %%ZT8, [%%GDATA_KEY + (aes_round * 16)]
+ ZMM_AESENC_ROUND_BLOCKS_0_16 %%ZT3, %%ZT4, no_zmm, no_zmm, \
+ %%ZT8, aes_round, \
+ %%ZT1, %%ZT2, no_zmm, no_zmm, \
+ 8, NROUNDS
+%assign aes_round (aes_round + 1)
+
+ ;; === GHASH blocks 4-7
+%if (%%num_initial_blocks > 0)
+ ;; Hash in AES state
+ vpxorq %%ZT5, %%ZT5, %%AAD_HASH
+
+ VCLMUL_1_TO_8_STEP1 %%GDATA_KEY, %%ZT6, %%ZT8, %%ZT9, \
+ %%TH, %%TM, %%TL, %%num_initial_blocks
+%endif
+
+ ;; === [1/3] of AES rounds
+
+%rep ((NROUNDS + 1) / 3)
+ vbroadcastf64x2 %%ZT8, [%%GDATA_KEY + (aes_round * 16)]
+ ZMM_AESENC_ROUND_BLOCKS_0_16 %%ZT3, %%ZT4, no_zmm, no_zmm, \
+ %%ZT8, aes_round, \
+ %%ZT1, %%ZT2, no_zmm, no_zmm, \
+ 8, NROUNDS
+%assign aes_round (aes_round + 1)
+%endrep ; %rep ((NROUNDS + 1) / 3)
+
+ ;; === GHASH blocks 0-3 and gather
+%if (%%num_initial_blocks > 0)
+ VCLMUL_1_TO_8_STEP2 %%GDATA_KEY, %%ZT6, %%ZT5, \
+ %%ZT7, %%ZT8, %%ZT9, \
+ %%TH, %%TM, %%TL, %%num_initial_blocks
+%endif
+
+ ;; === [2/3] of AES rounds
+
+%rep ((NROUNDS + 1) / 3)
+ vbroadcastf64x2 %%ZT8, [%%GDATA_KEY + (aes_round * 16)]
+ ZMM_AESENC_ROUND_BLOCKS_0_16 %%ZT3, %%ZT4, no_zmm, no_zmm, \
+ %%ZT8, aes_round, \
+ %%ZT1, %%ZT2, no_zmm, no_zmm, \
+ 8, NROUNDS
+%assign aes_round (aes_round + 1)
+%endrep ; %rep ((NROUNDS + 1) / 3)
+
+ ;; === GHASH reduction
+
+%if (%%num_initial_blocks > 0)
+ ;; [out] AAD_HASH - hash output
+ ;; [in] T8 - polynomial
+ ;; [in] T6 - high, T5 - low
+ ;; [clobbered] T9, T7 - temporary
+ vmovdqu64 %%T8, [rel POLY2]
+ VCLMUL_REDUCE XWORD(%%AAD_HASH), %%T8, %%T6, %%T5, %%T7, %%T9
+%endif
+
+ ;; === [3/3] of AES rounds
+
+%rep (((NROUNDS + 1) / 3) + 2)
+%if aes_round < (NROUNDS + 2)
+ vbroadcastf64x2 %%ZT8, [%%GDATA_KEY + (aes_round * 16)]
+ ZMM_AESENC_ROUND_BLOCKS_0_16 %%ZT3, %%ZT4, no_zmm, no_zmm, \
+ %%ZT8, aes_round, \
+ %%ZT1, %%ZT2, no_zmm, no_zmm, \
+ 8, NROUNDS
+%assign aes_round (aes_round + 1)
+%endif
+%endrep ; %rep (((NROUNDS + 1) / 3) + 2)
+
+%if partial_block_possible != 0
+ ;; write cipher/plain text back to output and
+ ;; zero bytes outside the mask before hashing
+ ZMM_STORE_MASKED_BLOCKS_0_16 8, %%CYPH_PLAIN_OUT, %%DATA_OFFSET, \
+ %%ZT3, %%ZT4, no_zmm, no_zmm, %%MASKREG
+ ;; check if there is partial block
+ cmp %%LENGTH, 128
+ jl %%_initial_save_partial
+ ;; adjust offset and length
+ add %%DATA_OFFSET, 128
+ sub %%LENGTH, 128
+ jmp %%_initial_blocks_done
+%%_initial_save_partial:
+ ;; partial block case
+ ;; - save the partial block in unshuffled format
+ ;; - ZT4 is partially XOR'ed with data and top bytes contain
+ ;; encrypted counter block only
+ ;; - save the number of bytes processed in the partial block
+ ;; - adjust offset and zero the length
+ ;; - clear top bytes of the partial block for subsequent GHASH calculations
+ vextracti32x4 [%%GDATA_CTX + PBlockEncKey], %%ZT4, 3
+ add %%DATA_OFFSET, %%LENGTH
+ sub %%LENGTH, (128 - 16)
+ mov [%%GDATA_CTX + PBlockLen], %%LENGTH
+ xor %%LENGTH, %%LENGTH
+ vmovdqu8 %%ZT4{%%MASKREG}{z}, %%ZT4
+%%_initial_blocks_done:
+%else
+ ZMM_STORE_BLOCKS_0_16 8, %%CYPH_PLAIN_OUT, %%DATA_OFFSET, \
+ %%ZT3, %%ZT4, no_zmm, no_zmm
+ add %%DATA_OFFSET, 128
+ sub %%LENGTH, 128
+%endif ;; partial_block_possible
+
+ ;; Shuffle AES result for GHASH.
+%ifidn %%ENC_DEC, DEC
+ ;; Decrypt case
+ ;; - cipher blocks are in ZT1 & ZT2
+ vpshufb %%ZT1, %%SHUFMASK
+ vpshufb %%ZT2, %%SHUFMASK
+%else
+ ;; Encrypt case
+ ;; - cipher blocks are in ZT3 & ZT4
+ vpshufb %%ZT1, %%ZT3, %%SHUFMASK
+ vpshufb %%ZT2, %%ZT4, %%SHUFMASK
+%endif ; Encrypt
+
+ ;; Current hash value is in AAD_HASH
+
+ ;; Combine GHASHed value with the corresponding ciphertext
+ vpxorq %%ZT1, %%ZT1, %%AAD_HASH
+
+%endmacro ; INITIAL_BLOCKS
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;; INITIAL_BLOCKS_PARTIAL macro with support for a partial final block.
+;;; It may look similar to INITIAL_BLOCKS but its usage is different:
+;;; - first encrypts/decrypts required number of blocks and then
+;;; ghashes these blocks
+;;; - Small packets or left over data chunks (<256 bytes)
+;;; - single or multi call
+;;; - Remaining data chunks below 256 bytes (multi buffer code)
+;;;
+;;; num_initial_blocks is expected to include the partial final block
+;;; in the count.
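+;;; Informal note: loads and stores below go through a byte mask derived
+;;; from %%LENGTH (byte64_len_to_mask_table), so the macro covers any
+;;; 1 to 16 block count with the last block possibly shorter than 16 bytes;
+;;; for multi_call instances the trailing partial block is left for a later
+;;; call (its length and encrypted counter block are saved in the context
+;;; fields PBlockLen / PBlockEncKey).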
+%macro INITIAL_BLOCKS_PARTIAL 41
+%define %%GDATA_KEY %1 ; [in] key pointer
+%define %%GDATA_CTX %2 ; [in] context pointer
+%define %%CYPH_PLAIN_OUT %3 ; [in] text out pointer
+%define %%PLAIN_CYPH_IN %4 ; [in] text in pointer
+%define %%LENGTH %5 ; [in/clobbered] length in bytes
+%define %%DATA_OFFSET %6 ; [in/out] current data offset (updated)
+%define %%num_initial_blocks %7 ; [in] can only be 1, 2, 3, 4, 5, ..., 15 or 16 (not 0)
+%define %%CTR %8 ; [in/out] current counter value
+%define %%HASH_IN_OUT %9 ; [in/out] XMM ghash in/out value
+%define %%ENC_DEC %10 ; [in] cipher direction (ENC/DEC)
+%define %%INSTANCE_TYPE %11 ; [in] multi_call or single_call
+%define %%ZT0 %12 ; [clobbered] ZMM temporary
+%define %%ZT1 %13 ; [clobbered] ZMM temporary
+%define %%ZT2 %14 ; [clobbered] ZMM temporary
+%define %%ZT3 %15 ; [clobbered] ZMM temporary
+%define %%ZT4 %16 ; [clobbered] ZMM temporary
+%define %%ZT5 %17 ; [clobbered] ZMM temporary
+%define %%ZT6 %18 ; [clobbered] ZMM temporary
+%define %%ZT7 %19 ; [clobbered] ZMM temporary
+%define %%ZT8 %20 ; [clobbered] ZMM temporary
+%define %%ZT9 %21 ; [clobbered] ZMM temporary
+%define %%ZT10 %22 ; [clobbered] ZMM temporary
+%define %%ZT11 %23 ; [clobbered] ZMM temporary
+%define %%ZT12 %24 ; [clobbered] ZMM temporary
+%define %%ZT13 %25 ; [clobbered] ZMM temporary
+%define %%ZT14 %26 ; [clobbered] ZMM temporary
+%define %%ZT15 %27 ; [clobbered] ZMM temporary
+%define %%ZT16 %28 ; [clobbered] ZMM temporary
+%define %%ZT17 %29 ; [clobbered] ZMM temporary
+%define %%ZT18 %30 ; [clobbered] ZMM temporary
+%define %%ZT19 %31 ; [clobbered] ZMM temporary
+%define %%ZT20 %32 ; [clobbered] ZMM temporary
+%define %%ZT21 %33 ; [clobbered] ZMM temporary
+%define %%ZT22 %34 ; [clobbered] ZMM temporary
+%define %%GH %35 ; [in] ZMM ghash sum (high)
+%define %%GL %36 ; [in] ZMM ghash sum (low)
+%define %%GM %37 ; [in] ZMM ghash sum (middle)
+%define %%IA0 %38 ; [clobbered] GP temporary
+%define %%IA1 %39 ; [clobbered] GP temporary
+%define %%MASKREG %40 ; [clobbered] mask register
+%define %%SHUFMASK %41 ; [in] ZMM with BE/LE shuffle mask
+
+%define %%T1 XWORD(%%ZT1)
+%define %%T2 XWORD(%%ZT2)
+%define %%T7 XWORD(%%ZT7)
+
+%define %%CTR0 %%ZT3
+%define %%CTR1 %%ZT4
+%define %%CTR2 %%ZT8
+%define %%CTR3 %%ZT9
+
+%define %%DAT0 %%ZT5
+%define %%DAT1 %%ZT6
+%define %%DAT2 %%ZT10
+%define %%DAT3 %%ZT11
+
+%ifnidn %%GH, no_zmm
+%ifnidn %%GL, no_zmm
+%ifnidn %%GM, no_zmm
+ ;; when temporary sums are passed then zero HASH IN value
+ ;; - whatever it holds it is invalid in this case
+ vpxorq %%HASH_IN_OUT, %%HASH_IN_OUT
+%endif
+%endif
+%endif
+ ;; Copy ghash to temp reg
+ vmovdqa64 %%T2, %%HASH_IN_OUT
+
+ ;; prepare AES counter blocks
+%if %%num_initial_blocks == 1
+ vpaddd XWORD(%%CTR0), %%CTR, [rel ONE]
+%elif %%num_initial_blocks == 2
+ vshufi64x2 YWORD(%%CTR0), YWORD(%%CTR), YWORD(%%CTR), 0
+ vpaddd YWORD(%%CTR0), YWORD(%%CTR0), [rel ddq_add_1234]
+%else
+ vshufi64x2 ZWORD(%%CTR), ZWORD(%%CTR), ZWORD(%%CTR), 0
+ vpaddd %%CTR0, ZWORD(%%CTR), [rel ddq_add_1234]
+%if %%num_initial_blocks > 4
+ vpaddd %%CTR1, ZWORD(%%CTR), [rel ddq_add_5678]
+%endif
+%if %%num_initial_blocks > 8
+ vpaddd %%CTR2, %%CTR0, [rel ddq_add_8888]
+%endif
+%if %%num_initial_blocks > 12
+ vpaddd %%CTR3, %%CTR1, [rel ddq_add_8888]
+%endif
+%endif
+
+ ;; get load/store mask
+ lea %%IA0, [rel byte64_len_to_mask_table]
+ mov %%IA1, %%LENGTH
+%if %%num_initial_blocks > 12
+ sub %%IA1, 3 * 64
+%elif %%num_initial_blocks > 8
+ sub %%IA1, 2 * 64
+%elif %%num_initial_blocks > 4
+ sub %%IA1, 64
+%endif
+ kmovq %%MASKREG, [%%IA0 + %%IA1*8]
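+ ;; A hedged example of the mask computation above: for
+ ;; %%num_initial_blocks = 10 and %%LENGTH = 150, %%IA1 = 150 - 2*64 = 22,
+ ;; so the mask has 22 low bits set (assuming table entry i holds i set
+ ;; bits); the first two ZMM data registers are then expected to load/store
+ ;; their full 128 bytes while the third covers only the remaining 22 bytes.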
+
+ ;; extract new counter value
+ ;; shuffle the counters for AES rounds
+%if %%num_initial_blocks <= 4
+ vextracti32x4 %%CTR, %%CTR0, (%%num_initial_blocks - 1)
+%elif %%num_initial_blocks <= 8
+ vextracti32x4 %%CTR, %%CTR1, (%%num_initial_blocks - 5)
+%elif %%num_initial_blocks <= 12
+ vextracti32x4 %%CTR, %%CTR2, (%%num_initial_blocks - 9)
+%else
+ vextracti32x4 %%CTR, %%CTR3, (%%num_initial_blocks - 13)
+%endif
+ ZMM_OPCODE3_DSTR_SRC1R_SRC2R_BLOCKS_0_16 %%num_initial_blocks, vpshufb, \
+ %%CTR0, %%CTR1, %%CTR2, %%CTR3, \
+ %%CTR0, %%CTR1, %%CTR2, %%CTR3, \
+ %%SHUFMASK, %%SHUFMASK, %%SHUFMASK, %%SHUFMASK
+
+ ;; load plain/cipher text
+ ZMM_LOAD_MASKED_BLOCKS_0_16 %%num_initial_blocks, %%PLAIN_CYPH_IN, %%DATA_OFFSET, \
+ %%DAT0, %%DAT1, %%DAT2, %%DAT3, %%MASKREG
+
+ ;; AES rounds and XOR with plain/cipher text
+%assign j 0
+%rep (NROUNDS + 2)
+ vbroadcastf64x2 %%ZT1, [%%GDATA_KEY + (j * 16)]
+ ZMM_AESENC_ROUND_BLOCKS_0_16 %%CTR0, %%CTR1, %%CTR2, %%CTR3, \
+ %%ZT1, j, \
+ %%DAT0, %%DAT1, %%DAT2, %%DAT3, \
+ %%num_initial_blocks, NROUNDS
+%assign j (j + 1)
+%endrep
+
+ ;; retrieve the last cipher counter block (partially XOR'ed with text)
+ ;; - this is needed for partial block cases
+%if %%num_initial_blocks <= 4
+ vextracti32x4 %%T1, %%CTR0, (%%num_initial_blocks - 1)
+%elif %%num_initial_blocks <= 8
+ vextracti32x4 %%T1, %%CTR1, (%%num_initial_blocks - 5)
+%elif %%num_initial_blocks <= 12
+ vextracti32x4 %%T1, %%CTR2, (%%num_initial_blocks - 9)
+%else
+ vextracti32x4 %%T1, %%CTR3, (%%num_initial_blocks - 13)
+%endif
+
+ ;; write cipher/plain text back to output and
+ ZMM_STORE_MASKED_BLOCKS_0_16 %%num_initial_blocks, %%CYPH_PLAIN_OUT, %%DATA_OFFSET, \
+ %%CTR0, %%CTR1, %%CTR2, %%CTR3, %%MASKREG
+
+ ;; zero bytes outside the mask before hashing
+%if %%num_initial_blocks <= 4
+ vmovdqu8 %%CTR0{%%MASKREG}{z}, %%CTR0
+%elif %%num_initial_blocks <= 8
+ vmovdqu8 %%CTR1{%%MASKREG}{z}, %%CTR1
+%elif %%num_initial_blocks <= 12
+ vmovdqu8 %%CTR2{%%MASKREG}{z}, %%CTR2
+%else
+ vmovdqu8 %%CTR3{%%MASKREG}{z}, %%CTR3
+%endif
+
+ ;; Shuffle the cipher text blocks for the hashing part
+ ;; DAT0-DAT3 are the expected outputs with blocks for hashing
+%ifidn %%ENC_DEC, DEC
+ ;; Decrypt case
+ ;; - cipher blocks are in DAT0-DAT3
+ ZMM_OPCODE3_DSTR_SRC1R_SRC2R_BLOCKS_0_16 %%num_initial_blocks, vpshufb, \
+ %%DAT0, %%DAT1, %%DAT2, %%DAT3, \
+ %%DAT0, %%DAT1, %%DAT2, %%DAT3, \
+ %%SHUFMASK, %%SHUFMASK, %%SHUFMASK, %%SHUFMASK
+%else
+ ;; Encrypt case
+ ;; - cipher blocks are in CTR0-CTR3
+ ZMM_OPCODE3_DSTR_SRC1R_SRC2R_BLOCKS_0_16 %%num_initial_blocks, vpshufb, \
+ %%DAT0, %%DAT1, %%DAT2, %%DAT3, \
+ %%CTR0, %%CTR1, %%CTR2, %%CTR3, \
+ %%SHUFMASK, %%SHUFMASK, %%SHUFMASK, %%SHUFMASK
+%endif ; Encrypt
+
+ ;; Extract the last block for partials and multi_call cases
+%if %%num_initial_blocks <= 4
+ vextracti32x4 %%T7, %%DAT0, %%num_initial_blocks - 1
+%elif %%num_initial_blocks <= 8
+ vextracti32x4 %%T7, %%DAT1, %%num_initial_blocks - 5
+%elif %%num_initial_blocks <= 12
+ vextracti32x4 %%T7, %%DAT2, %%num_initial_blocks - 9
+%else
+ vextracti32x4 %%T7, %%DAT3, %%num_initial_blocks - 13
+%endif
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;; Hash all but the last block of data
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+ ;; update data offset
+%if %%num_initial_blocks > 1
+ ;; The final block of data may be <16B
+ add %%DATA_OFFSET, 16 * (%%num_initial_blocks - 1)
+ sub %%LENGTH, 16 * (%%num_initial_blocks - 1)
+%endif
+
+%if %%num_initial_blocks < 16
+ ;; NOTE: the 'jl' is always taken for num_initial_blocks = 16.
+ ;; This is run in the context of GCM_ENC_DEC_SMALL for length < 256.
+ cmp %%LENGTH, 16
+ jl %%_small_initial_partial_block
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;; Handle a full length final block - encrypt and hash all blocks
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+ sub %%LENGTH, 16
+ add %%DATA_OFFSET, 16
+ mov [%%GDATA_CTX + PBlockLen], %%LENGTH
+
+ ;; Hash all of the data
+
+ ;; ZT2 - incoming AAD hash (low 128bits)
+ ;; ZT12-ZT20 - temporary registers
+ GHASH_1_TO_16 %%GDATA_KEY, %%HASH_IN_OUT, \
+ %%ZT12, %%ZT13, %%ZT14, %%ZT15, %%ZT16, \
+ %%ZT17, %%ZT18, %%ZT19, %%ZT20, \
+ %%GH, %%GL, %%GM, \
+ %%ZT2, %%DAT0, %%DAT1, %%DAT2, %%DAT3, \
+ %%num_initial_blocks
+
+ jmp %%_small_initial_compute_done
+%endif ; %if %%num_initial_blocks < 16
+
+%%_small_initial_partial_block:
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;;; Handle ghash for a <16B final block
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+ ;; In this case, if it's a single call to encrypt, we can
+ ;; hash all of the data, but if it's an init / update / finalize
+ ;; series of calls we need to leave the last block if it's
+ ;; less than a full block of data.
+
+ mov [%%GDATA_CTX + PBlockLen], %%LENGTH
+ ;; %%T1 is ciphered counter block
+ vmovdqu64 [%%GDATA_CTX + PBlockEncKey], %%T1
+
+%ifidn %%INSTANCE_TYPE, multi_call
+%assign k (%%num_initial_blocks - 1)
+%assign last_block_to_hash 1
+%else
+%assign k (%%num_initial_blocks)
+%assign last_block_to_hash 0
+%endif
+
+%if (%%num_initial_blocks > last_block_to_hash)
+
+ ;; ZT12-ZT20 - temporary registers
+ GHASH_1_TO_16 %%GDATA_KEY, %%HASH_IN_OUT, \
+ %%ZT12, %%ZT13, %%ZT14, %%ZT15, %%ZT16, \
+ %%ZT17, %%ZT18, %%ZT19, %%ZT20, \
+ %%GH, %%GL, %%GM, \
+ %%ZT2, %%DAT0, %%DAT1, %%DAT2, %%DAT3, k
+
+ ;; just fall through no jmp needed
+%else
+ ;; Record that a reduction is not needed -
+ ;; In this case no hashes are computed because there
+ ;; is only one initial block and it is < 16B in length.
+ ;; We only need to check if a reduction is needed if
+ ;; initial_blocks == 1 and init/update/final is being used.
+ ;; In this case we may just have a partial block, and that
+ ;; gets hashed in finalize.
+
+%assign need_for_reduction 1
+%ifidn %%GH, no_zmm
+%ifidn %%GL, no_zmm
+%ifidn %%GM, no_zmm
+;; if %%GH, %%GL & %%GM not passed then reduction is not required
+%assign need_for_reduction 0
+%endif
+%endif
+%endif
+
+%if need_for_reduction == 0
+ ;; The hash should end up in HASH_IN_OUT.
+ ;; The only way we should get here is if there is
+ ;; a partial block of data, so xor that into the hash.
+ vpxorq %%HASH_IN_OUT, %%T2, %%T7
+%else
+ ;; right - here we have nothing to ghash in the small data but
+ ;; we have GHASH sums passed through that we need to gather and reduce
+
+ ;; integrate TM into TH and TL
+ vpsrldq %%ZT12, %%GM, 8
+ vpslldq %%ZT13, %%GM, 8
+ vpxorq %%GH, %%GH, %%ZT12
+ vpxorq %%GL, %%GL, %%ZT13
+
+ ;; add TH and TL 128-bit words horizontally
+ VHPXORI4x128 %%GH, %%ZT12
+ VHPXORI4x128 %%GL, %%ZT13
+
+ ;; reduction
+ vmovdqa64 XWORD(%%ZT12), [rel POLY2]
+ VCLMUL_REDUCE %%HASH_IN_OUT, XWORD(%%ZT12), \
+ XWORD(%%GH), XWORD(%%GL), XWORD(%%ZT13), XWORD(%%ZT14)
+
+ vpxorq %%HASH_IN_OUT, %%HASH_IN_OUT, %%T7
+%endif
+ ;; The result is in %%HASH_IN_OUT
+ jmp %%_after_reduction
+%endif
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;; After GHASH reduction
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+%%_small_initial_compute_done:
+
+%ifidn %%INSTANCE_TYPE, multi_call
+ ;; If using init/update/finalize, we need to xor any partial block data
+ ;; into the hash.
+%if %%num_initial_blocks > 1
+ ;; NOTE: for %%num_initial_blocks = 0 the xor never takes place
+%if %%num_initial_blocks != 16
+ ;; NOTE: for %%num_initial_blocks = 16, %%LENGTH, stored in [PBlockLen] is never zero
+ or %%LENGTH, %%LENGTH
+ je %%_after_reduction
+%endif ; %%num_initial_blocks != 16
+ vpxorq %%HASH_IN_OUT, %%HASH_IN_OUT, %%T7
+%endif ; %%num_initial_blocks > 1
+%endif ; %%INSTANCE_TYPE, multi_call
+
+%%_after_reduction:
+ ;; Final hash is now in HASH_IN_OUT
+
+%endmacro ; INITIAL_BLOCKS_PARTIAL
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;; Main GCM macro stitching cipher with GHASH
+;;; - operates on single stream
+;;; - encrypts 8 blocks at a time
+;;; - ghash the 8 previously encrypted ciphertext blocks
+;;; For the partial block case and multi_call, AES_PARTIAL_BLOCK on output
+;;; contains encrypted counter block.
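+;;; Informally: the AES rounds below work on the current 8 counter blocks
+;;; while the interleaved vpclmulqdq instructions GHASH the 8 ciphertext
+;;; blocks produced by the previous iteration (GHASHIN_AESOUT_B03/B47);
+;;; interleaving the two hides the latency of both instruction chains.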
+%macro GHASH_8_ENCRYPT_8_PARALLEL 34-37
+%define %%GDATA %1 ; [in] key pointer
+%define %%CYPH_PLAIN_OUT %2 ; [in] pointer to output buffer
+%define %%PLAIN_CYPH_IN %3 ; [in] pointer to input buffer
+%define %%DATA_OFFSET %4 ; [in] data offset
+%define %%CTR1 %5 ; [in/out] ZMM counter blocks 0 to 3
+%define %%CTR2 %6 ; [in/out] ZMM counter blocks 4 to 7
+%define %%GHASHIN_AESOUT_B03 %7 ; [in/out] ZMM ghash in / aes out blocks 0 to 3
+%define %%GHASHIN_AESOUT_B47 %8 ; [in/out] ZMM ghash in / aes out blocks 4 to 7
+%define %%AES_PARTIAL_BLOCK %9 ; [out] XMM partial block (AES)
+%define %%loop_idx %10 ; [in] counter block prep selection "add+shuffle" or "add"
+%define %%ENC_DEC %11 ; [in] cipher direction
+%define %%FULL_PARTIAL %12 ; [in] last block type selection "full" or "partial"
+%define %%IA0 %13 ; [clobbered] temporary GP register
+%define %%IA1 %14 ; [clobbered] temporary GP register
+%define %%LENGTH %15 ; [in] length
+%define %%INSTANCE_TYPE %16 ; [in] 'single_call' or 'multi_call' selection
+%define %%GH4KEY %17 ; [in] ZMM with GHASH keys 4 to 1
+%define %%GH8KEY %18 ; [in] ZMM with GHASH keys 8 to 5
+%define %%SHFMSK %19 ; [in] ZMM with byte swap mask for pshufb
+%define %%ZT1 %20 ; [clobbered] temporary ZMM (cipher)
+%define %%ZT2 %21 ; [clobbered] temporary ZMM (cipher)
+%define %%ZT3 %22 ; [clobbered] temporary ZMM (cipher)
+%define %%ZT4 %23 ; [clobbered] temporary ZMM (cipher)
+%define %%ZT5 %24 ; [clobbered] temporary ZMM (cipher)
+%define %%ZT10 %25 ; [clobbered] temporary ZMM (ghash)
+%define %%ZT11 %26 ; [clobbered] temporary ZMM (ghash)
+%define %%ZT12 %27 ; [clobbered] temporary ZMM (ghash)
+%define %%ZT13 %28 ; [clobbered] temporary ZMM (ghash)
+%define %%ZT14 %29 ; [clobbered] temporary ZMM (ghash)
+%define %%ZT15 %30 ; [clobbered] temporary ZMM (ghash)
+%define %%ZT16 %31 ; [clobbered] temporary ZMM (ghash)
+%define %%ZT17 %32 ; [clobbered] temporary ZMM (ghash)
+%define %%MASKREG %33 ; [clobbered] mask register for partial loads/stores
+%define %%DO_REDUCTION %34 ; [in] "do_reduction", "no_reduction", "final_reduction"
+%define %%TO_REDUCE_L %35 ; [in/out] ZMM for low 4x128-bit in case of "no_reduction"
+%define %%TO_REDUCE_H %36 ; [in/out] ZMM for hi 4x128-bit in case of "no_reduction"
+%define %%TO_REDUCE_M %37 ; [in/out] ZMM for medium 4x128-bit in case of "no_reduction"
+
+%define %%GH1H %%ZT10
+%define %%GH1L %%ZT11
+%define %%GH1M1 %%ZT12
+%define %%GH1M2 %%ZT13
+
+%define %%GH2H %%ZT14
+%define %%GH2L %%ZT15
+%define %%GH2M1 %%ZT16
+%define %%GH2M2 %%ZT17
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;; populate counter blocks for cipher part
+%ifidn %%loop_idx, in_order
+ ;; %%CTR1 & %%CTR2 are shuffled outside the scope of this macro
+ ;; and have to be kept in unshuffled format
+ vpshufb %%ZT1, %%CTR1, %%SHFMSK
+ vpshufb %%ZT2, %%CTR2, %%SHFMSK
+%else
+ vmovdqa64 %%ZT1, %%CTR1
+ vmovdqa64 %%ZT2, %%CTR2
+%endif
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;; stitch AES rounds with GHASH
+
+%assign aes_round 0
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;; AES round 0 - ARK
+ vbroadcastf64x2 %%ZT3, [%%GDATA + (aes_round * 16)]
+ ZMM_AESENC_ROUND_BLOCKS_0_16 %%ZT1, %%ZT2, no_zmm, no_zmm, \
+ %%ZT3, aes_round, \
+ %%ZT4, %%ZT5, no_zmm, no_zmm, \
+ 8, NROUNDS
+%assign aes_round (aes_round + 1)
+
+ ;;==================================================
+ ;; GHASH 4 blocks
+ vpclmulqdq %%GH1H, %%GHASHIN_AESOUT_B47, %%GH4KEY, 0x11 ; a1*b1
+ vpclmulqdq %%GH1L, %%GHASHIN_AESOUT_B47, %%GH4KEY, 0x00 ; a0*b0
+ vpclmulqdq %%GH1M1, %%GHASHIN_AESOUT_B47, %%GH4KEY, 0x01 ; a1*b0
+ vpclmulqdq %%GH1M2, %%GHASHIN_AESOUT_B47, %%GH4KEY, 0x10 ; a0*b1
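+ ;; As a worked note on the four products above: with a ciphertext block
+ ;; a = a1:a0 and hash key h = h1:h0 (64-bit halves), the 128x128 carry-less
+ ;; multiply is a*h = (a1*h1)<<128 ^ (a1*h0 ^ a0*h1)<<64 ^ (a0*h0),
+ ;; which maps to the vpclmulqdq immediates 0x11, 0x01/0x10 and 0x00;
+ ;; the <<64 middle term is folded into high/low later, before reduction.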
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;; 3 AES rounds
+%rep 3
+ vbroadcastf64x2 %%ZT3, [%%GDATA + (aes_round * 16)]
+ ZMM_AESENC_ROUND_BLOCKS_0_16 %%ZT1, %%ZT2, no_zmm, no_zmm, \
+ %%ZT3, aes_round, \
+ %%ZT4, %%ZT5, no_zmm, no_zmm, \
+ 8, NROUNDS
+%assign aes_round (aes_round + 1)
+%endrep ; 3 x AES ROUND
+
+ ;; =================================================
+ ;; GHASH 4 blocks
+ vpclmulqdq %%GH2M1, %%GHASHIN_AESOUT_B03, %%GH8KEY, 0x10 ; a0*b1
+ vpclmulqdq %%GH2M2, %%GHASHIN_AESOUT_B03, %%GH8KEY, 0x01 ; a1*b0
+ vpclmulqdq %%GH2H, %%GHASHIN_AESOUT_B03, %%GH8KEY, 0x11 ; a1*b1
+ vpclmulqdq %%GH2L, %%GHASHIN_AESOUT_B03, %%GH8KEY, 0x00 ; a0*b0
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;; 3 AES rounds
+%rep 3
+ vbroadcastf64x2 %%ZT3, [%%GDATA + (aes_round * 16)]
+ ZMM_AESENC_ROUND_BLOCKS_0_16 %%ZT1, %%ZT2, no_zmm, no_zmm, \
+ %%ZT3, aes_round, \
+ %%ZT4, %%ZT5, no_zmm, no_zmm, \
+ 8, NROUNDS
+%assign aes_round (aes_round + 1)
+%endrep ; 3 x AES ROUND
+
+ ;; =================================================
+ ;; gather GHASH in GH1L (low) and GH1H (high)
+%ifidn %%DO_REDUCTION, no_reduction
+ vpternlogq %%GH1M1, %%GH1M2, %%GH2M1, 0x96 ; TM: GH1M1 ^= GH1M2 ^ GH2M1
+ vpternlogq %%TO_REDUCE_M, %%GH1M1, %%GH2M2, 0x96 ; TM: TO_REDUCE_M ^= GH1M1 ^ GH2M2
+ vpternlogq %%TO_REDUCE_H, %%GH1H, %%GH2H, 0x96 ; TH: TO_REDUCE_H ^= GH1H ^ GH2H
+ vpternlogq %%TO_REDUCE_L, %%GH1L, %%GH2L, 0x96 ; TL: TO_REDUCE_L ^= GH1L ^ GH2L
+%endif
+%ifidn %%DO_REDUCTION, do_reduction
+ ;; phase 1: add mid products together
+ vpternlogq %%GH1M1, %%GH1M2, %%GH2M1, 0x96 ; TM: GH1M1 ^= GH1M2 ^ GH2M1
+ vpxorq %%GH1M1, %%GH1M1, %%GH2M2
+
+ vpsrldq %%GH2M1, %%GH1M1, 8
+ vpslldq %%GH1M1, %%GH1M1, 8
+%endif
+%ifidn %%DO_REDUCTION, final_reduction
+ ;; phase 1: add mid products together
+ vpternlogq %%GH1M1, %%GH1M2, %%GH2M1, 0x96 ; TM: GH1M1 ^= GH1M2 ^ GH2M1
+ vpternlogq %%GH1M1, %%TO_REDUCE_M, %%GH2M2, 0x96 ; TM: GH1M1 ^= TO_REDUCE_M ^ GH2M2
+
+ vpsrldq %%GH2M1, %%GH1M1, 8
+ vpslldq %%GH1M1, %%GH1M1, 8
+%endif
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;; 2 AES rounds
+%rep 2
+ vbroadcastf64x2 %%ZT3, [%%GDATA + (aes_round * 16)]
+ ZMM_AESENC_ROUND_BLOCKS_0_16 %%ZT1, %%ZT2, no_zmm, no_zmm, \
+ %%ZT3, aes_round, \
+ %%ZT4, %%ZT5, no_zmm, no_zmm, \
+ 8, NROUNDS
+%assign aes_round (aes_round + 1)
+%endrep ; 2 x AES ROUND
+
+ ;; =================================================
+ ;; Add mid product to high and low then
+ ;; horizontal xor of low and high 4x128
+%ifidn %%DO_REDUCTION, final_reduction
+ vpternlogq %%GH1H, %%GH2H, %%GH2M1, 0x96 ; TH = TH1 + TH2 + TM>>64
+ vpxorq %%GH1H, %%TO_REDUCE_H
+ vpternlogq %%GH1L, %%GH2L, %%GH1M1, 0x96 ; TL = TL1 + TL2 + TM<<64
+ vpxorq %%GH1L, %%TO_REDUCE_L
+%endif
+%ifidn %%DO_REDUCTION, do_reduction
+ vpternlogq %%GH1H, %%GH2H, %%GH2M1, 0x96 ; TH = TH1 + TH2 + TM>>64
+ vpternlogq %%GH1L, %%GH2L, %%GH1M1, 0x96 ; TL = TL1 + TL2 + TM<<64
+%endif
+%ifnidn %%DO_REDUCTION, no_reduction
+ VHPXORI4x128 %%GH1H, %%GH2H
+ VHPXORI4x128 %%GH1L, %%GH2L
+%endif
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;; 2 AES rounds
+%rep 2
+%if (aes_round < (NROUNDS + 1))
+ vbroadcastf64x2 %%ZT3, [%%GDATA + (aes_round * 16)]
+ ZMM_AESENC_ROUND_BLOCKS_0_16 %%ZT1, %%ZT2, no_zmm, no_zmm, \
+ %%ZT3, aes_round, \
+ %%ZT4, %%ZT5, no_zmm, no_zmm, \
+ 8, NROUNDS
+%assign aes_round (aes_round + 1)
+%endif ; aes_round < (NROUNDS + 1)
+%endrep
+
+ ;; =================================================
+ ;; first phase of reduction
+%ifnidn %%DO_REDUCTION, no_reduction
+ vmovdqu64 XWORD(%%GH2M2), [rel POLY2]
+ vpclmulqdq XWORD(%%ZT15), XWORD(%%GH2M2), XWORD(%%GH1L), 0x01
+ vpslldq XWORD(%%ZT15), XWORD(%%ZT15), 8 ; shift-L 2 DWs
+ vpxorq XWORD(%%ZT15), XWORD(%%GH1L), XWORD(%%ZT15) ; first phase of the reduction
+%endif
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;; 2 AES rounds
+%rep 2
+%if (aes_round < (NROUNDS + 1))
+ vbroadcastf64x2 %%ZT3, [%%GDATA + (aes_round * 16)]
+ ZMM_AESENC_ROUND_BLOCKS_0_16 %%ZT1, %%ZT2, no_zmm, no_zmm, \
+ %%ZT3, aes_round, \
+ %%ZT4, %%ZT5, no_zmm, no_zmm, \
+ 8, NROUNDS
+%assign aes_round (aes_round + 1)
+%endif ; aes_round < (NROUNDS + 1)
+%endrep
+
+ ;; =================================================
+ ;; second phase of the reduction
+%ifnidn %%DO_REDUCTION, no_reduction
+ vpclmulqdq XWORD(%%ZT16), XWORD(%%GH2M2), XWORD(%%ZT15), 0x00
+ vpsrldq XWORD(%%ZT16), XWORD(%%ZT16), 4 ; shift-R 1-DW to obtain 2-DWs shift-R
+
+ vpclmulqdq XWORD(%%ZT13), XWORD(%%GH2M2), XWORD(%%ZT15), 0x10
+ vpslldq XWORD(%%ZT13), XWORD(%%ZT13), 4 ; shift-L 1-DW for result without shifts
+ ;; ZT13 = ZT13 xor ZT16 xor GH1H
+ vpternlogq XWORD(%%ZT13), XWORD(%%ZT16), XWORD(%%GH1H), 0x96
+%endif
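+ ;; Rough description of the two reduction phases above: the 256-bit GHASH
+ ;; sum (GH1H:GH1L after folding) is reduced modulo the GCM polynomial using
+ ;; the reflected constant stored at POLY2; phase 1 folds the low 128 bits
+ ;; once, phase 2 folds the partial result twice more and XORs it into GH1H,
+ ;; leaving the reduced 128-bit hash in XWORD(%%ZT13).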
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;; all remaining AES rounds but the last
+%rep (NROUNDS + 2)
+%if (aes_round < (NROUNDS + 1))
+ vbroadcastf64x2 %%ZT3, [%%GDATA + (aes_round * 16)]
+ ZMM_AESENC_ROUND_BLOCKS_0_16 %%ZT1, %%ZT2, no_zmm, no_zmm, \
+ %%ZT3, aes_round, \
+ %%ZT4, %%ZT5, no_zmm, no_zmm, \
+ 8, NROUNDS
+%assign aes_round (aes_round + 1)
+%endif ; aes_round < (NROUNDS + 1)
+%endrep
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;; load/store mask (partial case) and load the text data
+%ifidn %%FULL_PARTIAL, full
+ VX512LDR %%ZT4, [%%PLAIN_CYPH_IN + %%DATA_OFFSET]
+ VX512LDR %%ZT5, [%%PLAIN_CYPH_IN + %%DATA_OFFSET + 64]
+%else
+ lea %%IA0, [rel byte64_len_to_mask_table]
+ mov %%IA1, %%LENGTH
+ sub %%IA1, 64
+ kmovq %%MASKREG, [%%IA0 + 8*%%IA1]
+ VX512LDR %%ZT4, [%%PLAIN_CYPH_IN + %%DATA_OFFSET]
+ vmovdqu8 %%ZT5{%%MASKREG}{z}, [%%PLAIN_CYPH_IN + %%DATA_OFFSET + 64]
+%endif
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;; the last AES round (NROUNDS + 1) and XOR against plain/cipher text
+ vbroadcastf64x2 %%ZT3, [%%GDATA + (aes_round * 16)]
+ ZMM_AESENC_ROUND_BLOCKS_0_16 %%ZT1, %%ZT2, no_zmm, no_zmm, \
+ %%ZT3, aes_round, \
+ %%ZT4, %%ZT5, no_zmm, no_zmm, \
+ 8, NROUNDS
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;; store the cipher/plain text data
+%ifidn %%FULL_PARTIAL, full
+ VX512STR [%%CYPH_PLAIN_OUT + %%DATA_OFFSET], %%ZT1
+ VX512STR [%%CYPH_PLAIN_OUT + %%DATA_OFFSET + 64], %%ZT2
+%else
+ VX512STR [%%CYPH_PLAIN_OUT + %%DATA_OFFSET], %%ZT1
+ vmovdqu8 [%%CYPH_PLAIN_OUT + %%DATA_OFFSET + 64]{%%MASKREG}, %%ZT2
+%endif
+
+ ;; =================================================
+ ;; prep cipher text blocks for the next ghash round
+
+%ifnidn %%FULL_PARTIAL, full
+%ifidn %%INSTANCE_TYPE, multi_call
+ ;; for partial block & multi_call we need encrypted counter block
+ vpxorq %%ZT3, %%ZT2, %%ZT5
+ vextracti32x4 %%AES_PARTIAL_BLOCK, %%ZT3, 3
+%endif
+ ;; for GHASH computation purpose clear the top bytes of the partial block
+%ifidn %%ENC_DEC, ENC
+ vmovdqu8 %%ZT2{%%MASKREG}{z}, %%ZT2
+%else
+ vmovdqu8 %%ZT5{%%MASKREG}{z}, %%ZT5
+%endif
+%endif ; %ifnidn %%FULL_PARTIAL, full
+
+ ;; =================================================
+ ;; shuffle cipher text blocks for GHASH computation
+%ifidn %%ENC_DEC, ENC
+ vpshufb %%GHASHIN_AESOUT_B03, %%ZT1, %%SHFMSK
+ vpshufb %%GHASHIN_AESOUT_B47, %%ZT2, %%SHFMSK
+%else
+ vpshufb %%GHASHIN_AESOUT_B03, %%ZT4, %%SHFMSK
+ vpshufb %%GHASHIN_AESOUT_B47, %%ZT5, %%SHFMSK
+%endif
+
+%ifidn %%DO_REDUCTION, do_reduction
+ ;; =================================================
+ ;; XOR current GHASH value (ZT13) into block 0
+ vpxorq %%GHASHIN_AESOUT_B03, %%ZT13
+%endif
+%ifidn %%DO_REDUCTION, final_reduction
+ ;; =================================================
+ ;; Return GHASH value (ZT13) in TO_REDUCE_L
+ vmovdqa64 %%TO_REDUCE_L, %%ZT13
+%endif
+
+%endmacro ; GHASH_8_ENCRYPT_8_PARALLEL
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;; Main GCM macro stitching cipher with GHASH
+;;; - operates on single stream
+;;; - encrypts 16 blocks at a time
+;;; - ghash the 16 previously encrypted ciphertext blocks
+;;; - no partial block or multi_call handling here
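+;;; Informal note on data flow: the 16 ciphertext blocks to be GHASHed are
+;;; read from a stack area at [rsp + %%GHASHIN_BLK_OFFSET] (stored there by a
+;;; previous pass), while the 16 blocks ciphered here are byte-swapped and
+;;; written to [rsp + %%AESOUT_BLK_OFFSET] for the next pass; GHASH key
+;;; powers are read from %%GDATA at %%HASHKEY_OFFSET, highest power first.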
+%macro GHASH_16_ENCRYPT_16_PARALLEL 42
+%define %%GDATA %1 ; [in] key pointer
+%define %%CYPH_PLAIN_OUT %2 ; [in] pointer to output buffer
+%define %%PLAIN_CYPH_IN %3 ; [in] pointer to input buffer
+%define %%DATA_OFFSET %4 ; [in] data offset
+%define %%CTR_BE %5 ; [in/out] ZMM counter blocks (last 4) in big-endian
+%define %%CTR_CHECK %6 ; [in/out] GP with 8-bit counter for overflow check
+%define %%HASHKEY_OFFSET %7 ; [in] numerical offset for the highest hash key
+%define %%AESOUT_BLK_OFFSET %8 ; [in] numerical offset for AES-CTR out
+%define %%GHASHIN_BLK_OFFSET %9 ; [in] numerical offset for GHASH blocks in
+%define %%SHFMSK %10 ; [in] ZMM with byte swap mask for pshufb
+%define %%ZT1 %11 ; [clobbered] temporary ZMM (cipher)
+%define %%ZT2 %12 ; [clobbered] temporary ZMM (cipher)
+%define %%ZT3 %13 ; [clobbered] temporary ZMM (cipher)
+%define %%ZT4 %14 ; [clobbered] temporary ZMM (cipher)
+%define %%ZT5 %15 ; [clobbered/out] temporary ZMM or GHASH OUT (final_reduction)
+%define %%ZT6 %16 ; [clobbered] temporary ZMM (cipher)
+%define %%ZT7 %17 ; [clobbered] temporary ZMM (cipher)
+%define %%ZT8 %18 ; [clobbered] temporary ZMM (cipher)
+%define %%ZT9 %19 ; [clobbered] temporary ZMM (cipher)
+%define %%ZT10 %20 ; [clobbered] temporary ZMM (ghash)
+%define %%ZT11 %21 ; [clobbered] temporary ZMM (ghash)
+%define %%ZT12 %22 ; [clobbered] temporary ZMM (ghash)
+%define %%ZT13 %23 ; [clobbered] temporary ZMM (ghash)
+%define %%ZT14 %24 ; [clobbered] temporary ZMM (ghash)
+%define %%ZT15 %25 ; [clobbered] temporary ZMM (ghash)
+%define %%ZT16 %26 ; [clobbered] temporary ZMM (ghash)
+%define %%ZT17 %27 ; [clobbered] temporary ZMM (ghash)
+%define %%ZT18 %28 ; [clobbered] temporary ZMM (ghash)
+%define %%ZT19 %29 ; [clobbered] temporary ZMM
+%define %%ZT20 %30 ; [clobbered] temporary ZMM
+%define %%ZT21 %31 ; [clobbered] temporary ZMM
+%define %%ZT22 %32 ; [clobbered] temporary ZMM
+%define %%ZT23 %33 ; [clobbered] temporary ZMM
+%define %%ADDBE_4x4 %34 ; [in] ZMM with 4x128bits 4 in big-endian
+%define %%ADDBE_1234 %35 ; [in] ZMM with 4x128bits 1, 2, 3 and 4 in big-endian
+%define %%TO_REDUCE_L %36 ; [in/out] ZMM for low 4x128-bit GHASH sum
+%define %%TO_REDUCE_H %37 ; [in/out] ZMM for hi 4x128-bit GHASH sum
+%define %%TO_REDUCE_M %38 ; [in/out] ZMM for medium 4x128-bit GHASH sum
+%define %%DO_REDUCTION %39 ; [in] "no_reduction", "final_reduction", "first_time"
+%define %%ENC_DEC %40 ; [in] cipher direction
+%define %%DATA_DISPL %41 ; [in] fixed numerical data displacement/offset
+%define %%GHASH_IN %42 ; [in] current GHASH value or "no_ghash_in"
+
+%define %%B00_03 %%ZT1
+%define %%B04_07 %%ZT2
+%define %%B08_11 %%ZT3
+%define %%B12_15 %%ZT4
+
+%define %%GH1H %%ZT5 ; @note: do not change this mapping
+%define %%GH1L %%ZT6
+%define %%GH1M %%ZT7
+%define %%GH1T %%ZT8
+
+%define %%GH2H %%ZT9
+%define %%GH2L %%ZT10
+%define %%GH2M %%ZT11
+%define %%GH2T %%ZT12
+
+%define %%RED_POLY %%GH2T
+%define %%RED_P1 %%GH2L
+%define %%RED_T1 %%GH2H
+%define %%RED_T2 %%GH2M
+
+%define %%GH3H %%ZT13
+%define %%GH3L %%ZT14
+%define %%GH3M %%ZT15
+%define %%GH3T %%ZT16
+
+%define %%DATA1 %%ZT13
+%define %%DATA2 %%ZT14
+%define %%DATA3 %%ZT15
+%define %%DATA4 %%ZT16
+
+%define %%AESKEY1 %%ZT17
+%define %%AESKEY2 %%ZT18
+
+%define %%GHKEY1 %%ZT19
+%define %%GHKEY2 %%ZT20
+%define %%GHDAT1 %%ZT21
+%define %%GHDAT2 %%ZT22
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;; prepare counter blocks
+
+ cmp BYTE(%%CTR_CHECK), (256 - 16)
+ jae %%_16_blocks_overflow
+ vpaddd %%B00_03, %%CTR_BE, %%ADDBE_1234
+ vpaddd %%B04_07, %%B00_03, %%ADDBE_4x4
+ vpaddd %%B08_11, %%B04_07, %%ADDBE_4x4
+ vpaddd %%B12_15, %%B08_11, %%ADDBE_4x4
+ jmp %%_16_blocks_ok
+%%_16_blocks_overflow:
+ vpshufb %%CTR_BE, %%CTR_BE, %%SHFMSK
+ vmovdqa64 %%B12_15, [rel ddq_add_4444]
+ vpaddd %%B00_03, %%CTR_BE, [rel ddq_add_1234]
+ vpaddd %%B04_07, %%B00_03, %%B12_15
+ vpaddd %%B08_11, %%B04_07, %%B12_15
+ vpaddd %%B12_15, %%B08_11, %%B12_15
+ vpshufb %%B00_03, %%SHFMSK
+ vpshufb %%B04_07, %%SHFMSK
+ vpshufb %%B08_11, %%SHFMSK
+ vpshufb %%B12_15, %%SHFMSK
+%%_16_blocks_ok:
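+ ;; Note on the two paths above: the counters are kept byte-reflected, so
+ ;; the vpaddd on the ADDBE constants is only safe while the low (8-bit)
+ ;; counter byte cannot wrap within the next 16 increments, i.e. while
+ ;; BYTE(%%CTR_CHECK) is below 256 - 16; otherwise the blocks are shuffled
+ ;; to little-endian, bumped with the plain ddq_add constants and shuffled
+ ;; back to big-endian.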
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;; pre-load constants
+ vbroadcastf64x2 %%AESKEY1, [%%GDATA + (16 * 0)]
+%ifnidn %%GHASH_IN, no_ghash_in
+ vpxorq %%GHDAT1, %%GHASH_IN, [rsp + %%GHASHIN_BLK_OFFSET + (0*64)]
+%else
+ vmovdqa64 %%GHDAT1, [rsp + %%GHASHIN_BLK_OFFSET + (0*64)]
+%endif
+ vmovdqu64 %%GHKEY1, [%%GDATA + %%HASHKEY_OFFSET + (0*64)]
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;; save counter for the next round
+ ;; increment counter overflow check register
+ vshufi64x2 %%CTR_BE, %%B12_15, %%B12_15, 1111_1111b
+ add BYTE(%%CTR_CHECK), 16
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;; pre-load constants
+ vbroadcastf64x2 %%AESKEY2, [%%GDATA + (16 * 1)]
+ vmovdqu64 %%GHKEY2, [%%GDATA + %%HASHKEY_OFFSET + (1*64)]
+ vmovdqa64 %%GHDAT2, [rsp + %%GHASHIN_BLK_OFFSET + (1*64)]
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;; stitch AES rounds with GHASH
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;; AES round 0 - ARK
+
+ vpxorq %%B00_03, %%AESKEY1
+ vpxorq %%B04_07, %%AESKEY1
+ vpxorq %%B08_11, %%AESKEY1
+ vpxorq %%B12_15, %%AESKEY1
+ vbroadcastf64x2 %%AESKEY1, [%%GDATA + (16 * 2)]
+
+ ;;==================================================
+ ;; GHASH 4 blocks (15 to 12)
+ vpclmulqdq %%GH1H, %%GHDAT1, %%GHKEY1, 0x11 ; a1*b1
+ vpclmulqdq %%GH1L, %%GHDAT1, %%GHKEY1, 0x00 ; a0*b0
+ vpclmulqdq %%GH1M, %%GHDAT1, %%GHKEY1, 0x01 ; a1*b0
+ vpclmulqdq %%GH1T, %%GHDAT1, %%GHKEY1, 0x10 ; a0*b1
+
+ vmovdqu64 %%GHKEY1, [%%GDATA + %%HASHKEY_OFFSET + (2*64)]
+ vmovdqa64 %%GHDAT1, [rsp + %%GHASHIN_BLK_OFFSET + (2*64)]
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;; AES round 1
+ vaesenc %%B00_03, %%B00_03, %%AESKEY2
+ vaesenc %%B04_07, %%B04_07, %%AESKEY2
+ vaesenc %%B08_11, %%B08_11, %%AESKEY2
+ vaesenc %%B12_15, %%B12_15, %%AESKEY2
+ vbroadcastf64x2 %%AESKEY2, [%%GDATA + (16 * 3)]
+
+ ;; =================================================
+ ;; GHASH 4 blocks (11 to 8)
+ vpclmulqdq %%GH2M, %%GHDAT2, %%GHKEY2, 0x10 ; a0*b1
+ vpclmulqdq %%GH2T, %%GHDAT2, %%GHKEY2, 0x01 ; a1*b0
+ vpclmulqdq %%GH2H, %%GHDAT2, %%GHKEY2, 0x11 ; a1*b1
+ vpclmulqdq %%GH2L, %%GHDAT2, %%GHKEY2, 0x00 ; a0*b0
+
+ vmovdqu64 %%GHKEY2, [%%GDATA + %%HASHKEY_OFFSET + (3*64)]
+ vmovdqa64 %%GHDAT2, [rsp + %%GHASHIN_BLK_OFFSET + (3*64)]
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;; AES round 2
+ vaesenc %%B00_03, %%B00_03, %%AESKEY1
+ vaesenc %%B04_07, %%B04_07, %%AESKEY1
+ vaesenc %%B08_11, %%B08_11, %%AESKEY1
+ vaesenc %%B12_15, %%B12_15, %%AESKEY1
+ vbroadcastf64x2 %%AESKEY1, [%%GDATA + (16 * 4)]
+
+ ;; =================================================
+ ;; GHASH 4 blocks (7 to 4)
+ vpclmulqdq %%GH3M, %%GHDAT1, %%GHKEY1, 0x10 ; a0*b1
+ vpclmulqdq %%GH3T, %%GHDAT1, %%GHKEY1, 0x01 ; a1*b0
+ vpclmulqdq %%GH3H, %%GHDAT1, %%GHKEY1, 0x11 ; a1*b1
+ vpclmulqdq %%GH3L, %%GHDAT1, %%GHKEY1, 0x00 ; a0*b0
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;; AES rounds 3
+ vaesenc %%B00_03, %%B00_03, %%AESKEY2
+ vaesenc %%B04_07, %%B04_07, %%AESKEY2
+ vaesenc %%B08_11, %%B08_11, %%AESKEY2
+ vaesenc %%B12_15, %%B12_15, %%AESKEY2
+ vbroadcastf64x2 %%AESKEY2, [%%GDATA + (16 * 5)]
+
+ ;; =================================================
+ ;; Gather (XOR) GHASH for 12 blocks
+ vpternlogq %%GH1H, %%GH2H, %%GH3H, 0x96
+ vpternlogq %%GH1L, %%GH2L, %%GH3L, 0x96
+ vpternlogq %%GH1T, %%GH2T, %%GH3T, 0x96
+ vpternlogq %%GH1M, %%GH2M, %%GH3M, 0x96
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;; AES rounds 4
+ vaesenc %%B00_03, %%B00_03, %%AESKEY1
+ vaesenc %%B04_07, %%B04_07, %%AESKEY1
+ vaesenc %%B08_11, %%B08_11, %%AESKEY1
+ vaesenc %%B12_15, %%B12_15, %%AESKEY1
+ vbroadcastf64x2 %%AESKEY1, [%%GDATA + (16 * 6)]
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;; load plain/cipher text (recycle GH3xx registers)
+ VX512LDR %%DATA1, [%%PLAIN_CYPH_IN + %%DATA_OFFSET + %%DATA_DISPL + (0 * 64)]
+ VX512LDR %%DATA2, [%%PLAIN_CYPH_IN + %%DATA_OFFSET + %%DATA_DISPL + (1 * 64)]
+ VX512LDR %%DATA3, [%%PLAIN_CYPH_IN + %%DATA_OFFSET + %%DATA_DISPL + (2 * 64)]
+ VX512LDR %%DATA4, [%%PLAIN_CYPH_IN + %%DATA_OFFSET + %%DATA_DISPL + (3 * 64)]
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;; AES rounds 5
+ vaesenc %%B00_03, %%B00_03, %%AESKEY2
+ vaesenc %%B04_07, %%B04_07, %%AESKEY2
+ vaesenc %%B08_11, %%B08_11, %%AESKEY2
+ vaesenc %%B12_15, %%B12_15, %%AESKEY2
+ vbroadcastf64x2 %%AESKEY2, [%%GDATA + (16 * 7)]
+
+ ;; =================================================
+ ;; GHASH 4 blocks (3 to 0)
+ vpclmulqdq %%GH2M, %%GHDAT2, %%GHKEY2, 0x10 ; a0*b1
+ vpclmulqdq %%GH2T, %%GHDAT2, %%GHKEY2, 0x01 ; a1*b0
+ vpclmulqdq %%GH2H, %%GHDAT2, %%GHKEY2, 0x11 ; a1*b1
+ vpclmulqdq %%GH2L, %%GHDAT2, %%GHKEY2, 0x00 ; a0*b0
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;; AES round 6
+ vaesenc %%B00_03, %%B00_03, %%AESKEY1
+ vaesenc %%B04_07, %%B04_07, %%AESKEY1
+ vaesenc %%B08_11, %%B08_11, %%AESKEY1
+ vaesenc %%B12_15, %%B12_15, %%AESKEY1
+ vbroadcastf64x2 %%AESKEY1, [%%GDATA + (16 * 8)]
+
+ ;; =================================================
+ ;; gather GHASH in GH1L (low) and GH1H (high)
+%ifidn %%DO_REDUCTION, first_time
+ vpternlogq %%GH1M, %%GH1T, %%GH2T, 0x96 ; TM
+ vpxorq %%TO_REDUCE_M, %%GH1M, %%GH2M ; TM
+ vpxorq %%TO_REDUCE_H, %%GH1H, %%GH2H ; TH
+ vpxorq %%TO_REDUCE_L, %%GH1L, %%GH2L ; TL
+%endif
+%ifidn %%DO_REDUCTION, no_reduction
+ vpternlogq %%GH1M, %%GH1T, %%GH2T, 0x96 ; TM
+ vpternlogq %%TO_REDUCE_M, %%GH1M, %%GH2M, 0x96 ; TM
+ vpternlogq %%TO_REDUCE_H, %%GH1H, %%GH2H, 0x96 ; TH
+ vpternlogq %%TO_REDUCE_L, %%GH1L, %%GH2L, 0x96 ; TL
+%endif
+%ifidn %%DO_REDUCTION, final_reduction
+ ;; phase 1: add mid products together
+ ;; also load polynomial constant for reduction
+ vpternlogq %%GH1M, %%GH1T, %%GH2T, 0x96 ; TM
+ vpternlogq %%GH1M, %%TO_REDUCE_M, %%GH2M, 0x96
+
+ vpsrldq %%GH2M, %%GH1M, 8
+ vpslldq %%GH1M, %%GH1M, 8
+
+ vmovdqa64 XWORD(%%RED_POLY), [rel POLY2]
+%endif
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;; AES round 7
+ vaesenc %%B00_03, %%B00_03, %%AESKEY2
+ vaesenc %%B04_07, %%B04_07, %%AESKEY2
+ vaesenc %%B08_11, %%B08_11, %%AESKEY2
+ vaesenc %%B12_15, %%B12_15, %%AESKEY2
+ vbroadcastf64x2 %%AESKEY2, [%%GDATA + (16 * 9)]
+
+ ;; =================================================
+ ;; Add mid product to high and low
+%ifidn %%DO_REDUCTION, final_reduction
+ vpternlogq %%GH1H, %%GH2H, %%GH2M, 0x96 ; TH = TH1 + TH2 + TM>>64
+ vpxorq %%GH1H, %%TO_REDUCE_H
+ vpternlogq %%GH1L, %%GH2L, %%GH1M, 0x96 ; TL = TL1 + TL2 + TM<<64
+ vpxorq %%GH1L, %%TO_REDUCE_L
+%endif
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;; AES round 8
+ vaesenc %%B00_03, %%B00_03, %%AESKEY1
+ vaesenc %%B04_07, %%B04_07, %%AESKEY1
+ vaesenc %%B08_11, %%B08_11, %%AESKEY1
+ vaesenc %%B12_15, %%B12_15, %%AESKEY1
+ vbroadcastf64x2 %%AESKEY1, [%%GDATA + (16 * 10)]
+
+ ;; =================================================
+ ;; horizontal xor of low and high 4x128
+%ifidn %%DO_REDUCTION, final_reduction
+ VHPXORI4x128 %%GH1H, %%GH2H
+ VHPXORI4x128 %%GH1L, %%GH2L
+%endif
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;; AES round 9
+ vaesenc %%B00_03, %%B00_03, %%AESKEY2
+ vaesenc %%B04_07, %%B04_07, %%AESKEY2
+ vaesenc %%B08_11, %%B08_11, %%AESKEY2
+ vaesenc %%B12_15, %%B12_15, %%AESKEY2
+%if (NROUNDS >= 11)
+ vbroadcastf64x2 %%AESKEY2, [%%GDATA + (16 * 11)]
+%endif
+ ;; =================================================
+ ;; first phase of reduction
+%ifidn %%DO_REDUCTION, final_reduction
+ vpclmulqdq XWORD(%%RED_P1), XWORD(%%RED_POLY), XWORD(%%GH1L), 0x01
+ vpslldq XWORD(%%RED_P1), XWORD(%%RED_P1), 8 ; shift-L 2 DWs
+ vpxorq XWORD(%%RED_P1), XWORD(%%GH1L), XWORD(%%RED_P1) ; first phase of the reduction
+%endif
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;; AES rounds up to 11 (AES192) or 13 (AES256)
+ ;; AES128 is done
+%if (NROUNDS >= 11)
+ vaesenc %%B00_03, %%B00_03, %%AESKEY1
+ vaesenc %%B04_07, %%B04_07, %%AESKEY1
+ vaesenc %%B08_11, %%B08_11, %%AESKEY1
+ vaesenc %%B12_15, %%B12_15, %%AESKEY1
+ vbroadcastf64x2 %%AESKEY1, [%%GDATA + (16 * 12)]
+
+ vaesenc %%B00_03, %%B00_03, %%AESKEY2
+ vaesenc %%B04_07, %%B04_07, %%AESKEY2
+ vaesenc %%B08_11, %%B08_11, %%AESKEY2
+ vaesenc %%B12_15, %%B12_15, %%AESKEY2
+%if (NROUNDS == 13)
+ vbroadcastf64x2 %%AESKEY2, [%%GDATA + (16 * 13)]
+
+ vaesenc %%B00_03, %%B00_03, %%AESKEY1
+ vaesenc %%B04_07, %%B04_07, %%AESKEY1
+ vaesenc %%B08_11, %%B08_11, %%AESKEY1
+ vaesenc %%B12_15, %%B12_15, %%AESKEY1
+ vbroadcastf64x2 %%AESKEY1, [%%GDATA + (16 * 14)]
+
+ vaesenc %%B00_03, %%B00_03, %%AESKEY2
+ vaesenc %%B04_07, %%B04_07, %%AESKEY2
+ vaesenc %%B08_11, %%B08_11, %%AESKEY2
+ vaesenc %%B12_15, %%B12_15, %%AESKEY2
+%endif ; GCM256 / NROUNDS = 13 (15 including the first and the last)
+%endif ; GCM192 / NROUNDS = 11 (13 including the first and the last)
+
+ ;; =================================================
+ ;; second phase of the reduction
+%ifidn %%DO_REDUCTION, final_reduction
+ vpclmulqdq XWORD(%%RED_T1), XWORD(%%RED_POLY), XWORD(%%RED_P1), 0x00
+ vpsrldq XWORD(%%RED_T1), XWORD(%%RED_T1), 4 ; shift-R 1-DW to obtain 2-DWs shift-R
+
+ vpclmulqdq XWORD(%%RED_T2), XWORD(%%RED_POLY), XWORD(%%RED_P1), 0x10
+ vpslldq XWORD(%%RED_T2), XWORD(%%RED_T2), 4 ; shift-L 1-DW for result without shifts
+ ;; GH1H = GH1H x RED_T1 x RED_T2
+ ;; GH1H = GH1H xor RED_T1 xor RED_T2
+%endif
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;; the last AES round
+ vaesenclast %%B00_03, %%B00_03, %%AESKEY1
+ vaesenclast %%B04_07, %%B04_07, %%AESKEY1
+ vaesenclast %%B08_11, %%B08_11, %%AESKEY1
+ vaesenclast %%B12_15, %%B12_15, %%AESKEY1
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;; XOR against plain/cipher text
+ vpxorq %%B00_03, %%B00_03, %%DATA1
+ vpxorq %%B04_07, %%B04_07, %%DATA2
+ vpxorq %%B08_11, %%B08_11, %%DATA3
+ vpxorq %%B12_15, %%B12_15, %%DATA4
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;; store cipher/plain text
+ VX512STR [%%CYPH_PLAIN_OUT + %%DATA_OFFSET + %%DATA_DISPL + (0 * 64)], %%B00_03
+ VX512STR [%%CYPH_PLAIN_OUT + %%DATA_OFFSET + %%DATA_DISPL + (1 * 64)], %%B04_07
+ VX512STR [%%CYPH_PLAIN_OUT + %%DATA_OFFSET + %%DATA_DISPL + (2 * 64)], %%B08_11
+ VX512STR [%%CYPH_PLAIN_OUT + %%DATA_OFFSET + %%DATA_DISPL + (3 * 64)], %%B12_15
+
+ ;; =================================================
+ ;; shuffle cipher text blocks for GHASH computation
+%ifidn %%ENC_DEC, ENC
+ vpshufb %%B00_03, %%B00_03, %%SHFMSK
+ vpshufb %%B04_07, %%B04_07, %%SHFMSK
+ vpshufb %%B08_11, %%B08_11, %%SHFMSK
+ vpshufb %%B12_15, %%B12_15, %%SHFMSK
+%else
+ vpshufb %%B00_03, %%DATA1, %%SHFMSK
+ vpshufb %%B04_07, %%DATA2, %%SHFMSK
+ vpshufb %%B08_11, %%DATA3, %%SHFMSK
+ vpshufb %%B12_15, %%DATA4, %%SHFMSK
+%endif
+
+ ;; =================================================
+ ;; store shuffled cipher text for ghashing
+ vmovdqa64 [rsp + %%AESOUT_BLK_OFFSET + (0*64)], %%B00_03
+ vmovdqa64 [rsp + %%AESOUT_BLK_OFFSET + (1*64)], %%B04_07
+ vmovdqa64 [rsp + %%AESOUT_BLK_OFFSET + (2*64)], %%B08_11
+ vmovdqa64 [rsp + %%AESOUT_BLK_OFFSET + (3*64)], %%B12_15
+
+%ifidn %%DO_REDUCTION, final_reduction
+ ;; =================================================
+ ;; Return GHASH value through %%GH1H
+%endif
+
+%endmacro ; GHASH_16_ENCRYPT_16_PARALLEL
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;; GHASH the last 8 ciphertext blocks.
+;;; - optionally accepts GHASH product sums as input
+%macro GHASH_LAST_8 10-13
+%define %%GDATA %1 ; [in] key pointer
+%define %%BL47 %2 ; [in/clobbered] ZMM AES blocks 4 to 7
+%define %%BL03          %3      ; [in/clobbered] ZMM AES blocks 0 to 3
+%define %%ZTH           %4      ; [clobbered] ZMM temporary
+%define %%ZTM           %5      ; [clobbered] ZMM temporary
+%define %%ZTL           %6      ; [clobbered] ZMM temporary
+%define %%ZT01          %7      ; [clobbered] ZMM temporary
+%define %%ZT02          %8      ; [clobbered] ZMM temporary
+%define %%ZT03          %9      ; [clobbered] ZMM temporary
+%define %%AAD_HASH %10 ; [out] XMM hash value
+%define %%GH %11 ; [in/optional] ZMM with GHASH high product sum
+%define %%GL %12 ; [in/optional] ZMM with GHASH low product sum
+%define %%GM %13 ; [in/optional] ZMM with GHASH mid product sum
+
+ VCLMUL_STEP1 %%GDATA, %%BL47, %%ZT01, %%ZTH, %%ZTM, %%ZTL
+
+%if %0 > 10
+ ;; add optional sums before step2
+ vpxorq %%ZTH, %%ZTH, %%GH
+ vpxorq %%ZTL, %%ZTL, %%GL
+ vpxorq %%ZTM, %%ZTM, %%GM
+%endif
+
+ VCLMUL_STEP2 %%GDATA, %%BL47, %%BL03, %%ZT01, %%ZT02, %%ZT03, %%ZTH, %%ZTM, %%ZTL
+
+ vmovdqa64 XWORD(%%ZT03), [rel POLY2]
+ VCLMUL_REDUCE %%AAD_HASH, XWORD(%%ZT03), XWORD(%%BL47), XWORD(%%BL03), \
+ XWORD(%%ZT01), XWORD(%%ZT02)
+%endmacro ; GHASH_LAST_8
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;; GHASH the last 7 cipher text blocks.
+;;; - it uses the same GHASH macros as GHASH_LAST_8 but with a twist
+;;; - it loads GHASH keys for each of the data blocks, so that:
+;;; - blocks 4, 5 and 6 will use GHASH keys 3, 2, 1 respectively
+;;; - code ensures that unused block 7 and corresponding GHASH key are zeroed
+;;; (clmul product is zero this way and will not affect the result)
+;;; - blocks 0, 1, 2 and 3 will use GHASH keys 7, 6, 5 and 4 respectively
+;;; - optionally accepts GHASH product sums as input
+%macro GHASH_LAST_7 13-16
+%define %%GDATA %1 ; [in] key pointer
+%define %%BL47 %2 ; [in/clobbered] ZMM AES blocks 4 to 7
+%define %%BL03          %3      ; [in/clobbered] ZMM AES blocks 0 to 3
+%define %%ZTH           %4      ; [clobbered] ZMM temporary
+%define %%ZTM           %5      ; [clobbered] ZMM temporary
+%define %%ZTL           %6      ; [clobbered] ZMM temporary
+%define %%ZT01          %7      ; [clobbered] ZMM temporary
+%define %%ZT02          %8      ; [clobbered] ZMM temporary
+%define %%ZT03          %9      ; [clobbered] ZMM temporary
+%define %%ZT04          %10     ; [clobbered] ZMM temporary
+%define %%AAD_HASH %11 ; [out] XMM hash value
+%define %%MASKREG %12 ; [clobbered] mask register to use for loads
+%define %%IA0 %13 ; [clobbered] GP temporary register
+%define %%GH %14 ; [in/optional] ZMM with GHASH high product sum
+%define %%GL %15 ; [in/optional] ZMM with GHASH low product sum
+%define %%GM %16 ; [in/optional] ZMM with GHASH mid product sum
+
+ vmovdqa64 XWORD(%%ZT04), [rel POLY2]
+
+ VCLMUL_1_TO_8_STEP1 %%GDATA, %%BL47, %%ZT01, %%ZT02, %%ZTH, %%ZTM, %%ZTL, 7
+
+%if %0 > 13
+ ;; add optional sums before step2
+ vpxorq %%ZTH, %%ZTH, %%GH
+ vpxorq %%ZTL, %%ZTL, %%GL
+ vpxorq %%ZTM, %%ZTM, %%GM
+%endif
+
+ VCLMUL_1_TO_8_STEP2 %%GDATA, %%BL47, %%BL03, \
+ %%ZT01, %%ZT02, %%ZT03, \
+ %%ZTH, %%ZTM, %%ZTL, 7
+
+ VCLMUL_REDUCE %%AAD_HASH, XWORD(%%ZT04), XWORD(%%BL47), XWORD(%%BL03), \
+ XWORD(%%ZT01), XWORD(%%ZT02)
+%endmacro ; GHASH_LAST_7
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;; Encryption of a single block
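+;;; - initial key xor, NROUNDS middle rounds, then the final vaesenclast
+;;;   (NROUNDS is 9/11/13 for AES128/192/256 respectively)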
+%macro ENCRYPT_SINGLE_BLOCK 2
+%define %%GDATA %1
+%define %%XMM0 %2
+
+ vpxorq %%XMM0, %%XMM0, [%%GDATA+16*0]
+%assign i 1
+%rep NROUNDS
+ vaesenc %%XMM0, [%%GDATA+16*i]
+%assign i (i+1)
+%endrep
+ vaesenclast %%XMM0, [%%GDATA+16*i]
+%endmacro
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;; Save register content for the caller
+%macro FUNC_SAVE 0
+        ;; Required for Update/GCM_ENC
+        ;; the number of pushes must equal STACK_OFFSET
+ mov rax, rsp
+
+ sub rsp, STACK_FRAME_SIZE
+ and rsp, ~63
+
+ mov [rsp + STACK_GP_OFFSET + 0*8], r12
+ mov [rsp + STACK_GP_OFFSET + 1*8], r13
+ mov [rsp + STACK_GP_OFFSET + 2*8], r14
+ mov [rsp + STACK_GP_OFFSET + 3*8], r15
+ mov [rsp + STACK_GP_OFFSET + 4*8], rax ; stack
+ mov r14, rax ; r14 is used to retrieve stack args
+ mov [rsp + STACK_GP_OFFSET + 5*8], rbp
+ mov [rsp + STACK_GP_OFFSET + 6*8], rbx
+%ifidn __OUTPUT_FORMAT__, win64
+ mov [rsp + STACK_GP_OFFSET + 7*8], rdi
+ mov [rsp + STACK_GP_OFFSET + 8*8], rsi
+%endif
+
+%ifidn __OUTPUT_FORMAT__, win64
+ ; xmm6:xmm15 need to be maintained for Windows
+ vmovdqu [rsp + STACK_XMM_OFFSET + 0*16], xmm6
+ vmovdqu [rsp + STACK_XMM_OFFSET + 1*16], xmm7
+ vmovdqu [rsp + STACK_XMM_OFFSET + 2*16], xmm8
+ vmovdqu [rsp + STACK_XMM_OFFSET + 3*16], xmm9
+ vmovdqu [rsp + STACK_XMM_OFFSET + 4*16], xmm10
+ vmovdqu [rsp + STACK_XMM_OFFSET + 5*16], xmm11
+ vmovdqu [rsp + STACK_XMM_OFFSET + 6*16], xmm12
+ vmovdqu [rsp + STACK_XMM_OFFSET + 7*16], xmm13
+ vmovdqu [rsp + STACK_XMM_OFFSET + 8*16], xmm14
+ vmovdqu [rsp + STACK_XMM_OFFSET + 9*16], xmm15
+%endif
+%endmacro
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;; Restore register content for the caller
+%macro FUNC_RESTORE 0
+
+%ifdef SAFE_DATA
+ clear_scratch_gps_asm
+ clear_scratch_zmms_asm
+%else
+ vzeroupper
+%endif
+
+%ifidn __OUTPUT_FORMAT__, win64
+ vmovdqu xmm15, [rsp + STACK_XMM_OFFSET + 9*16]
+ vmovdqu xmm14, [rsp + STACK_XMM_OFFSET + 8*16]
+ vmovdqu xmm13, [rsp + STACK_XMM_OFFSET + 7*16]
+ vmovdqu xmm12, [rsp + STACK_XMM_OFFSET + 6*16]
+ vmovdqu xmm11, [rsp + STACK_XMM_OFFSET + 5*16]
+ vmovdqu xmm10, [rsp + STACK_XMM_OFFSET + 4*16]
+ vmovdqu xmm9, [rsp + STACK_XMM_OFFSET + 3*16]
+ vmovdqu xmm8, [rsp + STACK_XMM_OFFSET + 2*16]
+ vmovdqu xmm7, [rsp + STACK_XMM_OFFSET + 1*16]
+ vmovdqu xmm6, [rsp + STACK_XMM_OFFSET + 0*16]
+%endif
+
+        ;; Required for Update/GCM_ENC
+ mov rbp, [rsp + STACK_GP_OFFSET + 5*8]
+ mov rbx, [rsp + STACK_GP_OFFSET + 6*8]
+%ifidn __OUTPUT_FORMAT__, win64
+ mov rdi, [rsp + STACK_GP_OFFSET + 7*8]
+ mov rsi, [rsp + STACK_GP_OFFSET + 8*8]
+%endif
+ mov r12, [rsp + STACK_GP_OFFSET + 0*8]
+ mov r13, [rsp + STACK_GP_OFFSET + 1*8]
+ mov r14, [rsp + STACK_GP_OFFSET + 2*8]
+ mov r15, [rsp + STACK_GP_OFFSET + 3*8]
+ mov rsp, [rsp + STACK_GP_OFFSET + 4*8] ; stack
+%endmacro
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;; GCM_INIT initializes a gcm_context_data struct to prepare for encoding/decoding.
+;;; Input: gcm_key_data * (GDATA_KEY), gcm_context_data *(GDATA_CTX), IV,
+;;; Additional Authentication data (A_IN), Additional Data length (A_LEN).
+;;; Output: Updated GDATA_CTX with the hash of A_IN (AadHash) and the other parts of GDATA_CTX initialized.
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+%macro GCM_INIT 21
+%define %%GDATA_KEY %1 ; [in] GCM expanded keys pointer
+%define %%GDATA_CTX %2 ; [in] GCM context pointer
+%define %%IV %3 ; [in] IV pointer
+%define %%A_IN %4 ; [in] AAD pointer
+%define %%A_LEN %5 ; [in] AAD length in bytes
+%define %%GPR1 %6 ; [clobbered] GP register
+%define %%GPR2 %7 ; [clobbered] GP register
+%define %%GPR3 %8 ; [clobbered] GP register
+%define %%MASKREG %9 ; [clobbered] mask register
+%define %%AAD_HASH %10 ; [out] XMM for AAD_HASH value (xmm14)
+%define %%CUR_COUNT %11 ; [out] XMM with current counter (xmm2)
+%define %%ZT0 %12 ; [clobbered] ZMM register
+%define %%ZT1 %13 ; [clobbered] ZMM register
+%define %%ZT2 %14 ; [clobbered] ZMM register
+%define %%ZT3 %15 ; [clobbered] ZMM register
+%define %%ZT4 %16 ; [clobbered] ZMM register
+%define %%ZT5 %17 ; [clobbered] ZMM register
+%define %%ZT6 %18 ; [clobbered] ZMM register
+%define %%ZT7 %19 ; [clobbered] ZMM register
+%define %%ZT8 %20 ; [clobbered] ZMM register
+%define %%ZT9 %21 ; [clobbered] ZMM register
+
+ CALC_AAD_HASH %%A_IN, %%A_LEN, %%AAD_HASH, %%GDATA_KEY, \
+ %%ZT0, %%ZT1, %%ZT2, %%ZT3, %%ZT4, %%ZT5, %%ZT6, %%ZT7, %%ZT8, %%ZT9, \
+ %%GPR1, %%GPR2, %%GPR3, %%MASKREG
+
+ mov %%GPR1, %%A_LEN
+ vmovdqu64 [%%GDATA_CTX + AadHash], %%AAD_HASH ; ctx.aad hash = aad_hash
+ mov [%%GDATA_CTX + AadLen], %%GPR1 ; ctx.aad_length = aad_length
+
+ xor %%GPR1, %%GPR1
+ mov [%%GDATA_CTX + InLen], %%GPR1 ; ctx.in_length = 0
+ mov [%%GDATA_CTX + PBlockLen], %%GPR1 ; ctx.partial_block_length = 0
+
+ ;; read 12 IV bytes and pad with 0x00000001
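+        ;; (counter block J0 for a 96-bit IV: J0 = IV || 0x00000001)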
+ vmovdqu8 %%CUR_COUNT, [rel ONEf]
+ mov %%GPR2, %%IV
+ mov %%GPR1, 0x0000_0000_0000_0fff
+ kmovq %%MASKREG, %%GPR1
+ vmovdqu8 %%CUR_COUNT{%%MASKREG}, [%%GPR2] ; ctr = IV | 0x1
+
+ vmovdqu64 [%%GDATA_CTX + OrigIV], %%CUR_COUNT ; ctx.orig_IV = iv
+
+ ;; store IV as counter in LE format
+ vpshufb %%CUR_COUNT, [rel SHUF_MASK]
+ vmovdqu [%%GDATA_CTX + CurCount], %%CUR_COUNT ; ctx.current_counter = iv
+%endmacro
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;; Cipher and ghash of payloads shorter than 256 bytes
+;;; - number of blocks in the message comes as argument
+;;; - depending on the number of blocks an optimized variant of
+;;; INITIAL_BLOCKS_PARTIAL is invoked
+%macro GCM_ENC_DEC_SMALL 42
+%define %%GDATA_KEY %1 ; [in] key pointer
+%define %%GDATA_CTX %2 ; [in] context pointer
+%define %%CYPH_PLAIN_OUT %3 ; [in] output buffer
+%define %%PLAIN_CYPH_IN %4 ; [in] input buffer
+%define %%PLAIN_CYPH_LEN %5 ; [in] buffer length
+%define %%ENC_DEC %6 ; [in] cipher direction
+%define %%DATA_OFFSET %7 ; [in] data offset
+%define %%LENGTH %8 ; [in] data length
+%define %%NUM_BLOCKS %9 ; [in] number of blocks to process 1 to 16
+%define %%CTR %10 ; [in/out] XMM counter block
+%define %%HASH_IN_OUT %11 ; [in/out] XMM GHASH value
+%define %%INSTANCE_TYPE %12 ; [in] single or multi call
+%define %%ZTMP0 %13 ; [clobbered] ZMM register
+%define %%ZTMP1 %14 ; [clobbered] ZMM register
+%define %%ZTMP2 %15 ; [clobbered] ZMM register
+%define %%ZTMP3 %16 ; [clobbered] ZMM register
+%define %%ZTMP4 %17 ; [clobbered] ZMM register
+%define %%ZTMP5 %18 ; [clobbered] ZMM register
+%define %%ZTMP6 %19 ; [clobbered] ZMM register
+%define %%ZTMP7 %20 ; [clobbered] ZMM register
+%define %%ZTMP8 %21 ; [clobbered] ZMM register
+%define %%ZTMP9 %22 ; [clobbered] ZMM register
+%define %%ZTMP10 %23 ; [clobbered] ZMM register
+%define %%ZTMP11 %24 ; [clobbered] ZMM register
+%define %%ZTMP12 %25 ; [clobbered] ZMM register
+%define %%ZTMP13 %26 ; [clobbered] ZMM register
+%define %%ZTMP14 %27 ; [clobbered] ZMM register
+%define %%ZTMP15 %28 ; [clobbered] ZMM register
+%define %%ZTMP16 %29 ; [clobbered] ZMM register
+%define %%ZTMP17 %30 ; [clobbered] ZMM register
+%define %%ZTMP18 %31 ; [clobbered] ZMM register
+%define %%ZTMP19 %32 ; [clobbered] ZMM register
+%define %%ZTMP20 %33 ; [clobbered] ZMM register
+%define %%ZTMP21 %34 ; [clobbered] ZMM register
+%define %%ZTMP22 %35 ; [clobbered] ZMM register
+%define %%GH %36 ; [in] ZMM ghash sum (high)
+%define %%GL %37 ; [in] ZMM ghash sum (low)
+%define %%GM %38 ; [in] ZMM ghash sum (middle)
+%define %%IA0 %39 ; [clobbered] GP register
+%define %%IA1 %40 ; [clobbered] GP register
+%define %%MASKREG %41 ; [clobbered] mask register
+%define %%SHUFMASK %42 ; [in] ZMM with BE/LE shuffle mask
+
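+        ;; Binary-search style dispatch on %%NUM_BLOCKS (1 to 16) to reach the
+        ;; matching INITIAL_BLOCKS_PARTIAL variant generated below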
+ cmp %%NUM_BLOCKS, 8
+ je %%_small_initial_num_blocks_is_8
+ jl %%_small_initial_num_blocks_is_7_1
+
+
+ cmp %%NUM_BLOCKS, 12
+ je %%_small_initial_num_blocks_is_12
+ jl %%_small_initial_num_blocks_is_11_9
+
+ ;; 16, 15, 14 or 13
+ cmp %%NUM_BLOCKS, 16
+ je %%_small_initial_num_blocks_is_16
+ cmp %%NUM_BLOCKS, 15
+ je %%_small_initial_num_blocks_is_15
+ cmp %%NUM_BLOCKS, 14
+ je %%_small_initial_num_blocks_is_14
+ jmp %%_small_initial_num_blocks_is_13
+
+%%_small_initial_num_blocks_is_11_9:
+ ;; 11, 10 or 9
+ cmp %%NUM_BLOCKS, 11
+ je %%_small_initial_num_blocks_is_11
+ cmp %%NUM_BLOCKS, 10
+ je %%_small_initial_num_blocks_is_10
+ jmp %%_small_initial_num_blocks_is_9
+
+%%_small_initial_num_blocks_is_7_1:
+ cmp %%NUM_BLOCKS, 4
+ je %%_small_initial_num_blocks_is_4
+ jl %%_small_initial_num_blocks_is_3_1
+ ;; 7, 6 or 5
+ cmp %%NUM_BLOCKS, 7
+ je %%_small_initial_num_blocks_is_7
+ cmp %%NUM_BLOCKS, 6
+ je %%_small_initial_num_blocks_is_6
+ jmp %%_small_initial_num_blocks_is_5
+
+%%_small_initial_num_blocks_is_3_1:
+ ;; 3, 2 or 1
+ cmp %%NUM_BLOCKS, 3
+ je %%_small_initial_num_blocks_is_3
+ cmp %%NUM_BLOCKS, 2
+ je %%_small_initial_num_blocks_is_2
+
+        ;; for %%NUM_BLOCKS == 1, just fall through; no 'jmp' is needed
+
+        ;; Use %rep to generate the different block count variants
+        ;; - the 1 block variant has to come first (it is the fall-through target)
+%assign num_blocks 1
+%rep 16
+%%_small_initial_num_blocks_is_ %+ num_blocks :
+ INITIAL_BLOCKS_PARTIAL %%GDATA_KEY, %%GDATA_CTX, %%CYPH_PLAIN_OUT, \
+ %%PLAIN_CYPH_IN, %%LENGTH, %%DATA_OFFSET, num_blocks, \
+ %%CTR, %%HASH_IN_OUT, %%ENC_DEC, %%INSTANCE_TYPE, \
+ %%ZTMP0, %%ZTMP1, %%ZTMP2, %%ZTMP3, %%ZTMP4, \
+ %%ZTMP5, %%ZTMP6, %%ZTMP7, %%ZTMP8, %%ZTMP9, \
+ %%ZTMP10, %%ZTMP11, %%ZTMP12, %%ZTMP13, %%ZTMP14, \
+ %%ZTMP15, %%ZTMP16, %%ZTMP17, %%ZTMP18, %%ZTMP19, \
+ %%ZTMP20, %%ZTMP21, %%ZTMP22, \
+ %%GH, %%GL, %%GM, \
+ %%IA0, %%IA1, %%MASKREG, %%SHUFMASK
+%if num_blocks != 16
+ jmp %%_small_initial_blocks_encrypted
+%endif
+%assign num_blocks (num_blocks + 1)
+%endrep
+
+%%_small_initial_blocks_encrypted:
+
+%endmacro ; GCM_ENC_DEC_SMALL
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; GCM_ENC_DEC Encodes/Decodes given data. Assumes that the passed gcm_context_data struct
+; has been initialized by GCM_INIT
+; Requires the input data to be at least 1 byte long because of READ_SMALL_INPUT_DATA.
+; Input: gcm_key_data struct* (GDATA_KEY), gcm_context_data *(GDATA_CTX), input text (PLAIN_CYPH_IN),
+; input text length (PLAIN_CYPH_LEN) and whether encoding or decoding (ENC_DEC).
+; Output: A cypher of the given plain text (CYPH_PLAIN_OUT), and updated GDATA_CTX
+; Clobbers rax, r10-r15, and zmm0-zmm31, k1
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+%macro GCM_ENC_DEC 7
+%define %%GDATA_KEY %1 ; [in] key pointer
+%define %%GDATA_CTX %2 ; [in] context pointer
+%define %%CYPH_PLAIN_OUT %3 ; [in] output buffer pointer
+%define %%PLAIN_CYPH_IN %4 ; [in] input buffer pointer
+%define %%PLAIN_CYPH_LEN %5 ; [in] buffer length
+%define %%ENC_DEC %6 ; [in] cipher direction
+%define %%INSTANCE_TYPE %7 ; [in] 'single_call' or 'multi_call' selection
+
+%define %%IA0 r10
+%define %%IA1 r12
+%define %%IA2 r13
+%define %%IA3 r15
+%define %%IA4 r11
+%define %%IA5 rax
+
+%define %%LENGTH %%IA2
+%define %%CTR_CHECK %%IA3
+%define %%DATA_OFFSET %%IA4
+
+%define %%HASHK_PTR %%IA5
+
+%define %%GCM_INIT_CTR_BLOCK xmm2 ; hardcoded in GCM_INIT for now
+
+%define %%AES_PARTIAL_BLOCK xmm8
+%define %%CTR_BLOCK2z zmm18
+%define %%CTR_BLOCKz zmm9
+%define %%CTR_BLOCKx xmm9
+%define %%AAD_HASHz zmm14
+%define %%AAD_HASHx xmm14
+
+;;; ZTMP0 - ZTMP12 - used in by8 code, by128/48 code and GCM_ENC_DEC_SMALL
+%define %%ZTMP0 zmm0
+%define %%ZTMP1 zmm3
+%define %%ZTMP2 zmm4
+%define %%ZTMP3 zmm5
+%define %%ZTMP4 zmm6
+%define %%ZTMP5 zmm7
+%define %%ZTMP6 zmm10
+%define %%ZTMP7 zmm11
+%define %%ZTMP8 zmm12
+%define %%ZTMP9 zmm13
+%define %%ZTMP10 zmm15
+%define %%ZTMP11 zmm16
+%define %%ZTMP12 zmm17
+
+;;; ZTMP13 - ZTMP22 - used in by128/48 code and GCM_ENC_DEC_SMALL
+;;;       - some are also used by the by8 code through TMPxy names
+%define %%ZTMP13 zmm19
+%define %%ZTMP14 zmm20
+%define %%ZTMP15 zmm21
+%define %%ZTMP16 zmm30 ; can be used in very/big_loop part
+%define %%ZTMP17 zmm31 ; can be used in very/big_loop part
+%define %%ZTMP18 zmm1
+%define %%ZTMP19 zmm2
+%define %%ZTMP20 zmm8
+%define %%ZTMP21 zmm22
+%define %%ZTMP22 zmm23
+
+;;; Free to use: zmm24 - zmm29
+;;; - used by by128/48 and by8
+%define %%GH zmm24
+%define %%GL zmm25
+%define %%GM zmm26
+%define %%SHUF_MASK zmm29
+%define %%CTR_BLOCK_SAVE zmm28
+
+;;; - used by by128/48 code only
+%define %%ADDBE_4x4 zmm27
+%define %%ADDBE_1234 zmm28 ; conflicts with CTR_BLOCK_SAVE
+
+;; used by8 code only
+%define %%GH4KEY %%ZTMP17
+%define %%GH8KEY %%ZTMP16
+%define %%BLK0 %%ZTMP18
+%define %%BLK1 %%ZTMP19
+%define %%ADD8BE zmm27
+%define %%ADD8LE %%ZTMP13
+
+%define %%MASKREG k1
+
+%ifdef GCM_BIG_DATA
+;; reduction every 128 blocks, depth 32 blocks
+;; @note 128 blocks is the maximum capacity of the stack frame when
+;; GCM_BIG_DATA is defined
+%assign very_big_loop_nblocks 128
+%assign very_big_loop_depth 32
+%endif
+
+;; reduction every 48 blocks, depth 32 blocks
+;; @note 48 blocks is the maximum capacity of the stack frame when
+;; GCM_BIG_DATA is not defined
+%assign big_loop_nblocks 48
+%assign big_loop_depth 32
+
+;;; Macro flow:
+;;; - for messages bigger than very_big_loop_nblocks, process data
+;;;   with the "very_big_loop" parameters
+;;; - for messages bigger than big_loop_nblocks, process data
+;;;   with the "big_loop" parameters
+;;; - calculate the number of 16-byte blocks in the message
+;;; - process (number of 16-byte blocks) mod 8
+;;;   '%%_initial_num_blocks_is_# .. %%_initial_blocks_encrypted'
+;;; - process 8 x 16-byte blocks at a time until all are done in %%_encrypt_by_8_new
+
+%ifidn __OUTPUT_FORMAT__, win64
+ cmp %%PLAIN_CYPH_LEN, 0
+%else
+ or %%PLAIN_CYPH_LEN, %%PLAIN_CYPH_LEN
+%endif
+ je %%_enc_dec_done
+
+ xor %%DATA_OFFSET, %%DATA_OFFSET
+
+ ;; Update length of data processed
+%ifidn __OUTPUT_FORMAT__, win64
+ mov %%IA0, %%PLAIN_CYPH_LEN
+ add [%%GDATA_CTX + InLen], %%IA0
+%else
+ add [%%GDATA_CTX + InLen], %%PLAIN_CYPH_LEN
+%endif
+ vmovdqu64 %%AAD_HASHx, [%%GDATA_CTX + AadHash]
+
+%ifidn %%INSTANCE_TYPE, multi_call
+        ;; NOTE: partial block processing only makes sense for multi_call here.
+ ;; Used for the update flow - if there was a previous partial
+ ;; block fill the remaining bytes here.
+ PARTIAL_BLOCK %%GDATA_KEY, %%GDATA_CTX, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, \
+ %%PLAIN_CYPH_LEN, %%DATA_OFFSET, %%AAD_HASHx, %%ENC_DEC, \
+ %%IA0, %%IA1, %%IA2, %%ZTMP0, %%ZTMP1, %%ZTMP2, %%ZTMP3, %%ZTMP4, \
+ %%ZTMP5, %%ZTMP6, %%ZTMP7, %%ZTMP8, %%ZTMP9, %%MASKREG
+%endif
+
+ ;; lift counter block from GCM_INIT to here
+%ifidn %%INSTANCE_TYPE, single_call
+ vmovdqu64 %%CTR_BLOCKx, %%GCM_INIT_CTR_BLOCK
+%else
+ vmovdqu64 %%CTR_BLOCKx, [%%GDATA_CTX + CurCount]
+%endif
+
+ ;; Save the amount of data left to process in %%LENGTH
+ mov %%LENGTH, %%PLAIN_CYPH_LEN
+%ifidn %%INSTANCE_TYPE, multi_call
+ ;; NOTE: %%DATA_OFFSET is zero in single_call case.
+ ;; Consequently PLAIN_CYPH_LEN will never be zero after
+ ;; %%DATA_OFFSET subtraction below.
+ ;; There may be no more data if it was consumed in the partial block.
+ sub %%LENGTH, %%DATA_OFFSET
+ je %%_enc_dec_done
+%endif ; %%INSTANCE_TYPE, multi_call
+
+ vmovdqa64 %%SHUF_MASK, [rel SHUF_MASK]
+ vmovdqa64 %%ADDBE_4x4, [rel ddq_addbe_4444]
+
+%ifdef GCM_BIG_DATA
+ vmovdqa64 %%ADDBE_1234, [rel ddq_addbe_1234]
+
+ cmp %%LENGTH, (very_big_loop_nblocks * 16)
+ jl %%_message_below_very_big_nblocks
+
+ INITIAL_BLOCKS_Nx16 %%PLAIN_CYPH_IN, %%CYPH_PLAIN_OUT, %%GDATA_KEY, %%DATA_OFFSET, \
+ %%AAD_HASHz, %%CTR_BLOCKz, %%CTR_CHECK, \
+ %%ZTMP0, %%ZTMP1, %%ZTMP2, %%ZTMP3, \
+ %%ZTMP4, %%ZTMP5, %%ZTMP6, %%ZTMP7, \
+ %%ZTMP8, %%ZTMP9, %%ZTMP10, %%ZTMP11, \
+ %%ZTMP12, %%ZTMP13, %%ZTMP14, %%ZTMP15, \
+ %%ZTMP16, %%ZTMP17, %%ZTMP18, %%ZTMP19, \
+ %%ZTMP20, %%ZTMP21, %%ZTMP22, \
+ %%GH, %%GL, %%GM, \
+ %%ADDBE_4x4, %%ADDBE_1234, \
+ %%SHUF_MASK, %%ENC_DEC, very_big_loop_nblocks, very_big_loop_depth
+
+ sub %%LENGTH, (very_big_loop_nblocks * 16)
+ cmp %%LENGTH, (very_big_loop_nblocks * 16)
+ jl %%_no_more_very_big_nblocks
+
+%%_encrypt_very_big_nblocks:
+ GHASH_ENCRYPT_Nx16_PARALLEL \
+ %%PLAIN_CYPH_IN, %%CYPH_PLAIN_OUT, %%GDATA_KEY, %%DATA_OFFSET, \
+ %%CTR_BLOCKz, %%SHUF_MASK, \
+ %%ZTMP0, %%ZTMP1, %%ZTMP2, %%ZTMP3, \
+ %%ZTMP4, %%ZTMP5, %%ZTMP6, %%ZTMP7, \
+ %%ZTMP8, %%ZTMP9, %%ZTMP10, %%ZTMP11, \
+ %%ZTMP12, %%ZTMP13, %%ZTMP14, %%ZTMP15, \
+ %%ZTMP16, %%ZTMP17, %%ZTMP18, %%ZTMP19, \
+ %%ZTMP20, %%ZTMP21, %%ZTMP22, \
+ %%GH, %%GL, %%GM, \
+ %%ADDBE_4x4, %%ADDBE_1234, %%AAD_HASHz, \
+ %%ENC_DEC, very_big_loop_nblocks, very_big_loop_depth, %%CTR_CHECK
+
+ sub %%LENGTH, (very_big_loop_nblocks * 16)
+ cmp %%LENGTH, (very_big_loop_nblocks * 16)
+ jge %%_encrypt_very_big_nblocks
+
+%%_no_more_very_big_nblocks:
+ vpshufb %%CTR_BLOCKx, XWORD(%%SHUF_MASK)
+ vmovdqa64 XWORD(%%CTR_BLOCK_SAVE), %%CTR_BLOCKx
+
+ GHASH_LAST_Nx16 %%GDATA_KEY, %%AAD_HASHz, \
+ %%ZTMP0, %%ZTMP1, %%ZTMP2, %%ZTMP3, \
+ %%ZTMP4, %%ZTMP5, %%ZTMP6, %%ZTMP7, \
+ %%ZTMP8, %%ZTMP9, %%ZTMP10, %%ZTMP11, \
+ %%ZTMP12, %%ZTMP13, %%ZTMP14, %%ZTMP15, \
+ %%GH, %%GL, %%GM, very_big_loop_nblocks, very_big_loop_depth
+
+ or %%LENGTH, %%LENGTH
+ jz %%_ghash_done
+
+%%_message_below_very_big_nblocks:
+%endif ; GCM_BIG_DATA
+
+ cmp %%LENGTH, (big_loop_nblocks * 16)
+ jl %%_message_below_big_nblocks
+
+ ;; overwritten above by CTR_BLOCK_SAVE
+ vmovdqa64 %%ADDBE_1234, [rel ddq_addbe_1234]
+
+ INITIAL_BLOCKS_Nx16 %%PLAIN_CYPH_IN, %%CYPH_PLAIN_OUT, %%GDATA_KEY, %%DATA_OFFSET, \
+ %%AAD_HASHz, %%CTR_BLOCKz, %%CTR_CHECK, \
+ %%ZTMP0, %%ZTMP1, %%ZTMP2, %%ZTMP3, \
+ %%ZTMP4, %%ZTMP5, %%ZTMP6, %%ZTMP7, \
+ %%ZTMP8, %%ZTMP9, %%ZTMP10, %%ZTMP11, \
+ %%ZTMP12, %%ZTMP13, %%ZTMP14, %%ZTMP15, \
+ %%ZTMP16, %%ZTMP17, %%ZTMP18, %%ZTMP19, \
+ %%ZTMP20, %%ZTMP21, %%ZTMP22, \
+ %%GH, %%GL, %%GM, \
+ %%ADDBE_4x4, %%ADDBE_1234, \
+ %%SHUF_MASK, %%ENC_DEC, big_loop_nblocks, big_loop_depth
+
+ sub %%LENGTH, (big_loop_nblocks * 16)
+ cmp %%LENGTH, (big_loop_nblocks * 16)
+ jl %%_no_more_big_nblocks
+
+%%_encrypt_big_nblocks:
+ GHASH_ENCRYPT_Nx16_PARALLEL \
+ %%PLAIN_CYPH_IN, %%CYPH_PLAIN_OUT, %%GDATA_KEY, %%DATA_OFFSET, \
+ %%CTR_BLOCKz, %%SHUF_MASK, \
+ %%ZTMP0, %%ZTMP1, %%ZTMP2, %%ZTMP3, \
+ %%ZTMP4, %%ZTMP5, %%ZTMP6, %%ZTMP7, \
+ %%ZTMP8, %%ZTMP9, %%ZTMP10, %%ZTMP11, \
+ %%ZTMP12, %%ZTMP13, %%ZTMP14, %%ZTMP15, \
+ %%ZTMP16, %%ZTMP17, %%ZTMP18, %%ZTMP19, \
+ %%ZTMP20, %%ZTMP21, %%ZTMP22, \
+ %%GH, %%GL, %%GM, \
+ %%ADDBE_4x4, %%ADDBE_1234, %%AAD_HASHz, \
+ %%ENC_DEC, big_loop_nblocks, big_loop_depth, %%CTR_CHECK
+
+ sub %%LENGTH, (big_loop_nblocks * 16)
+ cmp %%LENGTH, (big_loop_nblocks * 16)
+ jge %%_encrypt_big_nblocks
+
+%%_no_more_big_nblocks:
+ vpshufb %%CTR_BLOCKx, XWORD(%%SHUF_MASK)
+ vmovdqa64 XWORD(%%CTR_BLOCK_SAVE), %%CTR_BLOCKx
+
+ GHASH_LAST_Nx16 %%GDATA_KEY, %%AAD_HASHz, \
+ %%ZTMP0, %%ZTMP1, %%ZTMP2, %%ZTMP3, \
+ %%ZTMP4, %%ZTMP5, %%ZTMP6, %%ZTMP7, \
+ %%ZTMP8, %%ZTMP9, %%ZTMP10, %%ZTMP11, \
+ %%ZTMP12, %%ZTMP13, %%ZTMP14, %%ZTMP15, \
+ %%GH, %%GL, %%GM, big_loop_nblocks, big_loop_depth
+
+ or %%LENGTH, %%LENGTH
+ jz %%_ghash_done
+
+%%_message_below_big_nblocks:
+
+ ;; Less than 256 bytes will be handled by the small message code, which
+        ;; can process up to 16 blocks (16 bytes each)
+ cmp %%LENGTH, (16 * 16)
+ jge %%_large_message_path
+
+ ;; Determine how many blocks to process
+ ;; - process one additional block if there is a partial block
+ mov %%IA1, %%LENGTH
+ add %%IA1, 15
+ shr %%IA1, 4
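+        ;; %%IA1 = (LENGTH + 15) / 16, i.e. the number of blocks counting a
+        ;; trailing partial block as a full one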
+ ;; %%IA1 can be in the range from 0 to 16
+
+ GCM_ENC_DEC_SMALL \
+ %%GDATA_KEY, %%GDATA_CTX, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, \
+ %%PLAIN_CYPH_LEN, %%ENC_DEC, %%DATA_OFFSET, \
+ %%LENGTH, %%IA1, %%CTR_BLOCKx, %%AAD_HASHx, %%INSTANCE_TYPE, \
+ %%ZTMP0, %%ZTMP1, %%ZTMP2, %%ZTMP3, \
+ %%ZTMP4, %%ZTMP5, %%ZTMP6, %%ZTMP7, \
+ %%ZTMP8, %%ZTMP9, %%ZTMP10, %%ZTMP11, \
+ %%ZTMP12, %%ZTMP13, %%ZTMP14, %%ZTMP15, \
+ %%ZTMP16, %%ZTMP17, %%ZTMP18, %%ZTMP19, \
+ %%ZTMP20, %%ZTMP21, %%ZTMP22, \
+ no_zmm, no_zmm, no_zmm, \
+ %%IA0, %%IA3, %%MASKREG, %%SHUF_MASK
+
+ vmovdqa64 XWORD(%%CTR_BLOCK_SAVE), %%CTR_BLOCKx
+
+ jmp %%_ghash_done
+
+%%_large_message_path:
+ ;; Determine how many blocks to process in INITIAL
+ ;; - process one additional block in INITIAL if there is a partial block
+ mov %%IA1, %%LENGTH
+ and %%IA1, 0xff
+ add %%IA1, 15
+ shr %%IA1, 4
+ ;; Don't allow 8 INITIAL blocks since this will
+ ;; be handled by the x8 partial loop.
+ and %%IA1, 7
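+        ;; %%IA1 = number of leading blocks (0 to 7) for INITIAL_BLOCKS so that
+        ;; the rest of the message is handled in groups of 8 blocks by the by8 loop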
+ je %%_initial_num_blocks_is_0
+ cmp %%IA1, 1
+ je %%_initial_num_blocks_is_1
+ cmp %%IA1, 2
+ je %%_initial_num_blocks_is_2
+ cmp %%IA1, 3
+ je %%_initial_num_blocks_is_3
+ cmp %%IA1, 4
+ je %%_initial_num_blocks_is_4
+ cmp %%IA1, 5
+ je %%_initial_num_blocks_is_5
+ cmp %%IA1, 6
+ je %%_initial_num_blocks_is_6
+
+%assign number_of_blocks 7
+%rep 8
+%%_initial_num_blocks_is_ %+ number_of_blocks:
+ INITIAL_BLOCKS %%GDATA_KEY, %%GDATA_CTX, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, \
+ %%LENGTH, %%DATA_OFFSET, number_of_blocks, %%CTR_BLOCKx, %%AAD_HASHz, \
+ %%ZTMP0, %%ZTMP1, %%ZTMP2, %%ZTMP3, %%ZTMP4, \
+ %%ZTMP5, %%ZTMP6, %%ZTMP7, %%ZTMP8, %%ZTMP9, %%ZTMP10, %%ZTMP11, \
+ %%IA0, %%IA1, %%ENC_DEC, %%MASKREG, %%SHUF_MASK, no_partial_block
+%if number_of_blocks != 0
+ jmp %%_initial_blocks_encrypted
+%endif
+%assign number_of_blocks (number_of_blocks - 1)
+%endrep
+
+%%_initial_blocks_encrypted:
+ vmovdqa64 XWORD(%%CTR_BLOCK_SAVE), %%CTR_BLOCKx
+
+        ;; move cipher blocks from initial blocks to input of the by8 macro
+ ;; and for GHASH_LAST_8/7
+ ;; - ghash value already xor'ed into block 0
+ vmovdqa64 %%BLK0, %%ZTMP0
+ vmovdqa64 %%BLK1, %%ZTMP1
+
+ ;; The entire message cannot get processed in INITIAL_BLOCKS
+ ;; - GCM_ENC_DEC_SMALL handles up to 16 blocks
+ ;; - INITIAL_BLOCKS processes up to 15 blocks
+ ;; - no need to check for zero length at this stage
+
+        ;; In order to have only one reduction at the end, the starting
+        ;; HASH KEY pointer needs to be determined based on length and
+        ;; call type.
+ ;; - note that 8 blocks are already ciphered in INITIAL_BLOCKS and
+ ;; subtracted from LENGTH
+ lea %%IA1, [%%LENGTH + (8 * 16)]
+ add %%IA1, 15
+ and %%IA1, 0x3f0
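+        ;; %%IA1 = 16 * number of blocks still to be GHASHed (remaining length plus
+        ;; the 8 blocks already ciphered in INITIAL_BLOCKS, partial block rounded up)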
+%ifidn %%INSTANCE_TYPE, multi_call
+ ;; if partial block and multi_call then change hash key start by one
+ mov %%IA0, %%LENGTH
+ and %%IA0, 15
+ add %%IA0, 15
+ and %%IA0, 16
+ sub %%IA1, %%IA0
+%endif
+ lea %%HASHK_PTR, [%%GDATA_KEY + HashKey + 16]
+ sub %%HASHK_PTR, %%IA1
+ ;; HASHK_PTR
+ ;; - points at the first hash key to start GHASH with
+ ;; - needs to be updated as the message is processed (incremented)
+
+ ;; pre-load constants
+ vmovdqa64 %%ADD8BE, [rel ddq_addbe_8888]
+ vmovdqa64 %%ADD8LE, [rel ddq_add_8888]
+ vpxorq %%GH, %%GH
+ vpxorq %%GL, %%GL
+ vpxorq %%GM, %%GM
+
+ ;; prepare counter 8 blocks
+ vshufi64x2 %%CTR_BLOCKz, %%CTR_BLOCKz, %%CTR_BLOCKz, 0
+ vpaddd %%CTR_BLOCK2z, %%CTR_BLOCKz, [rel ddq_add_5678]
+ vpaddd %%CTR_BLOCKz, %%CTR_BLOCKz, [rel ddq_add_1234]
+ vpshufb %%CTR_BLOCKz, %%SHUF_MASK
+ vpshufb %%CTR_BLOCK2z, %%SHUF_MASK
+
+ ;; Process 7 full blocks plus a partial block
+ cmp %%LENGTH, 128
+ jl %%_encrypt_by_8_partial
+
+%%_encrypt_by_8_parallel:
+ ;; in_order vs. out_order is an optimization to increment the counter
+ ;; without shuffling it back into little endian.
+ ;; %%CTR_CHECK keeps track of when we need to increment in order so
+ ;; that the carry is handled correctly.
+
+ vmovq %%CTR_CHECK, XWORD(%%CTR_BLOCK_SAVE)
+
+%%_encrypt_by_8_new:
+ and WORD(%%CTR_CHECK), 255
+ add WORD(%%CTR_CHECK), 8
+
+ vmovdqu64 %%GH4KEY, [%%HASHK_PTR + (4 * 16)]
+ vmovdqu64 %%GH8KEY, [%%HASHK_PTR + (0 * 16)]
+
+ GHASH_8_ENCRYPT_8_PARALLEL %%GDATA_KEY, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, \
+ %%DATA_OFFSET, %%CTR_BLOCKz, %%CTR_BLOCK2z,\
+ %%BLK0, %%BLK1, %%AES_PARTIAL_BLOCK, \
+ out_order, %%ENC_DEC, full, %%IA0, %%IA1, %%LENGTH, %%INSTANCE_TYPE, \
+ %%GH4KEY, %%GH8KEY, %%SHUF_MASK, \
+ %%ZTMP0, %%ZTMP1, %%ZTMP2, %%ZTMP3, %%ZTMP4, %%ZTMP5, %%ZTMP6, \
+ %%ZTMP7, %%ZTMP8, %%ZTMP9, %%ZTMP10, %%ZTMP11, %%ZTMP12, \
+ %%MASKREG, no_reduction, %%GL, %%GH, %%GM
+
+ add %%HASHK_PTR, (8 * 16)
+ add %%DATA_OFFSET, 128
+ sub %%LENGTH, 128
+ jz %%_encrypt_done
+
+ cmp WORD(%%CTR_CHECK), (256 - 8)
+ jae %%_encrypt_by_8
+
+ vpaddd %%CTR_BLOCKz, %%ADD8BE
+ vpaddd %%CTR_BLOCK2z, %%ADD8BE
+
+ cmp %%LENGTH, 128
+ jl %%_encrypt_by_8_partial
+
+ jmp %%_encrypt_by_8_new
+
+%%_encrypt_by_8:
+ vpshufb %%CTR_BLOCKz, %%SHUF_MASK
+ vpshufb %%CTR_BLOCK2z, %%SHUF_MASK
+ vpaddd %%CTR_BLOCKz, %%ADD8LE
+ vpaddd %%CTR_BLOCK2z, %%ADD8LE
+ vpshufb %%CTR_BLOCKz, %%SHUF_MASK
+ vpshufb %%CTR_BLOCK2z, %%SHUF_MASK
+
+ cmp %%LENGTH, 128
+ jge %%_encrypt_by_8_new
+
+%%_encrypt_by_8_partial:
+        ;; Test to see if we need a by-8 pass with a partial block. At this point
+        ;; the number of bytes remaining should be either zero or between 113 and 127.
+ ;; 'in_order' shuffle needed to align key for partial block xor.
+ ;; 'out_order' is a little faster because it avoids extra shuffles.
+ ;; - counter blocks for the next 8 blocks are prepared and in BE format
+ ;; - we can go ahead with out_order scenario
+
+ vmovdqu64 %%GH4KEY, [%%HASHK_PTR + (4 * 16)]
+ vmovdqu64 %%GH8KEY, [%%HASHK_PTR + (0 * 16)]
+
+ GHASH_8_ENCRYPT_8_PARALLEL %%GDATA_KEY, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, \
+ %%DATA_OFFSET, %%CTR_BLOCKz, %%CTR_BLOCK2z, \
+ %%BLK0, %%BLK1, %%AES_PARTIAL_BLOCK, \
+ out_order, %%ENC_DEC, partial, %%IA0, %%IA1, %%LENGTH, %%INSTANCE_TYPE, \
+ %%GH4KEY, %%GH8KEY, %%SHUF_MASK, \
+ %%ZTMP0, %%ZTMP1, %%ZTMP2, %%ZTMP3, %%ZTMP4, %%ZTMP5, %%ZTMP6, \
+ %%ZTMP7, %%ZTMP8, %%ZTMP9, %%ZTMP10, %%ZTMP11, %%ZTMP12, \
+ %%MASKREG, no_reduction, %%GL, %%GH, %%GM
+
+ add %%HASHK_PTR, (8 * 16)
+ add %%DATA_OFFSET, (128 - 16)
+ sub %%LENGTH, (128 - 16)
+
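+        ;; save partial block state for the next update call: number of bytes left
+        ;; in the trailing partial block and the encrypted counter block used for it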
+%ifidn %%INSTANCE_TYPE, multi_call
+ mov [%%GDATA_CTX + PBlockLen], %%LENGTH
+ vmovdqu64 [%%GDATA_CTX + PBlockEncKey], %%AES_PARTIAL_BLOCK
+%endif
+
+%%_encrypt_done:
+ ;; Extract the last counter block in LE format
+ vextracti32x4 XWORD(%%CTR_BLOCK_SAVE), %%CTR_BLOCK2z, 3
+ vpshufb XWORD(%%CTR_BLOCK_SAVE), XWORD(%%SHUF_MASK)
+
+ ;; GHASH last cipher text blocks in xmm1-xmm8
+        ;; - if the 8th block is partial in a multi-call path then skip that block
+%ifidn %%INSTANCE_TYPE, multi_call
+ cmp qword [%%GDATA_CTX + PBlockLen], 0
+ jz %%_hash_last_8
+
+ ;; save the 8th partial block as GHASH_LAST_7 will clobber %%BLK1
+ vextracti32x4 XWORD(%%ZTMP7), %%BLK1, 3
+
+ GHASH_LAST_7 %%GDATA_KEY, %%BLK1, %%BLK0, \
+ %%ZTMP0, %%ZTMP1, %%ZTMP2, %%ZTMP3, %%ZTMP4, %%ZTMP5, %%ZTMP6, \
+ %%AAD_HASHx, %%MASKREG, %%IA0, %%GH, %%GL, %%GM
+
+        ;; XOR the partial block into the hash
+ vpxorq %%AAD_HASHx, %%AAD_HASHx, XWORD(%%ZTMP7)
+ jmp %%_ghash_done
+%%_hash_last_8:
+%endif
+ GHASH_LAST_8 %%GDATA_KEY, %%BLK1, %%BLK0, \
+ %%ZTMP0, %%ZTMP1, %%ZTMP2, %%ZTMP3, %%ZTMP4, %%ZTMP5, %%AAD_HASHx, \
+ %%GH, %%GL, %%GM
+%%_ghash_done:
+ vmovdqu64 [%%GDATA_CTX + CurCount], XWORD(%%CTR_BLOCK_SAVE)
+ vmovdqu64 [%%GDATA_CTX + AadHash], %%AAD_HASHx
+%%_enc_dec_done:
+
+%endmacro ; GCM_ENC_DEC
+
+;;; ===========================================================================
+;;; ===========================================================================
+;;; Encrypt/decrypt the initial 16 blocks
+%macro INITIAL_BLOCKS_16 22
+%define %%IN %1 ; [in] input buffer
+%define %%OUT %2 ; [in] output buffer
+%define %%KP %3 ; [in] pointer to expanded keys
+%define %%DATA_OFFSET %4 ; [in] data offset
+%define %%GHASH %5 ; [in] ZMM with AAD (low 128 bits)
+%define %%CTR %6 ; [in] ZMM with CTR BE blocks 4x128 bits
+%define %%CTR_CHECK %7 ; [in/out] GPR with counter overflow check
+%define %%ADDBE_4x4 %8 ; [in] ZMM 4x128bits with value 4 (big endian)
+%define %%ADDBE_1234 %9 ; [in] ZMM 4x128bits with values 1, 2, 3 & 4 (big endian)
+%define %%T0            %10     ; [clobbered] temporary ZMM register
+%define %%T1            %11     ; [clobbered] temporary ZMM register
+%define %%T2            %12     ; [clobbered] temporary ZMM register
+%define %%T3            %13     ; [clobbered] temporary ZMM register
+%define %%T4            %14     ; [clobbered] temporary ZMM register
+%define %%T5            %15     ; [clobbered] temporary ZMM register
+%define %%T6            %16     ; [clobbered] temporary ZMM register
+%define %%T7            %17     ; [clobbered] temporary ZMM register
+%define %%T8            %18     ; [clobbered] temporary ZMM register
+%define %%SHUF_MASK %19 ; [in] ZMM with BE/LE shuffle mask
+%define %%ENC_DEC %20 ; [in] ENC (encrypt) or DEC (decrypt) selector
+%define %%BLK_OFFSET %21 ; [in] stack frame offset to ciphered blocks
+%define %%DATA_DISPL %22 ; [in] fixed numerical data displacement/offset
+
+%define %%B00_03 %%T5
+%define %%B04_07 %%T6
+%define %%B08_11 %%T7
+%define %%B12_15 %%T8
+
+%assign stack_offset (%%BLK_OFFSET)
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;; prepare counter blocks
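+        ;; - if the low byte of the counter is about to wrap past 256, shuffle to LE,
+        ;;   do the adds there and shuffle back to BE; otherwise add directly in BE form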
+
+ cmp BYTE(%%CTR_CHECK), (256 - 16)
+ jae %%_next_16_overflow
+ vpaddd %%B00_03, %%CTR, %%ADDBE_1234
+ vpaddd %%B04_07, %%B00_03, %%ADDBE_4x4
+ vpaddd %%B08_11, %%B04_07, %%ADDBE_4x4
+ vpaddd %%B12_15, %%B08_11, %%ADDBE_4x4
+ jmp %%_next_16_ok
+%%_next_16_overflow:
+ vpshufb %%CTR, %%CTR, %%SHUF_MASK
+ vmovdqa64 %%B12_15, [rel ddq_add_4444]
+ vpaddd %%B00_03, %%CTR, [rel ddq_add_1234]
+ vpaddd %%B04_07, %%B00_03, %%B12_15
+ vpaddd %%B08_11, %%B04_07, %%B12_15
+ vpaddd %%B12_15, %%B08_11, %%B12_15
+ vpshufb %%B00_03, %%SHUF_MASK
+ vpshufb %%B04_07, %%SHUF_MASK
+ vpshufb %%B08_11, %%SHUF_MASK
+ vpshufb %%B12_15, %%SHUF_MASK
+%%_next_16_ok:
+ vshufi64x2 %%CTR, %%B12_15, %%B12_15, 1111_1111b
+ add BYTE(%%CTR_CHECK), 16
+
+ ;; === load 16 blocks of data
+ VX512LDR %%T0, [%%IN + %%DATA_OFFSET + %%DATA_DISPL + (64*0)]
+ VX512LDR %%T1, [%%IN + %%DATA_OFFSET + %%DATA_DISPL + (64*1)]
+ VX512LDR %%T2, [%%IN + %%DATA_OFFSET + %%DATA_DISPL + (64*2)]
+ VX512LDR %%T3, [%%IN + %%DATA_OFFSET + %%DATA_DISPL + (64*3)]
+
+ ;; move to AES encryption rounds
+%assign i 0
+ vbroadcastf64x2 %%T4, [%%KP + (16*i)]
+ vpxorq %%B00_03, %%B00_03, %%T4
+ vpxorq %%B04_07, %%B04_07, %%T4
+ vpxorq %%B08_11, %%B08_11, %%T4
+ vpxorq %%B12_15, %%B12_15, %%T4
+%assign i (i + 1)
+
+%rep NROUNDS
+ vbroadcastf64x2 %%T4, [%%KP + (16*i)]
+ vaesenc %%B00_03, %%B00_03, %%T4
+ vaesenc %%B04_07, %%B04_07, %%T4
+ vaesenc %%B08_11, %%B08_11, %%T4
+ vaesenc %%B12_15, %%B12_15, %%T4
+%assign i (i + 1)
+%endrep
+
+ vbroadcastf64x2 %%T4, [%%KP + (16*i)]
+ vaesenclast %%B00_03, %%B00_03, %%T4
+ vaesenclast %%B04_07, %%B04_07, %%T4
+ vaesenclast %%B08_11, %%B08_11, %%T4
+ vaesenclast %%B12_15, %%B12_15, %%T4
+
+ ;; xor against text
+ vpxorq %%B00_03, %%B00_03, %%T0
+ vpxorq %%B04_07, %%B04_07, %%T1
+ vpxorq %%B08_11, %%B08_11, %%T2
+ vpxorq %%B12_15, %%B12_15, %%T3
+
+ ;; store
+ VX512STR [%%OUT + %%DATA_OFFSET + %%DATA_DISPL + (64*0)], %%B00_03
+ VX512STR [%%OUT + %%DATA_OFFSET + %%DATA_DISPL + (64*1)], %%B04_07
+ VX512STR [%%OUT + %%DATA_OFFSET + %%DATA_DISPL + (64*2)], %%B08_11
+ VX512STR [%%OUT + %%DATA_OFFSET + %%DATA_DISPL + (64*3)], %%B12_15
+
+%ifidn %%ENC_DEC, DEC
+ ;; decryption - cipher text needs to go to GHASH phase
+ vpshufb %%B00_03, %%T0, %%SHUF_MASK
+ vpshufb %%B04_07, %%T1, %%SHUF_MASK
+ vpshufb %%B08_11, %%T2, %%SHUF_MASK
+ vpshufb %%B12_15, %%T3, %%SHUF_MASK
+%else
+ ;; encryption
+ vpshufb %%B00_03, %%B00_03, %%SHUF_MASK
+ vpshufb %%B04_07, %%B04_07, %%SHUF_MASK
+ vpshufb %%B08_11, %%B08_11, %%SHUF_MASK
+ vpshufb %%B12_15, %%B12_15, %%SHUF_MASK
+%endif
+
+%ifnidn %%GHASH, no_ghash
+ ;; === xor cipher block 0 with GHASH for the next GHASH round
+ vpxorq %%B00_03, %%B00_03, %%GHASH
+%endif
+
+ vmovdqa64 [rsp + stack_offset + (0 * 64)], %%B00_03
+ vmovdqa64 [rsp + stack_offset + (1 * 64)], %%B04_07
+ vmovdqa64 [rsp + stack_offset + (2 * 64)], %%B08_11
+ vmovdqa64 [rsp + stack_offset + (3 * 64)], %%B12_15
+%endmacro ;INITIAL_BLOCKS_16
+
+;;; ===========================================================================
+;;; ===========================================================================
+;;; Encrypt the initial N x 16 blocks
+;;; - A x 16 blocks are encrypted/decrypted first (pipeline depth)
+;;; - B x 16 blocks are encrypted/decrypted and previous A x 16 are ghashed
+;;; - A + B = N
+%macro INITIAL_BLOCKS_Nx16 39
+%define %%IN %1 ; [in] input buffer
+%define %%OUT %2 ; [in] output buffer
+%define %%KP %3 ; [in] pointer to expanded keys
+%define %%DATA_OFFSET %4 ; [in/out] data offset
+%define %%GHASH %5 ; [in] ZMM with AAD (low 128 bits)
+%define %%CTR %6 ; [in/out] ZMM with CTR: in - LE & 128b; out - BE & 4x128b
+%define %%CTR_CHECK %7 ; [in/out] GPR with counter overflow check
+%define %%T0            %8      ; [clobbered] temporary ZMM register
+%define %%T1            %9      ; [clobbered] temporary ZMM register
+%define %%T2            %10     ; [clobbered] temporary ZMM register
+%define %%T3            %11     ; [clobbered] temporary ZMM register
+%define %%T4            %12     ; [clobbered] temporary ZMM register
+%define %%T5            %13     ; [clobbered] temporary ZMM register
+%define %%T6            %14     ; [clobbered] temporary ZMM register
+%define %%T7            %15     ; [clobbered] temporary ZMM register
+%define %%T8            %16     ; [clobbered] temporary ZMM register
+%define %%T9            %17     ; [clobbered] temporary ZMM register
+%define %%T10           %18     ; [clobbered] temporary ZMM register
+%define %%T11           %19     ; [clobbered] temporary ZMM register
+%define %%T12           %20     ; [clobbered] temporary ZMM register
+%define %%T13           %21     ; [clobbered] temporary ZMM register
+%define %%T14           %22     ; [clobbered] temporary ZMM register
+%define %%T15           %23     ; [clobbered] temporary ZMM register
+%define %%T16           %24     ; [clobbered] temporary ZMM register
+%define %%T17           %25     ; [clobbered] temporary ZMM register
+%define %%T18           %26     ; [clobbered] temporary ZMM register
+%define %%T19           %27     ; [clobbered] temporary ZMM register
+%define %%T20           %28     ; [clobbered] temporary ZMM register
+%define %%T21           %29     ; [clobbered] temporary ZMM register
+%define %%T22           %30     ; [clobbered] temporary ZMM register
+%define %%GH %31 ; [out] ZMM ghash sum (high)
+%define %%GL %32 ; [out] ZMM ghash sum (low)
+%define %%GM %33 ; [out] ZMM ghash sum (middle)
+%define %%ADDBE_4x4 %34 ; [in] ZMM 4x128bits with value 4 (big endian)
+%define %%ADDBE_1234 %35 ; [in] ZMM 4x128bits with values 1, 2, 3 & 4 (big endian)
+%define %%SHUF_MASK %36 ; [in] ZMM with BE/LE shuffle mask
+%define %%ENC_DEC %37 ; [in] ENC (encrypt) or DEC (decrypt) selector
+%define %%NBLOCKS %38 ; [in] number of blocks: multiple of 16
+%define %%DEPTH_BLK     %39 ; [in] pipeline depth, number of blocks (multiple of 16)
+
+%assign aesout_offset (STACK_LOCAL_OFFSET + (0 * 16))
+%assign ghashin_offset (STACK_LOCAL_OFFSET + (0 * 16))
+%assign hkey_offset HashKey_ %+ %%NBLOCKS
+%assign data_in_out_offset 0
+
+ ;; set up CTR_CHECK
+ vmovd DWORD(%%CTR_CHECK), XWORD(%%CTR)
+ and DWORD(%%CTR_CHECK), 255
+
+ ;; in LE format after init, convert to BE
+ vshufi64x2 %%CTR, %%CTR, %%CTR, 0
+ vpshufb %%CTR, %%CTR, %%SHUF_MASK
+
+ ;; ==== AES lead in
+
+ ;; first 16 blocks - just cipher
+ INITIAL_BLOCKS_16 %%IN, %%OUT, %%KP, %%DATA_OFFSET, \
+ %%GHASH, %%CTR, %%CTR_CHECK, %%ADDBE_4x4, %%ADDBE_1234, \
+ %%T0, %%T1, %%T2, %%T3, %%T4, \
+ %%T5, %%T6, %%T7, %%T8, \
+ %%SHUF_MASK, %%ENC_DEC, aesout_offset, data_in_out_offset
+
+%assign aesout_offset (aesout_offset + (16 * 16))
+%assign data_in_out_offset (data_in_out_offset + (16 * 16))
+
+%if (%%DEPTH_BLK > 16)
+%rep ((%%DEPTH_BLK - 16) / 16)
+ INITIAL_BLOCKS_16 %%IN, %%OUT, %%KP, %%DATA_OFFSET, \
+ no_ghash, %%CTR, %%CTR_CHECK, %%ADDBE_4x4, %%ADDBE_1234, \
+ %%T0, %%T1, %%T2, %%T3, %%T4, \
+ %%T5, %%T6, %%T7, %%T8, \
+ %%SHUF_MASK, %%ENC_DEC, aesout_offset, data_in_out_offset
+%assign aesout_offset (aesout_offset + (16 * 16))
+%assign data_in_out_offset (data_in_out_offset + (16 * 16))
+%endrep
+%endif
+
+ ;; ==== GHASH + AES follows
+
+ ;; first 16 blocks stitched
+ GHASH_16_ENCRYPT_16_PARALLEL %%KP, %%OUT, %%IN, %%DATA_OFFSET, \
+ %%CTR, %%CTR_CHECK, \
+ hkey_offset, aesout_offset, ghashin_offset, %%SHUF_MASK, \
+ %%T0, %%T1, %%T2, %%T3, \
+ %%T4, %%T5, %%T6, %%T7, \
+ %%T8, %%T9, %%T10, %%T11,\
+ %%T12, %%T13, %%T14, %%T15,\
+ %%T16, %%T17, %%T18, %%T19, \
+ %%T20, %%T21, %%T22, \
+ %%ADDBE_4x4, %%ADDBE_1234, \
+ %%GL, %%GH, %%GM, \
+ first_time, %%ENC_DEC, data_in_out_offset, no_ghash_in
+
+%if ((%%NBLOCKS - %%DEPTH_BLK) > 16)
+%rep ((%%NBLOCKS - %%DEPTH_BLK - 16) / 16)
+%assign ghashin_offset (ghashin_offset + (16 * 16))
+%assign hkey_offset (hkey_offset + (16 * 16))
+%assign aesout_offset (aesout_offset + (16 * 16))
+%assign data_in_out_offset (data_in_out_offset + (16 * 16))
+
+ ;; mid 16 blocks - stitched
+ GHASH_16_ENCRYPT_16_PARALLEL %%KP, %%OUT, %%IN, %%DATA_OFFSET, \
+ %%CTR, %%CTR_CHECK, \
+ hkey_offset, aesout_offset, ghashin_offset, %%SHUF_MASK, \
+ %%T0, %%T1, %%T2, %%T3, \
+ %%T4, %%T5, %%T6, %%T7, \
+ %%T8, %%T9, %%T10, %%T11,\
+ %%T12, %%T13, %%T14, %%T15,\
+ %%T16, %%T17, %%T18, %%T19, \
+ %%T20, %%T21, %%T22, \
+ %%ADDBE_4x4, %%ADDBE_1234, \
+ %%GL, %%GH, %%GM, \
+ no_reduction, %%ENC_DEC, data_in_out_offset, no_ghash_in
+%endrep
+%endif
+ add %%DATA_OFFSET, (%%NBLOCKS * 16)
+
+%endmacro ;INITIAL_BLOCKS_Nx16
+
+;;; ===========================================================================
+;;; ===========================================================================
+;;; GHASH the last 16 blocks of cipher text (last part of by 32/64/128 code)
+%macro GHASH_LAST_Nx16 23
+%define %%KP %1 ; [in] pointer to expanded keys
+%define %%GHASH %2 ; [out] ghash output
+%define %%T1 %3 ; [clobbered] temporary ZMM
+%define %%T2 %4 ; [clobbered] temporary ZMM
+%define %%T3 %5 ; [clobbered] temporary ZMM
+%define %%T4 %6 ; [clobbered] temporary ZMM
+%define %%T5 %7 ; [clobbered] temporary ZMM
+%define %%T6 %8 ; [clobbered] temporary ZMM
+%define %%T7 %9 ; [clobbered] temporary ZMM
+%define %%T8 %10 ; [clobbered] temporary ZMM
+%define %%T9 %11 ; [clobbered] temporary ZMM
+%define %%T10 %12 ; [clobbered] temporary ZMM
+%define %%T11 %13 ; [clobbered] temporary ZMM
+%define %%T12 %14 ; [clobbered] temporary ZMM
+%define %%T13 %15 ; [clobbered] temporary ZMM
+%define %%T14 %16 ; [clobbered] temporary ZMM
+%define %%T15 %17 ; [clobbered] temporary ZMM
+%define %%T16 %18 ; [clobbered] temporary ZMM
+%define %%GH            %19     ; [in/clobbered] ghash sum (high)
+%define %%GL            %20     ; [in/clobbered] ghash sum (low)
+%define %%GM            %21     ; [in/clobbered] ghash sum (medium)
+%define %%LOOP_BLK %22 ; [in] numerical number of blocks handled by the loop
+%define %%DEPTH_BLK %23 ; [in] numerical number, pipeline depth (ghash vs aes)
+
+%define %%T0H %%T1
+%define %%T0L %%T2
+%define %%T0M1 %%T3
+%define %%T0M2 %%T4
+
+%define %%T1H %%T5
+%define %%T1L %%T6
+%define %%T1M1 %%T7
+%define %%T1M2 %%T8
+
+%define %%T2H %%T9
+%define %%T2L %%T10
+%define %%T2M1 %%T11
+%define %%T2M2 %%T12
+
+%define %%BLK1 %%T13
+%define %%BLK2 %%T14
+
+%define %%HK1 %%T15
+%define %%HK2 %%T16
+
+%assign hashk HashKey_ %+ %%DEPTH_BLK
+%assign cipher_blk (STACK_LOCAL_OFFSET + ((%%LOOP_BLK - %%DEPTH_BLK) * 16))
+
+ ;; load cipher blocks and ghash keys
+ vmovdqa64 %%BLK1, [rsp + cipher_blk]
+ vmovdqa64 %%BLK2, [rsp + cipher_blk + 64]
+ vmovdqu64 %%HK1, [%%KP + hashk]
+ vmovdqu64 %%HK2, [%%KP + hashk + 64]
+ ;; ghash blocks 0-3
+ vpclmulqdq %%T0H, %%BLK1, %%HK1, 0x11 ; %%TH = a1*b1
+ vpclmulqdq %%T0L, %%BLK1, %%HK1, 0x00 ; %%TL = a0*b0
+ vpclmulqdq %%T0M1, %%BLK1, %%HK1, 0x01 ; %%TM1 = a1*b0
+ vpclmulqdq %%T0M2, %%BLK1, %%HK1, 0x10 ; %%TM2 = a0*b1
+ ;; ghash blocks 4-7
+ vpclmulqdq %%T1H, %%BLK2, %%HK2, 0x11 ; %%TTH = a1*b1
+ vpclmulqdq %%T1L, %%BLK2, %%HK2, 0x00 ; %%TTL = a0*b0
+ vpclmulqdq %%T1M1, %%BLK2, %%HK2, 0x01 ; %%TTM1 = a1*b0
+ vpclmulqdq %%T1M2, %%BLK2, %%HK2, 0x10 ; %%TTM2 = a0*b1
+ vpternlogq %%T0H, %%T1H, %%GH, 0x96 ; T0H = T0H + T1H + GH
+ vpternlogq %%T0L, %%T1L, %%GL, 0x96 ; T0L = T0L + T1L + GL
+ vpternlogq %%T0M1, %%T1M1, %%GM, 0x96 ; T0M1 = T0M1 + T1M1 + GM
+ vpxorq %%T0M2, %%T0M2, %%T1M2 ; T0M2 = T0M2 + T1M2
+
+%rep ((%%DEPTH_BLK - 8) / 8)
+%assign hashk (hashk + 128)
+%assign cipher_blk (cipher_blk + 128)
+
+ ;; remaining blocks
+ ;; load next 8 cipher blocks and corresponding ghash keys
+ vmovdqa64 %%BLK1, [rsp + cipher_blk]
+ vmovdqa64 %%BLK2, [rsp + cipher_blk + 64]
+ vmovdqu64 %%HK1, [%%KP + hashk]
+ vmovdqu64 %%HK2, [%%KP + hashk + 64]
+ ;; ghash blocks 0-3
+ vpclmulqdq %%T1H, %%BLK1, %%HK1, 0x11 ; %%TH = a1*b1
+ vpclmulqdq %%T1L, %%BLK1, %%HK1, 0x00 ; %%TL = a0*b0
+ vpclmulqdq %%T1M1, %%BLK1, %%HK1, 0x01 ; %%TM1 = a1*b0
+ vpclmulqdq %%T1M2, %%BLK1, %%HK1, 0x10 ; %%TM2 = a0*b1
+ ;; ghash blocks 4-7
+ vpclmulqdq %%T2H, %%BLK2, %%HK2, 0x11 ; %%TTH = a1*b1
+ vpclmulqdq %%T2L, %%BLK2, %%HK2, 0x00 ; %%TTL = a0*b0
+ vpclmulqdq %%T2M1, %%BLK2, %%HK2, 0x01 ; %%TTM1 = a1*b0
+ vpclmulqdq %%T2M2, %%BLK2, %%HK2, 0x10 ; %%TTM2 = a0*b1
+ ;; update sums
+ vpternlogq %%T0H, %%T1H, %%T2H, 0x96 ; TH = T0H + T1H + T2H
+ vpternlogq %%T0L, %%T1L, %%T2L, 0x96 ; TL = T0L + T1L + T2L
+        vpternlogq      %%T0M1, %%T1M1, %%T2M1, 0x96    ; TM1 = T0M1 + T1M1 + T2M1
+        vpternlogq      %%T0M2, %%T1M2, %%T2M2, 0x96    ; TM2 = T0M2 + T1M2 + T2M2
+%endrep
+
+ ;; integrate TM into TH and TL
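+        ;; (the middle products straddle the 64-bit lane boundary: their high halves
+        ;;  fold into the high sum and their low halves into the low sum)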
+ vpxorq %%T0M1, %%T0M1, %%T0M2
+ vpsrldq %%T1M1, %%T0M1, 8
+ vpslldq %%T1M2, %%T0M1, 8
+ vpxorq %%T0H, %%T0H, %%T1M1
+ vpxorq %%T0L, %%T0L, %%T1M2
+
+ ;; add TH and TL 128-bit words horizontally
+ VHPXORI4x128 %%T0H, %%T2M1
+ VHPXORI4x128 %%T0L, %%T2M2
+
+ ;; reduction
+ vmovdqa64 %%HK1, [rel POLY2]
+ VCLMUL_REDUCE %%GHASH, %%HK1, %%T0H, %%T0L, %%T0M1, %%T0M2
+%endmacro
+
+;;; ===========================================================================
+;;; ===========================================================================
+;;; Encrypt & ghash multiples of 16 blocks
+
+%macro GHASH_ENCRYPT_Nx16_PARALLEL 39
+%define %%IN %1 ; [in] input buffer
+%define %%OUT %2 ; [in] output buffer
+%define %%GDATA_KEY %3 ; [in] pointer to expanded keys
+%define %%DATA_OFFSET %4 ; [in/out] data offset
+%define %%CTR_BE %5 ; [in/out] ZMM last counter block
+%define %%SHFMSK %6 ; [in] ZMM with byte swap mask for pshufb
+%define %%ZT0           %7      ; [clobbered] temporary ZMM register
+%define %%ZT1           %8      ; [clobbered] temporary ZMM register
+%define %%ZT2           %9      ; [clobbered] temporary ZMM register
+%define %%ZT3           %10     ; [clobbered] temporary ZMM register
+%define %%ZT4           %11     ; [clobbered] temporary ZMM register
+%define %%ZT5           %12     ; [clobbered] temporary ZMM register
+%define %%ZT6           %13     ; [clobbered] temporary ZMM register
+%define %%ZT7           %14     ; [clobbered] temporary ZMM register
+%define %%ZT8           %15     ; [clobbered] temporary ZMM register
+%define %%ZT9           %16     ; [clobbered] temporary ZMM register
+%define %%ZT10          %17     ; [clobbered] temporary ZMM register
+%define %%ZT11          %18     ; [clobbered] temporary ZMM register
+%define %%ZT12          %19     ; [clobbered] temporary ZMM register
+%define %%ZT13          %20     ; [clobbered] temporary ZMM register
+%define %%ZT14          %21     ; [clobbered] temporary ZMM register
+%define %%ZT15          %22     ; [clobbered] temporary ZMM register
+%define %%ZT16          %23     ; [clobbered] temporary ZMM register
+%define %%ZT17          %24     ; [clobbered] temporary ZMM register
+%define %%ZT18          %25     ; [clobbered] temporary ZMM register
+%define %%ZT19          %26     ; [clobbered] temporary ZMM register
+%define %%ZT20          %27     ; [clobbered] temporary ZMM register
+%define %%ZT21          %28     ; [clobbered] temporary ZMM register
+%define %%ZT22          %29     ; [clobbered] temporary ZMM register
+%define %%GTH %30 ; [in/out] ZMM GHASH sum (high)
+%define %%GTL %31 ; [in/out] ZMM GHASH sum (low)
+%define %%GTM %32 ; [in/out] ZMM GHASH sum (medium)
+%define %%ADDBE_4x4 %33 ; [in] ZMM 4x128bits with value 4 (big endian)
+%define %%ADDBE_1234 %34 ; [in] ZMM 4x128bits with values 1, 2, 3 & 4 (big endian)
+%define %%GHASH         %35 ; [clobbered] ZMM with intermediate GHASH value
+%define %%ENC_DEC %36 ; [in] ENC (encrypt) or DEC (decrypt) selector
+%define %%NUM_BLOCKS %37 ; [in] number of blocks to process in the loop
+%define %%DEPTH_BLK %38 ; [in] pipeline depth in blocks
+%define %%CTR_CHECK %39 ; [in/out] counter to check byte overflow
+
+%assign aesout_offset (STACK_LOCAL_OFFSET + (0 * 16))
+%assign ghashin_offset (STACK_LOCAL_OFFSET + ((%%NUM_BLOCKS - %%DEPTH_BLK) * 16))
+%assign hkey_offset HashKey_ %+ %%DEPTH_BLK
+%assign data_in_out_offset 0
+
+ ;; mid 16 blocks
+%if (%%DEPTH_BLK > 16)
+%rep ((%%DEPTH_BLK - 16) / 16)
+ GHASH_16_ENCRYPT_16_PARALLEL %%GDATA_KEY, %%OUT, %%IN, %%DATA_OFFSET, \
+ %%CTR_BE, %%CTR_CHECK, \
+ hkey_offset, aesout_offset, ghashin_offset, %%SHFMSK, \
+ %%ZT0, %%ZT1, %%ZT2, %%ZT3, \
+ %%ZT4, %%ZT5, %%ZT6, %%ZT7, \
+ %%ZT8, %%ZT9, %%ZT10, %%ZT11,\
+ %%ZT12, %%ZT13, %%ZT14, %%ZT15,\
+ %%ZT16, %%ZT17, %%ZT18, %%ZT19, \
+ %%ZT20, %%ZT21, %%ZT22, \
+ %%ADDBE_4x4, %%ADDBE_1234, \
+ %%GTL, %%GTH, %%GTM, \
+ no_reduction, %%ENC_DEC, data_in_out_offset, no_ghash_in
+
+%assign aesout_offset (aesout_offset + (16 * 16))
+%assign ghashin_offset (ghashin_offset + (16 * 16))
+%assign hkey_offset (hkey_offset + (16 * 16))
+%assign data_in_out_offset (data_in_out_offset + (16 * 16))
+%endrep
+%endif
+
+ ;; 16 blocks with reduction
+ GHASH_16_ENCRYPT_16_PARALLEL %%GDATA_KEY, %%OUT, %%IN, %%DATA_OFFSET, \
+ %%CTR_BE, %%CTR_CHECK, \
+ HashKey_16, aesout_offset, ghashin_offset, %%SHFMSK, \
+ %%ZT0, %%ZT1, %%ZT2, %%ZT3, \
+ %%ZT4, %%ZT5, %%ZT6, %%ZT7, \
+ %%ZT8, %%ZT9, %%ZT10, %%ZT11,\
+ %%ZT12, %%ZT13, %%ZT14, %%ZT15,\
+ %%ZT16, %%ZT17, %%ZT18, %%ZT19, \
+ %%ZT20, %%ZT21, %%ZT22, \
+ %%ADDBE_4x4, %%ADDBE_1234, \
+ %%GTL, %%GTH, %%GTM, \
+ final_reduction, %%ENC_DEC, data_in_out_offset, no_ghash_in
+
+%assign aesout_offset (aesout_offset + (16 * 16))
+%assign data_in_out_offset (data_in_out_offset + (16 * 16))
+%assign ghashin_offset (STACK_LOCAL_OFFSET + (0 * 16))
+%assign hkey_offset HashKey_ %+ %%NUM_BLOCKS
+
+ ;; === xor cipher block 0 with GHASH (ZT4)
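+        ;; (the final_reduction pass above returns the reduced GHASH value in ZT4;
+        ;;  it is passed into the next pass below as its ghash-in value)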
+ vmovdqa64 %%GHASH, %%ZT4
+
+ ;; start the pipeline again
+ GHASH_16_ENCRYPT_16_PARALLEL %%GDATA_KEY, %%OUT, %%IN, %%DATA_OFFSET, \
+ %%CTR_BE, %%CTR_CHECK, \
+ hkey_offset, aesout_offset, ghashin_offset, %%SHFMSK, \
+ %%ZT0, %%ZT1, %%ZT2, %%ZT3, \
+ %%ZT4, %%ZT5, %%ZT6, %%ZT7, \
+ %%ZT8, %%ZT9, %%ZT10, %%ZT11,\
+ %%ZT12, %%ZT13, %%ZT14, %%ZT15,\
+ %%ZT16, %%ZT17, %%ZT18, %%ZT19, \
+ %%ZT20, %%ZT21, %%ZT22, \
+ %%ADDBE_4x4, %%ADDBE_1234, \
+ %%GTL, %%GTH, %%GTM, \
+ first_time, %%ENC_DEC, data_in_out_offset, %%GHASH
+
+%if ((%%NUM_BLOCKS - %%DEPTH_BLK) > 16)
+%rep ((%%NUM_BLOCKS - %%DEPTH_BLK - 16 ) / 16)
+
+%assign aesout_offset (aesout_offset + (16 * 16))
+%assign data_in_out_offset (data_in_out_offset + (16 * 16))
+%assign ghashin_offset (ghashin_offset + (16 * 16))
+%assign hkey_offset (hkey_offset + (16 * 16))
+
+ GHASH_16_ENCRYPT_16_PARALLEL %%GDATA_KEY, %%OUT, %%IN, %%DATA_OFFSET, \
+ %%CTR_BE, %%CTR_CHECK, \
+ hkey_offset, aesout_offset, ghashin_offset, %%SHFMSK, \
+ %%ZT0, %%ZT1, %%ZT2, %%ZT3, \
+ %%ZT4, %%ZT5, %%ZT6, %%ZT7, \
+ %%ZT8, %%ZT9, %%ZT10, %%ZT11,\
+ %%ZT12, %%ZT13, %%ZT14, %%ZT15,\
+ %%ZT16, %%ZT17, %%ZT18, %%ZT19, \
+ %%ZT20, %%ZT21, %%ZT22, \
+ %%ADDBE_4x4, %%ADDBE_1234, \
+ %%GTL, %%GTH, %%GTM, \
+ no_reduction, %%ENC_DEC, data_in_out_offset, no_ghash_in
+%endrep
+%endif
+
+ add %%DATA_OFFSET, (%%NUM_BLOCKS * 16)
+
+%endmacro ;GHASH_ENCRYPT_Nx16_PARALLEL
+;;; ===========================================================================
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; GCM_COMPLETE finishes encryption/decryption of the last partial block after GCM_UPDATE finishes.
+; Input: A gcm_key_data * (GDATA_KEY), gcm_context_data * (GDATA_CTX) and whether encoding or decoding (ENC_DEC).
+; Output: Authentication Tag (AUTH_TAG) and Authentication Tag length (AUTH_TAG_LEN)
+; Clobbers rax, r10-r12, and xmm0, xmm1, xmm5, xmm6, xmm9, xmm11, xmm14, xmm15
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+%macro GCM_COMPLETE 6
+%define %%GDATA_KEY %1
+%define %%GDATA_CTX %2
+%define %%AUTH_TAG %3
+%define %%AUTH_TAG_LEN %4
+%define %%ENC_DEC %5
+%define %%INSTANCE_TYPE %6
+%define %%PLAIN_CYPH_LEN rax
+
+ vmovdqu xmm13, [%%GDATA_KEY + HashKey]
+ ;; Start AES as early as possible
+ vmovdqu xmm9, [%%GDATA_CTX + OrigIV] ; xmm9 = Y0
+ ENCRYPT_SINGLE_BLOCK %%GDATA_KEY, xmm9 ; E(K, Y0)
+
+%ifidn %%INSTANCE_TYPE, multi_call
+        ;; If the GCM function is called as a single function call rather
+        ;; than invoking the individual parts (init, update, finalize), we
+        ;; can remove a write-to-read dependency on AadHash.
+ vmovdqu xmm14, [%%GDATA_CTX + AadHash]
+
+ ;; Encrypt the final partial block. If we did this as a single call then
+ ;; the partial block was handled in the main GCM_ENC_DEC macro.
+ mov r12, [%%GDATA_CTX + PBlockLen]
+ cmp r12, 0
+
+ je %%_partial_done
+
+ GHASH_MUL xmm14, xmm13, xmm0, xmm10, xmm11, xmm5, xmm6 ;GHASH computation for the last <16 Byte block
+ vmovdqu [%%GDATA_CTX + AadHash], xmm14
+
+%%_partial_done:
+
+%endif
+
+ mov r12, [%%GDATA_CTX + AadLen] ; r12 = aadLen (number of bytes)
+ mov %%PLAIN_CYPH_LEN, [%%GDATA_CTX + InLen]
+
+ shl r12, 3 ; convert into number of bits
+ vmovd xmm15, r12d ; len(A) in xmm15
+
+        shl     %%PLAIN_CYPH_LEN, 3                     ; len(C) in bits (bytes * 8)
+ vmovq xmm1, %%PLAIN_CYPH_LEN
+ vpslldq xmm15, xmm15, 8 ; xmm15 = len(A)|| 0x0000000000000000
+ vpxor xmm15, xmm15, xmm1 ; xmm15 = len(A)||len(C)
+
+ vpxor xmm14, xmm15
+ GHASH_MUL xmm14, xmm13, xmm0, xmm10, xmm11, xmm5, xmm6
+ vpshufb xmm14, [rel SHUF_MASK] ; perform a 16Byte swap
+
+ vpxor xmm9, xmm9, xmm14
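+        ;; xmm9 = E(K, Y0) + GHASH(A, C, len(A)||len(C)) = the authentication tag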
+
+
+%%_return_T:
+ mov r10, %%AUTH_TAG ; r10 = authTag
+ mov r11, %%AUTH_TAG_LEN ; r11 = auth_tag_len
+
+ cmp r11, 16
+ je %%_T_16
+
+ cmp r11, 12
+ je %%_T_12
+
+ cmp r11, 8
+ je %%_T_8
+
+ simd_store_avx_15 r10, xmm9, r11, r12, rax
+ jmp %%_return_T_done
+%%_T_8:
+ vmovq rax, xmm9
+ mov [r10], rax
+ jmp %%_return_T_done
+%%_T_12:
+ vmovq rax, xmm9
+ mov [r10], rax
+ vpsrldq xmm9, xmm9, 8
+ vmovd eax, xmm9
+ mov [r10 + 8], eax
+ jmp %%_return_T_done
+%%_T_16:
+ vmovdqu [r10], xmm9
+
+%%_return_T_done:
+
+%ifdef SAFE_DATA
+ ;; Clear sensitive data from context structure
+ vpxor xmm0, xmm0
+ vmovdqu [%%GDATA_CTX + AadHash], xmm0
+ vmovdqu [%%GDATA_CTX + PBlockEncKey], xmm0
+%endif
+%endmacro ; GCM_COMPLETE
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;void aes_gcm_precomp_128_vaes_avx512 /
+; aes_gcm_precomp_192_vaes_avx512 /
+; aes_gcm_precomp_256_vaes_avx512
+; (struct gcm_key_data *key_data)
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+MKGLOBAL(FN_NAME(precomp,_),function,)
+FN_NAME(precomp,_):
+;; Parameter is passed through register
+%ifdef SAFE_PARAM
+ ;; Check key_data != NULL
+ cmp arg1, 0
+ jz exit_precomp
+%endif
+
+ FUNC_SAVE
+
+ vpxor xmm6, xmm6
+ ENCRYPT_SINGLE_BLOCK arg1, xmm6 ; xmm6 = HashKey
+
+ vpshufb xmm6, [rel SHUF_MASK]
+ ;;;;;;;;;;;;;;; PRECOMPUTATION of HashKey<<1 mod poly from the HashKey;;;;;;;;;;;;;;;
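+        ;; (the hash key E(K, 0^128) computed above is multiplied by x: shifted left
+        ;;  one bit with a conditional xor of POLY when the top bit carries out)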
+ vmovdqa xmm2, xmm6
+ vpsllq xmm6, xmm6, 1
+ vpsrlq xmm2, xmm2, 63
+ vmovdqa xmm1, xmm2
+ vpslldq xmm2, xmm2, 8
+ vpsrldq xmm1, xmm1, 8
+ vpor xmm6, xmm6, xmm2
+ ;reduction
+ vpshufd xmm2, xmm1, 00100100b
+ vpcmpeqd xmm2, [rel TWOONE]
+ vpand xmm2, xmm2, [rel POLY]
+ vpxor xmm6, xmm6, xmm2 ; xmm6 holds the HashKey<<1 mod poly
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ vmovdqu [arg1 + HashKey], xmm6 ; store HashKey<<1 mod poly
+
+
+ PRECOMPUTE arg1, xmm6, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5
+
+ FUNC_RESTORE
+exit_precomp:
+
+ ret
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;void aes_gcm_init_128_vaes_avx512 / aes_gcm_init_192_vaes_avx512 / aes_gcm_init_256_vaes_avx512
+; (const struct gcm_key_data *key_data,
+; struct gcm_context_data *context_data,
+; u8 *iv,
+; const u8 *aad,
+; u64 aad_len);
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+MKGLOBAL(FN_NAME(init,_),function,)
+FN_NAME(init,_):
+ FUNC_SAVE
+
+%ifdef SAFE_PARAM
+ ;; Check key_data != NULL
+ cmp arg1, 0
+ jz exit_init
+
+ ;; Check context_data != NULL
+ cmp arg2, 0
+ jz exit_init
+
+ ;; Check IV != NULL
+ cmp arg3, 0
+ jz exit_init
+
+ ;; Check if aad_len == 0
+ cmp arg5, 0
+ jz skip_aad_check_init
+
+ ;; Check aad != NULL (aad_len != 0)
+ cmp arg4, 0
+ jz exit_init
+
+skip_aad_check_init:
+%endif
+ GCM_INIT arg1, arg2, arg3, arg4, arg5, r10, r11, r12, k1, xmm14, xmm2, \
+ zmm1, zmm2, zmm3, zmm4, zmm5, zmm6, zmm7, zmm8, zmm9, zmm10
+
+exit_init:
+
+ FUNC_RESTORE
+ ret
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;void aes_gcm_enc_128_update_vaes_avx512 / aes_gcm_enc_192_update_vaes_avx512 /
+; aes_gcm_enc_256_update_vaes_avx512
+; (const struct gcm_key_data *key_data,
+; struct gcm_context_data *context_data,
+; u8 *out,
+; const u8 *in,
+; u64 plaintext_len);
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+MKGLOBAL(FN_NAME(enc,_update_),function,)
+FN_NAME(enc,_update_):
+ FUNC_SAVE
+
+%ifdef SAFE_PARAM
+ ;; Check key_data != NULL
+ cmp arg1, 0
+ jz exit_update_enc
+
+ ;; Check context_data != NULL
+ cmp arg2, 0
+ jz exit_update_enc
+
+ ;; Check if plaintext_len == 0
+ cmp arg5, 0
+ jz skip_in_out_check_update_enc
+
+ ;; Check out != NULL (plaintext_len != 0)
+ cmp arg3, 0
+ jz exit_update_enc
+
+ ;; Check in != NULL (plaintext_len != 0)
+ cmp arg4, 0
+ jz exit_update_enc
+
+skip_in_out_check_update_enc:
+%endif
+ GCM_ENC_DEC arg1, arg2, arg3, arg4, arg5, ENC, multi_call
+
+exit_update_enc:
+ FUNC_RESTORE
+ ret
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;void aes_gcm_dec_128_update_vaes_avx512 / aes_gcm_dec_192_update_vaes_avx512 /
+; aes_gcm_dec_256_update_vaes_avx512
+; (const struct gcm_key_data *key_data,
+; struct gcm_context_data *context_data,
+; u8 *out,
+; const u8 *in,
+; u64 plaintext_len);
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+MKGLOBAL(FN_NAME(dec,_update_),function,)
+FN_NAME(dec,_update_):
+ FUNC_SAVE
+
+%ifdef SAFE_PARAM
+ ;; Check key_data != NULL
+ cmp arg1, 0
+ jz exit_update_dec
+
+ ;; Check context_data != NULL
+ cmp arg2, 0
+ jz exit_update_dec
+
+ ;; Check if plaintext_len == 0
+ cmp arg5, 0
+ jz skip_in_out_check_update_dec
+
+ ;; Check out != NULL (plaintext_len != 0)
+ cmp arg3, 0
+ jz exit_update_dec
+
+ ;; Check in != NULL (plaintext_len != 0)
+ cmp arg4, 0
+ jz exit_update_dec
+
+skip_in_out_check_update_dec:
+%endif
+
+ GCM_ENC_DEC arg1, arg2, arg3, arg4, arg5, DEC, multi_call
+
+exit_update_dec:
+ FUNC_RESTORE
+ ret
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;void aes_gcm_enc_128_finalize_vaes_avx512 / aes_gcm_enc_192_finalize_vaes_avx512 /
+; aes_gcm_enc_256_finalize_vaes_avx512
+; (const struct gcm_key_data *key_data,
+; struct gcm_context_data *context_data,
+; u8 *auth_tag,
+; u64 auth_tag_len);
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+MKGLOBAL(FN_NAME(enc,_finalize_),function,)
+FN_NAME(enc,_finalize_):
+
+;; All parameters are passed through registers
+%ifdef SAFE_PARAM
+ ;; Check key_data != NULL
+ cmp arg1, 0
+ jz exit_enc_fin
+
+ ;; Check context_data != NULL
+ cmp arg2, 0
+ jz exit_enc_fin
+
+ ;; Check auth_tag != NULL
+ cmp arg3, 0
+ jz exit_enc_fin
+
+ ;; Check auth_tag_len == 0 or > 16
+ cmp arg4, 0
+ jz exit_enc_fin
+
+ cmp arg4, 16
+ ja exit_enc_fin
+%endif
+
+ FUNC_SAVE
+ GCM_COMPLETE arg1, arg2, arg3, arg4, ENC, multi_call
+
+ FUNC_RESTORE
+
+exit_enc_fin:
+ ret
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;void aes_gcm_dec_128_finalize_vaes_avx512 / aes_gcm_dec_192_finalize_vaes_avx512
+; aes_gcm_dec_256_finalize_vaes_avx512
+; (const struct gcm_key_data *key_data,
+; struct gcm_context_data *context_data,
+; u8 *auth_tag,
+; u64 auth_tag_len);
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+MKGLOBAL(FN_NAME(dec,_finalize_),function,)
+FN_NAME(dec,_finalize_):
+
+;; All parameters are passed through registers
+%ifdef SAFE_PARAM
+ ;; Check key_data != NULL
+ cmp arg1, 0
+ jz exit_dec_fin
+
+ ;; Check context_data != NULL
+ cmp arg2, 0
+ jz exit_dec_fin
+
+ ;; Check auth_tag != NULL
+ cmp arg3, 0
+ jz exit_dec_fin
+
+ ;; Check auth_tag_len == 0 or > 16
+ cmp arg4, 0
+ jz exit_dec_fin
+
+ cmp arg4, 16
+ ja exit_dec_fin
+%endif
+
+ FUNC_SAVE
+ GCM_COMPLETE arg1, arg2, arg3, arg4, DEC, multi_call
+
+ FUNC_RESTORE
+
+exit_dec_fin:
+ ret
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;void aes_gcm_enc_128_vaes_avx512 / aes_gcm_enc_192_vaes_avx512 / aes_gcm_enc_256_vaes_avx512
+; (const struct gcm_key_data *key_data,
+; struct gcm_context_data *context_data,
+; u8 *out,
+; const u8 *in,
+; u64 plaintext_len,
+; u8 *iv,
+; const u8 *aad,
+; u64 aad_len,
+; u8 *auth_tag,
+; u64 auth_tag_len);
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+MKGLOBAL(FN_NAME(enc,_),function,)
+FN_NAME(enc,_):
+
+ FUNC_SAVE
+
+%ifdef SAFE_PARAM
+ ;; Check key_data != NULL
+ cmp arg1, 0
+ jz exit_enc
+
+ ;; Check context_data != NULL
+ cmp arg2, 0
+ jz exit_enc
+
+ ;; Check IV != NULL
+ cmp arg6, 0
+ jz exit_enc
+
+ ;; Check auth_tag != NULL
+ cmp arg9, 0
+ jz exit_enc
+
+ ;; Check auth_tag_len == 0 or > 16
+ cmp arg10, 0
+ jz exit_enc
+
+ cmp arg10, 16
+ ja exit_enc
+
+ ;; Check if plaintext_len == 0
+ cmp arg5, 0
+ jz skip_in_out_check_enc
+
+ ;; Check out != NULL (plaintext_len != 0)
+ cmp arg3, 0
+ jz exit_enc
+
+ ;; Check in != NULL (plaintext_len != 0)
+ cmp arg4, 0
+ jz exit_enc
+
+skip_in_out_check_enc:
+ ;; Check if aad_len == 0
+ cmp arg8, 0
+ jz skip_aad_check_enc
+
+ ;; Check aad != NULL (aad_len != 0)
+ cmp arg7, 0
+ jz exit_enc
+
+skip_aad_check_enc:
+%endif
+ GCM_INIT arg1, arg2, arg6, arg7, arg8, r10, r11, r12, k1, xmm14, xmm2, \
+ zmm1, zmm2, zmm3, zmm4, zmm5, zmm6, zmm7, zmm8, zmm9, zmm10
+ GCM_ENC_DEC arg1, arg2, arg3, arg4, arg5, ENC, single_call
+ GCM_COMPLETE arg1, arg2, arg9, arg10, ENC, single_call
+
+exit_enc:
+ FUNC_RESTORE
+ ret
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;void aes_gcm_dec_128_vaes_avx512 / aes_gcm_dec_192_vaes_avx512 / aes_gcm_dec_256_vaes_avx512
+; (const struct gcm_key_data *key_data,
+; struct gcm_context_data *context_data,
+; u8 *out,
+; const u8 *in,
+; u64 plaintext_len,
+; u8 *iv,
+; const u8 *aad,
+; u64 aad_len,
+; u8 *auth_tag,
+; u64 auth_tag_len);
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+MKGLOBAL(FN_NAME(dec,_),function,)
+FN_NAME(dec,_):
+
+ FUNC_SAVE
+
+%ifdef SAFE_PARAM
+ ;; Check key_data != NULL
+ cmp arg1, 0
+ jz exit_dec
+
+ ;; Check context_data != NULL
+ cmp arg2, 0
+ jz exit_dec
+
+ ;; Check IV != NULL
+ cmp arg6, 0
+ jz exit_dec
+
+ ;; Check auth_tag != NULL
+ cmp arg9, 0
+ jz exit_dec
+
+ ;; Check auth_tag_len == 0 or > 16
+ cmp arg10, 0
+ jz exit_dec
+
+ cmp arg10, 16
+ ja exit_dec
+
+ ;; Check if plaintext_len == 0
+ cmp arg5, 0
+ jz skip_in_out_check_dec
+
+ ;; Check out != NULL (plaintext_len != 0)
+ cmp arg3, 0
+ jz exit_dec
+
+ ;; Check in != NULL (plaintext_len != 0)
+ cmp arg4, 0
+ jz exit_dec
+
+skip_in_out_check_dec:
+ ;; Check if aad_len == 0
+ cmp arg8, 0
+ jz skip_aad_check_dec
+
+ ;; Check aad != NULL (aad_len != 0)
+ cmp arg7, 0
+ jz exit_dec
+
+skip_aad_check_dec:
+%endif
+ GCM_INIT arg1, arg2, arg6, arg7, arg8, r10, r11, r12, k1, xmm14, xmm2, \
+ zmm1, zmm2, zmm3, zmm4, zmm5, zmm6, zmm7, zmm8, zmm9, zmm10
+ GCM_ENC_DEC arg1, arg2, arg3, arg4, arg5, DEC, single_call
+ GCM_COMPLETE arg1, arg2, arg9, arg10, DEC, single_call
+
+exit_dec:
+ FUNC_RESTORE
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
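+
+;; Usage sketch (informational): the functions in this file can be driven
+;; from C either in one shot or incrementally, e.g. for AES-128 (assuming
+;; key_data already holds the expanded AES round keys, as the precomp
+;; routine above requires; buffer names are placeholders, error checks
+;; omitted, and both structures may need alignment - see intel-ipsec-mb.h):
+;;
+;;   struct gcm_key_data key;          /* expanded key + hash key table */
+;;   struct gcm_context_data ctx;
+;;
+;;   aes_gcm_precomp_128_vaes_avx512(&key);
+;;
+;;   /* single call */
+;;   aes_gcm_enc_128_vaes_avx512(&key, &ctx, out, in, len, iv,
+;;                               aad, aad_len, tag, tag_len);
+;;
+;;   /* or multi-call over split buffers */
+;;   aes_gcm_init_128_vaes_avx512(&key, &ctx, iv, aad, aad_len);
+;;   aes_gcm_enc_128_update_vaes_avx512(&key, &ctx, out, in, len);
+;;   aes_gcm_enc_128_finalize_vaes_avx512(&key, &ctx, tag, tag_len);
+;;
+;; tag_len must be between 1 and 16 bytes (see the SAFE_PARAM checks above).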
diff --git a/src/spdk/intel-ipsec-mb/avx512/mb_mgr_aes192_flush_avx512.asm b/src/spdk/intel-ipsec-mb/avx512/mb_mgr_aes192_flush_avx512.asm
new file mode 100644
index 000000000..449229531
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx512/mb_mgr_aes192_flush_avx512.asm
@@ -0,0 +1,31 @@
+;;
+;; Copyright (c) 2019, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%define AES_CBC_ENC_X16 aes_cbc_enc_192_vaes_avx512
+%define FLUSH_JOB_AES_ENC flush_job_aes192_enc_vaes_avx512
+%define NUM_KEYS 13
+%include "avx512/mb_mgr_aes_flush_avx512.asm"
diff --git a/src/spdk/intel-ipsec-mb/avx512/mb_mgr_aes192_submit_avx512.asm b/src/spdk/intel-ipsec-mb/avx512/mb_mgr_aes192_submit_avx512.asm
new file mode 100644
index 000000000..3bbb30158
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx512/mb_mgr_aes192_submit_avx512.asm
@@ -0,0 +1,31 @@
+;;
+;; Copyright (c) 2019, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%define AES_CBC_ENC_X16 aes_cbc_enc_192_vaes_avx512
+%define SUBMIT_JOB_AES_ENC submit_job_aes192_enc_vaes_avx512
+%define NUM_KEYS 13
+%include "avx512/mb_mgr_aes_submit_avx512.asm"
diff --git a/src/spdk/intel-ipsec-mb/avx512/mb_mgr_aes256_flush_avx512.asm b/src/spdk/intel-ipsec-mb/avx512/mb_mgr_aes256_flush_avx512.asm
new file mode 100644
index 000000000..2ff448393
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx512/mb_mgr_aes256_flush_avx512.asm
@@ -0,0 +1,31 @@
+;;
+;; Copyright (c) 2019, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%define AES_CBC_ENC_X16 aes_cbc_enc_256_vaes_avx512
+%define FLUSH_JOB_AES_ENC flush_job_aes256_enc_vaes_avx512
+%define NUM_KEYS 15
+%include "avx512/mb_mgr_aes_flush_avx512.asm"
diff --git a/src/spdk/intel-ipsec-mb/avx512/mb_mgr_aes256_submit_avx512.asm b/src/spdk/intel-ipsec-mb/avx512/mb_mgr_aes256_submit_avx512.asm
new file mode 100644
index 000000000..4db4629e2
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx512/mb_mgr_aes256_submit_avx512.asm
@@ -0,0 +1,31 @@
+;;
+;; Copyright (c) 2019, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%define AES_CBC_ENC_X16 aes_cbc_enc_256_vaes_avx512
+%define SUBMIT_JOB_AES_ENC submit_job_aes256_enc_vaes_avx512
+%define NUM_KEYS 15
+%include "avx512/mb_mgr_aes_submit_avx512.asm"
diff --git a/src/spdk/intel-ipsec-mb/avx512/mb_mgr_aes_flush_avx512.asm b/src/spdk/intel-ipsec-mb/avx512/mb_mgr_aes_flush_avx512.asm
new file mode 100644
index 000000000..4a52ed1e6
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx512/mb_mgr_aes_flush_avx512.asm
@@ -0,0 +1,320 @@
+;;
+;; Copyright (c) 2019, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%include "include/os.asm"
+%include "job_aes_hmac.asm"
+%include "mb_mgr_datastruct.asm"
+%include "constants.asm"
+%include "include/reg_sizes.asm"
+
+%ifndef AES_CBC_ENC_X16
+%define AES_CBC_ENC_X16 aes_cbc_enc_128_vaes_avx512
+%define FLUSH_JOB_AES_ENC flush_job_aes128_enc_vaes_avx512
+%define NUM_KEYS 11
+%endif
+
+; void AES_CBC_ENC_X16(AES_ARGS *args, UINT64 len_in_bytes);
+extern AES_CBC_ENC_X16
+
+section .text
+
+%define APPEND(a,b) a %+ b
+
+%ifdef LINUX
+%define arg1 rdi
+%define arg2 rsi
+%else
+%define arg1 rcx
+%define arg2 rdx
+%endif
+
+%define state arg1
+%define job arg2
+%define len2 arg2
+
+%define job_rax rax
+
+%if 1
+%define unused_lanes rbx
+%define tmp1 rbx
+
+%define good_lane rdx
+%define iv rdx
+
+%define tmp2 rax
+
+; idx needs to be in rbp
+%define tmp rbp
+%define idx rbp
+
+%define tmp3 r8
+%define tmp4 r9
+%endif
+
+; copy IV into NULL lanes
+%macro COPY_IV_TO_NULL_LANES 4
+%define %%IDX %1 ; [in] GP with good lane idx (scaled x16)
+%define %%NULL_MASK %2 ; [clobbered] GP to store NULL lane mask
+%define %%XTMP %3 ; [clobbered] temp XMM reg
+%define %%MASK_REG %4 ; [in] mask register
+
+ vmovdqa64 %%XTMP, [state + _aes_args_IV + %%IDX]
+ kmovw DWORD(%%NULL_MASK), %%MASK_REG
+%assign i 0
+%rep 16
+ bt %%NULL_MASK, i
+ jnc %%_skip_copy %+ i
+ vmovdqa64 [state + _aes_args_IV + (i*16)], %%XTMP
+%%_skip_copy %+ i:
+%assign i (i + 1)
+%endrep
+
+%endmacro
+
+; clear IV in NULL lanes
+%macro CLEAR_IV_IN_NULL_LANES 3
+%define %%NULL_MASK %1 ; [clobbered] GP to store NULL lane mask
+%define %%XTMP %2 ; [clobbered] temp XMM reg
+%define %%MASK_REG %3 ; [in] mask register
+
+ vpxorq %%XTMP, %%XTMP
+ kmovw DWORD(%%NULL_MASK), %%MASK_REG
+%assign i 0
+%rep 16
+ bt %%NULL_MASK, i
+ jnc %%_skip_clear %+ i
+ vmovdqa64 [state + _aes_args_IV + (i*16)], %%XTMP
+%%_skip_clear %+ i:
+%assign i (i + 1)
+%endrep
+
+%endmacro
+
+; copy round key's into NULL lanes
+%macro COPY_KEYS_TO_NULL_LANES 5
+%define %%IDX %1 ; [in] GP with good lane idx (scaled x16)
+%define %%NULL_MASK %2 ; [clobbered] GP to store NULL lane mask
+%define %%KEY_TAB %3 ; [clobbered] GP to store key table pointer
+%define %%XTMP %4 ; [clobbered] temp XMM reg
+%define %%MASK_REG %5 ; [in] mask register
+
+ lea %%KEY_TAB, [state + _aes_args_key_tab]
+ kmovw DWORD(%%NULL_MASK), %%MASK_REG
+%assign j 0 ; outer loop to iterate through round keys
+%rep 15
+ vmovdqa64 %%XTMP, [%%KEY_TAB + j + %%IDX]
+%assign k 0 ; inner loop to iterate through lanes
+%rep 16
+ bt %%NULL_MASK, k
+ jnc %%_skip_copy %+ j %+ _ %+ k
+ vmovdqa64 [%%KEY_TAB + j + (k*16)], %%XTMP
+%%_skip_copy %+ j %+ _ %+ k:
+%assign k (k + 1)
+%endrep
+
+%assign j (j + 256)
+%endrep
+
+%endmacro
+
+; clear round keys in NULL lanes
+%macro CLEAR_KEYS_IN_NULL_LANES 3
+%define %%NULL_MASK %1 ; [clobbered] GP to store NULL lane mask
+%define %%XTMP %2 ; [clobbered] temp XMM reg
+%define %%MASK_REG %3 ; [in] mask register
+
+ vpxorq %%XTMP, %%XTMP
+ kmovw DWORD(%%NULL_MASK), %%MASK_REG
+%assign k 0 ; outer loop to iterate through lanes
+%rep 16
+ bt %%NULL_MASK, k
+ jnc %%_skip_clear %+ k
+%assign j 0 ; inner loop to iterate through round keys
+%rep NUM_KEYS
+ vmovdqa64 [state + _aesarg_key_tab + j + (k*16)], %%XTMP
+%assign j (j + 256)
+%endrep
+%%_skip_clear %+ k:
+%assign k (k + 1)
+%endrep
+
+%endmacro
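+
+;; The key copy/clear macros above assume a round-major key table layout:
+;; for each round key, 16 lanes x 16 bytes are stored contiguously, so one
+;; round "row" spans 16*16 = 256 bytes (hence the j + 256 stride) and a
+;; lane's slot within a row sits at lane*16. A rough C picture (illustrative
+;; only; the authoritative layout is defined in mb_mgr_datastruct.asm):
+;;
+;;   struct aes_args_key_tab {
+;;           uint8_t round_key[15][16][16];  /* [round][lane][byte] */
+;;   };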
+
+; STACK_SPACE needs to be an odd multiple of 8
+; This routine and its callee clobber all GPRs
+struc STACK
+_gpr_save: resq 8
+_rsp_save: resq 1
+endstruc
+
+; JOB* FLUSH_JOB_AES_ENC(MB_MGR_AES_OOO *state, JOB_AES_HMAC *job)
+; arg 1 : state
+; arg 2 : job
+MKGLOBAL(FLUSH_JOB_AES_ENC,function,internal)
+FLUSH_JOB_AES_ENC:
+
+ mov rax, rsp
+ sub rsp, STACK_size
+ and rsp, -16
+
+ mov [rsp + _gpr_save + 8*0], rbx
+ mov [rsp + _gpr_save + 8*1], rbp
+ mov [rsp + _gpr_save + 8*2], r12
+ mov [rsp + _gpr_save + 8*3], r13
+ mov [rsp + _gpr_save + 8*4], r14
+ mov [rsp + _gpr_save + 8*5], r15
+%ifndef LINUX
+ mov [rsp + _gpr_save + 8*6], rsi
+ mov [rsp + _gpr_save + 8*7], rdi
+%endif
+ mov [rsp + _rsp_save], rax ; original SP
+
+ ; check for empty
+ cmp qword [state + _aes_lanes_in_use], 0
+ je return_null
+
+ ; find a lane with a non-null job
+ vpxord zmm0, zmm0, zmm0
+ vmovdqu64 zmm1, [state + _aes_job_in_lane + (0*PTR_SZ)]
+ vmovdqu64 zmm2, [state + _aes_job_in_lane + (8*PTR_SZ)]
+ vpcmpq k1, zmm1, zmm0, 4 ; NEQ
+ vpcmpq k2, zmm2, zmm0, 4 ; NEQ
+ kmovw DWORD(tmp), k1
+ kmovw DWORD(tmp1), k2
+ mov DWORD(tmp2), DWORD(tmp1)
+ shl DWORD(tmp2), 8
+ or DWORD(tmp2), DWORD(tmp) ; mask of non-null jobs in tmp2
+ not BYTE(tmp)
+ kmovw k4, DWORD(tmp)
+ not BYTE(tmp1)
+ kmovw k5, DWORD(tmp1)
+ mov DWORD(tmp), DWORD(tmp2)
+ not WORD(tmp)
+ kmovw k6, DWORD(tmp) ; mask of NULL jobs in k4, k5 and k6
+ mov DWORD(tmp), DWORD(tmp2)
+ xor tmp2, tmp2
+ bsf WORD(tmp2), WORD(tmp) ; tmp2 = index of the 1st set bit in tmp
+
+ ;; copy good lane data into NULL lanes
+ mov tmp, [state + _aes_args_in + tmp2*8]
+ vpbroadcastq zmm1, tmp
+ vmovdqa64 [state + _aes_args_in + (0*PTR_SZ)]{k4}, zmm1
+ vmovdqa64 [state + _aes_args_in + (8*PTR_SZ)]{k5}, zmm1
+ ;; - out pointer
+ mov tmp, [state + _aes_args_out + tmp2*8]
+ vpbroadcastq zmm1, tmp
+ vmovdqa64 [state + _aes_args_out + (0*PTR_SZ)]{k4}, zmm1
+ vmovdqa64 [state + _aes_args_out + (8*PTR_SZ)]{k5}, zmm1
+
+ ;; - set len to UINT16_MAX
+ mov WORD(tmp), 0xffff
+ vpbroadcastw ymm3, WORD(tmp)
+ vmovdqa64 ymm0, [state + _aes_lens]
+ vmovdqu16 ymm0{k6}, ymm3
+ vmovdqa64 [state + _aes_lens], ymm0
+
+ ;; Find min length for lanes 0-7
+ vphminposuw xmm2, xmm0
+
+ ;; scale up good lane idx before copying IV and keys
+ shl tmp2, 4
+ ;; - copy IV to null lanes
+ COPY_IV_TO_NULL_LANES tmp2, tmp1, xmm4, k6
+
+ ; extract min length of lanes 0-7
+ vpextrw DWORD(len2), xmm2, 0 ; min value
+ vpextrw DWORD(idx), xmm2, 1 ; min index
+
+ ;; - copy round keys to null lanes
+ COPY_KEYS_TO_NULL_LANES tmp2, tmp1, tmp3, xmm4, k6
+
+ ;; Update lens and find min for lanes 8-15
+ vextracti128 xmm1, ymm0, 1
+ vphminposuw xmm2, xmm1
+ vpextrw DWORD(tmp3), xmm2, 0 ; min value
+ cmp DWORD(len2), DWORD(tmp3)
+ jle use_min
+ vpextrw DWORD(idx), xmm2, 1 ; min index
+ add DWORD(idx), 8 ; min was in lanes 8-15, so add 8 to index
+ mov len2, tmp3 ; min len
+use_min:
+ vpbroadcastw ymm3, WORD(len2)
+ vpsubw ymm0, ymm0, ymm3
+ vmovdqa [state + _aes_lens], ymm0
+
+ ; "state" and "args" are the same address, arg1
+ ; len is arg2
+ call AES_CBC_ENC_X16
+ ; state and idx are intact
+
+len_is_0:
+ ; process completed job "idx"
+ mov job_rax, [state + _aes_job_in_lane + idx*8]
+ mov unused_lanes, [state + _aes_unused_lanes]
+ mov qword [state + _aes_job_in_lane + idx*8], 0
+ or dword [job_rax + _status], STS_COMPLETED_AES
+ shl unused_lanes, 4
+ or unused_lanes, idx
+ mov [state + _aes_unused_lanes], unused_lanes
+ sub qword [state + _aes_lanes_in_use], 1
+
+%ifdef SAFE_DATA
+ ; Set the bit corresponding to the lane of the returned job
+ xor DWORD(tmp3), DWORD(tmp3)
+ bts DWORD(tmp3), DWORD(idx)
+ kmovw k1, DWORD(tmp3)
+ korw k6, k1, k6
+
+ ;; Clear IV and expanded keys of returned job and "NULL lanes"
+ ;; (k6 contains the mask of the jobs)
+ CLEAR_IV_IN_NULL_LANES tmp1, xmm0, k6
+ CLEAR_KEYS_IN_NULL_LANES tmp1, xmm0, k6
+%endif
+
+return:
+
+ mov rbx, [rsp + _gpr_save + 8*0]
+ mov rbp, [rsp + _gpr_save + 8*1]
+ mov r12, [rsp + _gpr_save + 8*2]
+ mov r13, [rsp + _gpr_save + 8*3]
+ mov r14, [rsp + _gpr_save + 8*4]
+ mov r15, [rsp + _gpr_save + 8*5]
+%ifndef LINUX
+ mov rsi, [rsp + _gpr_save + 8*6]
+ mov rdi, [rsp + _gpr_save + 8*7]
+%endif
+ mov rsp, [rsp + _rsp_save] ; original SP
+
+ ret
+
+return_null:
+ xor job_rax, job_rax
+ jmp return
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
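+
+;; Outline of the flush path above (informational):
+;;
+;;   if (lanes_in_use == 0) return NULL;
+;;   pick the first lane holding a non-NULL job (the "good" lane);
+;;   copy its in/out pointers, IV and round keys into every NULL lane;
+;;   set NULL lanes' lengths to 0xFFFF so they never win the min search;
+;;   idx = lane with the minimum length, len = that minimum;
+;;   subtract len from all lanes and call AES_CBC_ENC_X16(state, len);
+;;   mark the job in lane idx completed, push idx back onto unused_lanes,
+;;   decrement lanes_in_use and (with SAFE_DATA) scrub IVs/keys of idle lanes.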
diff --git a/src/spdk/intel-ipsec-mb/avx512/mb_mgr_aes_submit_avx512.asm b/src/spdk/intel-ipsec-mb/avx512/mb_mgr_aes_submit_avx512.asm
new file mode 100644
index 000000000..f79d15f68
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx512/mb_mgr_aes_submit_avx512.asm
@@ -0,0 +1,280 @@
+;;
+;; Copyright (c) 2019, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%include "include/os.asm"
+%include "job_aes_hmac.asm"
+%include "mb_mgr_datastruct.asm"
+
+%include "include/reg_sizes.asm"
+%include "include/const.inc"
+%ifndef AES_CBC_ENC_X16
+%define AES_CBC_ENC_X16 aes_cbc_enc_128_vaes_avx512
+%define NUM_KEYS 11
+%define SUBMIT_JOB_AES_ENC submit_job_aes128_enc_vaes_avx512
+%endif
+
+; void AES_CBC_ENC_X16(AES_ARGS_x16 *args, UINT64 len_in_bytes);
+extern AES_CBC_ENC_X16
+
+section .text
+
+%ifdef LINUX
+%define arg1 rdi
+%define arg2 rsi
+%else
+%define arg1 rcx
+%define arg2 rdx
+%endif
+
+%define state arg1
+%define job arg2
+%define len2 arg2
+
+%define job_rax rax
+
+%if 1
+; idx needs to be in rbp
+%define len rbp
+%define idx rbp
+%define tmp r10
+%define tmp2 r11
+%define tmp3 r12
+
+%define lane r8
+
+%define iv r9
+
+%define unused_lanes rbx
+%endif
+
+; STACK_SPACE needs to be an odd multiple of 8
+; This routine and its callee clobber all GPRs
+struc STACK
+_gpr_save: resq 8
+_rsp_save: resq 1
+endstruc
+
+
+%macro INSERT_KEYS 6
+%define %%KP %1 ; [in] GP reg with pointer to expanded keys
+%define %%LANE %2 ; [in] GP reg with lane number
+%define %%NKEYS %3 ; [in] number of round keys (numerical value)
+%define %%COL %4 ; [clobbered] GP reg
+%define %%ZTMP %5 ; [clobbered] ZMM reg
+%define %%IA0 %6 ; [clobbered] GP reg
+
+
+%assign ROW (16*16)
+
+ mov %%COL, %%LANE
+ shl %%COL, 4
+ lea %%IA0, [state + _aes_args_key_tab]
+ add %%COL, %%IA0
+
+ vmovdqu64 %%ZTMP, [%%KP]
+ vextracti64x2 [%%COL + ROW*0], %%ZTMP, 0
+ vextracti64x2 [%%COL + ROW*1], %%ZTMP, 1
+ vextracti64x2 [%%COL + ROW*2], %%ZTMP, 2
+ vextracti64x2 [%%COL + ROW*3], %%ZTMP, 3
+
+ vmovdqu64 %%ZTMP, [%%KP + 64]
+ vextracti64x2 [%%COL + ROW*4], %%ZTMP, 0
+ vextracti64x2 [%%COL + ROW*5], %%ZTMP, 1
+ vextracti64x2 [%%COL + ROW*6], %%ZTMP, 2
+ vextracti64x2 [%%COL + ROW*7], %%ZTMP, 3
+
+%if %%NKEYS > 11 ; 192 or 256 - copy 4 more keys
+ vmovdqu64 %%ZTMP, [%%KP + 128]
+ vextracti64x2 [%%COL + ROW*11], %%ZTMP, 3
+%else ; 128 - copy 3 more keys
+ mov %%IA0, 0x3f
+ kmovq k1, %%IA0
+ vmovdqu64 %%ZTMP{k1}{z}, [%%KP + 128]
+%endif
+ vextracti64x2 [%%COL + ROW*8], %%ZTMP, 0
+ vextracti64x2 [%%COL + ROW*9], %%ZTMP, 1
+ vextracti64x2 [%%COL + ROW*10], %%ZTMP, 2
+
+%if %%NKEYS == 15 ; 256 - 3 more keys
+ mov %%IA0, 0x3f
+ kmovq k1, %%IA0
+ vmovdqu64 %%ZTMP{k1}{z}, [%%KP + 192]
+ vextracti64x2 [%%COL + ROW*12], %%ZTMP, 0
+ vextracti64x2 [%%COL + ROW*13], %%ZTMP, 1
+ vextracti64x2 [%%COL + ROW*14], %%ZTMP, 2
+%elif %%NKEYS == 13 ; 192 - 1 more key
+ mov %%IA0, 0x3
+ kmovq k1, %%IA0
+ vmovdqu64 %%ZTMP{k1}{z}, [%%KP + 192]
+ vextracti64x2 [%%COL + ROW*12], %%ZTMP, 0
+%endif
+%endmacro
+
+; JOB* SUBMIT_JOB_AES_ENC(MB_MGR_AES_OOO *state, JOB_AES_HMAC *job)
+; arg 1 : state
+; arg 2 : job
+MKGLOBAL(SUBMIT_JOB_AES_ENC,function,internal)
+SUBMIT_JOB_AES_ENC:
+
+ mov rax, rsp
+ sub rsp, STACK_size
+ and rsp, -16
+
+ mov [rsp + _gpr_save + 8*0], rbx
+ mov [rsp + _gpr_save + 8*1], rbp
+ mov [rsp + _gpr_save + 8*2], r12
+ mov [rsp + _gpr_save + 8*3], r13
+ mov [rsp + _gpr_save + 8*4], r14
+ mov [rsp + _gpr_save + 8*5], r15
+%ifndef LINUX
+ mov [rsp + _gpr_save + 8*6], rsi
+ mov [rsp + _gpr_save + 8*7], rdi
+%endif
+ mov [rsp + _rsp_save], rax ; original SP
+
+ mov unused_lanes, [state + _aes_unused_lanes]
+ mov lane, unused_lanes
+ and lane, 0xF
+ shr unused_lanes, 4
+ mov len, [job + _msg_len_to_cipher_in_bytes]
+ and len, -16 ; DOCSIS may pass size unaligned to block size
+ mov iv, [job + _iv]
+ mov [state + _aes_unused_lanes], unused_lanes
+ add qword [state + _aes_lanes_in_use], 1
+
+ mov [state + _aes_job_in_lane + lane*8], job
+
+ ;; Update lane len
+ vmovdqa64 ymm0, [state + _aes_lens]
+ mov tmp2, rcx ; save rcx
+ mov rcx, lane
+ mov tmp, 1
+ shl tmp, cl
+ mov rcx, tmp2 ; restore rcx
+ kmovq k1, tmp
+
+ vpbroadcastw ymm1, WORD(len)
+ vmovdqu16 ymm0{k1}, ymm1
+ vmovdqa64 [state + _aes_lens], ymm0
+
+ ;; Find min length for lanes 0-7
+ vphminposuw xmm2, xmm0
+
+ ;; Update input pointer
+ mov tmp, [job + _src]
+ add tmp, [job + _cipher_start_src_offset_in_bytes]
+ vmovdqu xmm1, [iv]
+ mov [state + _aes_args_in + lane*8], tmp
+
+ ;; Insert expanded keys
+ mov tmp, [job + _aes_enc_key_expanded]
+ INSERT_KEYS tmp, lane, NUM_KEYS, tmp2, zmm4, tmp3
+
+ ;; Update output pointer
+ mov tmp, [job + _dst]
+ mov [state + _aes_args_out + lane*8], tmp
+ shl lane, 4 ; multiply by 16
+ vmovdqa [state + _aes_args_IV + lane], xmm1
+
+ cmp qword [state + _aes_lanes_in_use], 16
+ jne return_null
+
+ ; Find min length for lanes 8-15
+ vpextrw DWORD(len2), xmm2, 0 ; min value
+ vpextrw DWORD(idx), xmm2, 1 ; min index
+ vextracti128 xmm1, ymm0, 1
+ vphminposuw xmm2, xmm1
+ vpextrw DWORD(tmp), xmm2, 0 ; min value
+ cmp DWORD(len2), DWORD(tmp)
+ jle use_min
+ vpextrw DWORD(idx), xmm2, 1 ; min index
+ add DWORD(idx), 8 ; min was in lanes 8-15, so add 8 to index
+ mov len2, tmp ; min len
+use_min:
+ cmp len2, 0
+ je len_is_0
+
+ vpbroadcastw ymm3, WORD(len2)
+ vpsubw ymm0, ymm0, ymm3
+ vmovdqa [state + _aes_lens], ymm0
+
+ ; "state" and "args" are the same address, arg1
+ ; len is arg2
+ call AES_CBC_ENC_X16
+ ; state and idx are intact
+
+len_is_0:
+ ; process completed job "idx"
+ mov job_rax, [state + _aes_job_in_lane + idx*8]
+
+ mov unused_lanes, [state + _aes_unused_lanes]
+ mov qword [state + _aes_job_in_lane + idx*8], 0
+ or dword [job_rax + _status], STS_COMPLETED_AES
+ shl unused_lanes, 4
+ or unused_lanes, idx
+
+ mov [state + _aes_unused_lanes], unused_lanes
+ sub qword [state + _aes_lanes_in_use], 1
+
+%ifdef SAFE_DATA
+ ;; Clear IV
+ vpxorq xmm0, xmm0
+ shl idx, 4 ; multiply by 16
+ vmovdqa [state + _aes_args_IV + idx], xmm0
+
+ ;; Clear expanded keys
+%assign round 0
+%rep NUM_KEYS
+ vmovdqa [state + _aesarg_key_tab + round * (16*16) + idx], xmm0
+%assign round (round + 1)
+%endrep
+
+%endif
+
+return:
+
+ mov rbx, [rsp + _gpr_save + 8*0]
+ mov rbp, [rsp + _gpr_save + 8*1]
+ mov r12, [rsp + _gpr_save + 8*2]
+ mov r13, [rsp + _gpr_save + 8*3]
+ mov r14, [rsp + _gpr_save + 8*4]
+ mov r15, [rsp + _gpr_save + 8*5]
+%ifndef LINUX
+ mov rsi, [rsp + _gpr_save + 8*6]
+ mov rdi, [rsp + _gpr_save + 8*7]
+%endif
+ mov rsp, [rsp + _rsp_save] ; original SP
+
+ ret
+
+return_null:
+ xor job_rax, job_rax
+ jmp return
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
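+
+;; Outline of the submit path above (informational):
+;;
+;;   lane = lowest nibble popped from state->unused_lanes;
+;;   store the job pointer, IV, in/out pointers and expanded round keys
+;;   for that lane and set its length (rounded down to a 16-byte multiple);
+;;   if (lanes_in_use < 16) return NULL;   /* keep accumulating jobs */
+;;   idx = lane with the minimum length, len = that minimum;
+;;   if (len != 0) subtract len from all lanes and call
+;;   AES_CBC_ENC_X16(state, len);
+;;   mark the job in lane idx completed, push idx back onto unused_lanes
+;;   and decrement lanes_in_use.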
diff --git a/src/spdk/intel-ipsec-mb/avx512/mb_mgr_avx512.c b/src/spdk/intel-ipsec-mb/avx512/mb_mgr_avx512.c
new file mode 100644
index 000000000..bd1aaef63
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx512/mb_mgr_avx512.c
@@ -0,0 +1,1066 @@
+/*******************************************************************************
+ Copyright (c) 2012-2019, Intel Corporation
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*******************************************************************************/
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#define AVX512
+#define CLEAR_SCRATCH_SIMD_REGS clear_scratch_zmms
+
+#include "intel-ipsec-mb.h"
+#include "include/kasumi_internal.h"
+#include "include/zuc_internal.h"
+#include "include/snow3g.h"
+
+#include "save_xmms.h"
+#include "asm.h"
+#include "des.h"
+#include "gcm.h"
+#include "cpu_feature.h"
+#include "noaesni.h"
+
+JOB_AES_HMAC *submit_job_aes128_enc_avx(MB_MGR_AES_OOO *state,
+ JOB_AES_HMAC *job);
+JOB_AES_HMAC *flush_job_aes128_enc_avx(MB_MGR_AES_OOO *state);
+
+JOB_AES_HMAC *submit_job_aes192_enc_avx(MB_MGR_AES_OOO *state,
+ JOB_AES_HMAC *job);
+JOB_AES_HMAC *flush_job_aes192_enc_avx(MB_MGR_AES_OOO *state);
+
+JOB_AES_HMAC *submit_job_aes256_enc_avx(MB_MGR_AES_OOO *state,
+ JOB_AES_HMAC *job);
+JOB_AES_HMAC *flush_job_aes256_enc_avx(MB_MGR_AES_OOO *state);
+
+JOB_AES_HMAC *submit_job_aes_xcbc_avx(MB_MGR_AES_XCBC_OOO *state,
+ JOB_AES_HMAC *job);
+JOB_AES_HMAC *flush_job_aes_xcbc_avx(MB_MGR_AES_XCBC_OOO *state);
+
+JOB_AES_HMAC *submit_job_aes128_enc_vaes_avx512(MB_MGR_AES_OOO *state,
+ JOB_AES_HMAC *job);
+
+JOB_AES_HMAC *flush_job_aes128_enc_vaes_avx512(MB_MGR_AES_OOO *state);
+
+JOB_AES_HMAC *submit_job_aes192_enc_vaes_avx512(MB_MGR_AES_OOO *state,
+ JOB_AES_HMAC *job);
+
+JOB_AES_HMAC *flush_job_aes192_enc_vaes_avx512(MB_MGR_AES_OOO *state);
+
+JOB_AES_HMAC *submit_job_aes256_enc_vaes_avx512(MB_MGR_AES_OOO *state,
+ JOB_AES_HMAC *job);
+
+JOB_AES_HMAC *flush_job_aes256_enc_vaes_avx512(MB_MGR_AES_OOO *state);
+
+JOB_AES_HMAC *submit_job_des_cbc_enc_avx512(MB_MGR_DES_OOO *state,
+ JOB_AES_HMAC *job);
+JOB_AES_HMAC *flush_job_des_cbc_enc_avx512(MB_MGR_DES_OOO *state);
+
+JOB_AES_HMAC *submit_job_des_cbc_dec_avx512(MB_MGR_DES_OOO *state,
+ JOB_AES_HMAC *job);
+JOB_AES_HMAC *flush_job_des_cbc_dec_avx512(MB_MGR_DES_OOO *state);
+
+JOB_AES_HMAC *submit_job_3des_cbc_enc_avx512(MB_MGR_DES_OOO *state,
+ JOB_AES_HMAC *job);
+JOB_AES_HMAC *flush_job_3des_cbc_enc_avx512(MB_MGR_DES_OOO *state);
+
+JOB_AES_HMAC *submit_job_3des_cbc_dec_avx512(MB_MGR_DES_OOO *state,
+ JOB_AES_HMAC *job);
+JOB_AES_HMAC *flush_job_3des_cbc_dec_avx512(MB_MGR_DES_OOO *state);
+
+JOB_AES_HMAC *submit_job_docsis_des_enc_avx512(MB_MGR_DES_OOO *state,
+ JOB_AES_HMAC *job);
+JOB_AES_HMAC *flush_job_docsis_des_enc_avx512(MB_MGR_DES_OOO *state);
+
+JOB_AES_HMAC *submit_job_docsis_des_dec_avx512(MB_MGR_DES_OOO *state,
+ JOB_AES_HMAC *job);
+JOB_AES_HMAC *flush_job_docsis_des_dec_avx512(MB_MGR_DES_OOO *state);
+
+JOB_AES_HMAC *submit_job_aes_cntr_avx(JOB_AES_HMAC *job);
+
+JOB_AES_HMAC *submit_job_aes_cntr_bit_avx(JOB_AES_HMAC *job);
+
+#define SAVE_XMMS save_xmms_avx
+#define RESTORE_XMMS restore_xmms_avx
+
+#define SUBMIT_JOB_AES128_ENC submit_job_aes128_enc_avx512
+#define SUBMIT_JOB_AES128_DEC submit_job_aes128_dec_avx512
+#define FLUSH_JOB_AES128_ENC flush_job_aes128_enc_avx512
+
+#define SUBMIT_JOB_AES192_ENC submit_job_aes192_enc_avx512
+#define SUBMIT_JOB_AES192_DEC submit_job_aes192_dec_avx512
+#define FLUSH_JOB_AES192_ENC flush_job_aes192_enc_avx512
+
+#define SUBMIT_JOB_AES256_ENC submit_job_aes256_enc_avx512
+#define SUBMIT_JOB_AES256_DEC submit_job_aes256_dec_avx512
+#define FLUSH_JOB_AES256_ENC flush_job_aes256_enc_avx512
+
+#define SUBMIT_JOB_AES_ECB_128_ENC submit_job_aes_ecb_128_enc_avx
+#define SUBMIT_JOB_AES_ECB_128_DEC submit_job_aes_ecb_128_dec_avx
+#define SUBMIT_JOB_AES_ECB_192_ENC submit_job_aes_ecb_192_enc_avx
+#define SUBMIT_JOB_AES_ECB_192_DEC submit_job_aes_ecb_192_dec_avx
+#define SUBMIT_JOB_AES_ECB_256_ENC submit_job_aes_ecb_256_enc_avx
+#define SUBMIT_JOB_AES_ECB_256_DEC submit_job_aes_ecb_256_dec_avx
+
+#define SUBMIT_JOB_AES_CNTR submit_job_aes_cntr_avx512
+#define SUBMIT_JOB_AES_CNTR_BIT submit_job_aes_cntr_bit_avx512
+
+#define AES_CBC_DEC_128 aes_cbc_dec_128_avx512
+#define AES_CBC_DEC_192 aes_cbc_dec_192_avx512
+#define AES_CBC_DEC_256 aes_cbc_dec_256_avx512
+
+#define AES_CNTR_128 aes_cntr_128_avx
+#define AES_CNTR_192 aes_cntr_192_avx
+#define AES_CNTR_256 aes_cntr_256_avx
+
+#define AES_CNTR_CCM_128 aes_cntr_ccm_128_avx
+
+#define AES_ECB_ENC_128 aes_ecb_enc_128_avx
+#define AES_ECB_ENC_192 aes_ecb_enc_192_avx
+#define AES_ECB_ENC_256 aes_ecb_enc_256_avx
+#define AES_ECB_DEC_128 aes_ecb_dec_128_avx
+#define AES_ECB_DEC_192 aes_ecb_dec_192_avx
+#define AES_ECB_DEC_256 aes_ecb_dec_256_avx
+
+#define SUBMIT_JOB_PON_ENC submit_job_pon_enc_avx
+#define SUBMIT_JOB_PON_DEC submit_job_pon_dec_avx
+#define SUBMIT_JOB_PON_ENC_NO_CTR submit_job_pon_enc_no_ctr_avx
+#define SUBMIT_JOB_PON_DEC_NO_CTR submit_job_pon_dec_no_ctr_avx
+
+#define SUBMIT_JOB_AES_XCBC submit_job_aes_xcbc_avx
+#define FLUSH_JOB_AES_XCBC flush_job_aes_xcbc_avx
+
+#define SUBMIT_JOB_DES_CBC_ENC submit_job_des_cbc_enc_avx512
+#define FLUSH_JOB_DES_CBC_ENC flush_job_des_cbc_enc_avx512
+
+#define SUBMIT_JOB_DES_CBC_DEC submit_job_des_cbc_dec_avx512
+#define FLUSH_JOB_DES_CBC_DEC flush_job_des_cbc_dec_avx512
+
+#define SUBMIT_JOB_3DES_CBC_ENC submit_job_3des_cbc_enc_avx512
+#define FLUSH_JOB_3DES_CBC_ENC flush_job_3des_cbc_enc_avx512
+
+#define SUBMIT_JOB_3DES_CBC_DEC submit_job_3des_cbc_dec_avx512
+#define FLUSH_JOB_3DES_CBC_DEC flush_job_3des_cbc_dec_avx512
+
+#define SUBMIT_JOB_DOCSIS_DES_ENC submit_job_docsis_des_enc_avx512
+#define FLUSH_JOB_DOCSIS_DES_ENC flush_job_docsis_des_enc_avx512
+
+#define SUBMIT_JOB_DOCSIS_DES_DEC submit_job_docsis_des_dec_avx512
+#define FLUSH_JOB_DOCSIS_DES_DEC flush_job_docsis_des_dec_avx512
+
+#define SUBMIT_JOB_AES_ENC SUBMIT_JOB_AES_ENC_AVX512
+#define FLUSH_JOB_AES_ENC FLUSH_JOB_AES_ENC_AVX512
+#define SUBMIT_JOB_AES_DEC SUBMIT_JOB_AES_DEC_AVX512
+
+JOB_AES_HMAC *submit_job_hmac_avx512(MB_MGR_HMAC_SHA_1_OOO *state,
+ JOB_AES_HMAC *job);
+JOB_AES_HMAC *flush_job_hmac_avx512(MB_MGR_HMAC_SHA_1_OOO *state);
+
+JOB_AES_HMAC *submit_job_hmac_sha_224_avx512(MB_MGR_HMAC_SHA_256_OOO *state,
+ JOB_AES_HMAC *job);
+JOB_AES_HMAC *flush_job_hmac_sha_224_avx512(MB_MGR_HMAC_SHA_256_OOO *state);
+
+JOB_AES_HMAC *submit_job_hmac_sha_256_avx512(MB_MGR_HMAC_SHA_256_OOO *state,
+ JOB_AES_HMAC *job);
+JOB_AES_HMAC *flush_job_hmac_sha_256_avx512(MB_MGR_HMAC_SHA_256_OOO *state);
+
+JOB_AES_HMAC *submit_job_hmac_sha_384_avx512(MB_MGR_HMAC_SHA_512_OOO *state,
+ JOB_AES_HMAC *job);
+JOB_AES_HMAC *flush_job_hmac_sha_384_avx512(MB_MGR_HMAC_SHA_512_OOO *state);
+
+JOB_AES_HMAC *submit_job_hmac_sha_512_avx512(MB_MGR_HMAC_SHA_512_OOO *state,
+ JOB_AES_HMAC *job);
+JOB_AES_HMAC *flush_job_hmac_sha_512_avx512(MB_MGR_HMAC_SHA_512_OOO *state);
+
+JOB_AES_HMAC *submit_job_hmac_md5_avx2(MB_MGR_HMAC_MD5_OOO *state,
+ JOB_AES_HMAC *job);
+JOB_AES_HMAC *flush_job_hmac_md5_avx2(MB_MGR_HMAC_MD5_OOO *state);
+
+JOB_AES_HMAC *submit_job_aes_cmac_auth_avx(MB_MGR_CMAC_OOO *state,
+ JOB_AES_HMAC *job);
+
+JOB_AES_HMAC *flush_job_aes_cmac_auth_avx(MB_MGR_CMAC_OOO *state);
+
+JOB_AES_HMAC *submit_job_aes_ccm_auth_avx(MB_MGR_CCM_OOO *state,
+ JOB_AES_HMAC *job);
+
+JOB_AES_HMAC *flush_job_aes_ccm_auth_avx(MB_MGR_CCM_OOO *state);
+
+#define SUBMIT_JOB_HMAC submit_job_hmac_avx512
+#define FLUSH_JOB_HMAC flush_job_hmac_avx512
+#define SUBMIT_JOB_HMAC_SHA_224 submit_job_hmac_sha_224_avx512
+#define FLUSH_JOB_HMAC_SHA_224 flush_job_hmac_sha_224_avx512
+#define SUBMIT_JOB_HMAC_SHA_256 submit_job_hmac_sha_256_avx512
+#define FLUSH_JOB_HMAC_SHA_256 flush_job_hmac_sha_256_avx512
+#define SUBMIT_JOB_HMAC_SHA_384 submit_job_hmac_sha_384_avx512
+#define FLUSH_JOB_HMAC_SHA_384 flush_job_hmac_sha_384_avx512
+#define SUBMIT_JOB_HMAC_SHA_512 submit_job_hmac_sha_512_avx512
+#define FLUSH_JOB_HMAC_SHA_512 flush_job_hmac_sha_512_avx512
+#define SUBMIT_JOB_HMAC_MD5 submit_job_hmac_md5_avx2
+#define FLUSH_JOB_HMAC_MD5 flush_job_hmac_md5_avx2
+
+#ifndef NO_GCM
+#define AES_GCM_DEC_128 aes_gcm_dec_128_avx512
+#define AES_GCM_ENC_128 aes_gcm_enc_128_avx512
+#define AES_GCM_DEC_192 aes_gcm_dec_192_avx512
+#define AES_GCM_ENC_192 aes_gcm_enc_192_avx512
+#define AES_GCM_DEC_256 aes_gcm_dec_256_avx512
+#define AES_GCM_ENC_256 aes_gcm_enc_256_avx512
+
+#define AES_GCM_DEC_128_VAES aes_gcm_dec_128_vaes_avx512
+#define AES_GCM_ENC_128_VAES aes_gcm_enc_128_vaes_avx512
+#define AES_GCM_DEC_192_VAES aes_gcm_dec_192_vaes_avx512
+#define AES_GCM_ENC_192_VAES aes_gcm_enc_192_vaes_avx512
+#define AES_GCM_DEC_256_VAES aes_gcm_dec_256_vaes_avx512
+#define AES_GCM_ENC_256_VAES aes_gcm_enc_256_vaes_avx512
+
+#define SUBMIT_JOB_AES_GCM_DEC submit_job_aes_gcm_dec_avx512
+#define FLUSH_JOB_AES_GCM_DEC flush_job_aes_gcm_avx512
+#define SUBMIT_JOB_AES_GCM_ENC submit_job_aes_gcm_enc_avx512
+#define FLUSH_JOB_AES_GCM_ENC flush_job_aes_gcm_avx512
+#endif /* NO_GCM */
+
+/* ====================================================================== */
+
+#define SUBMIT_JOB submit_job_avx512
+#define FLUSH_JOB flush_job_avx512
+#define QUEUE_SIZE queue_size_avx512
+#define SUBMIT_JOB_NOCHECK submit_job_nocheck_avx512
+#define GET_NEXT_JOB get_next_job_avx512
+#define GET_COMPLETED_JOB get_completed_job_avx512
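+
+/*
+ * Rough application-level usage of this manager (informational sketch;
+ * exact job-field setup depends on the selected cipher/hash mode, and the
+ * allocation helper lives in alloc.c):
+ *
+ *   MB_MGR *mgr = alloc_mb_mgr(0);
+ *   init_mb_mgr_avx512(mgr);
+ *
+ *   JOB_AES_HMAC *job = get_next_job_avx512(mgr);
+ *   // fill in job->src, job->dst, job->msg_len_to_cipher_in_bytes,
+ *   // job->aes_enc_key_expanded, job->iv, ... then:
+ *   job = submit_job_avx512(mgr);                 // completed job or NULL
+ *   while ((job = flush_job_avx512(mgr)) != NULL)
+ *           ; // consume any remaining completed jobs
+ */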
+
+/* ====================================================================== */
+
+#define SUBMIT_JOB_HASH SUBMIT_JOB_HASH_AVX512
+#define FLUSH_JOB_HASH FLUSH_JOB_HASH_AVX512
+
+/* ====================================================================== */
+
+#define AES_CFB_128_ONE aes_cfb_128_one_avx512
+
+void aes128_cbc_mac_x8(AES_ARGS *args, uint64_t len);
+
+#define AES128_CBC_MAC aes128_cbc_mac_x8
+
+#define FLUSH_JOB_AES_CCM_AUTH flush_job_aes_ccm_auth_avx
+#define SUBMIT_JOB_AES_CCM_AUTH submit_job_aes_ccm_auth_avx
+
+#define FLUSH_JOB_AES_CMAC_AUTH flush_job_aes_cmac_auth_avx
+#define SUBMIT_JOB_AES_CMAC_AUTH submit_job_aes_cmac_auth_avx
+
+/* ====================================================================== */
+
+/*
+ * GCM submit / flush API for AVX512 arch
+ */
+#ifndef NO_GCM
+static JOB_AES_HMAC *
+plain_submit_gcm_dec_avx512(MB_MGR *state, JOB_AES_HMAC *job)
+{
+ DECLARE_ALIGNED(struct gcm_context_data ctx, 16);
+ (void) state;
+
+ if (16 == job->aes_key_len_in_bytes)
+ AES_GCM_DEC_128(job->aes_dec_key_expanded, &ctx, job->dst,
+ job->src +
+ job->cipher_start_src_offset_in_bytes,
+ job->msg_len_to_cipher_in_bytes,
+ job->iv,
+ job->u.GCM.aad, job->u.GCM.aad_len_in_bytes,
+ job->auth_tag_output,
+ job->auth_tag_output_len_in_bytes);
+ else if (24 == job->aes_key_len_in_bytes)
+ AES_GCM_DEC_192(job->aes_dec_key_expanded, &ctx, job->dst,
+ job->src +
+ job->cipher_start_src_offset_in_bytes,
+ job->msg_len_to_cipher_in_bytes,
+ job->iv,
+ job->u.GCM.aad, job->u.GCM.aad_len_in_bytes,
+ job->auth_tag_output,
+ job->auth_tag_output_len_in_bytes);
+ else /* assume 32 bytes */
+ AES_GCM_DEC_256(job->aes_dec_key_expanded, &ctx, job->dst,
+ job->src +
+ job->cipher_start_src_offset_in_bytes,
+ job->msg_len_to_cipher_in_bytes,
+ job->iv,
+ job->u.GCM.aad, job->u.GCM.aad_len_in_bytes,
+ job->auth_tag_output,
+ job->auth_tag_output_len_in_bytes);
+
+ job->status = STS_COMPLETED;
+ return job;
+}
+
+static JOB_AES_HMAC *
+plain_submit_gcm_enc_avx512(MB_MGR *state, JOB_AES_HMAC *job)
+{
+ DECLARE_ALIGNED(struct gcm_context_data ctx, 16);
+ (void) state;
+
+ if (16 == job->aes_key_len_in_bytes)
+ AES_GCM_ENC_128(job->aes_enc_key_expanded, &ctx, job->dst,
+ job->src +
+ job->cipher_start_src_offset_in_bytes,
+ job->msg_len_to_cipher_in_bytes, job->iv,
+ job->u.GCM.aad, job->u.GCM.aad_len_in_bytes,
+ job->auth_tag_output,
+ job->auth_tag_output_len_in_bytes);
+ else if (24 == job->aes_key_len_in_bytes)
+ AES_GCM_ENC_192(job->aes_enc_key_expanded, &ctx, job->dst,
+ job->src +
+ job->cipher_start_src_offset_in_bytes,
+ job->msg_len_to_cipher_in_bytes, job->iv,
+ job->u.GCM.aad, job->u.GCM.aad_len_in_bytes,
+ job->auth_tag_output,
+ job->auth_tag_output_len_in_bytes);
+ else /* assume 32 bytes */
+ AES_GCM_ENC_256(job->aes_enc_key_expanded, &ctx, job->dst,
+ job->src +
+ job->cipher_start_src_offset_in_bytes,
+ job->msg_len_to_cipher_in_bytes, job->iv,
+ job->u.GCM.aad, job->u.GCM.aad_len_in_bytes,
+ job->auth_tag_output,
+ job->auth_tag_output_len_in_bytes);
+
+ job->status = STS_COMPLETED;
+ return job;
+}
+
+static JOB_AES_HMAC *
+vaes_submit_gcm_dec_avx512(MB_MGR *state, JOB_AES_HMAC *job)
+{
+ DECLARE_ALIGNED(struct gcm_context_data ctx, 16);
+ (void) state;
+
+ if (16 == job->aes_key_len_in_bytes)
+ AES_GCM_DEC_128_VAES(job->aes_dec_key_expanded, &ctx, job->dst,
+ job->src +
+ job->cipher_start_src_offset_in_bytes,
+ job->msg_len_to_cipher_in_bytes,
+ job->iv,
+ job->u.GCM.aad,
+ job->u.GCM.aad_len_in_bytes,
+ job->auth_tag_output,
+ job->auth_tag_output_len_in_bytes);
+ else if (24 == job->aes_key_len_in_bytes)
+ AES_GCM_DEC_192_VAES(job->aes_dec_key_expanded, &ctx, job->dst,
+ job->src +
+ job->cipher_start_src_offset_in_bytes,
+ job->msg_len_to_cipher_in_bytes,
+ job->iv,
+ job->u.GCM.aad,
+ job->u.GCM.aad_len_in_bytes,
+ job->auth_tag_output,
+ job->auth_tag_output_len_in_bytes);
+ else /* assume 32 bytes */
+ AES_GCM_DEC_256_VAES(job->aes_dec_key_expanded, &ctx, job->dst,
+ job->src +
+ job->cipher_start_src_offset_in_bytes,
+ job->msg_len_to_cipher_in_bytes,
+ job->iv,
+ job->u.GCM.aad,
+ job->u.GCM.aad_len_in_bytes,
+ job->auth_tag_output,
+ job->auth_tag_output_len_in_bytes);
+
+ job->status = STS_COMPLETED;
+ return job;
+}
+
+static JOB_AES_HMAC *
+vaes_submit_gcm_enc_avx512(MB_MGR *state, JOB_AES_HMAC *job)
+{
+ DECLARE_ALIGNED(struct gcm_context_data ctx, 16);
+ (void) state;
+
+ if (16 == job->aes_key_len_in_bytes)
+ AES_GCM_ENC_128_VAES(job->aes_enc_key_expanded, &ctx, job->dst,
+ job->src +
+ job->cipher_start_src_offset_in_bytes,
+ job->msg_len_to_cipher_in_bytes, job->iv,
+ job->u.GCM.aad,
+ job->u.GCM.aad_len_in_bytes,
+ job->auth_tag_output,
+ job->auth_tag_output_len_in_bytes);
+ else if (24 == job->aes_key_len_in_bytes)
+ AES_GCM_ENC_192_VAES(job->aes_enc_key_expanded, &ctx, job->dst,
+ job->src +
+ job->cipher_start_src_offset_in_bytes,
+ job->msg_len_to_cipher_in_bytes, job->iv,
+ job->u.GCM.aad,
+ job->u.GCM.aad_len_in_bytes,
+ job->auth_tag_output,
+ job->auth_tag_output_len_in_bytes);
+ else /* assume 32 bytes */
+ AES_GCM_ENC_256_VAES(job->aes_enc_key_expanded, &ctx, job->dst,
+ job->src +
+ job->cipher_start_src_offset_in_bytes,
+ job->msg_len_to_cipher_in_bytes, job->iv,
+ job->u.GCM.aad,
+ job->u.GCM.aad_len_in_bytes,
+ job->auth_tag_output,
+ job->auth_tag_output_len_in_bytes);
+
+ job->status = STS_COMPLETED;
+ return job;
+}
+
+static JOB_AES_HMAC *
+flush_job_aes_gcm_avx512(MB_MGR *state, JOB_AES_HMAC *job)
+{
+ (void) state;
+ (void) job;
+ return NULL;
+}
+
+static JOB_AES_HMAC *(*submit_job_aes_gcm_enc_avx512)
+ (MB_MGR *state, JOB_AES_HMAC *job) = plain_submit_gcm_enc_avx512;
+
+static JOB_AES_HMAC *(*submit_job_aes_gcm_dec_avx512)
+ (MB_MGR *state, JOB_AES_HMAC *job) = plain_submit_gcm_dec_avx512;
+
+#endif /* NO_GCM */
+
+static JOB_AES_HMAC *(*submit_job_aes_cntr_avx512)
+ (JOB_AES_HMAC *job) = submit_job_aes_cntr_avx;
+static JOB_AES_HMAC *(*submit_job_aes_cntr_bit_avx512)
+ (JOB_AES_HMAC *job) = submit_job_aes_cntr_bit_avx;
+
+static JOB_AES_HMAC *
+vaes_submit_cntr_avx512(JOB_AES_HMAC *job)
+{
+ if (16 == job->aes_key_len_in_bytes)
+ aes_cntr_128_submit_vaes_avx512(job);
+ else if (24 == job->aes_key_len_in_bytes)
+ aes_cntr_192_submit_vaes_avx512(job);
+ else /* assume 32 bytes */
+ aes_cntr_256_submit_vaes_avx512(job);
+
+ job->status |= STS_COMPLETED_AES;
+ return job;
+}
+
+static JOB_AES_HMAC *
+vaes_submit_cntr_bit_avx512(JOB_AES_HMAC *job)
+{
+ if (16 == job->aes_key_len_in_bytes)
+ aes_cntr_bit_128_submit_vaes_avx512(job);
+ else if (24 == job->aes_key_len_in_bytes)
+ aes_cntr_bit_192_submit_vaes_avx512(job);
+ else /* assume 32 bytes */
+ aes_cntr_bit_256_submit_vaes_avx512(job);
+
+ job->status |= STS_COMPLETED_AES;
+ return job;
+}
+
+/* ====================================================================== */
+
+static JOB_AES_HMAC *
+(*submit_job_aes128_enc_avx512)
+ (MB_MGR_AES_OOO *state, JOB_AES_HMAC *job) = submit_job_aes128_enc_avx;
+
+static JOB_AES_HMAC *
+(*submit_job_aes192_enc_avx512)
+ (MB_MGR_AES_OOO *state, JOB_AES_HMAC *job) = submit_job_aes192_enc_avx;
+
+static JOB_AES_HMAC *
+(*submit_job_aes256_enc_avx512)
+ (MB_MGR_AES_OOO *state, JOB_AES_HMAC *job) = submit_job_aes256_enc_avx;
+
+static JOB_AES_HMAC *
+(*flush_job_aes128_enc_avx512)
+ (MB_MGR_AES_OOO *state) = flush_job_aes128_enc_avx;
+
+static JOB_AES_HMAC *
+(*flush_job_aes192_enc_avx512)
+ (MB_MGR_AES_OOO *state) = flush_job_aes192_enc_avx;
+
+static JOB_AES_HMAC *
+(*flush_job_aes256_enc_avx512)
+ (MB_MGR_AES_OOO *state) = flush_job_aes256_enc_avx;
+
+static void
+(*aes_cbc_dec_128_avx512) (const void *in, const uint8_t *IV,
+ const void *keys, void *out,
+ uint64_t len_bytes) = aes_cbc_dec_128_avx;
+static void
+(*aes_cbc_dec_192_avx512) (const void *in, const uint8_t *IV,
+ const void *keys, void *out,
+ uint64_t len_bytes) = aes_cbc_dec_192_avx;
+static void
+(*aes_cbc_dec_256_avx512) (const void *in, const uint8_t *IV,
+ const void *keys, void *out,
+ uint64_t len_bytes) = aes_cbc_dec_256_avx;
+
+void
+init_mb_mgr_avx512(MB_MGR *state)
+{
+ unsigned int j, vaes_support = 0;
+ uint8_t *p;
+ size_t size;
+
+ state->features = cpu_feature_adjust(state->flags,
+ cpu_feature_detect());
+
+ if (!(state->features & IMB_FEATURE_AESNI)) {
+ init_mb_mgr_sse_no_aesni(state);
+ return;
+ }
+ if ((state->features & IMB_FEATURE_VAES) == IMB_FEATURE_VAES) {
+ vaes_support = 1;
+ aes_cbc_dec_128_avx512 = aes_cbc_dec_128_vaes_avx512;
+ aes_cbc_dec_192_avx512 = aes_cbc_dec_192_vaes_avx512;
+ aes_cbc_dec_256_avx512 = aes_cbc_dec_256_vaes_avx512;
+ submit_job_aes128_enc_avx512 =
+ submit_job_aes128_enc_vaes_avx512;
+ flush_job_aes128_enc_avx512 =
+ flush_job_aes128_enc_vaes_avx512;
+ submit_job_aes192_enc_avx512 =
+ submit_job_aes192_enc_vaes_avx512;
+ flush_job_aes192_enc_avx512 =
+ flush_job_aes192_enc_vaes_avx512;
+ submit_job_aes256_enc_avx512 =
+ submit_job_aes256_enc_vaes_avx512;
+ flush_job_aes256_enc_avx512 =
+ flush_job_aes256_enc_vaes_avx512;
+ }
+
+ /* Init AES out-of-order fields */
+ if (vaes_support) {
+ /* init 16 lanes */
+ memset(state->aes128_ooo.lens, 0,
+ sizeof(state->aes128_ooo.lens));
+ memset(state->aes128_ooo.job_in_lane, 0,
+ sizeof(state->aes128_ooo.job_in_lane));
+ state->aes128_ooo.unused_lanes = 0xFEDCBA9876543210;
+ state->aes128_ooo.num_lanes_inuse = 0;
+
+ memset(state->aes192_ooo.lens, 0,
+ sizeof(state->aes192_ooo.lens));
+ memset(state->aes192_ooo.job_in_lane, 0,
+ sizeof(state->aes192_ooo.job_in_lane));
+ state->aes192_ooo.unused_lanes = 0xFEDCBA9876543210;
+ state->aes192_ooo.num_lanes_inuse = 0;
+
+ memset(state->aes256_ooo.lens, 0,
+ sizeof(state->aes256_ooo.lens));
+ memset(state->aes256_ooo.job_in_lane, 0,
+ sizeof(state->aes256_ooo.job_in_lane));
+ state->aes256_ooo.unused_lanes = 0xFEDCBA9876543210;
+ state->aes256_ooo.num_lanes_inuse = 0;
+ } else {
+ /* init 8 lanes */
+ memset(state->aes128_ooo.lens, 0xFF,
+ sizeof(state->aes128_ooo.lens));
+ memset(&state->aes128_ooo.lens[0], 0,
+ sizeof(state->aes128_ooo.lens[0]) * 8);
+ memset(state->aes128_ooo.job_in_lane, 0,
+ sizeof(state->aes128_ooo.job_in_lane));
+ state->aes128_ooo.unused_lanes = 0xF76543210;
+ state->aes128_ooo.num_lanes_inuse = 0;
+
+ memset(state->aes192_ooo.lens, 0xFF,
+ sizeof(state->aes192_ooo.lens));
+ memset(&state->aes192_ooo.lens[0], 0,
+ sizeof(state->aes192_ooo.lens[0]) * 8);
+ memset(state->aes192_ooo.job_in_lane, 0,
+ sizeof(state->aes192_ooo.job_in_lane));
+ state->aes192_ooo.unused_lanes = 0xF76543210;
+ state->aes192_ooo.num_lanes_inuse = 0;
+
+ memset(&state->aes256_ooo.lens, 0xFF,
+ sizeof(state->aes256_ooo.lens));
+ memset(&state->aes256_ooo.lens[0], 0,
+ sizeof(state->aes256_ooo.lens[0]) * 8);
+ memset(state->aes256_ooo.job_in_lane, 0,
+ sizeof(state->aes256_ooo.job_in_lane));
+ state->aes256_ooo.unused_lanes = 0xF76543210;
+ state->aes256_ooo.num_lanes_inuse = 0;
+ }
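+
+ /*
+  * unused_lanes is a stack of free lane indices packed 4 bits per entry,
+  * with the next free lane in the least significant nibble: the asm submit
+  * path pops a lane (and lane, 0xF / shr unused_lanes, 4) and completion
+  * pushes it back (shl unused_lanes, 4 / or unused_lanes, idx). So
+  * 0xFEDCBA9876543210 seeds all 16 lanes as free, while 0xF76543210 seeds
+  * only lanes 0-7, topped with an 0xF filler nibble.
+  */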
+
+
+ /* DOCSIS SEC BPI (AES CBC + AES CFB for partial block)
+ * uses same settings as AES128 CBC.
+ */
+ if (vaes_support) {
+ /* init 16 lanes */
+ memset(state->docsis_sec_ooo.lens, 0,
+ sizeof(state->docsis_sec_ooo.lens));
+ memset(state->docsis_sec_ooo.job_in_lane, 0,
+ sizeof(state->docsis_sec_ooo.job_in_lane));
+ state->docsis_sec_ooo.unused_lanes = 0xFEDCBA9876543210;
+ state->docsis_sec_ooo.num_lanes_inuse = 0;
+ } else {
+ /* init 8 lanes */
+ memset(state->docsis_sec_ooo.lens, 0xFF,
+ sizeof(state->docsis_sec_ooo.lens));
+ memset(&state->docsis_sec_ooo.lens[0], 0,
+ sizeof(state->docsis_sec_ooo.lens[0]) * 8);
+ memset(state->docsis_sec_ooo.job_in_lane, 0,
+ sizeof(state->docsis_sec_ooo.job_in_lane));
+ state->docsis_sec_ooo.unused_lanes = 0xF76543210;
+ state->docsis_sec_ooo.num_lanes_inuse = 0;
+ }
+
+
+ /* DES, 3DES and DOCSIS DES (DES CBC + DES CFB for partial block) */
+ /* - separate DES OOO for encryption */
+ for (j = 0; j < AVX512_NUM_DES_LANES; j++) {
+ state->des_enc_ooo.lens[j] = 0;
+ state->des_enc_ooo.job_in_lane[j] = NULL;
+ }
+ state->des_enc_ooo.unused_lanes = 0xFEDCBA9876543210;
+ state->des_enc_ooo.num_lanes_inuse = 0;
+ memset(&state->des_enc_ooo.args, 0, sizeof(state->des_enc_ooo.args));
+
+ /* - separate DES OOO for decryption */
+ for (j = 0; j < AVX512_NUM_DES_LANES; j++) {
+ state->des_dec_ooo.lens[j] = 0;
+ state->des_dec_ooo.job_in_lane[j] = NULL;
+ }
+ state->des_dec_ooo.unused_lanes = 0xFEDCBA9876543210;
+ state->des_dec_ooo.num_lanes_inuse = 0;
+ memset(&state->des_dec_ooo.args, 0, sizeof(state->des_dec_ooo.args));
+
+ /* - separate 3DES OOO for encryption */
+ for (j = 0; j < AVX512_NUM_DES_LANES; j++) {
+ state->des3_enc_ooo.lens[j] = 0;
+ state->des3_enc_ooo.job_in_lane[j] = NULL;
+ }
+ state->des3_enc_ooo.unused_lanes = 0xFEDCBA9876543210;
+ state->des3_enc_ooo.num_lanes_inuse = 0;
+ memset(&state->des3_enc_ooo.args, 0, sizeof(state->des3_enc_ooo.args));
+
+ /* - separate 3DES OOO for decryption */
+ for (j = 0; j < AVX512_NUM_DES_LANES; j++) {
+ state->des3_dec_ooo.lens[j] = 0;
+ state->des3_dec_ooo.job_in_lane[j] = NULL;
+ }
+ state->des3_dec_ooo.unused_lanes = 0xFEDCBA9876543210;
+ state->des3_dec_ooo.num_lanes_inuse = 0;
+ memset(&state->des3_dec_ooo.args, 0, sizeof(state->des3_dec_ooo.args));
+
+ /* - separate DOCSIS DES OOO for encryption */
+ for (j = 0; j < AVX512_NUM_DES_LANES; j++) {
+ state->docsis_des_enc_ooo.lens[j] = 0;
+ state->docsis_des_enc_ooo.job_in_lane[j] = NULL;
+ }
+ state->docsis_des_enc_ooo.unused_lanes = 0xFEDCBA9876543210;
+ state->docsis_des_enc_ooo.num_lanes_inuse = 0;
+ memset(&state->docsis_des_enc_ooo.args, 0,
+ sizeof(state->docsis_des_enc_ooo.args));
+
+	/* - separate DOCSIS DES OOO for decryption */
+ for (j = 0; j < AVX512_NUM_DES_LANES; j++) {
+ state->docsis_des_dec_ooo.lens[j] = 0;
+ state->docsis_des_dec_ooo.job_in_lane[j] = NULL;
+ }
+ state->docsis_des_dec_ooo.unused_lanes = 0xFEDCBA9876543210;
+ state->docsis_des_dec_ooo.num_lanes_inuse = 0;
+ memset(&state->docsis_des_dec_ooo.args, 0,
+ sizeof(state->docsis_des_dec_ooo.args));
+
+ /* Init HMAC/SHA1 out-of-order fields */
+ state->hmac_sha_1_ooo.lens[0] = 0;
+ state->hmac_sha_1_ooo.lens[1] = 0;
+ state->hmac_sha_1_ooo.lens[2] = 0;
+ state->hmac_sha_1_ooo.lens[3] = 0;
+ state->hmac_sha_1_ooo.lens[4] = 0;
+ state->hmac_sha_1_ooo.lens[5] = 0;
+ state->hmac_sha_1_ooo.lens[6] = 0;
+ state->hmac_sha_1_ooo.lens[7] = 0;
+ state->hmac_sha_1_ooo.lens[8] = 0;
+ state->hmac_sha_1_ooo.lens[9] = 0;
+ state->hmac_sha_1_ooo.lens[10] = 0;
+ state->hmac_sha_1_ooo.lens[11] = 0;
+ state->hmac_sha_1_ooo.lens[12] = 0;
+ state->hmac_sha_1_ooo.lens[13] = 0;
+ state->hmac_sha_1_ooo.lens[14] = 0;
+ state->hmac_sha_1_ooo.lens[15] = 0;
+ state->hmac_sha_1_ooo.unused_lanes = 0xFEDCBA9876543210;
+ state->hmac_sha_1_ooo.num_lanes_inuse = 0;
+ for (j = 0; j < AVX512_NUM_SHA1_LANES; j++) {
+ state->hmac_sha_1_ooo.ldata[j].job_in_lane = NULL;
+ state->hmac_sha_1_ooo.ldata[j].extra_block[64] = 0x80;
+ memset(state->hmac_sha_1_ooo.ldata[j].extra_block + 65,
+ 0x00,
+ 64 + 7);
+ p = state->hmac_sha_1_ooo.ldata[j].outer_block;
+ memset(p + 5*4 + 1,
+ 0x00,
+ 64 - 5*4 - 1 - 2);
+		p[5 * 4] = 0x80; /* 5 digest words */
+		p[64 - 2] = 0x02; /* length in bits = 0x02A0 (64 + 20 bytes) */
+ p[64 - 1] = 0xA0;
+ }
+
+ /* Init HMAC/SHA224 out-of-order fields */
+ state->hmac_sha_224_ooo.lens[0] = 0;
+ state->hmac_sha_224_ooo.lens[1] = 0;
+ state->hmac_sha_224_ooo.lens[2] = 0;
+ state->hmac_sha_224_ooo.lens[3] = 0;
+ state->hmac_sha_224_ooo.lens[4] = 0;
+ state->hmac_sha_224_ooo.lens[5] = 0;
+ state->hmac_sha_224_ooo.lens[6] = 0;
+ state->hmac_sha_224_ooo.lens[7] = 0;
+ state->hmac_sha_224_ooo.lens[8] = 0;
+ state->hmac_sha_224_ooo.lens[9] = 0;
+ state->hmac_sha_224_ooo.lens[10] = 0;
+ state->hmac_sha_224_ooo.lens[11] = 0;
+ state->hmac_sha_224_ooo.lens[12] = 0;
+ state->hmac_sha_224_ooo.lens[13] = 0;
+ state->hmac_sha_224_ooo.lens[14] = 0;
+ state->hmac_sha_224_ooo.lens[15] = 0;
+ state->hmac_sha_224_ooo.unused_lanes = 0xFEDCBA9876543210;
+ state->hmac_sha_224_ooo.num_lanes_inuse = 0;
+ /* sha256 and sha224 are very similar except for
+ * digest constants and output size
+ */
+ for (j = 0; j < AVX512_NUM_SHA256_LANES; j++) {
+ state->hmac_sha_224_ooo.ldata[j].job_in_lane = NULL;
+
+ p = state->hmac_sha_224_ooo.ldata[j].extra_block;
+ size = sizeof(state->hmac_sha_224_ooo.ldata[j].extra_block);
+ memset (p, 0x00, size);
+ p[64] = 0x80;
+
+ p = state->hmac_sha_224_ooo.ldata[j].outer_block;
+ size = sizeof(state->hmac_sha_224_ooo.ldata[j].outer_block);
+ memset(p, 0x00, size);
+ p[7 * 4] = 0x80; /* digest 7 words long */
+		p[64 - 2] = 0x02; /* length in bits = 0x02E0 (64 + 28 bytes) */
+ p[64 - 1] = 0xE0;
+ }
+
+ /* Init HMAC/SHA256 out-of-order fields */
+ state->hmac_sha_256_ooo.lens[0] = 0;
+ state->hmac_sha_256_ooo.lens[1] = 0;
+ state->hmac_sha_256_ooo.lens[2] = 0;
+ state->hmac_sha_256_ooo.lens[3] = 0;
+ state->hmac_sha_256_ooo.lens[4] = 0;
+ state->hmac_sha_256_ooo.lens[5] = 0;
+ state->hmac_sha_256_ooo.lens[6] = 0;
+ state->hmac_sha_256_ooo.lens[7] = 0;
+ state->hmac_sha_256_ooo.lens[8] = 0;
+ state->hmac_sha_256_ooo.lens[9] = 0;
+ state->hmac_sha_256_ooo.lens[10] = 0;
+ state->hmac_sha_256_ooo.lens[11] = 0;
+ state->hmac_sha_256_ooo.lens[12] = 0;
+ state->hmac_sha_256_ooo.lens[13] = 0;
+ state->hmac_sha_256_ooo.lens[14] = 0;
+ state->hmac_sha_256_ooo.lens[15] = 0;
+ state->hmac_sha_256_ooo.unused_lanes = 0xFEDCBA9876543210;
+ state->hmac_sha_256_ooo.num_lanes_inuse = 0;
+ for (j = 0; j < AVX512_NUM_SHA256_LANES; j++) {
+ state->hmac_sha_256_ooo.ldata[j].job_in_lane = NULL;
+ state->hmac_sha_256_ooo.ldata[j].extra_block[64] = 0x80;
+ memset(state->hmac_sha_256_ooo.ldata[j].extra_block + 65,
+ 0x00,
+ 64 + 7);
+ /* hmac related */
+ p = state->hmac_sha_256_ooo.ldata[j].outer_block;
+ memset(p + 8*4 + 1,
+ 0x00,
+ 64 - 8*4 - 1 - 2);
+ p[8 * 4] = 0x80; /* 8 digest words */
+		p[64 - 2] = 0x03; /* length in bits = 0x0300 (64 + 32 bytes) */
+ p[64 - 1] = 0x00;
+ }
+
+ /* Init HMAC/SHA384 out-of-order fields */
+ state->hmac_sha_384_ooo.lens[0] = 0;
+ state->hmac_sha_384_ooo.lens[1] = 0;
+ state->hmac_sha_384_ooo.lens[2] = 0;
+ state->hmac_sha_384_ooo.lens[3] = 0;
+ state->hmac_sha_384_ooo.lens[4] = 0;
+ state->hmac_sha_384_ooo.lens[5] = 0;
+ state->hmac_sha_384_ooo.lens[6] = 0;
+ state->hmac_sha_384_ooo.lens[7] = 0;
+ state->hmac_sha_384_ooo.unused_lanes = 0xF76543210;
+ for (j = 0; j < AVX512_NUM_SHA512_LANES; j++) {
+ MB_MGR_HMAC_SHA_512_OOO *ctx = &state->hmac_sha_384_ooo;
+
+ ctx->ldata[j].job_in_lane = NULL;
+ ctx->ldata[j].extra_block[SHA_384_BLOCK_SIZE] = 0x80;
+ memset(ctx->ldata[j].extra_block + (SHA_384_BLOCK_SIZE + 1),
+ 0x00, SHA_384_BLOCK_SIZE + 7);
+ p = ctx->ldata[j].outer_block;
+		/* the end point of this memset is fixed because the length is constant */
+ memset(p + SHA384_DIGEST_SIZE_IN_BYTES + 1, 0x00,
+ SHA_384_BLOCK_SIZE -
+ SHA384_DIGEST_SIZE_IN_BYTES - 1 - 2);
+ /* mark the end */
+ p[SHA384_DIGEST_SIZE_IN_BYTES] = 0x80;
+		/* The hmac outer block length is always fixed:
+		 * the OKey (one whole padded message block, 1024 bits)
+		 * plus the inner digest (384 bits) gives
+		 * 1408 bits == 0x0580.
+		 * The input message block needs to be converted to big endian
+		 * within the sha implementation before use.
+		 */
+ p[SHA_384_BLOCK_SIZE - 2] = 0x05;
+ p[SHA_384_BLOCK_SIZE - 1] = 0x80;
+ }
+
+ /* Init HMAC/SHA512 out-of-order fields */
+ state->hmac_sha_512_ooo.lens[0] = 0;
+ state->hmac_sha_512_ooo.lens[1] = 0;
+ state->hmac_sha_512_ooo.lens[2] = 0;
+ state->hmac_sha_512_ooo.lens[3] = 0;
+ state->hmac_sha_512_ooo.lens[4] = 0;
+ state->hmac_sha_512_ooo.lens[5] = 0;
+ state->hmac_sha_512_ooo.lens[6] = 0;
+ state->hmac_sha_512_ooo.lens[7] = 0;
+ state->hmac_sha_512_ooo.unused_lanes = 0xF76543210;
+ for (j = 0; j < AVX512_NUM_SHA512_LANES; j++) {
+ MB_MGR_HMAC_SHA_512_OOO *ctx = &state->hmac_sha_512_ooo;
+
+ ctx->ldata[j].job_in_lane = NULL;
+ ctx->ldata[j].extra_block[SHA_512_BLOCK_SIZE] = 0x80;
+ memset(ctx->ldata[j].extra_block + (SHA_512_BLOCK_SIZE + 1),
+ 0x00, SHA_512_BLOCK_SIZE + 7);
+ p = ctx->ldata[j].outer_block;
+		/* the end point of this memset is fixed because the length is constant */
+ memset(p + SHA512_DIGEST_SIZE_IN_BYTES + 1, 0x00,
+ SHA_512_BLOCK_SIZE -
+ SHA512_DIGEST_SIZE_IN_BYTES - 1 - 2);
+ /* mark the end */
+ p[SHA512_DIGEST_SIZE_IN_BYTES] = 0x80;
+		/* The hmac outer block length is always fixed:
+		 * the OKey (one whole padded message block, 1024 bits)
+		 * plus the inner digest (512 bits) gives
+		 * 1536 bits == 0x0600.
+		 * The input message block needs to be converted to big endian
+		 * within the sha implementation before use.
+		 */
+ p[SHA_512_BLOCK_SIZE - 2] = 0x06;
+ p[SHA_512_BLOCK_SIZE - 1] = 0x00;
+ }
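All of the outer-block length bytes planted above follow the same rule: the outer hash input is one opad key block plus the inner digest, and its bit length goes into the SHA padding. A quick stand-alone check of the constants used in this function (a sketch only, assuming the standard block and digest sizes):

    #include <stdio.h>

    int main(void)
    {
            const struct { const char *alg; unsigned block; unsigned digest; } t[] = {
                    { "HMAC-SHA-1",   64,  20 },  /* -> 0x02A0 */
                    { "HMAC-SHA-224", 64,  28 },  /* -> 0x02E0 */
                    { "HMAC-SHA-256", 64,  32 },  /* -> 0x0300 */
                    { "HMAC-SHA-384", 128, 48 },  /* -> 0x0580 */
                    { "HMAC-SHA-512", 128, 64 },  /* -> 0x0600 */
            };
            unsigned i;

            for (i = 0; i < sizeof(t) / sizeof(t[0]); i++)
                    printf("%s outer block: 0x%04X bits\n", t[i].alg,
                           (t[i].block + t[i].digest) * 8);
            return 0;
    }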
+
+ /* Init HMAC/MD5 out-of-order fields */
+ state->hmac_md5_ooo.lens[0] = 0;
+ state->hmac_md5_ooo.lens[1] = 0;
+ state->hmac_md5_ooo.lens[2] = 0;
+ state->hmac_md5_ooo.lens[3] = 0;
+ state->hmac_md5_ooo.lens[4] = 0;
+ state->hmac_md5_ooo.lens[5] = 0;
+ state->hmac_md5_ooo.lens[6] = 0;
+ state->hmac_md5_ooo.lens[7] = 0;
+ state->hmac_md5_ooo.lens[8] = 0;
+ state->hmac_md5_ooo.lens[9] = 0;
+ state->hmac_md5_ooo.lens[10] = 0;
+ state->hmac_md5_ooo.lens[11] = 0;
+ state->hmac_md5_ooo.lens[12] = 0;
+ state->hmac_md5_ooo.lens[13] = 0;
+ state->hmac_md5_ooo.lens[14] = 0;
+ state->hmac_md5_ooo.lens[15] = 0;
+ state->hmac_md5_ooo.unused_lanes = 0xFEDCBA9876543210;
+ state->hmac_md5_ooo.num_lanes_inuse = 0;
+ for (j = 0; j < AVX512_NUM_MD5_LANES; j++) {
+ state->hmac_md5_ooo.ldata[j].job_in_lane = NULL;
+
+ p = state->hmac_md5_ooo.ldata[j].extra_block;
+ size = sizeof(state->hmac_md5_ooo.ldata[j].extra_block);
+ memset (p, 0x00, size);
+ p[64] = 0x80;
+
+ p = state->hmac_md5_ooo.ldata[j].outer_block;
+ size = sizeof(state->hmac_md5_ooo.ldata[j].outer_block);
+ memset(p, 0x00, size);
+		p[4 * 4] = 0x80;  /* 4 digest words */
+		p[64 - 7] = 0x02; /* length in bits = 0x0280 (64 + 16 bytes), */
+		p[64 - 8] = 0x80; /* stored little endian per MD5 convention */
+ }
+
+ /* Init AES/XCBC OOO fields */
+ state->aes_xcbc_ooo.lens[0] = 0;
+ state->aes_xcbc_ooo.lens[1] = 0;
+ state->aes_xcbc_ooo.lens[2] = 0;
+ state->aes_xcbc_ooo.lens[3] = 0;
+ state->aes_xcbc_ooo.lens[4] = 0;
+ state->aes_xcbc_ooo.lens[5] = 0;
+ state->aes_xcbc_ooo.lens[6] = 0;
+ state->aes_xcbc_ooo.lens[7] = 0;
+ state->aes_xcbc_ooo.unused_lanes = 0xF76543210;
+ for (j = 0; j < 8 ; j++) {
+ state->aes_xcbc_ooo.ldata[j].job_in_lane = NULL;
+ state->aes_xcbc_ooo.ldata[j].final_block[16] = 0x80;
+ memset(state->aes_xcbc_ooo.ldata[j].final_block + 17, 0x00, 15);
+ }
+
+ /* Init AES-CCM auth out-of-order fields */
+ for (j = 0; j < 8; j++) {
+ state->aes_ccm_ooo.init_done[j] = 0;
+ state->aes_ccm_ooo.lens[j] = 0;
+ state->aes_ccm_ooo.job_in_lane[j] = NULL;
+ }
+ state->aes_ccm_ooo.unused_lanes = 0xF76543210;
+
+ /* Init AES-CMAC auth out-of-order fields */
+ for (j = 0; j < 8; j++) {
+ state->aes_cmac_ooo.init_done[j] = 0;
+ state->aes_cmac_ooo.lens[j] = 0;
+ state->aes_cmac_ooo.job_in_lane[j] = NULL;
+ }
+ state->aes_cmac_ooo.unused_lanes = 0xF76543210;
+
+ /* Init "in order" components */
+ state->next_job = 0;
+ state->earliest_job = -1;
+
+ /* set handlers */
+ state->get_next_job = get_next_job_avx512;
+ state->submit_job = submit_job_avx512;
+ state->submit_job_nocheck = submit_job_nocheck_avx512;
+ state->get_completed_job = get_completed_job_avx512;
+ state->flush_job = flush_job_avx512;
+ state->queue_size = queue_size_avx512;
+ state->keyexp_128 = aes_keyexp_128_avx512;
+ state->keyexp_192 = aes_keyexp_192_avx512;
+ state->keyexp_256 = aes_keyexp_256_avx512;
+ state->cmac_subkey_gen_128 = aes_cmac_subkey_gen_avx512;
+ state->xcbc_keyexp = aes_xcbc_expand_key_avx512;
+ state->des_key_sched = des_key_schedule;
+ state->sha1_one_block = sha1_one_block_avx512;
+ state->sha1 = sha1_avx512;
+ state->sha224_one_block = sha224_one_block_avx512;
+ state->sha224 = sha224_avx512;
+ state->sha256_one_block = sha256_one_block_avx512;
+ state->sha256 = sha256_avx512;
+ state->sha384_one_block = sha384_one_block_avx512;
+ state->sha384 = sha384_avx512;
+ state->sha512_one_block = sha512_one_block_avx512;
+ state->sha512 = sha512_avx512;
+ state->md5_one_block = md5_one_block_avx512;
+ state->aes128_cfb_one = aes_cfb_128_one_avx512;
+
+ state->eea3_1_buffer = zuc_eea3_1_buffer_avx;
+ state->eea3_4_buffer = zuc_eea3_4_buffer_avx;
+ state->eea3_n_buffer = zuc_eea3_n_buffer_avx;
+ state->eia3_1_buffer = zuc_eia3_1_buffer_avx;
+
+ state->f8_1_buffer = kasumi_f8_1_buffer_avx;
+ state->f8_1_buffer_bit = kasumi_f8_1_buffer_bit_avx;
+ state->f8_2_buffer = kasumi_f8_2_buffer_avx;
+ state->f8_3_buffer = kasumi_f8_3_buffer_avx;
+ state->f8_4_buffer = kasumi_f8_4_buffer_avx;
+ state->f8_n_buffer = kasumi_f8_n_buffer_avx;
+ state->f9_1_buffer = kasumi_f9_1_buffer_avx;
+ state->f9_1_buffer_user = kasumi_f9_1_buffer_user_avx;
+ state->kasumi_init_f8_key_sched = kasumi_init_f8_key_sched_avx;
+ state->kasumi_init_f9_key_sched = kasumi_init_f9_key_sched_avx;
+ state->kasumi_key_sched_size = kasumi_key_sched_size_avx;
+
+ state->snow3g_f8_1_buffer_bit = snow3g_f8_1_buffer_bit_avx2;
+ state->snow3g_f8_1_buffer = snow3g_f8_1_buffer_avx2;
+ state->snow3g_f8_2_buffer = snow3g_f8_2_buffer_avx2;
+ state->snow3g_f8_4_buffer = snow3g_f8_4_buffer_avx2;
+ state->snow3g_f8_8_buffer = snow3g_f8_8_buffer_avx2;
+ state->snow3g_f8_n_buffer = snow3g_f8_n_buffer_avx2;
+ state->snow3g_f8_8_buffer_multikey = snow3g_f8_8_buffer_multikey_avx2;
+ state->snow3g_f8_n_buffer_multikey = snow3g_f8_n_buffer_multikey_avx2;
+ state->snow3g_f9_1_buffer = snow3g_f9_1_buffer_avx2;
+ state->snow3g_init_key_sched = snow3g_init_key_sched_avx2;
+ state->snow3g_key_sched_size = snow3g_key_sched_size_avx2;
+
+ if ((state->features & IMB_FEATURE_VAES) == IMB_FEATURE_VAES) {
+ submit_job_aes_cntr_avx512 = vaes_submit_cntr_avx512;
+ submit_job_aes_cntr_bit_avx512 = vaes_submit_cntr_bit_avx512;
+ }
+#ifndef NO_GCM
+ if ((state->features & (IMB_FEATURE_VAES | IMB_FEATURE_VPCLMULQDQ)) ==
+ (IMB_FEATURE_VAES | IMB_FEATURE_VPCLMULQDQ)) {
+ state->gcm128_enc = aes_gcm_enc_128_vaes_avx512;
+ state->gcm192_enc = aes_gcm_enc_192_vaes_avx512;
+ state->gcm256_enc = aes_gcm_enc_256_vaes_avx512;
+ state->gcm128_dec = aes_gcm_dec_128_vaes_avx512;
+ state->gcm192_dec = aes_gcm_dec_192_vaes_avx512;
+ state->gcm256_dec = aes_gcm_dec_256_vaes_avx512;
+ state->gcm128_init = aes_gcm_init_128_vaes_avx512;
+ state->gcm192_init = aes_gcm_init_192_vaes_avx512;
+ state->gcm256_init = aes_gcm_init_256_vaes_avx512;
+ state->gcm128_enc_update = aes_gcm_enc_128_update_vaes_avx512;
+ state->gcm192_enc_update = aes_gcm_enc_192_update_vaes_avx512;
+ state->gcm256_enc_update = aes_gcm_enc_256_update_vaes_avx512;
+ state->gcm128_dec_update = aes_gcm_dec_128_update_vaes_avx512;
+ state->gcm192_dec_update = aes_gcm_dec_192_update_vaes_avx512;
+ state->gcm256_dec_update = aes_gcm_dec_256_update_vaes_avx512;
+ state->gcm128_enc_finalize =
+ aes_gcm_enc_128_finalize_vaes_avx512;
+ state->gcm192_enc_finalize =
+ aes_gcm_enc_192_finalize_vaes_avx512;
+ state->gcm256_enc_finalize =
+ aes_gcm_enc_256_finalize_vaes_avx512;
+ state->gcm128_dec_finalize =
+ aes_gcm_dec_128_finalize_vaes_avx512;
+ state->gcm192_dec_finalize =
+ aes_gcm_dec_192_finalize_vaes_avx512;
+ state->gcm256_dec_finalize =
+ aes_gcm_dec_256_finalize_vaes_avx512;
+ state->gcm128_precomp = aes_gcm_precomp_128_vaes_avx512;
+ state->gcm192_precomp = aes_gcm_precomp_192_vaes_avx512;
+ state->gcm256_precomp = aes_gcm_precomp_256_vaes_avx512;
+ state->gcm128_pre = aes_gcm_pre_128_vaes_avx512;
+ state->gcm192_pre = aes_gcm_pre_192_vaes_avx512;
+ state->gcm256_pre = aes_gcm_pre_256_vaes_avx512;
+
+ submit_job_aes_gcm_enc_avx512 = vaes_submit_gcm_enc_avx512;
+ submit_job_aes_gcm_dec_avx512 = vaes_submit_gcm_dec_avx512;
+ } else {
+ state->gcm128_enc = aes_gcm_enc_128_avx512;
+ state->gcm192_enc = aes_gcm_enc_192_avx512;
+ state->gcm256_enc = aes_gcm_enc_256_avx512;
+ state->gcm128_dec = aes_gcm_dec_128_avx512;
+ state->gcm192_dec = aes_gcm_dec_192_avx512;
+ state->gcm256_dec = aes_gcm_dec_256_avx512;
+ state->gcm128_init = aes_gcm_init_128_avx512;
+ state->gcm192_init = aes_gcm_init_192_avx512;
+ state->gcm256_init = aes_gcm_init_256_avx512;
+ state->gcm128_enc_update = aes_gcm_enc_128_update_avx512;
+ state->gcm192_enc_update = aes_gcm_enc_192_update_avx512;
+ state->gcm256_enc_update = aes_gcm_enc_256_update_avx512;
+ state->gcm128_dec_update = aes_gcm_dec_128_update_avx512;
+ state->gcm192_dec_update = aes_gcm_dec_192_update_avx512;
+ state->gcm256_dec_update = aes_gcm_dec_256_update_avx512;
+ state->gcm128_enc_finalize = aes_gcm_enc_128_finalize_avx512;
+ state->gcm192_enc_finalize = aes_gcm_enc_192_finalize_avx512;
+ state->gcm256_enc_finalize = aes_gcm_enc_256_finalize_avx512;
+ state->gcm128_dec_finalize = aes_gcm_dec_128_finalize_avx512;
+ state->gcm192_dec_finalize = aes_gcm_dec_192_finalize_avx512;
+ state->gcm256_dec_finalize = aes_gcm_dec_256_finalize_avx512;
+ state->gcm128_precomp = aes_gcm_precomp_128_avx512;
+ state->gcm192_precomp = aes_gcm_precomp_192_avx512;
+ state->gcm256_precomp = aes_gcm_precomp_256_avx512;
+ state->gcm128_pre = aes_gcm_pre_128_avx512;
+ state->gcm192_pre = aes_gcm_pre_192_avx512;
+ state->gcm256_pre = aes_gcm_pre_256_avx512;
+ }
+#endif
+}
+
+#include "mb_mgr_code.h"
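With the handlers wired up as above, applications are expected to drive the manager through these function pointers rather than calling the AVX-512 routines directly. The loop below is a rough, hypothetical driver: fill_job() and consume() are stand-ins for application code and the job fields themselves are omitted, so treat it as a sketch of the call pattern rather than the real API contract.

    #include <intel-ipsec-mb.h>     /* MB_MGR, JOB_AES_HMAC */

    /* Hypothetical application helpers, not part of the library. */
    static void fill_job(JOB_AES_HMAC *job) { (void) job; /* set cipher/hash fields */ }
    static void consume(JOB_AES_HMAC *job)  { (void) job; /* check status, use output */ }

    static void run_jobs(MB_MGR *mgr, unsigned n)
    {
            JOB_AES_HMAC *job;

            while (n--) {
                    job = mgr->get_next_job(mgr);   /* grab a free job slot */
                    fill_job(job);
                    job = mgr->submit_job(mgr);     /* non-NULL once some job completes */
                    while (job != NULL) {
                            consume(job);
                            job = mgr->get_completed_job(mgr);
                    }
            }
            /* drain whatever is still sitting in the out-of-order lanes */
            while ((job = mgr->flush_job(mgr)) != NULL)
                    consume(job);
    }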
diff --git a/src/spdk/intel-ipsec-mb/avx512/mb_mgr_des_avx512.asm b/src/spdk/intel-ipsec-mb/avx512/mb_mgr_des_avx512.asm
new file mode 100644
index 000000000..decea625b
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx512/mb_mgr_des_avx512.asm
@@ -0,0 +1,524 @@
+;;
+;; Copyright (c) 2017-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+;; In System V AMD64 ABI
+;; callee saves: RBX, RBP, R12-R15
+;; Windows x64 ABI
+;; callee saves: RBX, RBP, RDI, RSI, RSP, R12-R15
+
+;;
+;; Registers: RAX RBX RCX RDX RBP RSI RDI R8 R9 R10 R11 R12 R13 R14 R15
+;; -----------------------------------------------------------
+;; Windows clobbers: RAX R8 R9 R10 R11
+;; Windows preserves: RBX RCX RDX RBP RSI RDI R12 R13 R14 R15
+;; -----------------------------------------------------------
+;; Linux clobbers: RAX RCX RDX R10 R11
+;; Linux preserves: RBX RBP RSI RDI R8 R9 R12 R13 R14 R15
+;; -----------------------------------------------------------
+;; Clobbers ZMM0-31, K1-7 (K1-2 and K4-6 here but DES underneath clobbers K1-7).
+
+%include "include/os.asm"
+%include "include/reg_sizes.asm"
+%include "job_aes_hmac.asm"
+%include "mb_mgr_datastruct.asm"
+%include "constants.asm"
+;%define DO_DBGPRINT
+%include "include/dbgprint.asm"
+
+extern docsis_des_x16_enc_avx512
+extern docsis_des_x16_dec_avx512
+extern des_x16_cbc_enc_avx512
+extern des_x16_cbc_dec_avx512
+extern des3_x16_cbc_enc_avx512
+extern des3_x16_cbc_dec_avx512
+
+%ifdef LINUX
+%define arg1 rdi
+%define arg2 rsi
+%define arg3 rdx
+%define arg4 rcx
+%else
+%define arg1 rcx
+%define arg2 rdx
+%define arg3 r8
+%define arg4 r9
+%endif
+
+%define STATE arg1
+%define JOB arg2
+
+%define IA0 arg3
+%define IA1 arg4
+%define IA2 r10
+
+%define MIN_IDX r11
+%define MIN_LEN rax
+%define LANE r11
+
+%define AVX512_NUM_DES_LANES 16
+
+%define ZTMP0 zmm0
+%define ZTMP1 zmm1
+%define ZTMP2 zmm2
+%define ZTMP3 zmm3
+%define ZTMP4 zmm4
+%define ZTMP5 zmm5
+%define ZTMP6 zmm6
+%define ZTMP7 zmm7
+%define ZTMP8 zmm8
+%define ZTMP9 zmm9
+
+;;; ===========================================================================
+;;; ===========================================================================
+;;; MACROS
+;;; ===========================================================================
+;;; ===========================================================================
+
+;;; ===========================================================================
+;;; DES/DOCSIS DES job submit
+;;; ===========================================================================
+;;; DES_DOCSIS [in] - DES, DOCSIS or 3DES cipher selection
+;;; ENC_DEC [in] - ENCrypt or DECrypt selection
+%macro GENERIC_DES_SUBMIT 2
+%define %%DES_DOCSIS %1
+%define %%ENC_DEC %2
+
+        ;; get unused lane and increment number of lanes in use
+ mov IA0, [STATE + _des_unused_lanes]
+ mov LANE, IA0
+ and LANE, 0xF ;; just a nibble
+ shr IA0, 4
+ mov [STATE + _des_unused_lanes], IA0
+ add qword [STATE + _des_lanes_in_use], 1
+
+ ;; store job info in OOO structure
+ ;; - job pointer
+ mov [STATE + _des_job_in_lane + LANE*8], JOB
+ ;; - key schedule
+%ifidn %%ENC_DEC, ENC
+ mov IA2, [JOB + _aes_enc_key_expanded]
+%else
+ mov IA2, [JOB + _aes_dec_key_expanded]
+%endif
+ mov [STATE + _des_args_keys + LANE*8], IA2
+ ;; - IV
+ mov IA2, [JOB + _iv]
+ mov DWORD(IA0), [IA2]
+ mov DWORD(IA1), [IA2 + 4]
+ mov [STATE + _des_args_IV + LANE*4], DWORD(IA0)
+ mov [STATE + _des_args_IV + LANE*4 + (AVX512_NUM_DES_LANES*4)], DWORD(IA1)
+ ;; - src pointer
+ mov IA0, [JOB + _src]
+ add IA0, [JOB + _cipher_start_src_offset_in_bytes]
+ mov [STATE + _des_args_in + LANE*8], IA0
+ ;; - destination pointer
+ mov IA1, [JOB + _dst]
+ mov [STATE + _des_args_out + LANE*8], IA1
+ ;; - length in bytes (block aligned)
+ mov IA2, [JOB + _msg_len_to_cipher_in_bytes]
+ and IA2, -8
+ mov [STATE + _des_lens + LANE*2], WORD(IA2)
+%ifidn %%DES_DOCSIS, DOCSIS
+ ;; - block length
+ mov [STATE + _des_args_BLen + LANE*4], DWORD(IA2)
+ ;; - last in
+ add IA0, IA2
+ mov [STATE + _des_args_LIn + LANE*8], IA0
+ ;; - last out
+ add IA1, IA2
+ mov [STATE + _des_args_LOut + LANE*8], IA1
+ ;; - partial length
+ mov IA2, [JOB + _msg_len_to_cipher_in_bytes]
+ and IA2, 7
+ mov [STATE + _des_args_PLen + LANE*4], DWORD(IA2)
+%endif ; DOCSIS
+        ;; are there enough jobs to process them in parallel?
+ cmp qword [STATE + _des_lanes_in_use], AVX512_NUM_DES_LANES
+ jb %%_des_submit_null_end
+ ;; schedule the processing
+ ;; - find min job size
+ vmovdqa XWORD(ZTMP0), [STATE + _des_lens + 2*0]
+ vphminposuw XWORD(ZTMP2), XWORD(ZTMP0)
+ vpextrw DWORD(MIN_LEN), XWORD(ZTMP2), 0 ; min value
+ vpextrw DWORD(MIN_IDX), XWORD(ZTMP2), 1 ; min index
+ vmovdqa XWORD(ZTMP1), [STATE + _des_lens + 2*8]
+ vphminposuw XWORD(ZTMP2), XWORD(ZTMP1)
+ vpextrw DWORD(IA2), XWORD(ZTMP2), 0 ; min value
+ cmp DWORD(MIN_LEN), DWORD(IA2)
+ jle %%_use_min
+ vpextrw DWORD(MIN_IDX), XWORD(ZTMP2), 1 ; min index
+        add     DWORD(MIN_IDX), 8       ; min is in the upper 8 lanes, adjust index by 8
+ mov MIN_LEN, IA2 ; min len
+%%_use_min:
+ cmp MIN_LEN, 0
+ je %%_len_is_0
+
+ vpbroadcastw XWORD(ZTMP3), WORD(MIN_LEN)
+ vpsubw XWORD(ZTMP0), XWORD(ZTMP0), XWORD(ZTMP3)
+ vmovdqa [STATE + _des_lens + 2*0], XWORD(ZTMP0)
+ vpsubw XWORD(ZTMP1), XWORD(ZTMP1), XWORD(ZTMP3)
+ vmovdqa [STATE + _des_lens + 2*8], XWORD(ZTMP1)
+
+ push MIN_IDX
+ mov arg2, MIN_LEN
+%ifidn %%ENC_DEC, ENC
+ ;; encrypt
+%ifidn %%DES_DOCSIS, DOCSIS
+ call docsis_des_x16_enc_avx512
+%endif
+%ifidn %%DES_DOCSIS, DES
+ call des_x16_cbc_enc_avx512
+%endif
+%ifidn %%DES_DOCSIS, 3DES
+ call des3_x16_cbc_enc_avx512
+%endif
+%else ; ENC
+ ;; decrypt
+%ifidn %%DES_DOCSIS, DOCSIS
+ call docsis_des_x16_dec_avx512
+%endif
+%ifidn %%DES_DOCSIS, DES
+ call des_x16_cbc_dec_avx512
+%endif
+%ifidn %%DES_DOCSIS, 3DES
+ call des3_x16_cbc_dec_avx512
+%endif
+%endif ; DEC
+ pop MIN_IDX
+ jmp %%_des_submit_end
+
+%%_des_submit_null_end:
+ xor rax, rax
+ jmp %%_des_submit_return
+
+%%_len_is_0:
+%ifidn %%DES_DOCSIS, DOCSIS
+ cmp dword [STATE + _des_args_PLen + MIN_IDX*4], 0
+ jz %%_des_submit_end
+ push MIN_IDX
+ xor arg2, arg2 ; len is 0
+%ifidn %%ENC_DEC, ENC
+ call docsis_des_x16_enc_avx512
+%else ; ENC
+ call docsis_des_x16_dec_avx512
+%endif ; DEC
+ pop MIN_IDX
+%endif ; DOCSIS
+        ;; fall through
+%%_des_submit_end:
+ ;; return a job
+ ;; - decrement number of jobs in use
+ sub qword [STATE + _des_lanes_in_use], 1
+ ;; - put the lane back to free lanes pool
+ mov IA0, [STATE + _des_unused_lanes]
+ shl IA0, 4
+ or IA0, MIN_IDX
+ mov [STATE + _des_unused_lanes], IA0
+ ;; - mark job as complete
+ ;; - clear job pointer
+ mov rax, [STATE + _des_job_in_lane + MIN_IDX*8]
+ mov qword [STATE + _des_job_in_lane + MIN_IDX*8], 0
+ or dword [rax + _status], STS_COMPLETED_AES
+
+%ifdef SAFE_DATA
+ ;; Clear IV
+ mov dword [STATE + _des_args_IV + MIN_IDX*4], 0
+ mov dword [STATE + _des_args_IV + MIN_IDX*4 + (AVX512_NUM_DES_LANES*4)], 0
+%endif
+ vzeroupper
+%%_des_submit_return:
+%endmacro
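The submit macro above only kicks off processing once all 16 DES lanes hold a job; it then finds the shortest outstanding length with two vphminposuw searches, runs every lane for that common length, and retires the lane whose remaining length reached zero. A scalar C sketch of that scheduling step (illustrative only; des_x16_core() is a hypothetical stand-in for the x16 kernels, and the zero-length partial-block handling is left out):

    #include <stdint.h>

    #define NUM_LANES 16

    /* Hypothetical stand-in for des_x16_cbc_enc_avx512() and friends. */
    static void des_x16_core(void *args, uint64_t len_in_bytes)
    {
            (void) args; (void) len_in_bytes;
    }

    /* Returns the lane that has just run out of full blocks. */
    static unsigned schedule_min_len(uint16_t lens[NUM_LANES], void *args)
    {
            unsigned i, min_idx = 0;
            uint16_t min_len;

            /* two 8-wide vphminposuw searches in the asm; a plain loop here */
            for (i = 1; i < NUM_LANES; i++)
                    if (lens[i] < lens[min_idx])
                            min_idx = i;
            min_len = lens[min_idx];

            if (min_len != 0) {
                    for (i = 0; i < NUM_LANES; i++)
                            lens[i] -= min_len;     /* advance every lane */
                    des_x16_core(args, min_len);
            }
            return min_idx;
    }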
+
+;;; ===========================================================================
+;;; DES/DOCSIS DES flush
+;;; ===========================================================================
+;;; DES_DOCSIS [in] - DES, DOCSIS or 3DES cipher selection
+;;; ENC_DEC [in] - ENCrypt or DECrypt selection
+;;;
+;;; Clobbers k1, k2, k4, k5 and k6
+%macro GENERIC_DES_FLUSH 2
+%define %%DES_DOCSIS %1
+%define %%ENC_DEC %2
+
+ cmp qword [STATE + _des_lanes_in_use], 0
+ je %%_des_flush_null_end
+
+ ;; find non-null job
+ vpxord ZTMP0, ZTMP0, ZTMP0
+ vmovdqu64 ZTMP1, [STATE + _des_job_in_lane + (0*PTR_SZ)]
+ vmovdqu64 ZTMP2, [STATE + _des_job_in_lane + (8*PTR_SZ)]
+ vpcmpq k1, ZTMP1, ZTMP0, 4 ; NEQ
+ vpcmpq k2, ZTMP2, ZTMP0, 4 ; NEQ
+ xor IA0, IA0
+ xor IA1, IA1
+ kmovw DWORD(IA0), k1
+ kmovw DWORD(IA1), k2
+ mov DWORD(IA2), DWORD(IA1)
+ shl DWORD(IA2), 8
+ or DWORD(IA2), DWORD(IA0) ; mask of non-null jobs in IA2
+ not BYTE(IA0)
+ kmovw k4, DWORD(IA0)
+ not BYTE(IA1)
+ kmovw k5, DWORD(IA1)
+ mov DWORD(IA0), DWORD(IA2)
+ not WORD(IA0)
+ kmovw k6, DWORD(IA0) ; mask of NULL jobs in k4, k5 and k6
+ mov DWORD(IA0), DWORD(IA2)
+ xor IA2, IA2
+        bsf     WORD(IA2), WORD(IA0)    ; IA2 = index of the 1st set bit in IA0 (1st non-null job)
+
+ ;; copy good lane data into NULL lanes
+ ;; - k1(L8)/k2(H8) - masks of non-null jobs
+ ;; - k4(L8)/k5(H8)/k6 - masks of NULL jobs
+ ;; - IA2 index of 1st non-null job
+
+ ;; - in pointer
+ mov IA0, [STATE + _des_args_in + IA2*8]
+ vpbroadcastq ZTMP1, IA0
+ vmovdqu64 [STATE + _des_args_in + (0*PTR_SZ)]{k4}, ZTMP1
+ vmovdqu64 [STATE + _des_args_in + (8*PTR_SZ)]{k5}, ZTMP1
+ ;; - out pointer
+ mov IA0, [STATE + _des_args_out + IA2*8]
+ vpbroadcastq ZTMP1, IA0
+ vmovdqu64 [STATE + _des_args_out + (0*PTR_SZ)]{k4}, ZTMP1
+ vmovdqu64 [STATE + _des_args_out + (8*PTR_SZ)]{k5}, ZTMP1
+ ;; - key schedule
+ mov IA0, [STATE + _des_args_keys + IA2*8]
+ vpbroadcastq ZTMP1, IA0
+ vmovdqu64 [STATE + _des_args_keys + (0*PTR_SZ)]{k4}, ZTMP1
+ vmovdqu64 [STATE + _des_args_keys + (8*PTR_SZ)]{k5}, ZTMP1
+ ;; - zero partial len
+ vmovdqu32 [STATE + _des_args_PLen]{k6}, ZTMP0
+ ;; - set len to UINT16_MAX
+ mov WORD(IA0), 0xffff
+ vpbroadcastw ZTMP1, WORD(IA0)
+ vmovdqu16 [STATE + _des_lens]{k6}, ZTMP1
+
+ ;; - IV
+ mov DWORD(IA0), [STATE + _des_args_IV + IA2*4]
+ mov DWORD(IA1), [STATE + _des_args_IV + IA2*4 + (16*4)]
+ vpbroadcastd ZTMP1, DWORD(IA0)
+ vpbroadcastd ZTMP2, DWORD(IA1)
+ vmovdqu32 [STATE + _des_args_IV]{k6}, ZTMP1
+ vmovdqu32 [STATE + _des_args_IV + (16*4)]{k6}, ZTMP2
+
+ ;; schedule the processing
+ ;; - find min job size
+ vmovdqa XWORD(ZTMP0), [STATE + _des_lens + 2*0]
+ vphminposuw XWORD(ZTMP2), XWORD(ZTMP0)
+ vpextrw DWORD(MIN_LEN), XWORD(ZTMP2), 0 ; min value
+ vpextrw DWORD(MIN_IDX), XWORD(ZTMP2), 1 ; min index
+ vmovdqa XWORD(ZTMP1), [STATE + _des_lens + 2*8]
+ vphminposuw XWORD(ZTMP2), XWORD(ZTMP1)
+ vpextrw DWORD(IA2), XWORD(ZTMP2), 0 ; min value
+ cmp DWORD(MIN_LEN), DWORD(IA2)
+ jle %%_use_min
+ vpextrw DWORD(MIN_IDX), XWORD(ZTMP2), 1 ; min index
+        add     DWORD(MIN_IDX), 8       ; min is in the upper 8 lanes, adjust index by 8
+ mov MIN_LEN, IA2 ; min len
+%%_use_min:
+ vpbroadcastw XWORD(ZTMP3), WORD(MIN_LEN)
+ vpsubw XWORD(ZTMP0), XWORD(ZTMP0), XWORD(ZTMP3)
+ vmovdqa [STATE + _des_lens + 2*0], XWORD(ZTMP0)
+ vpsubw XWORD(ZTMP1), XWORD(ZTMP1), XWORD(ZTMP3)
+ vmovdqa [STATE + _des_lens + 2*8], XWORD(ZTMP1)
+
+ push MIN_IDX
+%ifdef SAFE_DATA
+ ;; Save k6, which may be clobbered by following functions
+ kmovq IA0, k6
+ push IA0
+%endif
+
+ mov arg2, MIN_LEN
+%ifidn %%ENC_DEC, ENC
+ ;; encrypt
+%ifidn %%DES_DOCSIS, DOCSIS
+ call docsis_des_x16_enc_avx512
+%endif
+%ifidn %%DES_DOCSIS, DES
+ call des_x16_cbc_enc_avx512
+%endif
+%ifidn %%DES_DOCSIS, 3DES
+ call des3_x16_cbc_enc_avx512
+%endif
+%else ; ENC
+ ;; decrypt
+%ifidn %%DES_DOCSIS, DOCSIS
+ call docsis_des_x16_dec_avx512
+%endif
+%ifidn %%DES_DOCSIS, DES
+ call des_x16_cbc_dec_avx512
+%endif
+%ifidn %%DES_DOCSIS, 3DES
+ call des3_x16_cbc_dec_avx512
+%endif
+%endif ; DEC
+%ifdef SAFE_DATA
+ ;; Restore k6, which may have been clobbered by previous functions
+ pop IA0
+ kmovq k6, IA0
+%endif
+ pop MIN_IDX
+ jmp %%_des_flush_end
+
+%%_des_flush_null_end:
+ xor rax, rax
+ jmp %%_des_flush_return
+%%_des_flush_end:
+ ;; return a job
+ ;; - decrement number of jobs in use
+ sub qword [STATE + _des_lanes_in_use], 1
+ ;; - put the lane back to free lanes pool
+ mov IA0, [STATE + _des_unused_lanes]
+ shl IA0, 4
+ or IA0, MIN_IDX
+ mov [STATE + _des_unused_lanes], IA0
+ ;; - mark job as complete
+ mov rax, [STATE + _des_job_in_lane + MIN_IDX*8]
+ or dword [rax + _status], STS_COMPLETED_AES
+ ;; - clear job pointer
+ mov qword [STATE + _des_job_in_lane + MIN_IDX*8], 0
+%ifdef SAFE_DATA
+ ; Set bit of lane of returned job
+        ; Set the bit corresponding to the lane of the returned job
+ bts DWORD(IA0), DWORD(MIN_IDX)
+ kmovd k1, DWORD(IA0)
+ kord k6, k1, k6
+
+ ;; Clear IV of returned job and "NULL lanes" (k6 contains the mask of the jobs)
+ vpxorq ZTMP1, ZTMP1
+ vmovdqa32 [STATE + _des_args_IV]{k6}, ZTMP1
+ vmovdqa32 [STATE + _des_args_IV + (16*4)]{k6}, ZTMP1
+%endif
+%%_des_flush_return:
+ vzeroupper
+%endmacro
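The flush macro still has to run the x16 kernel when some lanes are empty, so it points every idle lane at a valid lane's buffers and forces the idle lanes' lengths to UINT16_MAX so they can never win the minimum search. A simplified C sketch of that preparation (the struct below is a placeholder, not the real OOO layout):

    #include <stdint.h>

    #define NUM_LANES 16

    /* Placeholder lane state, for illustration only. */
    struct lane {
            const void *job;    /* NULL when the lane is idle */
            const void *in;
            void *out;
            const void *keys;
            uint16_t len;
    };

    static void prepare_flush(struct lane lanes[NUM_LANES])
    {
            int i, good = -1;

            for (i = 0; i < NUM_LANES; i++)
                    if (lanes[i].job != NULL) {
                            good = i;
                            break;
                    }
            if (good < 0)
                    return;                         /* nothing to flush */

            for (i = 0; i < NUM_LANES; i++) {
                    if (lanes[i].job != NULL)
                            continue;
                    /* idle lane: borrow the good lane's buffers ... */
                    lanes[i].in = lanes[good].in;
                    lanes[i].out = lanes[good].out;
                    lanes[i].keys = lanes[good].keys;
                    /* ... and keep it out of the minimum search */
                    lanes[i].len = UINT16_MAX;
            }
    }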
+
+;;; ========================================================
+;;; DATA
+
+section .data
+default rel
+
+;;; ========================================================
+;;; CODE
+section .text
+
+;;; arg 1 : pointer to DES OOO structure
+;;; arg 2 : job
+align 64
+MKGLOBAL(submit_job_des_cbc_enc_avx512,function,internal)
+submit_job_des_cbc_enc_avx512:
+ GENERIC_DES_SUBMIT DES, ENC
+ ret
+
+;;; arg 1 : pointer to DES OOO structure
+;;; arg 2 : job
+align 64
+MKGLOBAL(submit_job_des_cbc_dec_avx512,function,internal)
+submit_job_des_cbc_dec_avx512:
+ GENERIC_DES_SUBMIT DES, DEC
+ ret
+
+;;; arg 1 : pointer to DES OOO structure
+;;; arg 2 : job
+align 64
+MKGLOBAL(submit_job_docsis_des_enc_avx512,function,internal)
+submit_job_docsis_des_enc_avx512:
+ GENERIC_DES_SUBMIT DOCSIS, ENC
+ ret
+
+;;; arg 1 : pointer to DES OOO structure
+;;; arg 2 : job
+align 64
+MKGLOBAL(submit_job_docsis_des_dec_avx512,function,internal)
+submit_job_docsis_des_dec_avx512:
+ GENERIC_DES_SUBMIT DOCSIS, DEC
+ ret
+
+;;; arg 1 : pointer to DES OOO structure
+;;; arg 2 : job
+align 64
+MKGLOBAL(submit_job_3des_cbc_enc_avx512,function,internal)
+submit_job_3des_cbc_enc_avx512:
+ GENERIC_DES_SUBMIT 3DES, ENC
+ ret
+
+;;; arg 1 : pointer to DES OOO structure
+;;; arg 2 : job
+align 64
+MKGLOBAL(submit_job_3des_cbc_dec_avx512,function,internal)
+submit_job_3des_cbc_dec_avx512:
+ GENERIC_DES_SUBMIT 3DES, DEC
+ ret
+
+;;; arg 1 : pointer to DES OOO structure
+align 64
+MKGLOBAL(flush_job_des_cbc_enc_avx512,function,internal)
+flush_job_des_cbc_enc_avx512:
+ GENERIC_DES_FLUSH DES, ENC
+ ret
+
+;;; arg 1 : pointer to DES OOO structure
+align 64
+MKGLOBAL(flush_job_des_cbc_dec_avx512,function,internal)
+flush_job_des_cbc_dec_avx512:
+ GENERIC_DES_FLUSH DES, DEC
+ ret
+
+;;; arg 1 : pointer to DES OOO structure
+align 64
+MKGLOBAL(flush_job_docsis_des_enc_avx512,function,internal)
+flush_job_docsis_des_enc_avx512:
+ GENERIC_DES_FLUSH DOCSIS, ENC
+ ret
+
+;;; arg 1 : pointer to DES OOO structure
+align 64
+MKGLOBAL(flush_job_docsis_des_dec_avx512,function,internal)
+flush_job_docsis_des_dec_avx512:
+ GENERIC_DES_FLUSH DOCSIS, DEC
+ ret
+
+;;; arg 1 : pointer to DES OOO structure
+align 64
+MKGLOBAL(flush_job_3des_cbc_enc_avx512,function,internal)
+flush_job_3des_cbc_enc_avx512:
+ GENERIC_DES_FLUSH 3DES, ENC
+ ret
+
+;;; arg 1 : pointer to DES OOO structure
+align 64
+MKGLOBAL(flush_job_3des_cbc_dec_avx512,function,internal)
+flush_job_3des_cbc_dec_avx512:
+ GENERIC_DES_FLUSH 3DES, DEC
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/avx512/mb_mgr_hmac_flush_avx512.asm b/src/spdk/intel-ipsec-mb/avx512/mb_mgr_hmac_flush_avx512.asm
new file mode 100644
index 000000000..5fa08053f
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx512/mb_mgr_hmac_flush_avx512.asm
@@ -0,0 +1,367 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+;; In System V AMD64 ABI
+;; callee saves: RBX, RBP, R12-R15
+;; Windows x64 ABI
+;; callee saves: RBX, RBP, RDI, RSI, RSP, R12-R15
+;;
+;; Registers: RAX RBX RCX RDX RBP RSI RDI R8 R9 R10 R11 R12 R13 R14 R15
+;; -----------------------------------------------------------
+;; Windows clobbers: RAX RCX RDX R8 R9 R10 R11
+;; Windows preserves: RBX RBP RSI RDI R12 R13 R14 R15
+;; -----------------------------------------------------------
+;; Linux clobbers: RAX RSI RDI R8 R9 R10 R11
+;; Linux preserves: RBX RCX RDX RBP R12 R13 R14 R15
+;; -----------------------------------------------------------
+;; Clobbers ZMM0-31
+
+%include "include/os.asm"
+%include "job_aes_hmac.asm"
+%include "mb_mgr_datastruct.asm"
+%include "include/reg_sizes.asm"
+
+;; %define DO_DBGPRINT
+%include "include/dbgprint.asm"
+
+extern sha1_x16_avx512
+
+section .data
+default rel
+
+align 16
+byteswap:
+ dq 0x0405060700010203
+ dq 0x0c0d0e0f08090a0b
+
+align 32
+len_masks:
+ dq 0x000000000000FFFF, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000
+ dq 0x00000000FFFF0000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000
+ dq 0x0000FFFF00000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000
+ dq 0xFFFF000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000
+ dq 0x0000000000000000, 0x000000000000FFFF, 0x0000000000000000, 0x0000000000000000
+ dq 0x0000000000000000, 0x00000000FFFF0000, 0x0000000000000000, 0x0000000000000000
+ dq 0x0000000000000000, 0x0000FFFF00000000, 0x0000000000000000, 0x0000000000000000
+ dq 0x0000000000000000, 0xFFFF000000000000, 0x0000000000000000, 0x0000000000000000
+ dq 0x0000000000000000, 0x0000000000000000, 0x000000000000FFFF, 0x0000000000000000
+ dq 0x0000000000000000, 0x0000000000000000, 0x00000000FFFF0000, 0x0000000000000000
+ dq 0x0000000000000000, 0x0000000000000000, 0x0000FFFF00000000, 0x0000000000000000
+ dq 0x0000000000000000, 0x0000000000000000, 0xFFFF000000000000, 0x0000000000000000
+ dq 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x000000000000FFFF
+ dq 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x00000000FFFF0000
+ dq 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000FFFF00000000
+ dq 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0xFFFF000000000000
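Each 32-byte row of len_masks selects one 16-bit lane slot of the packed lens vector; OR-ing a row in (see copy_lane_data below) forces that lane's length to 0xFFFF, so a lane without a job can never win the vphminposuw minimum search. The table can be regenerated with a small C sketch (illustrative only):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            int i;

            /* row i puts 0xFFFF into 16-bit slot i of a 32-byte vector */
            for (i = 0; i < 16; i++) {
                    uint64_t q[4] = { 0, 0, 0, 0 };

                    q[i / 4] = 0xFFFFULL << ((i % 4) * 16);
                    printf("dq 0x%016llX, 0x%016llX, 0x%016llX, 0x%016llX\n",
                           (unsigned long long) q[0], (unsigned long long) q[1],
                           (unsigned long long) q[2], (unsigned long long) q[3]);
            }
            return 0;
    }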
+
+lane_1: dq 1
+lane_2: dq 2
+lane_3: dq 3
+lane_4: dq 4
+lane_5: dq 5
+lane_6: dq 6
+lane_7: dq 7
+lane_8: dq 8
+lane_9: dq 9
+lane_10: dq 10
+lane_11: dq 11
+lane_12: dq 12
+lane_13: dq 13
+lane_14: dq 14
+lane_15: dq 15
+
+section .text
+
+%if 1
+%ifdef LINUX
+%define arg1 rdi
+%define arg2 rsi
+%else
+%define arg1 rcx
+%define arg2 rdx
+%endif
+
+%define state arg1
+%define job arg2
+%define len2 arg2
+
+; idx needs to be in rbx, rdi, rbp
+%define idx rbp
+
+%define unused_lanes r9
+%define lane_data r9
+%define tmp2 r9
+%define num_lanes_inuse r12
+%define len_upper r13
+%define idx_upper r14
+
+%define job_rax rax
+%define tmp1 rax
+%define size_offset rax
+%define tmp rax
+%define start_offset rax
+
+%define tmp3 arg1
+
+%define extra_blocks arg2
+%define p arg2
+
+%define tmp4 r8
+
+%endif
+
+; we clobber rbp, called routine clobbers r12-r15
+struc STACK
+_gpr_save: resq 5
+_rsp_save: resq 1
+endstruc
+
+%define APPEND(a,b) a %+ b
+
+; JOB* flush_job_hmac_avx(MB_MGR_HMAC_SHA_1_OOO *state)
+; arg 1 : rcx : state
+MKGLOBAL(flush_job_hmac_avx512,function,internal)
+flush_job_hmac_avx512:
+
+ mov rax, rsp
+ sub rsp, STACK_size
+ and rsp, -32 ; align stack to 32 byte boundary
+ mov [rsp + _gpr_save + 8*0], rbp
+ mov [rsp + _gpr_save + 8*1], r12
+ mov [rsp + _gpr_save + 8*2], r13
+ mov [rsp + _gpr_save + 8*3], r14
+ mov [rsp + _gpr_save + 8*4], r15
+ mov [rsp + _rsp_save], rax
+
+ DBGPRINTL "---------- start hmac flush avx512 -----------"
+
+ mov DWORD(num_lanes_inuse), [state + _num_lanes_inuse_sha1] ;empty?
+ cmp num_lanes_inuse, 0
+ jz return_null
+
+ ; find a lane with a non-null job
+ xor idx, idx
+%assign I 1
+%rep 15
+ cmp qword [state + _ldata + (I * _HMAC_SHA1_LANE_DATA_size) + _job_in_lane], 0
+ cmovne idx, [rel APPEND(lane_,I)]
+%assign I (I+1)
+%endrep
+
+copy_lane_data:
+ ; copy valid lane (idx) to empty lanes
+ vmovdqa ymm0, [state + _lens]
+ mov tmp, [state + _args_data_ptr + PTR_SZ*idx]
+
+%assign I 0
+%rep 16
+ cmp qword [state + _ldata + I * _HMAC_SHA1_LANE_DATA_size + _job_in_lane], 0
+ jne APPEND(skip_,I)
+ mov [state + _args_data_ptr + PTR_SZ*I], tmp
+ vpor ymm0, ymm0, [rel len_masks + 32*I] ; 32 for ymm, 16 for xmm
+APPEND(skip_,I):
+%assign I (I+1)
+%endrep
+ vmovdqa [state + _lens], ymm0
+
+ vphminposuw xmm1, xmm0
+ vpextrw DWORD(len2), xmm1, 0 ; min value
+ vpextrw DWORD(idx), xmm1, 1 ; min index (0...7)
+
+ vmovdqa xmm2, [state + _lens + 8*2]
+ vphminposuw xmm3, xmm2
+ vpextrw DWORD(len_upper), xmm3, 0 ; min value
+ vpextrw DWORD(idx_upper), xmm3, 1 ; min index (8...F)
+
+ cmp len2, len_upper
+ jle use_min
+
+ vmovdqa xmm1, xmm3
+ mov len2, len_upper
+        mov     idx, idx_upper ; idx_upper is in range 0..7
+        add     idx, 8         ; adjust to reflect that the index is in the 8..F range
+
+use_min:
+ DBGPRINTL64 "FLUSH min_length", len2
+ DBGPRINTL64 "FLUSH min_length index ", idx
+ cmp len2, 0
+ je len_is_0
+
+ vpbroadcastw xmm1, xmm1
+ DBGPRINTL_XMM "FLUSH lens after shuffle", xmm1
+
+ vpsubw xmm0, xmm0, xmm1
+ vmovdqa [state + _lens], xmm0
+ vpsubw xmm2, xmm2, xmm1
+ vmovdqa [state + _lens + 8*2], xmm2
+ DBGPRINTL_XMM "FLUSH lens immediately after min subtraction (0..7)", xmm0
+ DBGPRINTL_XMM "FLUSH lens immediately after min subtraction (8..F)", xmm2
+
+ ; "state" and "args" are the same address, arg1
+ ; len is arg2
+ call sha1_x16_avx512
+ ; state and idx are intact
+
+len_is_0:
+ ; process completed job "idx"
+ imul lane_data, idx, _HMAC_SHA1_LANE_DATA_size
+ lea lane_data, [state + _ldata + lane_data]
+ mov DWORD(extra_blocks), [lane_data + _extra_blocks]
+ cmp extra_blocks, 0
+ jne proc_extra_blocks
+ cmp dword [lane_data + _outer_done], 0
+ jne end_loop
+
+proc_outer:
+ mov dword [lane_data + _outer_done], 1
+ mov DWORD(size_offset), [lane_data + _size_offset]
+ mov qword [lane_data + _extra_block + size_offset], 0
+ mov word [state + _lens + 2*idx], 1
+ lea tmp, [lane_data + _outer_block]
+ mov job, [lane_data + _job_in_lane]
+ mov [state + _args_data_ptr + PTR_SZ*idx], tmp
+
+ vmovd xmm0, [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 0*SHA1_DIGEST_ROW_SIZE]
+ vpinsrd xmm0, xmm0, [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 1*SHA1_DIGEST_ROW_SIZE], 1
+ vpinsrd xmm0, xmm0, [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 2*SHA1_DIGEST_ROW_SIZE], 2
+ vpinsrd xmm0, xmm0, [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 3*SHA1_DIGEST_ROW_SIZE], 3
+ vpshufb xmm0, xmm0, [rel byteswap]
+ mov DWORD(tmp), [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 4*SHA1_DIGEST_ROW_SIZE]
+ bswap DWORD(tmp)
+ vmovdqa [lane_data + _outer_block], xmm0
+ mov [lane_data + _outer_block + 4*4], DWORD(tmp)
+
+ mov tmp, [job + _auth_key_xor_opad]
+ vmovdqu xmm0, [tmp]
+ mov DWORD(tmp), [tmp + 4*4]
+ vmovd [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 0*SHA1_DIGEST_ROW_SIZE], xmm0
+ vpextrd [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 1*SHA1_DIGEST_ROW_SIZE], xmm0, 1
+ vpextrd [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 2*SHA1_DIGEST_ROW_SIZE], xmm0, 2
+ vpextrd [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 3*SHA1_DIGEST_ROW_SIZE], xmm0, 3
+ mov [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 4*SHA1_DIGEST_ROW_SIZE], DWORD(tmp)
+ jmp copy_lane_data
+
+ align 16
+proc_extra_blocks:
+ mov DWORD(start_offset), [lane_data + _start_offset]
+ mov [state + _lens + 2*idx], WORD(extra_blocks)
+ lea tmp, [lane_data + _extra_block + start_offset]
+ mov [state + _args_data_ptr + PTR_SZ*idx], tmp
+ mov dword [lane_data + _extra_blocks], 0
+ jmp copy_lane_data
+
+return_null:
+ DBGPRINTL "FLUSH *** ---------- return null"
+ xor job_rax, job_rax
+ jmp return
+
+ align 16
+end_loop:
+ mov job_rax, [lane_data + _job_in_lane]
+ mov qword [lane_data + _job_in_lane], 0
+ or dword [job_rax + _status], STS_COMPLETED_HMAC
+
+ mov unused_lanes, [state + _unused_lanes]
+ shl unused_lanes, 4 ;; a nibble
+ or unused_lanes, idx
+ mov [state + _unused_lanes], unused_lanes
+
+ sub dword [state + _num_lanes_inuse_sha1], 1
+
+ mov p, [job_rax + _auth_tag_output]
+
+ ; copy 12 bytes
+ mov DWORD(tmp2), [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 0*SHA1_DIGEST_ROW_SIZE]
+ mov DWORD(tmp4), [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 1*SHA1_DIGEST_ROW_SIZE]
+ mov DWORD(r12), [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 2*SHA1_DIGEST_ROW_SIZE]
+ bswap DWORD(tmp2)
+ bswap DWORD(tmp4)
+ bswap DWORD(r12)
+ mov [p + 0*4], DWORD(tmp2)
+ mov [p + 1*4], DWORD(tmp4)
+ mov [p + 2*4], DWORD(r12)
+
+ cmp qword [job_rax + _auth_tag_output_len_in_bytes], 12
+ je clear_ret
+
+ ;; copy remaining 8 bytes to return 20 byte digest
+ mov DWORD(r13), [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 3*SHA1_DIGEST_ROW_SIZE]
+ mov DWORD(r14), [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 4*SHA1_DIGEST_ROW_SIZE]
+ bswap DWORD(r13)
+ bswap DWORD(r14)
+ mov [p + 3*SHA1_DIGEST_WORD_SIZE], DWORD(r13)
+ mov [p + 4*SHA1_DIGEST_WORD_SIZE], DWORD(r14)
+
+clear_ret:
+
+%ifdef SAFE_DATA
+ vpxorq zmm0, zmm0
+
+ ;; Clear digest (20B), outer_block (20B) and extra_block (64B)
+ ;; of returned job and NULL jobs
+%assign I 0
+%rep 16
+ cmp qword [state + _ldata + (I*_HMAC_SHA1_LANE_DATA_size) + _job_in_lane], 0
+ jne APPEND(skip_clear_,I)
+
+ ;; Clear digest
+ mov dword [state + _args_digest + SHA1_DIGEST_WORD_SIZE*I + 0*SHA1_DIGEST_ROW_SIZE], 0
+ mov dword [state + _args_digest + SHA1_DIGEST_WORD_SIZE*I + 1*SHA1_DIGEST_ROW_SIZE], 0
+ mov dword [state + _args_digest + SHA1_DIGEST_WORD_SIZE*I + 2*SHA1_DIGEST_ROW_SIZE], 0
+ mov dword [state + _args_digest + SHA1_DIGEST_WORD_SIZE*I + 3*SHA1_DIGEST_ROW_SIZE], 0
+ mov dword [state + _args_digest + SHA1_DIGEST_WORD_SIZE*I + 4*SHA1_DIGEST_ROW_SIZE], 0
+
+ lea lane_data, [state + _ldata + (I*_HMAC_SHA1_LANE_DATA_size)]
+
+ ;; Clear first 64 bytes of extra_block
+ vmovdqu64 [lane_data + _extra_block], zmm0
+
+ ;; Clear first 20 bytes of outer_block
+ vmovdqu64 [lane_data + _outer_block], xmm0
+ mov dword [lane_data + _outer_block + 16], 0
+
+APPEND(skip_clear_,I):
+%assign I (I+1)
+%endrep
+
+%endif ;; SAFE_DATA
+
+return:
+ DBGPRINTL "---------- exit hmac flush avx512 -----------"
+ vzeroupper
+
+ mov rbp, [rsp + _gpr_save + 8*0]
+ mov r12, [rsp + _gpr_save + 8*1]
+ mov r13, [rsp + _gpr_save + 8*2]
+ mov r14, [rsp + _gpr_save + 8*3]
+ mov r15, [rsp + _gpr_save + 8*4]
+ mov rsp, [rsp + _rsp_save]
+ ret
+
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
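The end_loop path above returns the digest in big-endian byte order, writing either the truncated 12-byte tag or the full 20 bytes depending on the job's auth_tag_output_len_in_bytes. A simplified C equivalent of that copy (assuming, for illustration only, a contiguous per-lane digest; the real OOO structure interleaves the lanes by digest word):

    #include <stdint.h>
    #include <string.h>
    #include <stddef.h>

    static uint32_t bswap32(uint32_t v)
    {
            return (v >> 24) | ((v >> 8) & 0x0000FF00U) |
                   ((v << 8) & 0x00FF0000U) | (v << 24);
    }

    /* Copy a 12-byte or 20-byte HMAC-SHA-1 tag out of a lane digest. */
    static void copy_auth_tag(uint8_t *tag, const uint32_t digest[5],
                              size_t tag_len)
    {
            size_t i, words = (tag_len == 12) ? 3 : 5;

            for (i = 0; i < words; i++) {
                    uint32_t w = bswap32(digest[i]);

                    memcpy(tag + i * 4, &w, sizeof(w));
            }
    }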
diff --git a/src/spdk/intel-ipsec-mb/avx512/mb_mgr_hmac_sha_224_flush_avx512.asm b/src/spdk/intel-ipsec-mb/avx512/mb_mgr_hmac_sha_224_flush_avx512.asm
new file mode 100644
index 000000000..656e854d5
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx512/mb_mgr_hmac_sha_224_flush_avx512.asm
@@ -0,0 +1,28 @@
+;;
+;; Copyright (c) 2017-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+%define SHA224
+%include "avx512/mb_mgr_hmac_sha_256_flush_avx512.asm"
diff --git a/src/spdk/intel-ipsec-mb/avx512/mb_mgr_hmac_sha_224_submit_avx512.asm b/src/spdk/intel-ipsec-mb/avx512/mb_mgr_hmac_sha_224_submit_avx512.asm
new file mode 100644
index 000000000..60a98918a
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx512/mb_mgr_hmac_sha_224_submit_avx512.asm
@@ -0,0 +1,28 @@
+;;
+;; Copyright (c) 2017-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+%define SHA224
+%include "avx512/mb_mgr_hmac_sha_256_submit_avx512.asm"
diff --git a/src/spdk/intel-ipsec-mb/avx512/mb_mgr_hmac_sha_256_flush_avx512.asm b/src/spdk/intel-ipsec-mb/avx512/mb_mgr_hmac_sha_256_flush_avx512.asm
new file mode 100644
index 000000000..023eb3454
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx512/mb_mgr_hmac_sha_256_flush_avx512.asm
@@ -0,0 +1,433 @@
+;;
+;; Copyright (c) 2017-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+;; In System V AMD64 ABI
+;; callee saves: RBX, RBP, R12-R15
+;; Windows x64 ABI
+;; callee saves: RBX, RBP, RDI, RSI, RSP, R12-R15
+;;
+;; Registers: RAX RBX RCX RDX RBP RSI RDI R8 R9 R10 R11 R12 R13 R14 R15
+;; -----------------------------------------------------------
+;; Windows clobbers: RAX RCX RDX R8 R9 R10 R11
+;; Windows preserves: RBX RBP RSI RDI R12 R13 R14 R15
+;; -----------------------------------------------------------
+;; Linux clobbers: RAX RCX RDX RSI RDI R8 R9 R10 R11
+;; Linux preserves: RBX RBP R12 R13 R14 R15
+;; -----------------------------------------------------------
+;; Clobbers ZMM0-31
+
+%include "include/os.asm"
+%include "job_aes_hmac.asm"
+%include "mb_mgr_datastruct.asm"
+%include "include/reg_sizes.asm"
+
+;; %define DO_DBGPRINT
+%include "include/dbgprint.asm"
+
+extern sha256_x16_avx512
+
+section .data
+default rel
+align 16
+byteswap:
+ dq 0x0405060700010203, 0x0c0d0e0f08090a0b
+
+align 32
+len_masks:
+ dq 0x000000000000FFFF, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000
+ dq 0x00000000FFFF0000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000
+ dq 0x0000FFFF00000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000
+ dq 0xFFFF000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000
+ dq 0x0000000000000000, 0x000000000000FFFF, 0x0000000000000000, 0x0000000000000000
+ dq 0x0000000000000000, 0x00000000FFFF0000, 0x0000000000000000, 0x0000000000000000
+ dq 0x0000000000000000, 0x0000FFFF00000000, 0x0000000000000000, 0x0000000000000000
+ dq 0x0000000000000000, 0xFFFF000000000000, 0x0000000000000000, 0x0000000000000000
+ dq 0x0000000000000000, 0x0000000000000000, 0x000000000000FFFF, 0x0000000000000000
+ dq 0x0000000000000000, 0x0000000000000000, 0x00000000FFFF0000, 0x0000000000000000
+ dq 0x0000000000000000, 0x0000000000000000, 0x0000FFFF00000000, 0x0000000000000000
+ dq 0x0000000000000000, 0x0000000000000000, 0xFFFF000000000000, 0x0000000000000000
+ dq 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x000000000000FFFF
+ dq 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x00000000FFFF0000
+ dq 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000FFFF00000000
+ dq 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0xFFFF000000000000
+
+lane_1: dq 1
+lane_2: dq 2
+lane_3: dq 3
+lane_4: dq 4
+lane_5: dq 5
+lane_6: dq 6
+lane_7: dq 7
+lane_8: dq 8
+lane_9: dq 9
+lane_10: dq 10
+lane_11: dq 11
+lane_12: dq 12
+lane_13: dq 13
+lane_14: dq 14
+lane_15: dq 15
+
+section .text
+
+%ifdef LINUX
+%define arg1 rdi
+%define arg2 rsi
+%define arg3 rdx
+%else
+%define arg1 rcx
+%define arg2 rdx
+%define arg3 rsi
+%endif
+
+%define state arg1
+%define job arg2
+%define len2 arg2
+
+
+; idx needs to be in rbp, r15
+%define idx rbp
+
+%define unused_lanes r10
+%define tmp5 r10
+
+%define lane_data rbx
+%define tmp2 rbx
+
+%define job_rax rax
+%define tmp1 rax
+%define size_offset rax
+%define start_offset rax
+
+%define tmp3 arg1
+
+%define extra_blocks arg2
+%define p arg2
+
+%define tmp4 arg3
+%define tmp r9
+
+%define len_upper r13
+%define idx_upper r14
+
+
+; we clobber rsi, rbp; called routine also clobbers rax, r9 to r15
+struc STACK
+_gpr_save: resq 8
+_rsp_save: resq 1
+endstruc
+
+%define APPEND(a,b) a %+ b
+
+; JOB* flush_job_hmac_sha_224_avx512(MB_MGR_HMAC_SHA_256_OOO *state)
+; JOB* flush_job_hmac_sha_256_avx512(MB_MGR_HMAC_SHA_256_OOO *state)
+; arg 1 : state
+align 32
+%ifdef SHA224
+MKGLOBAL(flush_job_hmac_sha_224_avx512,function,internal)
+flush_job_hmac_sha_224_avx512:
+%else
+MKGLOBAL(flush_job_hmac_sha_256_avx512,function,internal)
+flush_job_hmac_sha_256_avx512:
+%endif
+ mov rax, rsp
+ sub rsp, STACK_size
+ and rsp, -32
+ mov [rsp + _gpr_save + 8*0], rbx
+ mov [rsp + _gpr_save + 8*1], rbp
+ mov [rsp + _gpr_save + 8*2], r12
+ mov [rsp + _gpr_save + 8*3], r13
+ mov [rsp + _gpr_save + 8*4], r14
+ mov [rsp + _gpr_save + 8*5], r15
+%ifndef LINUX
+ mov [rsp + _gpr_save + 8*6], rsi
+ mov [rsp + _gpr_save + 8*7], rdi
+%endif
+ mov [rsp + _rsp_save], rax ; original SP
+
+        ; check whether all lanes are empty (no lanes in use)
+ cmp dword [state + _num_lanes_inuse_sha256], 0
+ jz return_null
+
+ ; find a lane with a non-null job
+ xor idx, idx
+
+%assign I 1
+%rep 15
+ cmp qword [state + _ldata_sha256 + (I * _HMAC_SHA1_LANE_DATA_size) + _job_in_lane], 0
+ cmovne idx, [rel APPEND(lane_,I)]
+%assign I (I+1)
+%endrep
+
+copy_lane_data:
+        ; copy valid lane (idx) data to empty lanes
+ vmovdqa ymm0, [state + _lens_sha256]
+ mov tmp, [state + _args_data_ptr_sha256 + PTR_SZ*idx]
+
+%assign I 0
+%rep 16
+ cmp qword [state + _ldata_sha256 + I * _HMAC_SHA1_LANE_DATA_size + _job_in_lane], 0
+ jne APPEND(skip_,I)
+ mov [state + _args_data_ptr_sha256 + PTR_SZ*I], tmp
+ vpor ymm0, ymm0, [rel len_masks + 32*I]
+APPEND(skip_,I):
+%assign I (I+1)
+%endrep
+
+ vmovdqa [state + _lens_sha256 ], ymm0
+
+ vphminposuw xmm1, xmm0
+ vpextrw DWORD(len2), xmm1, 0 ; min value
+ vpextrw DWORD(idx), xmm1, 1 ; min index (0...7)
+
+ vmovdqa xmm2, [state + _lens_sha256 + 8*2]
+ vphminposuw xmm3, xmm2
+ vpextrw DWORD(len_upper), xmm3, 0 ; min value
+ vpextrw DWORD(idx_upper), xmm3, 1 ; min index (8...F)
+
+ cmp len2, len_upper
+ jle use_min
+
+ vmovdqa xmm1, xmm3
+ mov len2, len_upper
+        mov     idx, idx_upper ; idx_upper is in range 0..7
+        add     idx, 8         ; adjust to reflect that the index is in the 8..F range
+
+use_min:
+ cmp len2, 0
+ je len_is_0
+
+ vpbroadcastw xmm1, xmm1 ; duplicate words across all lanes
+ vpsubw xmm0, xmm0, xmm1
+ vmovdqa [state + _lens_sha256], xmm0
+ vpsubw xmm2, xmm2, xmm1
+ vmovdqa [state + _lens_sha256 + 8*2], xmm2
+
+ ; "state" and "args" are the same address, arg1
+ ; len is arg2
+ call sha256_x16_avx512
+ ; state and idx are intact
+
+len_is_0:
+ ; process completed job "idx"
+ imul lane_data, idx, _HMAC_SHA1_LANE_DATA_size
+ lea lane_data, [state + _ldata_sha256 + lane_data]
+ mov DWORD(extra_blocks), [lane_data + _extra_blocks]
+ cmp extra_blocks, 0
+ jne proc_extra_blocks
+ cmp dword [lane_data + _outer_done], 0
+ jne end_loop
+
+proc_outer:
+ mov dword [lane_data + _outer_done], 1
+ mov DWORD(size_offset), [lane_data + _size_offset]
+ mov qword [lane_data + _extra_block + size_offset], 0
+ mov word [state + _lens_sha256 + 2*idx], 1
+ lea tmp, [lane_data + _outer_block]
+ mov [state + _args_data_ptr_sha256 + PTR_SZ*idx], tmp
+
+ vmovd xmm0, [state + _args_digest_sha256 + 4*idx + 0*SHA256_DIGEST_ROW_SIZE]
+ vpinsrd xmm0, xmm0, [state + _args_digest_sha256 + 4*idx + 1*SHA256_DIGEST_ROW_SIZE], 1
+ vpinsrd xmm0, xmm0, [state + _args_digest_sha256 + 4*idx + 2*SHA256_DIGEST_ROW_SIZE], 2
+ vpinsrd xmm0, xmm0, [state + _args_digest_sha256 + 4*idx + 3*SHA256_DIGEST_ROW_SIZE], 3
+ vpshufb xmm0, xmm0, [rel byteswap]
+ vmovd xmm1, [state + _args_digest_sha256 + 4*idx + 4*SHA256_DIGEST_ROW_SIZE]
+ vpinsrd xmm1, xmm1, [state + _args_digest_sha256 + 4*idx + 5*SHA256_DIGEST_ROW_SIZE], 1
+ vpinsrd xmm1, xmm1, [state + _args_digest_sha256 + 4*idx + 6*SHA256_DIGEST_ROW_SIZE], 2
+%ifndef SHA224
+ vpinsrd xmm1, xmm1, [state + _args_digest_sha256 + 4*idx + 7*SHA256_DIGEST_ROW_SIZE], 3
+%endif
+ vpshufb xmm1, xmm1, [rel byteswap]
+
+ vmovdqa [lane_data + _outer_block], xmm0
+ vmovdqa [lane_data + _outer_block + 4*4], xmm1
+%ifdef SHA224
+ mov dword [lane_data + _outer_block + 7*4], 0x80
+%endif
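+        ;; outer_block now holds the byte-swapped inner digest; for SHA-224
+        ;; only 28 bytes are valid, so the 0x80 padding byte is written right
+        ;; after them.  The lane length was set to 1 block above, so once the
+        ;; digest is re-seeded with the opad below, the jump back to
+        ;; copy_lane_data runs the single-block outer hash.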
+
+ mov job, [lane_data + _job_in_lane]
+ mov tmp, [job + _auth_key_xor_opad]
+ vmovdqu xmm0, [tmp]
+ vmovdqu xmm1, [tmp + 4*4]
+ vmovd [state + _args_digest_sha256 + 4*idx + 0*SHA256_DIGEST_ROW_SIZE], xmm0
+ vpextrd [state + _args_digest_sha256 + 4*idx + 1*SHA256_DIGEST_ROW_SIZE], xmm0, 1
+ vpextrd [state + _args_digest_sha256 + 4*idx + 2*SHA256_DIGEST_ROW_SIZE], xmm0, 2
+ vpextrd [state + _args_digest_sha256 + 4*idx + 3*SHA256_DIGEST_ROW_SIZE], xmm0, 3
+ vmovd [state + _args_digest_sha256 + 4*idx + 4*SHA256_DIGEST_ROW_SIZE], xmm1
+ vpextrd [state + _args_digest_sha256 + 4*idx + 5*SHA256_DIGEST_ROW_SIZE], xmm1, 1
+ vpextrd [state + _args_digest_sha256 + 4*idx + 6*SHA256_DIGEST_ROW_SIZE], xmm1, 2
+ vpextrd [state + _args_digest_sha256 + 4*idx + 7*SHA256_DIGEST_ROW_SIZE], xmm1, 3
+ jmp copy_lane_data
+
+ align 16
+proc_extra_blocks:
+ mov DWORD(start_offset), [lane_data + _start_offset]
+ mov [state + _lens_sha256 + 2*idx], WORD(extra_blocks)
+ lea tmp, [lane_data + _extra_block + start_offset]
+ mov [state + _args_data_ptr_sha256 + PTR_SZ*idx], tmp
+ mov dword [lane_data + _extra_blocks], 0
+ jmp copy_lane_data
+
+return_null:
+ xor job_rax, job_rax
+ jmp return
+
+ align 16
+end_loop:
+ mov job_rax, [lane_data + _job_in_lane]
+ mov qword [lane_data + _job_in_lane], 0
+ or dword [job_rax + _status], STS_COMPLETED_HMAC
+ mov unused_lanes, [state + _unused_lanes_sha256]
+ shl unused_lanes, 4
+ or unused_lanes, idx
+ mov [state + _unused_lanes_sha256], unused_lanes
+
+ sub dword [state + _num_lanes_inuse_sha256], 1
+
+ mov p, [job_rax + _auth_tag_output]
+
+%ifdef SHA224
+ cmp qword [job_rax + _auth_tag_output_len_in_bytes], 14
+ jne copy_full_digest
+%else
+ cmp qword [job_rax + _auth_tag_output_len_in_bytes], 16
+ jne copy_full_digest
+%endif
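+        ;; 14 and 16 bytes are the common truncated tag lengths (half of the
+        ;; 28-byte SHA-224 / 32-byte SHA-256 digest); any other requested
+        ;; length takes the full-digest copy path below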
+
+ ;; copy SHA224 14 bytes / SHA256 16 bytes
+ mov DWORD(tmp), [state + _args_digest_sha256 + 4*idx + 0*SHA256_DIGEST_ROW_SIZE]
+ mov DWORD(tmp2), [state + _args_digest_sha256 + 4*idx + 1*SHA256_DIGEST_ROW_SIZE]
+ mov DWORD(tmp4), [state + _args_digest_sha256 + 4*idx + 2*SHA256_DIGEST_ROW_SIZE]
+ mov DWORD(tmp5), [state + _args_digest_sha256 + 4*idx + 3*SHA256_DIGEST_ROW_SIZE]
+ bswap DWORD(tmp)
+ bswap DWORD(tmp2)
+ bswap DWORD(tmp4)
+ bswap DWORD(tmp5)
+ mov [p + 0*4], DWORD(tmp)
+ mov [p + 1*4], DWORD(tmp2)
+ mov [p + 2*4], DWORD(tmp4)
+%ifdef SHA224
+ mov [p + 3*4], WORD(tmp5)
+%else
+ mov [p + 3*4], DWORD(tmp5)
+%endif
+ jmp clear_ret
+
+copy_full_digest:
+ ;; copy SHA224 28 bytes / SHA256 32 bytes
+ mov DWORD(tmp), [state + _args_digest_sha256 + 4*idx + 0*SHA256_DIGEST_ROW_SIZE]
+ mov DWORD(tmp2), [state + _args_digest_sha256 + 4*idx + 1*SHA256_DIGEST_ROW_SIZE]
+ mov DWORD(tmp4), [state + _args_digest_sha256 + 4*idx + 2*SHA256_DIGEST_ROW_SIZE]
+ mov DWORD(tmp5), [state + _args_digest_sha256 + 4*idx + 3*SHA256_DIGEST_ROW_SIZE]
+ bswap DWORD(tmp)
+ bswap DWORD(tmp2)
+ bswap DWORD(tmp4)
+ bswap DWORD(tmp5)
+ mov [p + 0*4], DWORD(tmp)
+ mov [p + 1*4], DWORD(tmp2)
+ mov [p + 2*4], DWORD(tmp4)
+ mov [p + 3*4], DWORD(tmp5)
+
+ mov DWORD(tmp), [state + _args_digest_sha256 + 4*idx + 4*SHA256_DIGEST_ROW_SIZE]
+ mov DWORD(tmp2), [state + _args_digest_sha256 + 4*idx + 5*SHA256_DIGEST_ROW_SIZE]
+ mov DWORD(tmp4), [state + _args_digest_sha256 + 4*idx + 6*SHA256_DIGEST_ROW_SIZE]
+%ifndef SHA224
+ mov DWORD(tmp5), [state + _args_digest_sha256 + 4*idx + 7*SHA256_DIGEST_ROW_SIZE]
+%endif
+ bswap DWORD(tmp)
+ bswap DWORD(tmp2)
+ bswap DWORD(tmp4)
+%ifndef SHA224
+ bswap DWORD(tmp5)
+%endif
+ mov [p + 4*4], DWORD(tmp)
+ mov [p + 5*4], DWORD(tmp2)
+ mov [p + 6*4], DWORD(tmp4)
+%ifndef SHA224
+ mov [p + 7*4], DWORD(tmp5)
+%endif
+
+clear_ret:
+
+%ifdef SAFE_DATA
+ vpxorq zmm0, zmm0
+
+ ;; Clear digest (28B/32B), outer_block (28B/32B) and extra_block (64B)
+ ;; of returned job and NULL jobs
+%assign I 0
+%rep 16
+ cmp qword [state + _ldata_sha256 + (I*_HMAC_SHA1_LANE_DATA_size) + _job_in_lane], 0
+ jne APPEND(skip_clear_,I)
+
+        ;; Clear digest (28 bytes for SHA-224, 32 bytes for SHA-256)
+%assign J 0
+%rep 7
+ mov dword [state + _args_digest_sha256 + SHA256_DIGEST_WORD_SIZE*I + J*SHA256_DIGEST_ROW_SIZE], 0
+%assign J (J+1)
+%endrep
+%ifndef SHA224
+ mov dword [state + _args_digest_sha256 + SHA256_DIGEST_WORD_SIZE*I + 7*SHA256_DIGEST_ROW_SIZE], 0
+%endif
+
+ lea lane_data, [state + _ldata_sha256 + (I*_HMAC_SHA1_LANE_DATA_size)]
+ ;; Clear first 64 bytes of extra_block
+ vmovdqu64 [lane_data + _extra_block], zmm0
+
+ ;; Clear first 28 bytes (SHA-224) or 32 bytes (SHA-256) of outer_block
+%ifdef SHA224
+ vmovdqa64 [lane_data + _outer_block], xmm0
+ mov qword [lane_data + _outer_block + 16], 0
+ mov dword [lane_data + _outer_block + 24], 0
+%else
+ vmovdqu64 [lane_data + _outer_block], ymm0
+%endif
+
+APPEND(skip_clear_,I):
+%assign I (I+1)
+%endrep
+
+%endif ;; SAFE_DATA
+
+return:
+ vzeroupper
+
+ mov rbx, [rsp + _gpr_save + 8*0]
+ mov rbp, [rsp + _gpr_save + 8*1]
+ mov r12, [rsp + _gpr_save + 8*2]
+ mov r13, [rsp + _gpr_save + 8*3]
+ mov r14, [rsp + _gpr_save + 8*4]
+ mov r15, [rsp + _gpr_save + 8*5]
+%ifndef LINUX
+ mov rsi, [rsp + _gpr_save + 8*6]
+ mov rdi, [rsp + _gpr_save + 8*7]
+%endif
+ mov rsp, [rsp + _rsp_save] ; original SP
+
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/avx512/mb_mgr_hmac_sha_256_submit_avx512.asm b/src/spdk/intel-ipsec-mb/avx512/mb_mgr_hmac_sha_256_submit_avx512.asm
new file mode 100644
index 000000000..baadef492
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx512/mb_mgr_hmac_sha_256_submit_avx512.asm
@@ -0,0 +1,445 @@
+;;
+;; Copyright (c) 2017-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+;; In System V AMD64 ABI
+;;	callee saves: RBX, RBP, R12-R15
+;; Windows x64 ABI
+;;	callee saves: RBX, RBP, RDI, RSI, RSP, R12-R15
+;;
+;; Registers: RAX RBX RCX RDX RBP RSI RDI R8 R9 R10 R11 R12 R13 R14 R15
+;; -----------------------------------------------------------
+;; Windows clobbers: RAX RCX RDX R8 R9 R10 R11
+;; Windows preserves: RBX RBP RSI RDI R12 R13 R14 R15
+;; -----------------------------------------------------------
+;; Linux clobbers: RAX RCX RDX RSI RDI R8 R9 R10 R11
+;; Linux preserves: RBX RBP R12 R13 R14 R15
+;; -----------------------------------------------------------
+;; Clobbers ZMM0-31
+
+%include "include/os.asm"
+%include "job_aes_hmac.asm"
+%include "mb_mgr_datastruct.asm"
+%include "include/reg_sizes.asm"
+%include "include/memcpy.asm"
+
+;; %define DO_DBGPRINT
+%include "include/dbgprint.asm"
+
+extern sha256_x16_avx512
+
+section .data
+default rel
+
+align 16
+byteswap:
+ dq 0x0405060700010203, 0x0c0d0e0f08090a0b
+
+section .text
+
+%ifdef LINUX
+%define arg1 rdi
+%define arg2 rsi
+%define arg3 rcx
+%define arg4 rdx
+%else
+%define arg1 rcx
+%define arg2 rdx
+%define arg3 rdi
+%define arg4 rsi
+%endif
+
+%define state arg1
+%define job arg2
+%define len2 arg2
+
+; idx needs to be in rbp, r15
+%define last_len rbp
+%define idx rbp
+
+%define p r11
+%define start_offset r11
+
+%define unused_lanes rbx
+%define p2 rbx
+%define tmp4 rbx
+
+%define job_rax rax
+%define len rax
+
+%define size_offset arg3
+%define tmp2 arg3
+
+%define lane arg4
+%define tmp3 arg4
+
+%define extra_blocks r8
+%define tmp r9
+%define lane_data r10
+
+%define len_upper r13
+%define idx_upper r14
+
+; we clobber rbx, rsi, rdi, rbp; called routine also clobbers r9 to r15
+struc STACK
+_gpr_save: resq 8
+_rsp_save: resq 1
+endstruc
+
+; JOB* FUNC(MB_MGR_HMAC_SHA_256_OOO *state, JOB_AES_HMAC *job)
+; arg 1 : rcx : state
+; arg 2 : rdx : job
+ align 32
+%ifdef SHA224
+MKGLOBAL(submit_job_hmac_sha_224_avx512,function,internal)
+submit_job_hmac_sha_224_avx512:
+%else
+MKGLOBAL(submit_job_hmac_sha_256_avx512,function,internal)
+submit_job_hmac_sha_256_avx512:
+%endif
+ mov rax, rsp
+ sub rsp, STACK_size
+ and rsp, -32
+ mov [rsp + _gpr_save + 8*0], rbx
+ mov [rsp + _gpr_save + 8*1], rbp
+ mov [rsp + _gpr_save + 8*2], r12
+ mov [rsp + _gpr_save + 8*3], r13
+ mov [rsp + _gpr_save + 8*4], r14
+ mov [rsp + _gpr_save + 8*5], r15
+%ifndef LINUX
+ mov [rsp + _gpr_save + 8*6], rsi
+ mov [rsp + _gpr_save + 8*7], rdi
+%endif
+ mov [rsp + _rsp_save], rax ; original SP
+
+ mov unused_lanes, [state + _unused_lanes_sha256]
+ mov lane, unused_lanes
+ and lane, 0xF ;; just a nibble
+ shr unused_lanes, 4
+
+ imul lane_data, lane, _HMAC_SHA1_LANE_DATA_size
+ lea lane_data, [state + _ldata_sha256 + lane_data]
+ mov [state + _unused_lanes_sha256], unused_lanes
+
+ add dword [state + _num_lanes_inuse_sha256], 1
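+        ;; _unused_lanes_sha256 is a stack of free lane indices packed one
+        ;; nibble per lane: the pop above takes the low nibble and shifts the
+        ;; rest down; end_loop pushes a lane back with shl 4 / or idx.
+        ;; _num_lanes_inuse_sha256 tracks occupancy separately, since 16
+        ;; nibble entries already fill the whole 64-bit value.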
+
+ mov len, [job + _msg_len_to_hash_in_bytes]
+ mov tmp, len
+ shr tmp, 6 ; divide by 64, len in terms of blocks
+
+ mov [lane_data + _job_in_lane], job
+ mov dword [lane_data + _outer_done], 0
+ mov [state + _lens_sha256 + 2*lane], WORD(tmp)
+
+ mov last_len, len
+ and last_len, 63
+ lea extra_blocks, [last_len + 9 + 63]
+ shr extra_blocks, 6
+ mov [lane_data + _extra_blocks], DWORD(extra_blocks)
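+        ;; extra_blocks = ceil((last_len + 9) / 64): the message tail plus the
+        ;; 0x80 pad byte plus the 8-byte bit-length field.
+        ;; e.g. last_len = 10 -> 1 extra block, last_len = 56 -> 2 extra blocks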
+
+ mov p, [job + _src]
+ add p, [job + _hash_start_src_offset_in_bytes]
+ mov [state + _args_data_ptr_sha256 + PTR_SZ*lane], p
+
+ cmp len, 64
+ jb copy_lt64
+
+fast_copy:
+ vmovdqu32 zmm0, [p - 64 + len]
+ vmovdqu32 [lane_data + _extra_block], zmm0
+end_fast_copy:
+
+ mov size_offset, extra_blocks
+ shl size_offset, 6
+ sub size_offset, last_len
+ add size_offset, 64-8
+ mov [lane_data + _size_offset], DWORD(size_offset)
+ mov start_offset, 64
+ sub start_offset, last_len
+ mov [lane_data + _start_offset], DWORD(start_offset)
+
+ lea tmp, [8*64 + 8*len]
+ bswap tmp
+ mov [lane_data + _extra_block + size_offset], tmp
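+        ;; the inner hash covers the 64-byte ipad block plus the message, so
+        ;; its length field is 8*(64 + len) bits, stored big-endian (bswap) in
+        ;; the last 8 bytes of the padded region at size_offset; start_offset
+        ;; is where the message tail begins inside extra_block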
+
+ mov tmp, [job + _auth_key_xor_ipad]
+ vmovdqu xmm0, [tmp]
+ vmovdqu xmm1, [tmp + 4*4]
+ vmovd [state + _args_digest_sha256 + 4*lane + 0*SHA256_DIGEST_ROW_SIZE], xmm0
+ vpextrd [state + _args_digest_sha256 + 4*lane + 1*SHA256_DIGEST_ROW_SIZE], xmm0, 1
+ vpextrd [state + _args_digest_sha256 + 4*lane + 2*SHA256_DIGEST_ROW_SIZE], xmm0, 2
+ vpextrd [state + _args_digest_sha256 + 4*lane + 3*SHA256_DIGEST_ROW_SIZE], xmm0, 3
+ vmovd [state + _args_digest_sha256 + 4*lane + 4*SHA256_DIGEST_ROW_SIZE], xmm1
+ vpextrd [state + _args_digest_sha256 + 4*lane + 5*SHA256_DIGEST_ROW_SIZE], xmm1, 1
+ vpextrd [state + _args_digest_sha256 + 4*lane + 6*SHA256_DIGEST_ROW_SIZE], xmm1, 2
+ vpextrd [state + _args_digest_sha256 + 4*lane + 7*SHA256_DIGEST_ROW_SIZE], xmm1, 3
+
+ test len, ~63
+ jnz ge64_bytes
+
+lt64_bytes:
+ mov [state + _lens_sha256 + 2*lane], WORD(extra_blocks)
+ lea tmp, [lane_data + _extra_block + start_offset]
+ mov [state + _args_data_ptr_sha256 + PTR_SZ*lane], tmp
+ mov dword [lane_data + _extra_blocks], 0
+
+ge64_bytes:
+ cmp dword [state + _num_lanes_inuse_sha256], 0x10 ; all 16 lanes used?
+ jne return_null
+ jmp start_loop
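+        ;; hashing only starts once all 16 lanes are occupied; until then the
+        ;; submit path returns NULL and jobs stay queued in the OOO manager
+        ;; (or get completed by the flush routine)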
+
+ align 16
+start_loop:
+ ; Find min length
+ vmovdqa xmm0, [state + _lens_sha256]
+ vphminposuw xmm1, xmm0
+ vpextrw DWORD(len2), xmm1, 0 ; min value
+ vpextrw DWORD(idx), xmm1, 1 ; min index (0...7)
+
+ vmovdqa xmm2, [state + _lens_sha256 + 8*2]
+ vphminposuw xmm3, xmm2
+ vpextrw DWORD(len_upper), xmm3, 0 ; min value
+ vpextrw DWORD(idx_upper), xmm3, 1 ; min index (8...F)
+
+ cmp len2, len_upper
+ jle use_min
+
+ vmovdqa xmm1, xmm3
+ mov len2, len_upper
+ mov idx, idx_upper ; idx is in range 0..7
+ add idx, 8 ; to reflect that real index is in 8..F range
+use_min:
+ cmp len2, 0
+ je len_is_0
+
+ vpbroadcastw xmm1, xmm1 ; duplicate words across all lanes
+ vpsubw xmm0, xmm0, xmm1
+ vmovdqa [state + _lens_sha256 + 0*2], xmm0
+ vpsubw xmm2, xmm2, xmm1
+ vmovdqa [state + _lens_sha256 + 8*2], xmm2
+
+ ; "state" and "args" are the same address, arg1
+ ; len is arg2
+ call sha256_x16_avx512
+ ; state and idx are intact
+
+len_is_0:
+ ; process completed job "idx"
+ imul lane_data, idx, _HMAC_SHA1_LANE_DATA_size
+ lea lane_data, [state + _ldata_sha256 + lane_data]
+ mov DWORD(extra_blocks), [lane_data + _extra_blocks]
+ cmp extra_blocks, 0
+ jne proc_extra_blocks
+ cmp dword [lane_data + _outer_done], 0
+ jne end_loop
+
+proc_outer:
+ mov dword [lane_data + _outer_done], 1
+ mov DWORD(size_offset), [lane_data + _size_offset]
+ mov qword [lane_data + _extra_block + size_offset], 0
+ mov word [state + _lens_sha256 + 2*idx], 1
+ lea tmp, [lane_data + _outer_block]
+ mov job, [lane_data + _job_in_lane]
+ mov [state + _args_data_ptr_sha256 + PTR_SZ*idx], tmp
+
+ vmovd xmm0, [state + _args_digest_sha256 + 4*idx + 0*SHA256_DIGEST_ROW_SIZE]
+ vpinsrd xmm0, xmm0, [state + _args_digest_sha256 + 4*idx + 1*SHA256_DIGEST_ROW_SIZE], 1
+ vpinsrd xmm0, xmm0, [state + _args_digest_sha256 + 4*idx + 2*SHA256_DIGEST_ROW_SIZE], 2
+ vpinsrd xmm0, xmm0, [state + _args_digest_sha256 + 4*idx + 3*SHA256_DIGEST_ROW_SIZE], 3
+ vpshufb xmm0, xmm0, [rel byteswap]
+ vmovd xmm1, [state + _args_digest_sha256 + 4*idx + 4*SHA256_DIGEST_ROW_SIZE]
+ vpinsrd xmm1, xmm1, [state + _args_digest_sha256 + 4*idx + 5*SHA256_DIGEST_ROW_SIZE], 1
+ vpinsrd xmm1, xmm1, [state + _args_digest_sha256 + 4*idx + 6*SHA256_DIGEST_ROW_SIZE], 2
+%ifndef SHA224
+ vpinsrd xmm1, xmm1, [state + _args_digest_sha256 + 4*idx + 7*SHA256_DIGEST_ROW_SIZE], 3
+%endif
+ vpshufb xmm1, xmm1, [rel byteswap]
+ vmovdqa [lane_data + _outer_block], xmm0
+ vmovdqa [lane_data + _outer_block + 4*4], xmm1
+%ifdef SHA224
+ mov dword [lane_data + _outer_block + 7*4], 0x80
+%endif
+
+ mov tmp, [job + _auth_key_xor_opad]
+ vmovdqu xmm0, [tmp]
+ vmovdqu xmm1, [tmp + 4*4]
+ vmovd [state + _args_digest_sha256 + 4*idx + 0*SHA256_DIGEST_ROW_SIZE], xmm0
+ vpextrd [state + _args_digest_sha256 + 4*idx + 1*SHA256_DIGEST_ROW_SIZE], xmm0, 1
+ vpextrd [state + _args_digest_sha256 + 4*idx + 2*SHA256_DIGEST_ROW_SIZE], xmm0, 2
+ vpextrd [state + _args_digest_sha256 + 4*idx + 3*SHA256_DIGEST_ROW_SIZE], xmm0, 3
+ vmovd [state + _args_digest_sha256 + 4*idx + 4*SHA256_DIGEST_ROW_SIZE], xmm1
+ vpextrd [state + _args_digest_sha256 + 4*idx + 5*SHA256_DIGEST_ROW_SIZE], xmm1, 1
+ vpextrd [state + _args_digest_sha256 + 4*idx + 6*SHA256_DIGEST_ROW_SIZE], xmm1, 2
+ vpextrd [state + _args_digest_sha256 + 4*idx + 7*SHA256_DIGEST_ROW_SIZE], xmm1, 3
+
+ jmp start_loop
+
+ align 16
+proc_extra_blocks:
+ mov DWORD(start_offset), [lane_data + _start_offset]
+ mov [state + _lens_sha256 + 2*idx], WORD(extra_blocks)
+ lea tmp, [lane_data + _extra_block + start_offset]
+ mov [state + _args_data_ptr_sha256 + 8*idx], tmp
+ mov dword [lane_data + _extra_blocks], 0
+ jmp start_loop
+
+ align 16
+copy_lt64:
+        ;; less than one full message block of data
+        ;; p points at the start of the source data
+        ;; copy into the tail of extra_block so the data ends exactly where
+        ;; the 0x80 pad byte was pre-populated
+ lea p2, [lane_data + _extra_block + 64]
+ sub p2, len
+ memcpy_avx2_64_1 p2, p, len, tmp, tmp2, ymm0, ymm1
+ mov unused_lanes, [state + _unused_lanes_sha256]
+ jmp end_fast_copy
+
+return_null:
+ xor job_rax, job_rax
+ jmp return
+
+ align 16
+end_loop:
+ mov job_rax, [lane_data + _job_in_lane]
+ mov unused_lanes, [state + _unused_lanes_sha256]
+ mov qword [lane_data + _job_in_lane], 0
+ or dword [job_rax + _status], STS_COMPLETED_HMAC
+ shl unused_lanes, 4
+ or unused_lanes, idx
+ mov [state + _unused_lanes_sha256], unused_lanes
+ sub dword [state + _num_lanes_inuse_sha256], 1
+
+ vzeroupper
+
+ mov p, [job_rax + _auth_tag_output]
+
+%ifdef SHA224
+ cmp qword [job_rax + _auth_tag_output_len_in_bytes], 14
+ jne copy_full_digest
+%else
+ cmp qword [job_rax + _auth_tag_output_len_in_bytes], 16
+ jne copy_full_digest
+%endif
+
+ ;; copy 14 bytes for SHA224 // 16 bytes for SHA256
+ mov DWORD(tmp), [state + _args_digest_sha256 + 4*idx + 0*SHA256_DIGEST_ROW_SIZE]
+ mov DWORD(tmp2), [state + _args_digest_sha256 + 4*idx + 1*SHA256_DIGEST_ROW_SIZE]
+ mov DWORD(tmp3), [state + _args_digest_sha256 + 4*idx + 2*SHA256_DIGEST_ROW_SIZE]
+ mov DWORD(tmp4), [state + _args_digest_sha256 + 4*idx + 3*SHA256_DIGEST_ROW_SIZE]
+ bswap DWORD(tmp)
+ bswap DWORD(tmp2)
+ bswap DWORD(tmp3)
+ bswap DWORD(tmp4)
+ mov [p + 0*4], DWORD(tmp)
+ mov [p + 1*4], DWORD(tmp2)
+ mov [p + 2*4], DWORD(tmp3)
+%ifdef SHA224
+ mov [p + 3*4], WORD(tmp4)
+%else
+ mov [p + 3*4], DWORD(tmp4)
+%endif
+ jmp clear_ret
+copy_full_digest:
+ ;; copy 28 bytes for SHA224 // 32 bytes for SHA256
+ mov DWORD(tmp), [state + _args_digest_sha256 + 4*idx + 0*SHA256_DIGEST_ROW_SIZE]
+ mov DWORD(tmp2), [state + _args_digest_sha256 + 4*idx + 1*SHA256_DIGEST_ROW_SIZE]
+ mov DWORD(tmp3), [state + _args_digest_sha256 + 4*idx + 2*SHA256_DIGEST_ROW_SIZE]
+ mov DWORD(tmp4), [state + _args_digest_sha256 + 4*idx + 3*SHA256_DIGEST_ROW_SIZE]
+ bswap DWORD(tmp)
+ bswap DWORD(tmp2)
+ bswap DWORD(tmp3)
+ bswap DWORD(tmp4)
+ mov [p + 0*4], DWORD(tmp)
+ mov [p + 1*4], DWORD(tmp2)
+ mov [p + 2*4], DWORD(tmp3)
+ mov [p + 3*4], DWORD(tmp4)
+
+ mov DWORD(tmp), [state + _args_digest_sha256 + 4*idx + 4*SHA256_DIGEST_ROW_SIZE]
+ mov DWORD(tmp2), [state + _args_digest_sha256 + 4*idx + 5*SHA256_DIGEST_ROW_SIZE]
+ mov DWORD(tmp3), [state + _args_digest_sha256 + 4*idx + 6*SHA256_DIGEST_ROW_SIZE]
+%ifndef SHA224
+ mov DWORD(tmp4), [state + _args_digest_sha256 + 4*idx + 7*SHA256_DIGEST_ROW_SIZE]
+%endif
+ bswap DWORD(tmp)
+ bswap DWORD(tmp2)
+ bswap DWORD(tmp3)
+%ifndef SHA224
+ bswap DWORD(tmp4)
+%endif
+ mov [p + 4*4], DWORD(tmp)
+ mov [p + 5*4], DWORD(tmp2)
+ mov [p + 6*4], DWORD(tmp3)
+%ifndef SHA224
+ mov [p + 7*4], DWORD(tmp4)
+%endif
+
+clear_ret:
+
+%ifdef SAFE_DATA
+ ;; Clear digest (28B/32B), outer_block (28B/32B) and extra_block (64B) of returned job
+%assign J 0
+%rep 7
+ mov dword [state + _args_digest_sha256 + SHA256_DIGEST_WORD_SIZE*idx + J*SHA256_DIGEST_ROW_SIZE], 0
+%assign J (J+1)
+%endrep
+%ifndef SHA224
+ mov dword [state + _args_digest_sha256 + SHA256_DIGEST_WORD_SIZE*idx + 7*SHA256_DIGEST_ROW_SIZE], 0
+%endif
+
+ vpxorq zmm0, zmm0
+ imul lane_data, idx, _HMAC_SHA1_LANE_DATA_size
+ lea lane_data, [state + _ldata_sha256 + lane_data]
+ ;; Clear first 64 bytes of extra_block
+ vmovdqu64 [lane_data + _extra_block], zmm0
+
+ ;; Clear first 28 bytes (SHA-224) or 32 bytes (SHA-256) of outer_block
+%ifdef SHA224
+ vmovdqa64 [lane_data + _outer_block], xmm0
+ mov qword [lane_data + _outer_block + 16], 0
+ mov dword [lane_data + _outer_block + 24], 0
+%else
+ vmovdqu64 [lane_data + _outer_block], ymm0
+%endif
+%endif ;; SAFE_DATA
+
+return:
+ mov rbx, [rsp + _gpr_save + 8*0]
+ mov rbp, [rsp + _gpr_save + 8*1]
+ mov r12, [rsp + _gpr_save + 8*2]
+ mov r13, [rsp + _gpr_save + 8*3]
+ mov r14, [rsp + _gpr_save + 8*4]
+ mov r15, [rsp + _gpr_save + 8*5]
+%ifndef LINUX
+ mov rsi, [rsp + _gpr_save + 8*6]
+ mov rdi, [rsp + _gpr_save + 8*7]
+%endif
+ mov rsp, [rsp + _rsp_save] ; original SP
+
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/avx512/mb_mgr_hmac_sha_384_flush_avx512.asm b/src/spdk/intel-ipsec-mb/avx512/mb_mgr_hmac_sha_384_flush_avx512.asm
new file mode 100644
index 000000000..698052730
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx512/mb_mgr_hmac_sha_384_flush_avx512.asm
@@ -0,0 +1,29 @@
+;;
+;; Copyright (c) 2017-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%define SHA384
+%include "avx512/mb_mgr_hmac_sha_512_flush_avx512.asm"
diff --git a/src/spdk/intel-ipsec-mb/avx512/mb_mgr_hmac_sha_384_submit_avx512.asm b/src/spdk/intel-ipsec-mb/avx512/mb_mgr_hmac_sha_384_submit_avx512.asm
new file mode 100644
index 000000000..0e9f611de
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx512/mb_mgr_hmac_sha_384_submit_avx512.asm
@@ -0,0 +1,29 @@
+;;
+;; Copyright (c) 2017-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%define SHA384
+%include "avx512/mb_mgr_hmac_sha_512_submit_avx512.asm"
diff --git a/src/spdk/intel-ipsec-mb/avx512/mb_mgr_hmac_sha_512_flush_avx512.asm b/src/spdk/intel-ipsec-mb/avx512/mb_mgr_hmac_sha_512_flush_avx512.asm
new file mode 100644
index 000000000..7d7e56b40
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx512/mb_mgr_hmac_sha_512_flush_avx512.asm
@@ -0,0 +1,384 @@
+;;
+;; Copyright (c) 2017-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+;; In System V AMD64 ABI
+;;	callee saves: RBX, RBP, R12-R15
+;; Windows x64 ABI
+;;	callee saves: RBX, RBP, RDI, RSI, R12-R15
+;;
+;; Clobbers ZMM0-31
+
+%include "include/os.asm"
+%include "job_aes_hmac.asm"
+%include "mb_mgr_datastruct.asm"
+%include "include/reg_sizes.asm"
+
+extern sha512_x8_avx512
+
+section .data
+default rel
+
+align 16
+dupw: ;ddq 0x01000100010001000100010001000100
+ dq 0x0100010001000100, 0x0100010001000100
+
+align 16
+byteswap: ;ddq 0x08090a0b0c0d0e0f0001020304050607
+ dq 0x0001020304050607, 0x08090a0b0c0d0e0f
+
+align 16
+len_masks:
+ ;ddq 0x0000000000000000000000000000FFFF
+ dq 0x000000000000FFFF, 0x0000000000000000
+ ;ddq 0x000000000000000000000000FFFF0000
+ dq 0x00000000FFFF0000, 0x0000000000000000
+ ;ddq 0x00000000000000000000FFFF00000000
+ dq 0x0000FFFF00000000, 0x0000000000000000
+ ;ddq 0x0000000000000000FFFF000000000000
+ dq 0xFFFF000000000000, 0x0000000000000000
+ ;ddq 0x000000000000FFFF0000000000000000
+ dq 0x0000000000000000, 0x000000000000FFFF
+ ;ddq 0x00000000FFFF00000000000000000000
+ dq 0x0000000000000000, 0x00000000FFFF0000
+ ;ddq 0x0000FFFF000000000000000000000000
+ dq 0x0000000000000000, 0x0000FFFF00000000
+ ;ddq 0xFFFF0000000000000000000000000000
+ dq 0x0000000000000000, 0xFFFF000000000000
+
+lane_1: dq 1
+lane_2: dq 2
+lane_3: dq 3
+lane_4: dq 4
+lane_5: dq 5
+lane_6: dq 6
+lane_7: dq 7
+
+section .text
+
+%ifdef LINUX
+%define arg1 rdi
+%define arg2 rsi
+%else
+%define arg1 rcx
+%define arg2 rdx
+%endif
+
+%define state arg1
+%define job arg2
+%define len2 arg2
+
+
+; idx needs to be in rbp
+%define idx rbp
+
+%define unused_lanes rbx
+%define lane_data rbx
+%define tmp2 rbx
+
+%define job_rax rax
+
+%define size_offset rax
+%define tmp rax
+%define start_offset rax
+
+%define extra_blocks arg2
+%define p arg2
+
+%define tmp4 r8
+%define tmp5 r9
+%define tmp6 r10
+
+struc STACK
+_gpr_save: resq 7 ; rbx, rbp, r12-r15, rdi (windows)
+_rsp_save: resq 1
+endstruc
+
+%define APPEND(a,b) a %+ b
+
+%ifndef SHA384
+; JOB* flush_job_hmac_sha_512_avx512(MB_MGR_HMAC_SHA_512_OOO *state)
+; arg 1 : state
+%define SHA_X_DIGEST_SIZE 512
+MKGLOBAL(flush_job_hmac_sha_512_avx512,function,internal)
+align 64
+flush_job_hmac_sha_512_avx512:
+%else
+; JOB* flush_job_hmac_sha_384_avx512(MB_MGR_HMAC_SHA_512_OOO *state)
+; arg 1 : state
+%define SHA_X_DIGEST_SIZE 384
+MKGLOBAL(flush_job_hmac_sha_384_avx512,function,internal)
+align 64
+flush_job_hmac_sha_384_avx512:
+%endif
+ mov rax, rsp
+ sub rsp, STACK_size
+ and rsp, -32
+ mov [rsp + _gpr_save + 8*0], rbx
+ mov [rsp + _gpr_save + 8*1], rbp
+ mov [rsp + _gpr_save + 8*2], r12
+ mov [rsp + _gpr_save + 8*3], r13
+ mov [rsp + _gpr_save + 8*4], r14
+ mov [rsp + _gpr_save + 8*5], r15
+%ifndef LINUX
+ mov [rsp + _gpr_save + 8*6], rdi
+%endif
+ mov [rsp + _rsp_save], rax ; original SP
+
+ mov unused_lanes, [state + _unused_lanes_sha512]
+ bt unused_lanes, 32+3
+ jc return_null
+
+ ; find a lane with a non-null job
+ xor idx, idx
+%assign I 1
+%rep 7
+ cmp qword [state + _ldata_sha512 + I * _SHA512_LANE_DATA_size + _job_in_lane_sha512], 0
+ cmovne idx, [rel APPEND(lane_, I)]
+%assign I (I+1)
+%endrep
+
+copy_lane_data:
+ ; copy good lane (idx) to empty lanes
+ vmovdqa xmm0, [state + _lens_sha512]
+ mov tmp, [state + _args_sha512 + _data_ptr_sha512 + PTR_SZ*idx]
+
+%assign I 0
+%rep 8
+ cmp qword [state + _ldata_sha512 + I * _SHA512_LANE_DATA_size + _job_in_lane_sha512], 0
+ jne APPEND(skip_,I)
+ mov [state + _args_sha512 + _data_ptr_sha512 + PTR_SZ*I], tmp
+ vpor xmm0, xmm0, [rel len_masks + 16*I]
+APPEND(skip_,I):
+%assign I (I+1)
+%endrep
+
+ vmovdqa [state + _lens_sha512], xmm0
+
+ vphminposuw xmm1, xmm0
+ vpextrw DWORD(len2), xmm1, 0 ; min value
+ vpextrw DWORD(idx), xmm1, 1 ; min index (0...7)
+ cmp len2, 0
+ je len_is_0
+
+ vpshufb xmm1, [rel dupw] ; duplicate words across all 8 lanes
+ vpsubw xmm0, xmm0, xmm1
+ vmovdqa [state + _lens_sha512], xmm0
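+	;; the dupw mask broadcasts word 0 of xmm1 (the minimum length) to all
+	;; 8 words, so subtracting it leaves the selected lane at 0 and shrinks
+	;; every other lane by the number of blocks about to be processed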
+
+ ; "state" and "args" are the same address, arg1
+ ; len is arg2
+ call sha512_x8_avx512
+ ; state and idx are intact
+
+len_is_0:
+ ; process completed job "idx"
+ imul lane_data, idx, _SHA512_LANE_DATA_size
+ lea lane_data, [state + _ldata_sha512 + lane_data]
+ mov DWORD(extra_blocks), [lane_data + _extra_blocks_sha512]
+ cmp extra_blocks, 0
+ jne proc_extra_blocks
+ cmp dword [lane_data + _outer_done_sha512], 0
+ jne end_loop
+
+proc_outer:
+ mov dword [lane_data + _outer_done_sha512], 1
+ mov DWORD(size_offset), [lane_data + _size_offset_sha512]
+ mov qword [lane_data + _extra_block_sha512 + size_offset], 0
+ mov word [state + _lens_sha512 + 2*idx], 1
+ lea tmp, [lane_data + _outer_block_sha512]
+ mov job, [lane_data + _job_in_lane_sha512]
+ mov [state + _args_data_ptr_sha512 + PTR_SZ*idx], tmp
+
+ ; move digest into data location
+ %assign I 0
+ %rep (SHA_X_DIGEST_SIZE / (8*16))
+ vmovq xmm0, [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 2*I*SHA512_DIGEST_ROW_SIZE]
+ vpinsrq xmm0, [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + (2*I + 1)*SHA512_DIGEST_ROW_SIZE], 1
+ vpshufb xmm0, [rel byteswap]
+ vmovdqa [lane_data + _outer_block_sha512 + I*2*SHA512_DIGEST_WORD_SIZE], xmm0
+ %assign I (I+1)
+ %endrep
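+        ;; each iteration moves 128 bits (two 64-bit digest words), so the
+        ;; loop runs SHA_X_DIGEST_SIZE/128 times: 4 for SHA-512 (64 bytes)
+        ;; and 3 for SHA-384 (48 bytes), exactly what the outer hash consumes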
+
+ ; move the opad key into digest
+ mov tmp, [job + _auth_key_xor_opad]
+
+ %assign I 0
+ %rep 4
+ vmovdqu xmm0, [tmp + I * 16]
+ vmovq [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + (2*I + 0)*SHA512_DIGEST_ROW_SIZE], xmm0
+ vpextrq [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + (2*I + 1)*SHA512_DIGEST_ROW_SIZE], xmm0, 1
+ %assign I (I+1)
+ %endrep
+
+ jmp copy_lane_data
+
+ align 32
+proc_extra_blocks:
+ mov DWORD(start_offset), [lane_data + _start_offset_sha512]
+ mov [state + _lens_sha512 + 2*idx], WORD(extra_blocks)
+ lea tmp, [lane_data + _extra_block_sha512 + start_offset]
+ mov [state + _args_data_ptr_sha512 + PTR_SZ*idx], tmp
+ mov dword [lane_data + _extra_blocks_sha512], 0
+ jmp copy_lane_data
+
+return_null:
+ xor job_rax, job_rax
+ jmp return
+
+ align 32
+end_loop:
+ mov job_rax, [lane_data + _job_in_lane_sha512]
+ mov qword [lane_data + _job_in_lane_sha512], 0
+ or dword [job_rax + _status], STS_COMPLETED_HMAC
+ mov unused_lanes, [state + _unused_lanes_sha512]
+ shl unused_lanes, 4
+ or unused_lanes, idx
+ mov [state + _unused_lanes_sha512], unused_lanes
+
+ mov p, [job_rax + _auth_tag_output]
+
+%if (SHA_X_DIGEST_SIZE != 384)
+ cmp qword [job_rax + _auth_tag_output_len_in_bytes], 32
+ jne copy_full_digest
+%else
+ cmp qword [job_rax + _auth_tag_output_len_in_bytes], 24
+ jne copy_full_digest
+%endif
+ ;; copy 32 bytes for SHA512 / 24 bytes for SHA384
+ mov QWORD(tmp2), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 0*SHA512_DIGEST_ROW_SIZE]
+ mov QWORD(tmp4), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 1*SHA512_DIGEST_ROW_SIZE]
+ mov QWORD(tmp6), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 2*SHA512_DIGEST_ROW_SIZE]
+%if (SHA_X_DIGEST_SIZE != 384)
+ mov QWORD(tmp5), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 3*SHA512_DIGEST_ROW_SIZE]
+%endif
+ bswap QWORD(tmp2)
+ bswap QWORD(tmp4)
+ bswap QWORD(tmp6)
+%if (SHA_X_DIGEST_SIZE != 384)
+ bswap QWORD(tmp5)
+%endif
+ mov [p + 0*8], QWORD(tmp2)
+ mov [p + 1*8], QWORD(tmp4)
+ mov [p + 2*8], QWORD(tmp6)
+%if (SHA_X_DIGEST_SIZE != 384)
+ mov [p + 3*8], QWORD(tmp5)
+%endif
+ jmp clear_ret
+
+copy_full_digest:
+ ;; copy 64 bytes for SHA512 / 48 bytes for SHA384
+ mov QWORD(tmp2), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 0*SHA512_DIGEST_ROW_SIZE]
+ mov QWORD(tmp4), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 1*SHA512_DIGEST_ROW_SIZE]
+ mov QWORD(tmp6), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 2*SHA512_DIGEST_ROW_SIZE]
+ mov QWORD(tmp5), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 3*SHA512_DIGEST_ROW_SIZE]
+ bswap QWORD(tmp2)
+ bswap QWORD(tmp4)
+ bswap QWORD(tmp6)
+ bswap QWORD(tmp5)
+ mov [p + 0*8], QWORD(tmp2)
+ mov [p + 1*8], QWORD(tmp4)
+ mov [p + 2*8], QWORD(tmp6)
+ mov [p + 3*8], QWORD(tmp5)
+
+ mov QWORD(tmp2), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 4*SHA512_DIGEST_ROW_SIZE]
+ mov QWORD(tmp4), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 5*SHA512_DIGEST_ROW_SIZE]
+%if (SHA_X_DIGEST_SIZE != 384)
+ mov QWORD(tmp6), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 6*SHA512_DIGEST_ROW_SIZE]
+ mov QWORD(tmp5), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 7*SHA512_DIGEST_ROW_SIZE]
+%endif
+ bswap QWORD(tmp2)
+ bswap QWORD(tmp4)
+%if (SHA_X_DIGEST_SIZE != 384)
+ bswap QWORD(tmp6)
+ bswap QWORD(tmp5)
+%endif
+ mov [p + 4*8], QWORD(tmp2)
+ mov [p + 5*8], QWORD(tmp4)
+%if (SHA_X_DIGEST_SIZE != 384)
+ mov [p + 6*8], QWORD(tmp6)
+ mov [p + 7*8], QWORD(tmp5)
+%endif
+
+clear_ret:
+
+%ifdef SAFE_DATA
+ vpxorq zmm0, zmm0
+
+ ;; Clear digest (48B/64B), outer_block (48B/64B) and extra_block (128B) of returned job
+%assign I 0
+%rep 8
+ cmp qword [state + _ldata_sha512 + (I*_SHA512_LANE_DATA_size) + _job_in_lane_sha512], 0
+ jne APPEND(skip_clear_,I)
+
+        ;; Clear digest (48 bytes for SHA-384, 64 bytes for SHA-512)
+%assign J 0
+%rep 6
+ mov qword [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*I + J*SHA512_DIGEST_ROW_SIZE], 0
+%assign J (J+1)
+%endrep
+%if (SHA_X_DIGEST_SIZE != 384)
+ mov qword [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*I + 6*SHA512_DIGEST_ROW_SIZE], 0
+ mov qword [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*I + 7*SHA512_DIGEST_ROW_SIZE], 0
+%endif
+
+ lea lane_data, [state + _ldata_sha512 + (I*_SHA512_LANE_DATA_size)]
+ ;; Clear first 128 bytes of extra_block
+ vmovdqu64 [lane_data + _extra_block], zmm0
+ vmovdqu64 [lane_data + _extra_block + 64], zmm0
+
+ ;; Clear first 48 bytes (SHA-384) or 64 bytes (SHA-512) of outer_block
+%if (SHA_X_DIGEST_SIZE == 384)
+ vmovdqu64 [lane_data + _outer_block], ymm0
+ vmovdqa64 [lane_data + _outer_block + 32], xmm0
+%else
+ vmovdqu64 [lane_data + _outer_block], zmm0
+%endif
+
+APPEND(skip_clear_,I):
+%assign I (I+1)
+%endrep
+
+%endif ;; SAFE_DATA
+
+return:
+ vzeroupper
+
+ mov rbx, [rsp + _gpr_save + 8*0]
+ mov rbp, [rsp + _gpr_save + 8*1]
+ mov r12, [rsp + _gpr_save + 8*2]
+ mov r13, [rsp + _gpr_save + 8*3]
+ mov r14, [rsp + _gpr_save + 8*4]
+ mov r15, [rsp + _gpr_save + 8*5]
+%ifndef LINUX
+ mov rdi, [rsp + _gpr_save + 8*6]
+%endif
+ mov rsp, [rsp + _rsp_save] ; original SP
+
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/avx512/mb_mgr_hmac_sha_512_submit_avx512.asm b/src/spdk/intel-ipsec-mb/avx512/mb_mgr_hmac_sha_512_submit_avx512.asm
new file mode 100644
index 000000000..a2b66e54f
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx512/mb_mgr_hmac_sha_512_submit_avx512.asm
@@ -0,0 +1,413 @@
+;;
+;; Copyright (c) 2017-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+;; In System V AMD64 ABI
+;;	callee saves: RBX, RBP, R12-R15
+;; Windows x64 ABI
+;;	callee saves: RBX, RBP, RDI, RSI, RSP, R12-R15
+;;
+;; Clobbers ZMM0-31
+
+%include "include/os.asm"
+%include "job_aes_hmac.asm"
+%include "mb_mgr_datastruct.asm"
+%include "include/reg_sizes.asm"
+%include "include/memcpy.asm"
+
+extern sha512_x8_avx512
+
+section .data
+default rel
+
+align 16
+dupw: ;ddq 0x01000100010001000100010001000100
+ dq 0x0100010001000100, 0x0100010001000100
+
+align 16
+byteswap: ;ddq 0x08090a0b0c0d0e0f0001020304050607
+ dq 0x0001020304050607, 0x08090a0b0c0d0e0f
+
+section .text
+
+%ifdef LINUX
+%define arg1 rdi
+%define arg2 rsi
+%define arg3 rcx
+%define arg4 rdx
+%else
+%define arg1 rcx
+%define arg2 rdx
+%define arg3 rdi
+%define arg4 rsi
+%endif
+
+%define state arg1
+%define job arg2
+%define len2 arg2
+
+
+; idx needs to be in rbp, r13, r14, r15
+%define last_len rbp
+%define idx rbp
+
+%define p r11
+%define start_offset r11
+
+%define unused_lanes rbx
+%define tmp4 rbx
+
+%define job_rax rax
+%define len rax
+
+%define size_offset arg3
+%define tmp2 arg3
+
+%define lane arg4
+%define tmp3 arg4
+
+%define extra_blocks r8
+
+%define tmp r9
+%define p2 r9
+
+%define lane_data r10
+
+; Define stack usage
+
+; we clobber rbx, rsi, rdi, rbp; called routine also clobbers r12
+struc STACK
+_gpr_save: resq 8
+_rsp_save: resq 1
+endstruc
+
+; JOB* FUNC(MB_MGR_HMAC_SHA_512_OOO *state, JOB_AES_HMAC *job)
+; arg 1 : rcx : state
+; arg 2 : rdx : job
+align 64
+%ifndef SHA384
+MKGLOBAL(submit_job_hmac_sha_512_avx512,function,internal)
+%define SHA_X_DIGEST_SIZE 512
+submit_job_hmac_sha_512_avx512:
+%else
+MKGLOBAL(submit_job_hmac_sha_384_avx512,function,internal)
+%define SHA_X_DIGEST_SIZE 384
+submit_job_hmac_sha_384_avx512:
+%endif
+
+ mov rax, rsp
+ sub rsp, STACK_size
+ and rsp, -32
+ mov [rsp + _gpr_save + 8*0], rbx
+ mov [rsp + _gpr_save + 8*1], rbp
+ mov [rsp + _gpr_save + 8*2], r12
+ mov [rsp + _gpr_save + 8*3], r13
+ mov [rsp + _gpr_save + 8*4], r14
+ mov [rsp + _gpr_save + 8*5], r15
+%ifndef LINUX
+ mov [rsp + _gpr_save + 8*6], rsi
+ mov [rsp + _gpr_save + 8*7], rdi
+%endif
+ mov [rsp + _rsp_save], rax ; original SP
+
+ mov unused_lanes, [state + _unused_lanes_sha512]
+ mov lane, unused_lanes
+ and lane, 15
+ shr unused_lanes, 4
+ imul lane_data, lane, _SHA512_LANE_DATA_size
+ lea lane_data, [state + _ldata_sha512 + lane_data]
+ mov [state + _unused_lanes_sha512], unused_lanes
+ mov len, [job + _msg_len_to_hash_in_bytes]
+ mov tmp, len
+ shr tmp, 7 ; divide by 128, len in terms of blocks
+
+ mov [lane_data + _job_in_lane_sha512], job
+ mov dword [lane_data + _outer_done_sha512], 0
+ mov [state + _lens_sha512 + 2*lane], WORD(tmp) ; 2 is word size in bytes
+
+ mov last_len, len
+ and last_len, 127
+ lea extra_blocks, [last_len + 17 + 127]
+ shr extra_blocks, 7
+ mov [lane_data + _extra_blocks_sha512], DWORD(extra_blocks)
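+	;; SHA-512 blocks are 128 bytes and the length field is 16 bytes, so
+	;; extra_blocks = ceil((last_len + 1 + 16) / 128): the message tail plus
+	;; the 0x80 pad byte plus the 128-bit length.
+	;; e.g. last_len = 112 -> (112 + 17 + 127) >> 7 = 2 extra blocks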
+
+ mov p, [job + _src]
+ add p, [job + _hash_start_src_offset_in_bytes]
+ mov [state + _args_data_ptr_sha512 + PTR_SZ*lane], p
+
+ cmp len, 128
+ jb copy_lt128
+
+fast_copy:
+ add p, len
+ vmovdqu32 zmm0, [p - 128 + 0*64]
+ vmovdqu32 zmm1, [p - 128 + 1*64]
+ vmovdqu32 [lane_data + _extra_block_sha512 + 0*64], zmm0
+ vmovdqu32 [lane_data + _extra_block_sha512 + 1*64], zmm1
+end_fast_copy:
+
+ mov size_offset, extra_blocks
+ shl size_offset, 7
+ sub size_offset, last_len
+ add size_offset, 128-8
+ mov [lane_data + _size_offset_sha512], DWORD(size_offset)
+ mov start_offset, 128
+ sub start_offset, last_len
+ mov [lane_data + _start_offset_sha512], DWORD(start_offset)
+
+ lea tmp, [8*128 + 8*len]
+ bswap tmp
+ mov [lane_data + _extra_block_sha512 + size_offset], tmp
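+	;; the inner hash covers the 128-byte ipad block plus the message, so
+	;; its bit length is 8*(128 + len), stored big-endian in the last 8
+	;; bytes of the padded region; only the low 64 bits of the 128-bit
+	;; SHA-512 length field are written here, the upper half is presumably
+	;; left zero by the pre-populated padding (lengths stay far below 2^64 bits)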
+
+ mov tmp, [job + _auth_key_xor_ipad]
+
+%assign I 0
+%rep 4
+ vmovdqu xmm0, [tmp + I * 2 * SHA512_DIGEST_WORD_SIZE]
+ vmovq [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*lane + (2*I + 0)*SHA512_DIGEST_ROW_SIZE], xmm0
+ vpextrq [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*lane + (2*I + 1)*SHA512_DIGEST_ROW_SIZE], xmm0, 1
+%assign I (I+1)
+%endrep
+
+ test len, ~127
+ jnz ge128_bytes
+
+lt128_bytes:
+ mov [state + _lens_sha512 + 2*lane], WORD(extra_blocks)
+ lea tmp, [lane_data + _extra_block_sha512 + start_offset]
+        mov     [state + _args_data_ptr_sha512 + PTR_SZ*lane], tmp     ;; each data pointer slot is PTR_SZ (8) bytes
+ mov dword [lane_data + _extra_blocks_sha512], 0
+
+ge128_bytes:
+ cmp unused_lanes, 0xf
+ jne return_null
+ jmp start_loop
+
+ align 32
+start_loop:
+ ; Find min length
+ vmovdqa xmm0, [state + _lens_sha512]
+ vphminposuw xmm1, xmm0
+ vpextrw DWORD(len2), xmm1, 0 ; min value
+ vpextrw DWORD(idx), xmm1, 1 ; min index (0...7)
+ cmp len2, 0
+ je len_is_0
+
+ vpshufb xmm1, [rel dupw] ; duplicate words across all 8 lanes
+ vpsubw xmm0, xmm0, xmm1
+ vmovdqa [state + _lens_sha512], xmm0
+
+ ; "state" and "args" are the same address, arg1
+ ; len is arg2
+ call sha512_x8_avx512
+ ; state and idx are intact
+
+len_is_0:
+ ; process completed job "idx"
+ imul lane_data, idx, _SHA512_LANE_DATA_size
+ lea lane_data, [state + _ldata_sha512 + lane_data]
+ mov DWORD(extra_blocks), [lane_data + _extra_blocks_sha512]
+ cmp extra_blocks, 0
+ jne proc_extra_blocks
+ cmp dword [lane_data + _outer_done_sha512], 0
+ jne end_loop
+
+proc_outer:
+ mov dword [lane_data + _outer_done_sha512], 1
+ mov DWORD(size_offset), [lane_data + _size_offset_sha512]
+ mov qword [lane_data + _extra_block_sha512 + size_offset], 0
+ mov word [state + _lens_sha512 + 2*idx], 1
+ lea tmp, [lane_data + _outer_block_sha512]
+ mov job, [lane_data + _job_in_lane_sha512]
+ mov [state + _args_data_ptr_sha512 + PTR_SZ*idx], tmp
+
+%assign I 0
+%rep (SHA_X_DIGEST_SIZE / (8 * 16))
+ vmovq xmm0, [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + (2*I + 0)*SHA512_DIGEST_ROW_SIZE]
+ vpinsrq xmm0, [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + (2*I + 1)*SHA512_DIGEST_ROW_SIZE], 1
+ vpshufb xmm0, [rel byteswap]
+ vmovdqa [lane_data + _outer_block_sha512 + I * 2 * SHA512_DIGEST_WORD_SIZE], xmm0
+%assign I (I+1)
+%endrep
+
+ mov tmp, [job + _auth_key_xor_opad]
+%assign I 0
+%rep 4
+ vmovdqu xmm0, [tmp + I * 16]
+ vmovq [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + (2*I+0)*SHA512_DIGEST_ROW_SIZE], xmm0
+ vpextrq [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + (2*I + 1)*SHA512_DIGEST_ROW_SIZE], xmm0, 1
+%assign I (I+1)
+%endrep
+
+ jmp start_loop
+
+ align 32
+proc_extra_blocks:
+ mov DWORD(start_offset), [lane_data + _start_offset_sha512]
+ mov [state + _lens_sha512 + 2*idx], WORD(extra_blocks)
+ lea tmp, [lane_data + _extra_block_sha512 + start_offset]
+ mov [state + _args_data_ptr_sha512 + PTR_SZ*idx], tmp ;; idx is index of shortest length message
+ mov dword [lane_data + _extra_blocks_sha512], 0
+ jmp start_loop
+
+ align 32
+copy_lt128:
+        ;; less than one full message block of data
+        ;; copy into the tail of the extra block so the data ends exactly
+        ;; where the 0x80 pad byte was pre-populated
+ lea p2, [lane_data + _extra_block + 128]
+ sub p2, len
+ memcpy_avx2_128_1 p2, p, len, tmp4, tmp2, ymm0, ymm1, ymm2, ymm3
+ mov unused_lanes, [state + _unused_lanes_sha512]
+ jmp end_fast_copy
+
+return_null:
+ xor job_rax, job_rax
+ jmp return
+
+ align 32
+end_loop:
+ mov job_rax, [lane_data + _job_in_lane_sha512]
+ mov unused_lanes, [state + _unused_lanes_sha512]
+ mov qword [lane_data + _job_in_lane_sha512], 0
+ or dword [job_rax + _status], STS_COMPLETED_HMAC
+ shl unused_lanes, 4
+ or unused_lanes, idx
+ mov [state + _unused_lanes_sha512], unused_lanes
+
+ mov p, [job_rax + _auth_tag_output]
+
+ vzeroupper
+
+%if (SHA_X_DIGEST_SIZE != 384)
+ cmp qword [job_rax + _auth_tag_output_len_in_bytes], 32
+ jne copy_full_digest
+%else
+ cmp qword [job_rax + _auth_tag_output_len_in_bytes], 24
+ jne copy_full_digest
+%endif
+
+ ;; copy 32 bytes for SHA512 / 24 bytes for SHA384
+ mov QWORD(tmp), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 0*SHA512_DIGEST_ROW_SIZE]
+ mov QWORD(tmp2), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 1*SHA512_DIGEST_ROW_SIZE]
+ mov QWORD(tmp3), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 2*SHA512_DIGEST_ROW_SIZE]
+%if (SHA_X_DIGEST_SIZE != 384)
+ mov QWORD(tmp4), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 3*SHA512_DIGEST_ROW_SIZE]
+%endif
+ bswap QWORD(tmp)
+ bswap QWORD(tmp2)
+ bswap QWORD(tmp3)
+%if (SHA_X_DIGEST_SIZE != 384)
+ bswap QWORD(tmp4)
+%endif
+ mov [p + 0*8], QWORD(tmp)
+ mov [p + 1*8], QWORD(tmp2)
+ mov [p + 2*8], QWORD(tmp3)
+%if (SHA_X_DIGEST_SIZE != 384)
+ mov [p + 3*8], QWORD(tmp4)
+%endif
+ jmp clear_ret
+
+copy_full_digest:
+ ;; copy 64 bytes for SHA512 / 48 bytes for SHA384
+ mov QWORD(tmp), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 0*SHA512_DIGEST_ROW_SIZE]
+ mov QWORD(tmp2), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 1*SHA512_DIGEST_ROW_SIZE]
+ mov QWORD(tmp3), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 2*SHA512_DIGEST_ROW_SIZE]
+ mov QWORD(tmp4), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 3*SHA512_DIGEST_ROW_SIZE]
+ bswap QWORD(tmp)
+ bswap QWORD(tmp2)
+ bswap QWORD(tmp3)
+ bswap QWORD(tmp4)
+ mov [p + 0*8], QWORD(tmp)
+ mov [p + 1*8], QWORD(tmp2)
+ mov [p + 2*8], QWORD(tmp3)
+ mov [p + 3*8], QWORD(tmp4)
+
+ mov QWORD(tmp), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 4*SHA512_DIGEST_ROW_SIZE]
+ mov QWORD(tmp2), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 5*SHA512_DIGEST_ROW_SIZE]
+%if (SHA_X_DIGEST_SIZE != 384)
+ mov QWORD(tmp3), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 6*SHA512_DIGEST_ROW_SIZE]
+ mov QWORD(tmp4), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 7*SHA512_DIGEST_ROW_SIZE]
+%endif
+ bswap QWORD(tmp)
+ bswap QWORD(tmp2)
+%if (SHA_X_DIGEST_SIZE != 384)
+ bswap QWORD(tmp3)
+ bswap QWORD(tmp4)
+%endif
+ mov [p + 4*8], QWORD(tmp)
+ mov [p + 5*8], QWORD(tmp2)
+%if (SHA_X_DIGEST_SIZE != 384)
+ mov [p + 6*8], QWORD(tmp3)
+ mov [p + 7*8], QWORD(tmp4)
+%endif
+
+clear_ret:
+
+%ifdef SAFE_DATA
+ ;; Clear digest (48B/64B), outer_block (48B/64B) and extra_block (128B) of returned job
+%assign J 0
+%rep 6
+ mov qword [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + J*SHA512_DIGEST_ROW_SIZE], 0
+%assign J (J+1)
+%endrep
+%if (SHA_X_DIGEST_SIZE != 384)
+        mov     qword [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 6*SHA512_DIGEST_ROW_SIZE], 0
+        mov     qword [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 7*SHA512_DIGEST_ROW_SIZE], 0
+%endif
+
+ vpxorq zmm0, zmm0
+ imul lane_data, idx, _SHA512_LANE_DATA_size
+ lea lane_data, [state + _ldata_sha512 + lane_data]
+ ;; Clear first 128 bytes of extra_block
+ vmovdqu64 [lane_data + _extra_block], zmm0
+ vmovdqu64 [lane_data + _extra_block + 64], zmm0
+
+ ;; Clear first 48 bytes (SHA-384) or 64 bytes (SHA-512) of outer_block
+%if (SHA_X_DIGEST_SIZE == 384)
+ vmovdqu64 [lane_data + _outer_block], ymm0
+ vmovdqa64 [lane_data + _outer_block + 32], xmm0
+%else
+ vmovdqu64 [lane_data + _outer_block], zmm0
+%endif
+%endif ;; SAFE_DATA
+
+return:
+ mov rbx, [rsp + _gpr_save + 8*0]
+ mov rbp, [rsp + _gpr_save + 8*1]
+ mov r12, [rsp + _gpr_save + 8*2]
+ mov r13, [rsp + _gpr_save + 8*3]
+ mov r14, [rsp + _gpr_save + 8*4]
+ mov r15, [rsp + _gpr_save + 8*5]
+%ifndef LINUX
+ mov rsi, [rsp + _gpr_save + 8*6]
+ mov rdi, [rsp + _gpr_save + 8*7]
+%endif
+ mov rsp, [rsp + _rsp_save] ; original SP
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/avx512/mb_mgr_hmac_submit_avx512.asm b/src/spdk/intel-ipsec-mb/avx512/mb_mgr_hmac_submit_avx512.asm
new file mode 100644
index 000000000..2fe8482a9
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx512/mb_mgr_hmac_submit_avx512.asm
@@ -0,0 +1,402 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+;; In System V AMD64 ABI
+;;	callee saves: RBX, RBP, R12-R15
+;; Windows x64 ABI
+;;	callee saves: RBX, RBP, RDI, RSI, RSP, R12-R15
+;;
+;; Registers: RAX RBX RCX RDX RBP RSI RDI R8 R9 R10 R11 R12 R13 R14 R15
+;; -----------------------------------------------------------
+;; Windows clobbers: RAX RCX RDX R8 R9 R10 R11
+;; Windows preserves: RBX RBP RSI RDI R12 R13 R14 R15
+;; -----------------------------------------------------------
+;; Linux clobbers: RAX RCX RDX RSI RDI R8 R9 R10 R11
+;; Linux preserves: RBX RBP R12 R13 R14 R15
+;; -----------------------------------------------------------
+;; Clobbers ZMM0-31
+
+%include "include/os.asm"
+%include "job_aes_hmac.asm"
+%include "mb_mgr_datastruct.asm"
+%include "include/reg_sizes.asm"
+%include "include/memcpy.asm"
+
+;; %define DO_DBGPRINT
+%include "include/dbgprint.asm"
+
+extern sha1_x16_avx512
+
+section .data
+default rel
+
+align 16
+byteswap:
+ dq 0x0405060700010203
+ dq 0x0c0d0e0f08090a0b
+
+section .text
+
+%if 1
+%ifdef LINUX
+%define arg1 rdi
+%define arg2 rsi
+%define reg3 rcx
+%define reg4 rdx
+%else
+%define arg1 rcx
+%define arg2 rdx
+%define reg3 rdi
+%define reg4 rsi
+%endif
+
+%define state arg1
+%define job arg2
+%define len2 arg2
+
+
+; idx needs to be in rbx, rdi, rbp
+%define last_len rbp
+%define idx rbp
+
+%define p r11
+%define start_offset r11
+
+%define unused_lanes r12
+%define tmp4 r12
+
+%define job_rax rax
+%define len rax
+
+%define size_offset reg3
+%define tmp2 reg3
+
+%define lane reg4
+%define tmp3 reg4
+
+%define extra_blocks r8
+
+%define tmp r9
+%define p2 r9
+
+%define lane_data r10
+%define num_lanes_inuse r12
+%define len_upper r13
+%define idx_upper r14
+%endif
+
+; we clobber rsi, rdi, rbp, r12; called routine clobbers also r9-r15
+struc STACK
+_gpr_save: resq 7
+_rsp_save: resq 1
+endstruc
+
+; JOB* submit_job_hmac_avx512(MB_MGR_HMAC_SHA_1_OOO *state, JOB_AES_HMAC *job)
+; arg 1 : rcx : state
+; arg 2 : rdx : job
+MKGLOBAL(submit_job_hmac_avx512,function,internal)
+submit_job_hmac_avx512:
+
+ mov rax, rsp
+ sub rsp, STACK_size
+ and rsp, -32 ; align to 32 byte boundary
+ mov [rsp + _gpr_save + 8*0], rbp
+ mov [rsp + _gpr_save + 8*1], r12
+ mov [rsp + _gpr_save + 8*2], r13
+ mov [rsp + _gpr_save + 8*3], r14
+ mov [rsp + _gpr_save + 8*4], r15
+%ifndef LINUX
+ mov [rsp + _gpr_save + 8*5], rsi
+ mov [rsp + _gpr_save + 8*6], rdi
+%endif
+ mov [rsp + _rsp_save], rax
+ DBGPRINTL "---------- enter sha1 submit -----------"
+
+ mov unused_lanes, [state + _unused_lanes]
+ mov lane, unused_lanes
+ and lane, 0xF ;; just a nibble
+ shr unused_lanes, 4
+ imul lane_data, lane, _HMAC_SHA1_LANE_DATA_size
+ lea lane_data, [state + _ldata + lane_data]
+ mov [state + _unused_lanes], unused_lanes
+ DBGPRINTL64 "lane", lane
+ DBGPRINTL64 "unused_lanes", unused_lanes
+
+ add dword [state + _num_lanes_inuse_sha1], 1
+
+ mov len, [job + _msg_len_to_hash_in_bytes]
+ mov tmp, len
+ shr tmp, 6 ; divide by 64, len in terms of blocks
+
+ mov [lane_data + _job_in_lane], job
+ mov dword [lane_data + _outer_done], 0
+ mov [state + _lens + 2*lane], WORD(tmp)
+
+ mov last_len, len
+ DBGPRINTL64 "last_len", last_len
+ and last_len, 63
+ lea extra_blocks, [last_len + 9 + 63]
+ shr extra_blocks, 6
+ DBGPRINTL64 "extra_blocks", extra_blocks
+ mov [lane_data + _extra_blocks], DWORD(extra_blocks)
+
+ mov p, [job + _src]
+ add p, [job + _hash_start_src_offset_in_bytes]
+ mov [state + _args_data_ptr + PTR_SZ*lane], p
+ cmp len, 64
+ jb copy_lt64
+
+fast_copy:
+ vmovdqu32 zmm0, [p - 64 + len]
+ vmovdqu32 [lane_data + _extra_block], zmm0
+
+end_fast_copy:
+ mov size_offset, extra_blocks
+ shl size_offset, 6
+ sub size_offset, last_len
+ add size_offset, 64-8
+ mov [lane_data + _size_offset], DWORD(size_offset)
+ mov start_offset, 64
+ sub start_offset, last_len
+ mov [lane_data + _start_offset], DWORD(start_offset)
+
+ lea tmp, [8*64 + 8*len]
+ bswap tmp
+ mov [lane_data + _extra_block + size_offset], tmp
+
+ mov tmp, [job + _auth_key_xor_ipad]
+ vmovdqu xmm0, [tmp]
+ mov DWORD(tmp), [tmp + 4*4]
+ vmovd [state + _args_digest + SHA1_DIGEST_WORD_SIZE*lane + 0*SHA1_DIGEST_ROW_SIZE], xmm0
+ vpextrd [state + _args_digest + SHA1_DIGEST_WORD_SIZE*lane + 1*SHA1_DIGEST_ROW_SIZE], xmm0, 1
+ vpextrd [state + _args_digest + SHA1_DIGEST_WORD_SIZE*lane + 2*SHA1_DIGEST_ROW_SIZE], xmm0, 2
+ vpextrd [state + _args_digest + SHA1_DIGEST_WORD_SIZE*lane + 3*SHA1_DIGEST_ROW_SIZE], xmm0, 3
+ mov [state + _args_digest + SHA1_DIGEST_WORD_SIZE*lane + 4*SHA1_DIGEST_ROW_SIZE], DWORD(tmp)
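+        ;; the SHA-1 state is 5 words: the first 4 come from xmm0, the 5th
+        ;; was loaded separately into DWORD(tmp) above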
+
+ test len, ~63
+ jnz ge64_bytes
+
+lt64_bytes:
+ DBGPRINTL64 "lt64_bytes extra_blocks", extra_blocks
+ DBGPRINTL64 "lt64_bytes start_offset", start_offset
+ mov [state + _lens + 2*lane], WORD(extra_blocks)
+ lea tmp, [lane_data + _extra_block + start_offset]
+ mov [state + _args_data_ptr + PTR_SZ*lane], tmp
+ mov dword [lane_data + _extra_blocks], 0
+
+ge64_bytes:
+ mov DWORD(num_lanes_inuse), [state + _num_lanes_inuse_sha1]
+ cmp num_lanes_inuse, 0x10 ; all 16 lanes used?
+ jne return_null
+ jmp start_loop
+
+ align 16
+start_loop:
+ ; Find min length
+ vmovdqa xmm0, [state + _lens]
+ vphminposuw xmm1, xmm0
+ vpextrw DWORD(len2), xmm1, 0 ; min value
+ vpextrw DWORD(idx), xmm1, 1 ; min index (0...7)
+
+ vmovdqa xmm2, [state + _lens + 8*2]
+ vphminposuw xmm3, xmm2
+ vpextrw DWORD(len_upper), xmm3, 0 ; min value
+ vpextrw DWORD(idx_upper), xmm3, 1 ; min index (8...F)
+
+ cmp len2, len_upper
+ jle use_min
+
+ vmovdqa xmm1, xmm3
+ mov len2, len_upper
+ mov idx, idx_upper ; idx would be in range 0..7
+ add idx, 8 ; to reflect that index is in 8..F range
+
+use_min:
+ cmp len2, 0
+ je len_is_0
+
+ DBGPRINTL64 "min_length", len2
+ DBGPRINTL64 "min_length index ", idx
+
+ vpbroadcastw xmm1, xmm1
+ DBGPRINTL_XMM "SUBMIT lens after shuffle", xmm1
+
+ vpsubw xmm0, xmm0, xmm1
+ vmovdqa [state + _lens + 0*2], xmm0
+ vpsubw xmm2, xmm2, xmm1
+ vmovdqa [state + _lens + 8*2], xmm2
+ DBGPRINTL_XMM "lengths after subtraction (0..7)", xmm0
+ DBGPRINTL_XMM "lengths after subtraction (8..F)", xmm2
+
+ ; "state" and "args" are the same address, arg1
+ ; len is arg2
+ call sha1_x16_avx512
+ ; state and idx are intact
+
+len_is_0:
+ ; process completed job "idx"
+ imul lane_data, idx, _HMAC_SHA1_LANE_DATA_size
+ lea lane_data, [state + _ldata + lane_data]
+ mov DWORD(extra_blocks), [lane_data + _extra_blocks]
+ cmp extra_blocks, 0
+ jne proc_extra_blocks
+ cmp dword [lane_data + _outer_done], 0
+ jne end_loop
+
+proc_outer:
+ mov dword [lane_data + _outer_done], 1
+ mov DWORD(size_offset), [lane_data + _size_offset]
+ mov qword [lane_data + _extra_block + size_offset], 0
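+	;; the outer hash covers the 20-byte inner digest plus padding, which
+	;; always fits in a single 64-byte block, hence length = 1 block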
+ mov word [state + _lens + 2*idx], 1
+ lea tmp, [lane_data + _outer_block]
+ mov job, [lane_data + _job_in_lane]
+ mov [state + _args_data_ptr + PTR_SZ*idx], tmp
+
+ vmovd xmm0, [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 0*SHA1_DIGEST_ROW_SIZE]
+ vpinsrd xmm0, xmm0, [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 1*SHA1_DIGEST_ROW_SIZE], 1
+ vpinsrd xmm0, xmm0, [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 2*SHA1_DIGEST_ROW_SIZE], 2
+ vpinsrd xmm0, xmm0, [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 3*SHA1_DIGEST_ROW_SIZE], 3
+ vpshufb xmm0, xmm0, [rel byteswap]
+ mov DWORD(tmp), [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 4*SHA1_DIGEST_ROW_SIZE]
+ bswap DWORD(tmp)
+ vmovdqa [lane_data + _outer_block], xmm0
+ mov [lane_data + _outer_block + 4*SHA1_DIGEST_WORD_SIZE], DWORD(tmp)
+
+ mov tmp, [job + _auth_key_xor_opad]
+ vmovdqu xmm0, [tmp]
+ mov DWORD(tmp), [tmp + 4*4]
+ vmovd [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 0*SHA1_DIGEST_ROW_SIZE], xmm0
+ vpextrd [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 1*SHA1_DIGEST_ROW_SIZE], xmm0, 1
+ vpextrd [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 2*SHA1_DIGEST_ROW_SIZE], xmm0, 2
+ vpextrd [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 3*SHA1_DIGEST_ROW_SIZE], xmm0, 3
+ mov [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 4*SHA1_DIGEST_ROW_SIZE], DWORD(tmp)
+
+ jmp start_loop
+
+ align 16
+proc_extra_blocks:
+ mov DWORD(start_offset), [lane_data + _start_offset]
+ mov [state + _lens + 2*idx], WORD(extra_blocks)
+ lea tmp, [lane_data + _extra_block + start_offset]
+ mov [state + _args_data_ptr + PTR_SZ*idx], tmp
+ mov dword [lane_data + _extra_blocks], 0
+ jmp start_loop
+
+ align 16
+copy_lt64:
+	;; less than one message block of data
+	;; source: beginning of the message
+	;; destination: within the extra block, backwards by len from where the
+	;; 0x80 pad byte is pre-populated, so the data ends right at the pad
+ lea p2, [lane_data + _extra_block + 64]
+ sub p2, len
+ memcpy_avx2_64_1 p2, p, len, tmp4, tmp2, ymm0, ymm1
+ mov unused_lanes, [state + _unused_lanes]
+ jmp end_fast_copy
+
+return_null:
+ xor job_rax, job_rax
+ jmp return
+
+ align 16
+end_loop:
+ mov job_rax, [lane_data + _job_in_lane]
+ or dword [job_rax + _status], STS_COMPLETED_HMAC
+ mov qword [lane_data + _job_in_lane], 0
+
+ mov unused_lanes, [state + _unused_lanes]
+ shl unused_lanes, 4
+ or unused_lanes, idx
+ mov [state + _unused_lanes], unused_lanes
+
+ sub dword [state + _num_lanes_inuse_sha1], 1
+
+ mov p, [job_rax + _auth_tag_output]
+
+ vzeroupper
+
+ ; copy 12 bytes
+ mov DWORD(tmp), [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 0*SHA1_DIGEST_ROW_SIZE]
+ mov DWORD(tmp2), [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 1*SHA1_DIGEST_ROW_SIZE]
+ mov DWORD(tmp3), [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 2*SHA1_DIGEST_ROW_SIZE]
+ bswap DWORD(tmp)
+ bswap DWORD(tmp2)
+ bswap DWORD(tmp3)
+ mov [p + 0*SHA1_DIGEST_WORD_SIZE], DWORD(tmp)
+ mov [p + 1*SHA1_DIGEST_WORD_SIZE], DWORD(tmp2)
+ mov [p + 2*SHA1_DIGEST_WORD_SIZE], DWORD(tmp3)
+
+ cmp qword [job_rax + _auth_tag_output_len_in_bytes], 12
+ je clear_ret
+
+ ;; copy remaining 8 bytes to return 20 byte digest
+ mov DWORD(tmp), [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 3*SHA1_DIGEST_ROW_SIZE]
+ mov DWORD(tmp2), [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 4*SHA1_DIGEST_ROW_SIZE]
+ bswap DWORD(tmp)
+ bswap DWORD(tmp2)
+ mov [p + 3*SHA1_DIGEST_WORD_SIZE], DWORD(tmp)
+ mov [p + 4*SHA1_DIGEST_WORD_SIZE], DWORD(tmp2)
+
+clear_ret:
+
+%ifdef SAFE_DATA
+ ;; Clear digest (20B), outer_block (20B) and extra_block (64B) of returned job
+ mov dword [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 0*SHA1_DIGEST_ROW_SIZE], 0
+ mov dword [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 1*SHA1_DIGEST_ROW_SIZE], 0
+ mov dword [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 2*SHA1_DIGEST_ROW_SIZE], 0
+ mov dword [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 3*SHA1_DIGEST_ROW_SIZE], 0
+ mov dword [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 4*SHA1_DIGEST_ROW_SIZE], 0
+
+ vpxorq zmm0, zmm0
+ imul lane_data, idx, _HMAC_SHA1_LANE_DATA_size
+ lea lane_data, [state + _ldata + lane_data]
+
+ ;; Clear first 64 bytes of extra_block
+ vmovdqu64 [lane_data + _extra_block], zmm0
+
+ ;; Clear first 20 bytes of outer_block
+ vmovdqu64 [lane_data + _outer_block], xmm0
+ mov dword [lane_data + _outer_block + 16], 0
+%endif
+
+return:
+ DBGPRINTL "---------- exit sha1 submit -----------"
+ mov rbp, [rsp + _gpr_save + 8*0]
+ mov r12, [rsp + _gpr_save + 8*1]
+ mov r13, [rsp + _gpr_save + 8*2]
+ mov r14, [rsp + _gpr_save + 8*3]
+ mov r15, [rsp + _gpr_save + 8*4]
+%ifndef LINUX
+ mov rsi, [rsp + _gpr_save + 8*5]
+ mov rdi, [rsp + _gpr_save + 8*6]
+%endif
+ mov rsp, [rsp + _rsp_save]
+
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/avx512/sha1_x16_avx512.asm b/src/spdk/intel-ipsec-mb/avx512/sha1_x16_avx512.asm
new file mode 100644
index 000000000..d67046ce5
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx512/sha1_x16_avx512.asm
@@ -0,0 +1,439 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+;; Stack must be aligned to 32 bytes before call
+;;
+;; Registers: RAX RBX RCX RDX RBP RSI RDI R8 R9 R10 R11 R12 R13 R14 R15
+;; -----------------------------------------------------------
+;; Windows clobbers: RAX RDX R8 R9 R10 R11 R12 R13 R14 R15
+;; Windows preserves: RBX RCX RBP RSI RDI
+;; -----------------------------------------------------------
+;; Linux clobbers: RAX RDX RSI R9 R10 R11 R12 R13 R14 R15
+;; Linux preserves: RBX RCX RBP RDI R8
+;; -----------------------------------------------------------
+;; Clobbers ZMM0-31
+
+%include "include/os.asm"
+;%define DO_DBGPRINT
+%include "include/dbgprint.asm"
+%include "mb_mgr_datastruct.asm"
+%include "include/transpose_avx512.asm"
+%include "include/reg_sizes.asm"
+
+section .data
+default rel
+align 64
+K00_19: ;ddq 0x5A8279995A8279995A8279995A827999
+ ;ddq 0x5A8279995A8279995A8279995A827999
+ ;ddq 0x5A8279995A8279995A8279995A827999
+ ;ddq 0x5A8279995A8279995A8279995A827999
+ dq 0x5A8279995A827999, 0x5A8279995A827999
+ dq 0x5A8279995A827999, 0x5A8279995A827999
+ dq 0x5A8279995A827999, 0x5A8279995A827999
+ dq 0x5A8279995A827999, 0x5A8279995A827999
+K20_39: ;ddq 0x6ED9EBA16ED9EBA16ED9EBA16ED9EBA1
+ ;ddq 0x6ED9EBA16ED9EBA16ED9EBA16ED9EBA1
+ ;ddq 0x6ED9EBA16ED9EBA16ED9EBA16ED9EBA1
+ ;ddq 0x6ED9EBA16ED9EBA16ED9EBA16ED9EBA1
+ dq 0x6ED9EBA16ED9EBA1, 0x6ED9EBA16ED9EBA1
+ dq 0x6ED9EBA16ED9EBA1, 0x6ED9EBA16ED9EBA1
+ dq 0x6ED9EBA16ED9EBA1, 0x6ED9EBA16ED9EBA1
+ dq 0x6ED9EBA16ED9EBA1, 0x6ED9EBA16ED9EBA1
+K40_59: ;ddq 0x8F1BBCDC8F1BBCDC8F1BBCDC8F1BBCDC
+ ;ddq 0x8F1BBCDC8F1BBCDC8F1BBCDC8F1BBCDC
+ ;ddq 0x8F1BBCDC8F1BBCDC8F1BBCDC8F1BBCDC
+ ;ddq 0x8F1BBCDC8F1BBCDC8F1BBCDC8F1BBCDC
+ dq 0x8F1BBCDC8F1BBCDC, 0x8F1BBCDC8F1BBCDC
+ dq 0x8F1BBCDC8F1BBCDC, 0x8F1BBCDC8F1BBCDC
+ dq 0x8F1BBCDC8F1BBCDC, 0x8F1BBCDC8F1BBCDC
+ dq 0x8F1BBCDC8F1BBCDC, 0x8F1BBCDC8F1BBCDC
+K60_79: ;ddq 0xCA62C1D6CA62C1D6CA62C1D6CA62C1D6
+ ;ddq 0xCA62C1D6CA62C1D6CA62C1D6CA62C1D6
+ ;ddq 0xCA62C1D6CA62C1D6CA62C1D6CA62C1D6
+ ;ddq 0xCA62C1D6CA62C1D6CA62C1D6CA62C1D6
+ dq 0xCA62C1D6CA62C1D6, 0xCA62C1D6CA62C1D6
+ dq 0xCA62C1D6CA62C1D6, 0xCA62C1D6CA62C1D6
+ dq 0xCA62C1D6CA62C1D6, 0xCA62C1D6CA62C1D6
+ dq 0xCA62C1D6CA62C1D6, 0xCA62C1D6CA62C1D6
+
+PSHUFFLE_BYTE_FLIP_MASK: ;ddq 0x0c0d0e0f08090a0b0405060700010203
+ ;ddq 0x0c0d0e0f08090a0b0405060700010203
+ ;ddq 0x0c0d0e0f08090a0b0405060700010203
+ ;ddq 0x0c0d0e0f08090a0b0405060700010203
+ dq 0x0405060700010203, 0x0c0d0e0f08090a0b
+ dq 0x0405060700010203, 0x0c0d0e0f08090a0b
+ dq 0x0405060700010203, 0x0c0d0e0f08090a0b
+ dq 0x0405060700010203, 0x0c0d0e0f08090a0b
+
+section .text
+
+%define APPEND(a,b) a %+ b
+
+%ifdef LINUX
+%define arg1 rdi
+%define arg2 rsi
+%define arg3 rdx
+%define arg4 rcx
+%else
+%define arg1 rcx
+%define arg2 rdx
+%define arg3 r8
+%define arg4 r9
+%endif
+
+%define state arg1
+%define SIZE arg2
+%define IDX arg3
+
+%define A zmm0
+%define B zmm1
+%define C zmm2
+%define D zmm3
+%define E zmm4
+%define KT zmm5
+%define AA zmm6
+%define BB zmm7
+%define CC zmm8
+%define DD zmm9
+%define EE zmm10
+%define TMP0 zmm11
+%define TMP1 zmm12
+%define TMP2 zmm13
+%define TMP3 zmm14
+%define TMP4 zmm15
+
+%define W0 zmm16
+%define W1 zmm17
+%define W2 zmm18
+%define W3 zmm19
+%define W4 zmm20
+%define W5 zmm21
+%define W6 zmm22
+%define W7 zmm23
+%define W8 zmm24
+%define W9 zmm25
+%define W10 zmm26
+%define W11 zmm27
+%define W12 zmm28
+%define W13 zmm29
+%define W14 zmm30
+%define W15 zmm31
+
+%define inp0 r9
+%define inp1 r10
+%define inp2 r11
+%define inp3 r12
+%define inp4 r13
+%define inp5 r14
+%define inp6 r15
+%define inp7 rax
+
+%macro ROTATE_ARGS 0
+%xdefine TMP_ E
+%xdefine E D
+%xdefine D C
+%xdefine C B
+%xdefine B A
+%xdefine A TMP_
+%endm
+
+%macro PROCESS_LOOP 2
+%define %%WT %1
+%define %%F_IMMED %2
+
+ ; T = ROTL_5(A) + Ft(B,C,D) + E + Kt + Wt
+ ; E=D, D=C, C=ROTL_30(B), B=A, A=T
+
+ ; Ft
+ ; 0-19 Ch(B,C,D) = (B&C) ^ (~B&D)
+ ; 20-39, 60-79 Parity(B,C,D) = B ^ C ^ D
+ ; 40-59 Maj(B,C,D) = (B&C) ^ (B&D) ^ (C&D)
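+	; (selected by the vpternlogd immediate: 0xCA = Ch, 0x96 = Parity, 0xE8 = Maj)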
+
+ vmovdqa32 TMP1, B ; Copy B
+ vpaddd E, E, %%WT ; E = E + Wt
+ vpternlogd TMP1, C, D, %%F_IMMED ; TMP1 = Ft(B,C,D)
+ vpaddd E, E, KT ; E = E + Wt + Kt
+ vprold TMP0, A, 5 ; TMP0 = ROTL_5(A)
+ vpaddd E, E, TMP1 ; E = Ft(B,C,D) + E + Kt + Wt
+ vprold B, B, 30 ; B = ROTL_30(B)
+ vpaddd E, E, TMP0 ; E = T
+
+ ROTATE_ARGS
+%endmacro
+
+%macro MSG_SCHED_ROUND_16_79 4
+%define %%WT %1
+%define %%WTp2 %2
+%define %%WTp8 %3
+%define %%WTp13 %4
+ ; Wt = ROTL_1(Wt-3 ^ Wt-8 ^ Wt-14 ^ Wt-16)
+ ; Wt+16 = ROTL_1(Wt+13 ^ Wt+8 ^ Wt+2 ^ Wt)
+ vpternlogd %%WT, %%WTp2, %%WTp8, 0x96
+ vpxord %%WT, %%WT, %%WTp13
+ vprold %%WT, %%WT, 1
+%endmacro
+
+
+; Note this is reading in two blocks of data from each lane,
+; in preparation for the upcoming needed transpose to build msg schedule.
+; Each register will contain 32 bytes from one lane plus 32 bytes
+; from another lane.
+; The first 8 registers will contain the first 32 bytes of all lanes,
+; where register X (0 <= X <= 7) will contain bytes 0-31 from lane X in the first half
+; and bytes 0-31 from lane X+8 in the second half.
+; The last 8 registers will contain the last 32 bytes of all lanes,
+; where register Y (8 <= Y <= 15) will contain bytes 32-63 from lane Y-8 in the first half
+; and bytes 32-63 from lane Y in the second half.
+; This method helps reduce the number of shuffles required to transpose the data.
+%macro MSG_SCHED_ROUND_00_15 6
+%define %%Wt %1 ; [out] zmm register to load the next block
+%define %%LANE_IDX %2 ; [in] lane index (0-15)
+%define %%BASE_PTR %3 ; [in] base address of the input data
+%define %%OFFSET_PTR %4 ; [in] offset to get next block of data from the lane
+%define %%TMP1 %5 ; [clobbered] temporary gp register
+%define %%TMP2 %6 ; [clobbered] temporary gp register
+%if (%%LANE_IDX < 8)
+ mov %%TMP1, [%%BASE_PTR + %%LANE_IDX*PTR_SZ]
+ mov %%TMP2, [%%BASE_PTR + (%%LANE_IDX+8)*PTR_SZ]
+ vmovups YWORD(%%Wt), [%%TMP1+%%OFFSET_PTR]
+ vinserti64x4 %%Wt, %%Wt, [%%TMP2+%%OFFSET_PTR], 0x01
+%else
+ mov %%TMP1, [%%BASE_PTR + (%%LANE_IDX-8)*PTR_SZ]
+ mov %%TMP2, [%%BASE_PTR + %%LANE_IDX*PTR_SZ]
+ vmovups YWORD(%%Wt), [%%TMP1+%%OFFSET_PTR+32]
+ vinserti64x4 %%Wt, %%Wt, [%%TMP2+%%OFFSET_PTR+32], 0x01
+%endif
+%endmacro
+
+align 64
+; void sha1_x16_avx512(void **input_data, UINT128 *digest, UINT32 size)
+; arg 1 : pointer to SHA1 args structure
+; arg 2 : size (in blocks) ;; assumed to be >= 1
+MKGLOBAL(sha1_x16_avx512,function,internal)
+sha1_x16_avx512:
+ ;; Initialize digests
+ vmovdqu32 A, [state + 0*SHA1_DIGEST_ROW_SIZE]
+ vmovdqu32 B, [state + 1*SHA1_DIGEST_ROW_SIZE]
+ vmovdqu32 C, [state + 2*SHA1_DIGEST_ROW_SIZE]
+ vmovdqu32 D, [state + 3*SHA1_DIGEST_ROW_SIZE]
+ vmovdqu32 E, [state + 4*SHA1_DIGEST_ROW_SIZE]
+ DBGPRINTL_ZMM "Sha1-AVX512 incoming transposed digest", A, B, C, D, E
+ DBGPRINTL64 "SIZE", SIZE
+
+ xor IDX, IDX
+
+ ;; Load first blocks of data into ZMM registers before
+ ;; performing a 16x16 32-bit transpose.
+ ;; To speed up the transpose, data is loaded in chunks of 32 bytes,
+ ;; interleaving data between lane X and lane X+8.
+ ;; This way, final shuffles between top half and bottom half
+ ;; of the matrix are avoided.
+ mov inp0, [state + _data_ptr_sha1 + 0*PTR_SZ]
+ mov inp1, [state + _data_ptr_sha1 + 1*PTR_SZ]
+ mov inp2, [state + _data_ptr_sha1 + 2*PTR_SZ]
+ mov inp3, [state + _data_ptr_sha1 + 3*PTR_SZ]
+ mov inp4, [state + _data_ptr_sha1 + 4*PTR_SZ]
+ mov inp5, [state + _data_ptr_sha1 + 5*PTR_SZ]
+ mov inp6, [state + _data_ptr_sha1 + 6*PTR_SZ]
+ mov inp7, [state + _data_ptr_sha1 + 7*PTR_SZ]
+
+ TRANSPOSE16_U32_LOAD_FIRST8 W0, W1, W2, W3, W4, W5, W6, W7, \
+ W8, W9, W10, W11, W12, W13, W14, W15, \
+ inp0, inp1, inp2, inp3, inp4, inp5, \
+ inp6, inp7, IDX
+
+ mov inp0, [state + _data_ptr_sha1 + 8*PTR_SZ]
+ mov inp1, [state + _data_ptr_sha1 + 9*PTR_SZ]
+ mov inp2, [state + _data_ptr_sha1 +10*PTR_SZ]
+ mov inp3, [state + _data_ptr_sha1 +11*PTR_SZ]
+ mov inp4, [state + _data_ptr_sha1 +12*PTR_SZ]
+ mov inp5, [state + _data_ptr_sha1 +13*PTR_SZ]
+ mov inp6, [state + _data_ptr_sha1 +14*PTR_SZ]
+ mov inp7, [state + _data_ptr_sha1 +15*PTR_SZ]
+
+ TRANSPOSE16_U32_LOAD_LAST8 W0, W1, W2, W3, W4, W5, W6, W7, \
+ W8, W9, W10, W11, W12, W13, W14, W15, \
+ inp0, inp1, inp2, inp3, inp4, inp5, \
+ inp6, inp7, IDX
+lloop:
+ vmovdqa32 TMP2, [rel PSHUFFLE_BYTE_FLIP_MASK]
+
+ add IDX, 64
+
+ TRANSPOSE16_U32 W0, W1, W2, W3, W4, W5, W6, W7, W8, W9, W10, W11, W12, W13, W14, W15, TMP0, TMP1, TMP3, TMP4
+	DBGPRINTL_ZMM "Sha1-AVX512 incoming transposed input", W0, W1, W2, W3, W4, W5, W6, W7, W8, W9, W10, W11, W12, W13, W14, W15
+
+%assign I 0
+%rep 16
+ vpshufb APPEND(W,I), APPEND(W,I), TMP2
+%assign I (I+1)
+%endrep
+
+ ; Save digests for later addition
+ vmovdqa32 AA, A
+ vmovdqa32 BB, B
+ vmovdqa32 CC, C
+ vmovdqa32 DD, D
+ vmovdqa32 EE, E
+
+ vmovdqa32 KT, [rel K00_19]
+%assign I 0xCA
+%assign J 0
+%assign K 2
+%assign L 8
+%assign M 13
+%assign N 0
+%rep 64
+ PROCESS_LOOP APPEND(W,J), I
+ MSG_SCHED_ROUND_16_79 APPEND(W,J), APPEND(W,K), APPEND(W,L), APPEND(W,M)
+ %if N = 19
+ vmovdqa32 KT, [rel K20_39]
+ %assign I 0x96
+ %elif N = 39
+ vmovdqa32 KT, [rel K40_59]
+ %assign I 0xE8
+ %elif N = 59
+ vmovdqa32 KT, [rel K60_79]
+ %assign I 0x96
+ %endif
+%assign J ((J+1)% 16)
+%assign K ((K+1)% 16)
+%assign L ((L+1)% 16)
+%assign M ((M+1)% 16)
+%assign N (N+1)
+%endrep
+
+ ; Check if this is the last block
+ sub SIZE, 1
+ je lastLoop
+
+%assign I 0x96
+%assign J 0
+%rep 16
+ PROCESS_LOOP APPEND(W,J), I
+ MSG_SCHED_ROUND_00_15 APPEND(W,J), J, state + _data_ptr_sha1, IDX, inp0, inp1
+%assign J (J+1)
+%endrep
+
+ ; Add old digest
+ vpaddd A,A,AA
+ vpaddd B,B,BB
+ vpaddd C,C,CC
+ vpaddd D,D,DD
+ vpaddd E,E,EE
+
+ jmp lloop
+
+lastLoop:
+; Need to reset argument rotation values to Round 64 values
+%xdefine TMP_ A
+%xdefine A B
+%xdefine B C
+%xdefine C D
+%xdefine D E
+%xdefine E TMP_
+
+ ; Process last 16 rounds
+%assign I 0x96
+%assign J 0
+%rep 16
+ PROCESS_LOOP APPEND(W,J), I
+%assign J (J+1)
+%endrep
+
+ ; Add old digest
+ vpaddd A,A,AA
+ vpaddd B,B,BB
+ vpaddd C,C,CC
+ vpaddd D,D,DD
+ vpaddd E,E,EE
+
+ ; Write out digest
+ ; Do we need to untranspose digests???
+ vmovdqu32 [state + 0*SHA1_DIGEST_ROW_SIZE], A
+ vmovdqu32 [state + 1*SHA1_DIGEST_ROW_SIZE], B
+ vmovdqu32 [state + 2*SHA1_DIGEST_ROW_SIZE], C
+ vmovdqu32 [state + 3*SHA1_DIGEST_ROW_SIZE], D
+ vmovdqu32 [state + 4*SHA1_DIGEST_ROW_SIZE], E
+ DBGPRINTL_ZMM "Sha1-AVX512 outgoing transposed digest", A, B, C, D, E
+
+ ;; update input pointers
+ mov inp0, [state + _data_ptr_sha1 + 0*PTR_SZ]
+ mov inp1, [state + _data_ptr_sha1 + 1*PTR_SZ]
+ mov inp2, [state + _data_ptr_sha1 + 2*PTR_SZ]
+ mov inp3, [state + _data_ptr_sha1 + 3*PTR_SZ]
+ mov inp4, [state + _data_ptr_sha1 + 4*PTR_SZ]
+ mov inp5, [state + _data_ptr_sha1 + 5*PTR_SZ]
+ mov inp6, [state + _data_ptr_sha1 + 6*PTR_SZ]
+ mov inp7, [state + _data_ptr_sha1 + 7*PTR_SZ]
+ add inp0, IDX
+ add inp1, IDX
+ add inp2, IDX
+ add inp3, IDX
+ add inp4, IDX
+ add inp5, IDX
+ add inp6, IDX
+ add inp7, IDX
+ mov [state + _data_ptr_sha1 + 0*PTR_SZ], inp0
+ mov [state + _data_ptr_sha1 + 1*PTR_SZ], inp1
+ mov [state + _data_ptr_sha1 + 2*PTR_SZ], inp2
+ mov [state + _data_ptr_sha1 + 3*PTR_SZ], inp3
+ mov [state + _data_ptr_sha1 + 4*PTR_SZ], inp4
+ mov [state + _data_ptr_sha1 + 5*PTR_SZ], inp5
+ mov [state + _data_ptr_sha1 + 6*PTR_SZ], inp6
+ mov [state + _data_ptr_sha1 + 7*PTR_SZ], inp7
+
+ mov inp0, [state + _data_ptr_sha1 + 8*PTR_SZ]
+ mov inp1, [state + _data_ptr_sha1 + 9*PTR_SZ]
+ mov inp2, [state + _data_ptr_sha1 + 10*PTR_SZ]
+ mov inp3, [state + _data_ptr_sha1 + 11*PTR_SZ]
+ mov inp4, [state + _data_ptr_sha1 + 12*PTR_SZ]
+ mov inp5, [state + _data_ptr_sha1 + 13*PTR_SZ]
+ mov inp6, [state + _data_ptr_sha1 + 14*PTR_SZ]
+ mov inp7, [state + _data_ptr_sha1 + 15*PTR_SZ]
+ add inp0, IDX
+ add inp1, IDX
+ add inp2, IDX
+ add inp3, IDX
+ add inp4, IDX
+ add inp5, IDX
+ add inp6, IDX
+ add inp7, IDX
+ mov [state + _data_ptr_sha1 + 8*PTR_SZ], inp0
+ mov [state + _data_ptr_sha1 + 9*PTR_SZ], inp1
+ mov [state + _data_ptr_sha1 + 10*PTR_SZ], inp2
+ mov [state + _data_ptr_sha1 + 11*PTR_SZ], inp3
+ mov [state + _data_ptr_sha1 + 12*PTR_SZ], inp4
+ mov [state + _data_ptr_sha1 + 13*PTR_SZ], inp5
+ mov [state + _data_ptr_sha1 + 14*PTR_SZ], inp6
+ mov [state + _data_ptr_sha1 + 15*PTR_SZ], inp7
+
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/avx512/sha256_x16_avx512.asm b/src/spdk/intel-ipsec-mb/avx512/sha256_x16_avx512.asm
new file mode 100644
index 000000000..cdbb61ea3
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx512/sha256_x16_avx512.asm
@@ -0,0 +1,758 @@
+;;
+;; Copyright (c) 2017-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+;; Stack must be aligned to 32 bytes before call
+;;
+;; Registers: RAX RBX RCX RDX RBP RSI RDI R8 R9 R10 R11 R12 R13 R14 R15
+;; -----------------------------------------------------------
+;; Windows clobbers: RAX RDX RSI RDI R9 R10 R11 R12 R13 R14 R15
+;; Windows preserves: RCX
+;; -----------------------------------------------------------
+;; Linux clobbers: RAX RCX RDX RSI R9 R10 R11 R12 R13 R14 R15
+;; Linux preserves: RDI
+;; -----------------------------------------------------------
+;; Clobbers ZMM0-31
+
+%include "include/os.asm"
+;%define DO_DBGPRINT
+%include "include/dbgprint.asm"
+%include "mb_mgr_datastruct.asm"
+%include "include/transpose_avx512.asm"
+%include "include/reg_sizes.asm"
+
+; re-use K256 from sha256_oct_avx2.asm
+extern K256
+
+;; code to compute x16 SHA256 using AVX512
+
+%define APPEND(a,b) a %+ b
+
+; Define Stack Layout
+START_FIELDS
+;;; name size align
+FIELD _DIGEST_SAVE, 8*64, 64
+FIELD _rsp, 8, 8
+%assign STACK_SPACE _FIELD_OFFSET
+
+%ifdef LINUX
+; Linux register definitions
+ %define arg1 rdi
+ %define arg2 rsi
+ %define arg3 rcx
+ %define arg4 rdx
+%else
+; Windows definitions
+ %define arg1 rcx
+ %define arg2 rdx
+ %define arg3 rsi
+ %define arg4 rdi
+%endif
+
+%define STATE arg1
+%define INP_SIZE arg2
+%define IDX arg3
+%define TBL arg4
+
+%define A zmm0
+%define B zmm1
+%define C zmm2
+%define D zmm3
+%define E zmm4
+%define F zmm5
+%define G zmm6
+%define H zmm7
+%define T1 zmm8
+%define TMP0 zmm9
+%define TMP1 zmm10
+%define TMP2 zmm11
+%define TMP3 zmm12
+%define TMP4 zmm13
+%define TMP5 zmm14
+%define TMP6 zmm15
+
+%define W0 zmm16
+%define W1 zmm17
+%define W2 zmm18
+%define W3 zmm19
+%define W4 zmm20
+%define W5 zmm21
+%define W6 zmm22
+%define W7 zmm23
+%define W8 zmm24
+%define W9 zmm25
+%define W10 zmm26
+%define W11 zmm27
+%define W12 zmm28
+%define W13 zmm29
+%define W14 zmm30
+%define W15 zmm31
+
+%define inp0 r9
+%define inp1 r10
+%define inp2 r11
+%define inp3 r12
+%define inp4 r13
+%define inp5 r14
+%define inp6 r15
+%define inp7 rax
+
+%macro ROTATE_ARGS 0
+%xdefine TMP_ H
+%xdefine H G
+%xdefine G F
+%xdefine F E
+%xdefine E D
+%xdefine D C
+%xdefine C B
+%xdefine B A
+%xdefine A TMP_
+%endm
+
+;; CH(A, B, C) = (A&B) ^ (~A&C)
+;; MAJ(E, F, G) = (E&F) ^ (E&G) ^ (F&G)
+;; SIGMA0 = ROR_2 ^ ROR_13 ^ ROR_22
+;; SIGMA1 = ROR_6 ^ ROR_11 ^ ROR_25
+;; sigma0 = ROR_7 ^ ROR_18 ^ SHR_3
+;; sigma1 = ROR_17 ^ ROR_19 ^ SHR_10
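+;; (vpternlogd immediates used below: 0xCA = CH, 0xE8 = MAJ, 0x96 = three-way XOR)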
+
+; Main processing loop per round
+%macro PROCESS_LOOP 2
+%define %%WT %1
+%define %%ROUND %2
+ ;; T1 = H + SIGMA1(E) + CH(E, F, G) + Kt + Wt
+ ;; T2 = SIGMA0(A) + MAJ(A, B, C)
+ ;; H=G, G=F, F=E, E=D+T1, D=C, C=B, B=A, A=T1+T2
+
+ ;; H becomes T2, then add T1 for A
+ ;; D becomes D + T1 for E
+
+ vpaddd T1, H, TMP3 ; T1 = H + Kt
+ vmovdqa32 TMP0, E
+ vprord TMP1, E, 6 ; ROR_6(E)
+ vprord TMP2, E, 11 ; ROR_11(E)
+ vprord TMP3, E, 25 ; ROR_25(E)
+ vpternlogd TMP0, F, G, 0xCA ; TMP0 = CH(E,F,G)
+ vpaddd T1, T1, %%WT ; T1 = T1 + Wt
+ vpternlogd TMP1, TMP2, TMP3, 0x96 ; TMP1 = SIGMA1(E)
+ vpaddd T1, T1, TMP0 ; T1 = T1 + CH(E,F,G)
+ vpaddd T1, T1, TMP1 ; T1 = T1 + SIGMA1(E)
+ vpaddd D, D, T1 ; D = D + T1
+
+ vprord H, A, 2 ; ROR_2(A)
+ vprord TMP2, A, 13 ; ROR_13(A)
+ vprord TMP3, A, 22 ; ROR_22(A)
+ vmovdqa32 TMP0, A
+ vpternlogd TMP0, B, C, 0xE8 ; TMP0 = MAJ(A,B,C)
+ vpternlogd H, TMP2, TMP3, 0x96 ; H(T2) = SIGMA0(A)
+ vpaddd H, H, TMP0 ; H(T2) = SIGMA0(A) + MAJ(A,B,C)
+ vpaddd H, H, T1 ; H(A) = H(T2) + T1
+
+ vmovdqa32 TMP3, [TBL + ((%%ROUND+1)*64)] ; Next Kt
+
+ ;; Rotate the args A-H (rotation of names associated with regs)
+ ROTATE_ARGS
+%endmacro
+
+; This is supposed to be SKL optimized, assuming:
+; vpternlog, vpaddd on ports 5,8
+; vprord on ports 1,8
+; However, vprord only executes on port 8
+;
+; Main processing loop per round
+; Compute msg schedule word Wt+16 from the current, now unnecessary word
+%macro PROCESS_LOOP_00_47 5
+%define %%WT %1
+%define %%ROUND %2
+%define %%WTp1 %3
+%define %%WTp9 %4
+%define %%WTp14 %5
+ ;; T1 = H + SIGMA1(E) + CH(E, F, G) + Kt + Wt
+ ;; T2 = SIGMA0(A) + MAJ(A, B, C)
+ ;; H=G, G=F, F=E, E=D+T1, D=C, C=B, B=A, A=T1+T2
+
+ ;; H becomes T2, then add T1 for A
+ ;; D becomes D + T1 for E
+
+ ;; For next value in msg schedule
+ ;; Wt+16 = sigma1(Wt+14) + Wt+9 + sigma0(Wt+1) + Wt
+
+ vmovdqa32 TMP0, E
+ vprord TMP1, E, 6 ; ROR_6(E)
+ vprord TMP2, E, 11 ; ROR_11(E)
+ vprord TMP3, E, 25 ; ROR_25(E)
+ vpternlogd TMP0, F, G, 0xCA ; TMP0 = CH(E,F,G)
+ vpaddd T1, H, %%WT ; T1 = H + Wt
+ vpternlogd TMP1, TMP2, TMP3, 0x96 ; TMP1 = SIGMA1(E)
+ vpaddd T1, T1, TMP6 ; T1 = T1 + Kt
+ vprord H, A, 2 ; ROR_2(A)
+ vpaddd T1, T1, TMP0 ; T1 = T1 + CH(E,F,G)
+ vprord TMP2, A, 13 ; ROR_13(A)
+ vmovdqa32 TMP0, A
+ vprord TMP3, A, 22 ; ROR_22(A)
+ vpaddd T1, T1, TMP1 ; T1 = T1 + SIGMA1(E)
+ vpternlogd TMP0, B, C, 0xE8 ; TMP0 = MAJ(A,B,C)
+ vpaddd D, D, T1 ; D = D + T1
+ vpternlogd H, TMP2, TMP3, 0x96 ; H(T2) = SIGMA0(A)
+ vprord TMP4, %%WTp14, 17 ; ROR_17(Wt-2)
+ vpaddd H, H, TMP0 ; H(T2) = SIGMA0(A) + MAJ(A,B,C)
+ vprord TMP5, %%WTp14, 19 ; ROR_19(Wt-2)
+ vpsrld TMP6, %%WTp14, 10 ; SHR_10(Wt-2)
+ vpaddd H, H, T1 ; H(A) = H(T2) + T1
+ vpternlogd TMP4, TMP5, TMP6, 0x96 ; TMP4 = sigma1(Wt-2)
+ vpaddd %%WT, %%WT, TMP4 ; Wt = Wt-16 + sigma1(Wt-2)
+ vprord TMP4, %%WTp1, 7 ; ROR_7(Wt-15)
+ vprord TMP5, %%WTp1, 18 ; ROR_18(Wt-15)
+ vpaddd %%WT, %%WT, %%WTp9 ; Wt = Wt-16 + sigma1(Wt-2) + Wt-7
+ vpsrld TMP6, %%WTp1, 3 ; SHR_3(Wt-15)
+ vpternlogd TMP4, TMP5, TMP6, 0x96 ; TMP4 = sigma0(Wt-15)
+ vpaddd %%WT, %%WT, TMP4 ; Wt = Wt-16 + sigma1(Wt-2) +
+ ; Wt-7 + sigma0(Wt-15) +
+
+ vmovdqa32 TMP6, [TBL + ((%%ROUND+1)*64)] ; Next Kt
+
+ ;; Rotate the args A-H (rotation of names associated with regs)
+ ROTATE_ARGS
+%endmacro
+
+%macro MSG_SCHED_ROUND_16_63 4
+%define %%WT %1
+%define %%WTp1 %2
+%define %%WTp9 %3
+%define %%WTp14 %4
+ vprord TMP4, %%WTp14, 17 ; ROR_17(Wt-2)
+ vprord TMP5, %%WTp14, 19 ; ROR_19(Wt-2)
+ vpsrld TMP6, %%WTp14, 10 ; SHR_10(Wt-2)
+ vpternlogd TMP4, TMP5, TMP6, 0x96 ; TMP4 = sigma1(Wt-2)
+
+ vpaddd %%WT, %%WT, TMP4 ; Wt = Wt-16 + sigma1(Wt-2)
+ vpaddd %%WT, %%WT, %%WTp9 ; Wt = Wt-16 + sigma1(Wt-2) + Wt-7
+
+ vprord TMP4, %%WTp1, 7 ; ROR_7(Wt-15)
+ vprord TMP5, %%WTp1, 18 ; ROR_18(Wt-15)
+ vpsrld TMP6, %%WTp1, 3 ; SHR_3(Wt-15)
+ vpternlogd TMP4, TMP5, TMP6, 0x96 ; TMP4 = sigma0(Wt-15)
+
+ vpaddd %%WT, %%WT, TMP4 ; Wt = Wt-16 + sigma1(Wt-2) +
+ ; Wt-7 + sigma0(Wt-15) +
+%endmacro
+
+; Note this is reading in two blocks of data from each lane,
+; in preparation for the upcoming needed transpose to build msg schedule.
+; Each register will contain 32 bytes from one lane plus 32 bytes
+; from another lane.
+; The first 8 registers will contain the first 32 bytes of all lanes,
+; where register X (0 <= X <= 7) will contain bytes 0-31 from lane X in the first half
+; and bytes 0-31 from lane X+8 in the second half.
+; The last 8 registers will contain the last 32 bytes of all lanes,
+; where register Y (8 <= Y <= 15) will contain bytes 32-63 from lane Y-8 in the first half
+; and bytes 32-63 from lane Y in the second half.
+; This method helps reduce the number of shuffles required to transpose the data.
+%macro MSG_SCHED_ROUND_00_15 6
+%define %%Wt %1 ; [out] zmm register to load the next block
+%define %%LANE_IDX %2 ; [in] lane index (0-15)
+%define %%BASE_PTR %3 ; [in] base address of the input data
+%define %%OFFSET_PTR %4 ; [in] offset to get next block of data from the lane
+%define %%TMP1 %5 ; [clobbered] temporary gp register
+%define %%TMP2 %6 ; [clobbered] temporary gp register
+%if (%%LANE_IDX < 8)
+ mov %%TMP1, [%%BASE_PTR + %%LANE_IDX*PTR_SZ]
+ mov %%TMP2, [%%BASE_PTR + (%%LANE_IDX+8)*PTR_SZ]
+ vmovups YWORD(%%Wt), [%%TMP1+%%OFFSET_PTR]
+ vinserti64x4 %%Wt, %%Wt, [%%TMP2+%%OFFSET_PTR], 0x01
+%else
+ mov %%TMP1, [%%BASE_PTR + (%%LANE_IDX-8)*PTR_SZ]
+ mov %%TMP2, [%%BASE_PTR + %%LANE_IDX*PTR_SZ]
+ vmovups YWORD(%%Wt), [%%TMP1+%%OFFSET_PTR+32]
+ vinserti64x4 %%Wt, %%Wt, [%%TMP2+%%OFFSET_PTR+32], 0x01
+%endif
+%endmacro
+
+section .data
+default rel
+align 64
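+; 64 SHA-256 round constants, each replicated across all 16 lanes
+; (one 64-byte row per round constant)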
+TABLE:
+ dq 0x428a2f98428a2f98, 0x428a2f98428a2f98
+ dq 0x428a2f98428a2f98, 0x428a2f98428a2f98
+ dq 0x428a2f98428a2f98, 0x428a2f98428a2f98
+ dq 0x428a2f98428a2f98, 0x428a2f98428a2f98
+ dq 0x7137449171374491, 0x7137449171374491
+ dq 0x7137449171374491, 0x7137449171374491
+ dq 0x7137449171374491, 0x7137449171374491
+ dq 0x7137449171374491, 0x7137449171374491
+ dq 0xb5c0fbcfb5c0fbcf, 0xb5c0fbcfb5c0fbcf
+ dq 0xb5c0fbcfb5c0fbcf, 0xb5c0fbcfb5c0fbcf
+ dq 0xb5c0fbcfb5c0fbcf, 0xb5c0fbcfb5c0fbcf
+ dq 0xb5c0fbcfb5c0fbcf, 0xb5c0fbcfb5c0fbcf
+ dq 0xe9b5dba5e9b5dba5, 0xe9b5dba5e9b5dba5
+ dq 0xe9b5dba5e9b5dba5, 0xe9b5dba5e9b5dba5
+ dq 0xe9b5dba5e9b5dba5, 0xe9b5dba5e9b5dba5
+ dq 0xe9b5dba5e9b5dba5, 0xe9b5dba5e9b5dba5
+ dq 0x3956c25b3956c25b, 0x3956c25b3956c25b
+ dq 0x3956c25b3956c25b, 0x3956c25b3956c25b
+ dq 0x3956c25b3956c25b, 0x3956c25b3956c25b
+ dq 0x3956c25b3956c25b, 0x3956c25b3956c25b
+ dq 0x59f111f159f111f1, 0x59f111f159f111f1
+ dq 0x59f111f159f111f1, 0x59f111f159f111f1
+ dq 0x59f111f159f111f1, 0x59f111f159f111f1
+ dq 0x59f111f159f111f1, 0x59f111f159f111f1
+ dq 0x923f82a4923f82a4, 0x923f82a4923f82a4
+ dq 0x923f82a4923f82a4, 0x923f82a4923f82a4
+ dq 0x923f82a4923f82a4, 0x923f82a4923f82a4
+ dq 0x923f82a4923f82a4, 0x923f82a4923f82a4
+ dq 0xab1c5ed5ab1c5ed5, 0xab1c5ed5ab1c5ed5
+ dq 0xab1c5ed5ab1c5ed5, 0xab1c5ed5ab1c5ed5
+ dq 0xab1c5ed5ab1c5ed5, 0xab1c5ed5ab1c5ed5
+ dq 0xab1c5ed5ab1c5ed5, 0xab1c5ed5ab1c5ed5
+ dq 0xd807aa98d807aa98, 0xd807aa98d807aa98
+ dq 0xd807aa98d807aa98, 0xd807aa98d807aa98
+ dq 0xd807aa98d807aa98, 0xd807aa98d807aa98
+ dq 0xd807aa98d807aa98, 0xd807aa98d807aa98
+ dq 0x12835b0112835b01, 0x12835b0112835b01
+ dq 0x12835b0112835b01, 0x12835b0112835b01
+ dq 0x12835b0112835b01, 0x12835b0112835b01
+ dq 0x12835b0112835b01, 0x12835b0112835b01
+ dq 0x243185be243185be, 0x243185be243185be
+ dq 0x243185be243185be, 0x243185be243185be
+ dq 0x243185be243185be, 0x243185be243185be
+ dq 0x243185be243185be, 0x243185be243185be
+ dq 0x550c7dc3550c7dc3, 0x550c7dc3550c7dc3
+ dq 0x550c7dc3550c7dc3, 0x550c7dc3550c7dc3
+ dq 0x550c7dc3550c7dc3, 0x550c7dc3550c7dc3
+ dq 0x550c7dc3550c7dc3, 0x550c7dc3550c7dc3
+ dq 0x72be5d7472be5d74, 0x72be5d7472be5d74
+ dq 0x72be5d7472be5d74, 0x72be5d7472be5d74
+ dq 0x72be5d7472be5d74, 0x72be5d7472be5d74
+ dq 0x72be5d7472be5d74, 0x72be5d7472be5d74
+ dq 0x80deb1fe80deb1fe, 0x80deb1fe80deb1fe
+ dq 0x80deb1fe80deb1fe, 0x80deb1fe80deb1fe
+ dq 0x80deb1fe80deb1fe, 0x80deb1fe80deb1fe
+ dq 0x80deb1fe80deb1fe, 0x80deb1fe80deb1fe
+ dq 0x9bdc06a79bdc06a7, 0x9bdc06a79bdc06a7
+ dq 0x9bdc06a79bdc06a7, 0x9bdc06a79bdc06a7
+ dq 0x9bdc06a79bdc06a7, 0x9bdc06a79bdc06a7
+ dq 0x9bdc06a79bdc06a7, 0x9bdc06a79bdc06a7
+ dq 0xc19bf174c19bf174, 0xc19bf174c19bf174
+ dq 0xc19bf174c19bf174, 0xc19bf174c19bf174
+ dq 0xc19bf174c19bf174, 0xc19bf174c19bf174
+ dq 0xc19bf174c19bf174, 0xc19bf174c19bf174
+ dq 0xe49b69c1e49b69c1, 0xe49b69c1e49b69c1
+ dq 0xe49b69c1e49b69c1, 0xe49b69c1e49b69c1
+ dq 0xe49b69c1e49b69c1, 0xe49b69c1e49b69c1
+ dq 0xe49b69c1e49b69c1, 0xe49b69c1e49b69c1
+ dq 0xefbe4786efbe4786, 0xefbe4786efbe4786
+ dq 0xefbe4786efbe4786, 0xefbe4786efbe4786
+ dq 0xefbe4786efbe4786, 0xefbe4786efbe4786
+ dq 0xefbe4786efbe4786, 0xefbe4786efbe4786
+ dq 0x0fc19dc60fc19dc6, 0x0fc19dc60fc19dc6
+ dq 0x0fc19dc60fc19dc6, 0x0fc19dc60fc19dc6
+ dq 0x0fc19dc60fc19dc6, 0x0fc19dc60fc19dc6
+ dq 0x0fc19dc60fc19dc6, 0x0fc19dc60fc19dc6
+ dq 0x240ca1cc240ca1cc, 0x240ca1cc240ca1cc
+ dq 0x240ca1cc240ca1cc, 0x240ca1cc240ca1cc
+ dq 0x240ca1cc240ca1cc, 0x240ca1cc240ca1cc
+ dq 0x240ca1cc240ca1cc, 0x240ca1cc240ca1cc
+ dq 0x2de92c6f2de92c6f, 0x2de92c6f2de92c6f
+ dq 0x2de92c6f2de92c6f, 0x2de92c6f2de92c6f
+ dq 0x2de92c6f2de92c6f, 0x2de92c6f2de92c6f
+ dq 0x2de92c6f2de92c6f, 0x2de92c6f2de92c6f
+ dq 0x4a7484aa4a7484aa, 0x4a7484aa4a7484aa
+ dq 0x4a7484aa4a7484aa, 0x4a7484aa4a7484aa
+ dq 0x4a7484aa4a7484aa, 0x4a7484aa4a7484aa
+ dq 0x4a7484aa4a7484aa, 0x4a7484aa4a7484aa
+ dq 0x5cb0a9dc5cb0a9dc, 0x5cb0a9dc5cb0a9dc
+ dq 0x5cb0a9dc5cb0a9dc, 0x5cb0a9dc5cb0a9dc
+ dq 0x5cb0a9dc5cb0a9dc, 0x5cb0a9dc5cb0a9dc
+ dq 0x5cb0a9dc5cb0a9dc, 0x5cb0a9dc5cb0a9dc
+ dq 0x76f988da76f988da, 0x76f988da76f988da
+ dq 0x76f988da76f988da, 0x76f988da76f988da
+ dq 0x76f988da76f988da, 0x76f988da76f988da
+ dq 0x76f988da76f988da, 0x76f988da76f988da
+ dq 0x983e5152983e5152, 0x983e5152983e5152
+ dq 0x983e5152983e5152, 0x983e5152983e5152
+ dq 0x983e5152983e5152, 0x983e5152983e5152
+ dq 0x983e5152983e5152, 0x983e5152983e5152
+ dq 0xa831c66da831c66d, 0xa831c66da831c66d
+ dq 0xa831c66da831c66d, 0xa831c66da831c66d
+ dq 0xa831c66da831c66d, 0xa831c66da831c66d
+ dq 0xa831c66da831c66d, 0xa831c66da831c66d
+ dq 0xb00327c8b00327c8, 0xb00327c8b00327c8
+ dq 0xb00327c8b00327c8, 0xb00327c8b00327c8
+ dq 0xb00327c8b00327c8, 0xb00327c8b00327c8
+ dq 0xb00327c8b00327c8, 0xb00327c8b00327c8
+ dq 0xbf597fc7bf597fc7, 0xbf597fc7bf597fc7
+ dq 0xbf597fc7bf597fc7, 0xbf597fc7bf597fc7
+ dq 0xbf597fc7bf597fc7, 0xbf597fc7bf597fc7
+ dq 0xbf597fc7bf597fc7, 0xbf597fc7bf597fc7
+ dq 0xc6e00bf3c6e00bf3, 0xc6e00bf3c6e00bf3
+ dq 0xc6e00bf3c6e00bf3, 0xc6e00bf3c6e00bf3
+ dq 0xc6e00bf3c6e00bf3, 0xc6e00bf3c6e00bf3
+ dq 0xc6e00bf3c6e00bf3, 0xc6e00bf3c6e00bf3
+ dq 0xd5a79147d5a79147, 0xd5a79147d5a79147
+ dq 0xd5a79147d5a79147, 0xd5a79147d5a79147
+ dq 0xd5a79147d5a79147, 0xd5a79147d5a79147
+ dq 0xd5a79147d5a79147, 0xd5a79147d5a79147
+ dq 0x06ca635106ca6351, 0x06ca635106ca6351
+ dq 0x06ca635106ca6351, 0x06ca635106ca6351
+ dq 0x06ca635106ca6351, 0x06ca635106ca6351
+ dq 0x06ca635106ca6351, 0x06ca635106ca6351
+ dq 0x1429296714292967, 0x1429296714292967
+ dq 0x1429296714292967, 0x1429296714292967
+ dq 0x1429296714292967, 0x1429296714292967
+ dq 0x1429296714292967, 0x1429296714292967
+ dq 0x27b70a8527b70a85, 0x27b70a8527b70a85
+ dq 0x27b70a8527b70a85, 0x27b70a8527b70a85
+ dq 0x27b70a8527b70a85, 0x27b70a8527b70a85
+ dq 0x27b70a8527b70a85, 0x27b70a8527b70a85
+ dq 0x2e1b21382e1b2138, 0x2e1b21382e1b2138
+ dq 0x2e1b21382e1b2138, 0x2e1b21382e1b2138
+ dq 0x2e1b21382e1b2138, 0x2e1b21382e1b2138
+ dq 0x2e1b21382e1b2138, 0x2e1b21382e1b2138
+ dq 0x4d2c6dfc4d2c6dfc, 0x4d2c6dfc4d2c6dfc
+ dq 0x4d2c6dfc4d2c6dfc, 0x4d2c6dfc4d2c6dfc
+ dq 0x4d2c6dfc4d2c6dfc, 0x4d2c6dfc4d2c6dfc
+ dq 0x4d2c6dfc4d2c6dfc, 0x4d2c6dfc4d2c6dfc
+ dq 0x53380d1353380d13, 0x53380d1353380d13
+ dq 0x53380d1353380d13, 0x53380d1353380d13
+ dq 0x53380d1353380d13, 0x53380d1353380d13
+ dq 0x53380d1353380d13, 0x53380d1353380d13
+ dq 0x650a7354650a7354, 0x650a7354650a7354
+ dq 0x650a7354650a7354, 0x650a7354650a7354
+ dq 0x650a7354650a7354, 0x650a7354650a7354
+ dq 0x650a7354650a7354, 0x650a7354650a7354
+ dq 0x766a0abb766a0abb, 0x766a0abb766a0abb
+ dq 0x766a0abb766a0abb, 0x766a0abb766a0abb
+ dq 0x766a0abb766a0abb, 0x766a0abb766a0abb
+ dq 0x766a0abb766a0abb, 0x766a0abb766a0abb
+ dq 0x81c2c92e81c2c92e, 0x81c2c92e81c2c92e
+ dq 0x81c2c92e81c2c92e, 0x81c2c92e81c2c92e
+ dq 0x81c2c92e81c2c92e, 0x81c2c92e81c2c92e
+ dq 0x81c2c92e81c2c92e, 0x81c2c92e81c2c92e
+ dq 0x92722c8592722c85, 0x92722c8592722c85
+ dq 0x92722c8592722c85, 0x92722c8592722c85
+ dq 0x92722c8592722c85, 0x92722c8592722c85
+ dq 0x92722c8592722c85, 0x92722c8592722c85
+ dq 0xa2bfe8a1a2bfe8a1, 0xa2bfe8a1a2bfe8a1
+ dq 0xa2bfe8a1a2bfe8a1, 0xa2bfe8a1a2bfe8a1
+ dq 0xa2bfe8a1a2bfe8a1, 0xa2bfe8a1a2bfe8a1
+ dq 0xa2bfe8a1a2bfe8a1, 0xa2bfe8a1a2bfe8a1
+ dq 0xa81a664ba81a664b, 0xa81a664ba81a664b
+ dq 0xa81a664ba81a664b, 0xa81a664ba81a664b
+ dq 0xa81a664ba81a664b, 0xa81a664ba81a664b
+ dq 0xa81a664ba81a664b, 0xa81a664ba81a664b
+ dq 0xc24b8b70c24b8b70, 0xc24b8b70c24b8b70
+ dq 0xc24b8b70c24b8b70, 0xc24b8b70c24b8b70
+ dq 0xc24b8b70c24b8b70, 0xc24b8b70c24b8b70
+ dq 0xc24b8b70c24b8b70, 0xc24b8b70c24b8b70
+ dq 0xc76c51a3c76c51a3, 0xc76c51a3c76c51a3
+ dq 0xc76c51a3c76c51a3, 0xc76c51a3c76c51a3
+ dq 0xc76c51a3c76c51a3, 0xc76c51a3c76c51a3
+ dq 0xc76c51a3c76c51a3, 0xc76c51a3c76c51a3
+ dq 0xd192e819d192e819, 0xd192e819d192e819
+ dq 0xd192e819d192e819, 0xd192e819d192e819
+ dq 0xd192e819d192e819, 0xd192e819d192e819
+ dq 0xd192e819d192e819, 0xd192e819d192e819
+ dq 0xd6990624d6990624, 0xd6990624d6990624
+ dq 0xd6990624d6990624, 0xd6990624d6990624
+ dq 0xd6990624d6990624, 0xd6990624d6990624
+ dq 0xd6990624d6990624, 0xd6990624d6990624
+ dq 0xf40e3585f40e3585, 0xf40e3585f40e3585
+ dq 0xf40e3585f40e3585, 0xf40e3585f40e3585
+ dq 0xf40e3585f40e3585, 0xf40e3585f40e3585
+ dq 0xf40e3585f40e3585, 0xf40e3585f40e3585
+ dq 0x106aa070106aa070, 0x106aa070106aa070
+ dq 0x106aa070106aa070, 0x106aa070106aa070
+ dq 0x106aa070106aa070, 0x106aa070106aa070
+ dq 0x106aa070106aa070, 0x106aa070106aa070
+ dq 0x19a4c11619a4c116, 0x19a4c11619a4c116
+ dq 0x19a4c11619a4c116, 0x19a4c11619a4c116
+ dq 0x19a4c11619a4c116, 0x19a4c11619a4c116
+ dq 0x19a4c11619a4c116, 0x19a4c11619a4c116
+ dq 0x1e376c081e376c08, 0x1e376c081e376c08
+ dq 0x1e376c081e376c08, 0x1e376c081e376c08
+ dq 0x1e376c081e376c08, 0x1e376c081e376c08
+ dq 0x1e376c081e376c08, 0x1e376c081e376c08
+ dq 0x2748774c2748774c, 0x2748774c2748774c
+ dq 0x2748774c2748774c, 0x2748774c2748774c
+ dq 0x2748774c2748774c, 0x2748774c2748774c
+ dq 0x2748774c2748774c, 0x2748774c2748774c
+ dq 0x34b0bcb534b0bcb5, 0x34b0bcb534b0bcb5
+ dq 0x34b0bcb534b0bcb5, 0x34b0bcb534b0bcb5
+ dq 0x34b0bcb534b0bcb5, 0x34b0bcb534b0bcb5
+ dq 0x34b0bcb534b0bcb5, 0x34b0bcb534b0bcb5
+ dq 0x391c0cb3391c0cb3, 0x391c0cb3391c0cb3
+ dq 0x391c0cb3391c0cb3, 0x391c0cb3391c0cb3
+ dq 0x391c0cb3391c0cb3, 0x391c0cb3391c0cb3
+ dq 0x391c0cb3391c0cb3, 0x391c0cb3391c0cb3
+ dq 0x4ed8aa4a4ed8aa4a, 0x4ed8aa4a4ed8aa4a
+ dq 0x4ed8aa4a4ed8aa4a, 0x4ed8aa4a4ed8aa4a
+ dq 0x4ed8aa4a4ed8aa4a, 0x4ed8aa4a4ed8aa4a
+ dq 0x4ed8aa4a4ed8aa4a, 0x4ed8aa4a4ed8aa4a
+ dq 0x5b9cca4f5b9cca4f, 0x5b9cca4f5b9cca4f
+ dq 0x5b9cca4f5b9cca4f, 0x5b9cca4f5b9cca4f
+ dq 0x5b9cca4f5b9cca4f, 0x5b9cca4f5b9cca4f
+ dq 0x5b9cca4f5b9cca4f, 0x5b9cca4f5b9cca4f
+ dq 0x682e6ff3682e6ff3, 0x682e6ff3682e6ff3
+ dq 0x682e6ff3682e6ff3, 0x682e6ff3682e6ff3
+ dq 0x682e6ff3682e6ff3, 0x682e6ff3682e6ff3
+ dq 0x682e6ff3682e6ff3, 0x682e6ff3682e6ff3
+ dq 0x748f82ee748f82ee, 0x748f82ee748f82ee
+ dq 0x748f82ee748f82ee, 0x748f82ee748f82ee
+ dq 0x748f82ee748f82ee, 0x748f82ee748f82ee
+ dq 0x748f82ee748f82ee, 0x748f82ee748f82ee
+ dq 0x78a5636f78a5636f, 0x78a5636f78a5636f
+ dq 0x78a5636f78a5636f, 0x78a5636f78a5636f
+ dq 0x78a5636f78a5636f, 0x78a5636f78a5636f
+ dq 0x78a5636f78a5636f, 0x78a5636f78a5636f
+ dq 0x84c8781484c87814, 0x84c8781484c87814
+ dq 0x84c8781484c87814, 0x84c8781484c87814
+ dq 0x84c8781484c87814, 0x84c8781484c87814
+ dq 0x84c8781484c87814, 0x84c8781484c87814
+ dq 0x8cc702088cc70208, 0x8cc702088cc70208
+ dq 0x8cc702088cc70208, 0x8cc702088cc70208
+ dq 0x8cc702088cc70208, 0x8cc702088cc70208
+ dq 0x8cc702088cc70208, 0x8cc702088cc70208
+ dq 0x90befffa90befffa, 0x90befffa90befffa
+ dq 0x90befffa90befffa, 0x90befffa90befffa
+ dq 0x90befffa90befffa, 0x90befffa90befffa
+ dq 0x90befffa90befffa, 0x90befffa90befffa
+ dq 0xa4506ceba4506ceb, 0xa4506ceba4506ceb
+ dq 0xa4506ceba4506ceb, 0xa4506ceba4506ceb
+ dq 0xa4506ceba4506ceb, 0xa4506ceba4506ceb
+ dq 0xa4506ceba4506ceb, 0xa4506ceba4506ceb
+ dq 0xbef9a3f7bef9a3f7, 0xbef9a3f7bef9a3f7
+ dq 0xbef9a3f7bef9a3f7, 0xbef9a3f7bef9a3f7
+ dq 0xbef9a3f7bef9a3f7, 0xbef9a3f7bef9a3f7
+ dq 0xbef9a3f7bef9a3f7, 0xbef9a3f7bef9a3f7
+ dq 0xc67178f2c67178f2, 0xc67178f2c67178f2
+ dq 0xc67178f2c67178f2, 0xc67178f2c67178f2
+ dq 0xc67178f2c67178f2, 0xc67178f2c67178f2
+ dq 0xc67178f2c67178f2, 0xc67178f2c67178f2
+
+PSHUFFLE_BYTE_FLIP_MASK:
+ ;ddq 0x0c0d0e0f08090a0b0405060700010203
+ dq 0x0405060700010203, 0x0c0d0e0f08090a0b
+ ;ddq 0x0c0d0e0f08090a0b0405060700010203
+ dq 0x0405060700010203, 0x0c0d0e0f08090a0b
+ ;ddq 0x0c0d0e0f08090a0b0405060700010203
+ dq 0x0405060700010203, 0x0c0d0e0f08090a0b
+ ;ddq 0x0c0d0e0f08090a0b0405060700010203
+ dq 0x0405060700010203, 0x0c0d0e0f08090a0b
+
+section .text
+
+;; void sha256_x16_avx512(void *sha256_args, UINT64 size)
+;; arg 1 : pointer to SHA256 args structure
+;; arg 2 : size (in blocks) ;; assumed to be >= 1
+MKGLOBAL(sha256_x16_avx512,function,internal)
+align 64
+sha256_x16_avx512:
+ mov rax, rsp
+ sub rsp, STACK_SPACE
+ and rsp, ~63 ; align stack to multiple of 64
+ mov [rsp + _rsp], rax
+
+ ;; Initialize digests
+ vmovdqu32 A, [STATE + 0*SHA256_DIGEST_ROW_SIZE]
+ vmovdqu32 B, [STATE + 1*SHA256_DIGEST_ROW_SIZE]
+ vmovdqu32 C, [STATE + 2*SHA256_DIGEST_ROW_SIZE]
+ vmovdqu32 D, [STATE + 3*SHA256_DIGEST_ROW_SIZE]
+ vmovdqu32 E, [STATE + 4*SHA256_DIGEST_ROW_SIZE]
+ vmovdqu32 F, [STATE + 5*SHA256_DIGEST_ROW_SIZE]
+ vmovdqu32 G, [STATE + 6*SHA256_DIGEST_ROW_SIZE]
+ vmovdqu32 H, [STATE + 7*SHA256_DIGEST_ROW_SIZE]
+
+ lea TBL, [rel TABLE]
+
+ ; Do we need to transpose digests???
+	; SHA1 does not, but SHA256 has been doing so
+
+ xor IDX, IDX
+
+ ;; Load first blocks of data into ZMM registers before
+ ;; performing a 16x16 32-bit transpose.
+ ;; To speed up the transpose, data is loaded in chunks of 32 bytes,
+ ;; interleaving data between lane X and lane X+8.
+ ;; This way, final shuffles between top half and bottom half
+ ;; of the matrix are avoided.
+ mov inp0, [STATE + _data_ptr_sha256 + 0*PTR_SZ]
+ mov inp1, [STATE + _data_ptr_sha256 + 1*PTR_SZ]
+ mov inp2, [STATE + _data_ptr_sha256 + 2*PTR_SZ]
+ mov inp3, [STATE + _data_ptr_sha256 + 3*PTR_SZ]
+ mov inp4, [STATE + _data_ptr_sha256 + 4*PTR_SZ]
+ mov inp5, [STATE + _data_ptr_sha256 + 5*PTR_SZ]
+ mov inp6, [STATE + _data_ptr_sha256 + 6*PTR_SZ]
+ mov inp7, [STATE + _data_ptr_sha256 + 7*PTR_SZ]
+
+ TRANSPOSE16_U32_LOAD_FIRST8 W0, W1, W2, W3, W4, W5, W6, W7, \
+ W8, W9, W10, W11, W12, W13, W14, W15, \
+ inp0, inp1, inp2, inp3, inp4, inp5, \
+ inp6, inp7, IDX
+
+ mov inp0, [STATE + _data_ptr_sha256 + 8*PTR_SZ]
+ mov inp1, [STATE + _data_ptr_sha256 + 9*PTR_SZ]
+ mov inp2, [STATE + _data_ptr_sha256 +10*PTR_SZ]
+ mov inp3, [STATE + _data_ptr_sha256 +11*PTR_SZ]
+ mov inp4, [STATE + _data_ptr_sha256 +12*PTR_SZ]
+ mov inp5, [STATE + _data_ptr_sha256 +13*PTR_SZ]
+ mov inp6, [STATE + _data_ptr_sha256 +14*PTR_SZ]
+ mov inp7, [STATE + _data_ptr_sha256 +15*PTR_SZ]
+
+ TRANSPOSE16_U32_LOAD_LAST8 W0, W1, W2, W3, W4, W5, W6, W7, \
+ W8, W9, W10, W11, W12, W13, W14, W15, \
+ inp0, inp1, inp2, inp3, inp4, inp5, \
+ inp6, inp7, IDX
+
+ align 32
+lloop:
+ vmovdqa32 TMP2, [rel PSHUFFLE_BYTE_FLIP_MASK]
+
+ vmovdqa32 TMP3, [TBL] ; First K
+
+ ; Save digests for later addition
+ vmovdqa32 [rsp + _DIGEST_SAVE + 64*0], A
+ vmovdqa32 [rsp + _DIGEST_SAVE + 64*1], B
+ vmovdqa32 [rsp + _DIGEST_SAVE + 64*2], C
+ vmovdqa32 [rsp + _DIGEST_SAVE + 64*3], D
+ vmovdqa32 [rsp + _DIGEST_SAVE + 64*4], E
+ vmovdqa32 [rsp + _DIGEST_SAVE + 64*5], F
+ vmovdqa32 [rsp + _DIGEST_SAVE + 64*6], G
+ vmovdqa32 [rsp + _DIGEST_SAVE + 64*7], H
+
+ add IDX, 64
+
+ TRANSPOSE16_U32 W0, W1, W2, W3, W4, W5, W6, W7, W8, W9, W10, W11, W12, W13, W14, W15, TMP0, TMP1, TMP4, TMP5
+
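+	;; byte-swap each 32-bit word of the message blocks to big-endian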
+%assign I 0
+%rep 16
+ vpshufb APPEND(W,I), APPEND(W,I), TMP2
+%assign I (I+1)
+%endrep
+
+ ; MSG Schedule for W0-W15 is now complete in registers
+ ; Process first 48 rounds
+ ; Calculate next Wt+16 after processing is complete and Wt is unneeded
+
+ ; PROCESS_LOOP_00_47 APPEND(W,J), I, APPEND(W,K), APPEND(W,L), APPEND(W,M)
+%assign I 0
+%assign J 0
+%assign K 1
+%assign L 9
+%assign M 14
+%rep 48
+ PROCESS_LOOP APPEND(W,J), I
+ MSG_SCHED_ROUND_16_63 APPEND(W,J), APPEND(W,K), APPEND(W,L), APPEND(W,M)
+%assign I (I+1)
+%assign J ((J+1)% 16)
+%assign K ((K+1)% 16)
+%assign L ((L+1)% 16)
+%assign M ((M+1)% 16)
+%endrep
+
+	; Check if this is the last block
+ sub INP_SIZE, 1
+ je lastLoop
+
+ ; Process last 16 rounds
+ ; Read in next block msg data for use in first 16 words of msg sched
+%assign I 48
+%assign J 0
+%rep 16
+ PROCESS_LOOP APPEND(W,J), I
+ MSG_SCHED_ROUND_00_15 APPEND(W,J), J, STATE + _data_ptr_sha256, IDX, inp0, inp1
+%assign I (I+1)
+%assign J (J+1)
+%endrep
+
+ ; Add old digest
+ vpaddd A, A, [rsp + _DIGEST_SAVE + 64*0]
+ vpaddd B, B, [rsp + _DIGEST_SAVE + 64*1]
+ vpaddd C, C, [rsp + _DIGEST_SAVE + 64*2]
+ vpaddd D, D, [rsp + _DIGEST_SAVE + 64*3]
+ vpaddd E, E, [rsp + _DIGEST_SAVE + 64*4]
+ vpaddd F, F, [rsp + _DIGEST_SAVE + 64*5]
+ vpaddd G, G, [rsp + _DIGEST_SAVE + 64*6]
+ vpaddd H, H, [rsp + _DIGEST_SAVE + 64*7]
+
+ jmp lloop
+
+lastLoop:
+ ; Process last 16 rounds
+%assign I 48
+%assign J 0
+%rep 16
+ PROCESS_LOOP APPEND(W,J), I
+%assign I (I+1)
+%assign J (J+1)
+%endrep
+
+ ; Add old digest
+ vpaddd A, A, [rsp + _DIGEST_SAVE + 64*0]
+ vpaddd B, B, [rsp + _DIGEST_SAVE + 64*1]
+ vpaddd C, C, [rsp + _DIGEST_SAVE + 64*2]
+ vpaddd D, D, [rsp + _DIGEST_SAVE + 64*3]
+ vpaddd E, E, [rsp + _DIGEST_SAVE + 64*4]
+ vpaddd F, F, [rsp + _DIGEST_SAVE + 64*5]
+ vpaddd G, G, [rsp + _DIGEST_SAVE + 64*6]
+ vpaddd H, H, [rsp + _DIGEST_SAVE + 64*7]
+
+ ; Write out digest
+ ; Do we need to untranspose digests???
+ vmovdqu32 [STATE + 0*SHA256_DIGEST_ROW_SIZE], A
+ vmovdqu32 [STATE + 1*SHA256_DIGEST_ROW_SIZE], B
+ vmovdqu32 [STATE + 2*SHA256_DIGEST_ROW_SIZE], C
+ vmovdqu32 [STATE + 3*SHA256_DIGEST_ROW_SIZE], D
+ vmovdqu32 [STATE + 4*SHA256_DIGEST_ROW_SIZE], E
+ vmovdqu32 [STATE + 5*SHA256_DIGEST_ROW_SIZE], F
+ vmovdqu32 [STATE + 6*SHA256_DIGEST_ROW_SIZE], G
+ vmovdqu32 [STATE + 7*SHA256_DIGEST_ROW_SIZE], H
+
+ ; update input pointers
+%assign I 0
+%rep 16
+ add [STATE + _data_ptr_sha256 + I*PTR_SZ], IDX
+%assign I (I+1)
+%endrep
+
+%ifdef SAFE_DATA
+ ;; Clear stack frame (8*64 bytes)
+ vpxorq zmm0, zmm0
+%assign i 0
+%rep 8
+ vmovdqa64 [rsp + i*64], zmm0
+%assign i (i+1)
+%endrep
+%endif
+
+ mov rsp, [rsp + _rsp]
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/avx512/sha512_x8_avx512.asm b/src/spdk/intel-ipsec-mb/avx512/sha512_x8_avx512.asm
new file mode 100644
index 000000000..48532c3fb
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/avx512/sha512_x8_avx512.asm
@@ -0,0 +1,595 @@
+;;
+;; Copyright (c) 2017-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+;; Stack must be aligned to 32 bytes before call
+;;
+;; Registers: RAX RBX RCX RDX RBP RSI RDI R8 R9 R10 R11 R12 R13 R14 R15
+;; -----------------------------------------------------------
+;; Windows clobbers: RAX RDX RDI R8 R9 R10 R11 R12 R13 R14 R15
+;; Windows preserves: RBX RCX RBP RSI
+;; -----------------------------------------------------------
+;; Linux clobbers: RAX RDX RSI R8 R9 R10 R11 R12 R13 R14 R15
+;; Linux preserves: RBX RCX RBP RDI
+;; -----------------------------------------------------------
+;; Clobbers ZMM0-31
+
+;; code to compute quad SHA512 using AVX512
+
+%include "include/os.asm"
+;%define DO_DBGPRINT
+%include "include/dbgprint.asm"
+%include "mb_mgr_datastruct.asm"
+%include "include/transpose_avx512.asm"
+
+%define APPEND(a,b) a %+ b
+
+%ifdef LINUX
+; Linux register definitions
+%define arg1 rdi
+%define arg2 rsi
+%define arg3 rcx
+%define arg4 rdx
+%else
+; Windows definitions
+%define arg1 rcx
+%define arg2 rdx
+%define arg3 rsi
+%define arg4 rdi
+%endif
+
+%define STATE arg1
+%define INP_SIZE arg2
+
+%define IDX arg4
+%define TBL r8
+
+;; retaining XMM_SAVE: only the bottom (XMM) half of each YMM register needs saving, the top half does not
+%define NUM_LANES 8
+%define XMM_SAVE (15-5)*16
+%define SZ 8
+%define SZ8 8 * SZ
+%define DIGEST_SZ 8 * SZ8
+%define DIGEST_SAVE NUM_LANES * DIGEST_SZ
+%define RSP_SAVE 1*8
+
+
+; Define Stack Layout
+START_FIELDS
+;;; name size align
+FIELD _DIGEST_SAVE, NUM_LANES*8*64, 64
+FIELD _XMM_SAVE, XMM_SAVE, 16
+FIELD _RSP, 8, 8
+%assign STACK_SPACE _FIELD_OFFSET
+
+%define inp0 r9
+%define inp1 r10
+%define inp2 r11
+%define inp3 r12
+%define inp4 r13
+%define inp5 r14
+%define inp6 r15
+%define inp7 rax
+
+%define A zmm0
+%define B zmm1
+%define C zmm2
+%define D zmm3
+%define E zmm4
+%define F zmm5
+%define G zmm6
+%define H zmm7
+%define T1 zmm8
+%define TMP0 zmm9
+%define TMP1 zmm10
+%define TMP2 zmm11
+%define TMP3 zmm12
+%define TMP4 zmm13
+%define TMP5 zmm14
+%define TMP6 zmm15
+
+
+%define W0 zmm16
+%define W1 zmm17
+%define W2 zmm18
+%define W3 zmm19
+%define W4 zmm20
+%define W5 zmm21
+%define W6 zmm22
+%define W7 zmm23
+%define W8 zmm24
+%define W9 zmm25
+%define W10 zmm26
+%define W11 zmm27
+%define W12 zmm28
+%define W13 zmm29
+%define W14 zmm30
+%define W15 zmm31
+
+; from sha256_fips180-2.pdf
+; define rotates for Sigma function for main loop steps
+%define BIG_SIGMA_0_0 28 ; Sigma0
+%define BIG_SIGMA_0_1 34
+%define BIG_SIGMA_0_2 39
+%define BIG_SIGMA_1_0 14 ; Sigma1
+%define BIG_SIGMA_1_1 18
+%define BIG_SIGMA_1_2 41
+
+; define rotates for Sigma function for scheduling steps
+%define SMALL_SIGMA_0_0 1 ; sigma0
+%define SMALL_SIGMA_0_1 8
+%define SMALL_SIGMA_0_2 7
+%define SMALL_SIGMA_1_0 19 ; sigma1
+%define SMALL_SIGMA_1_1 61
+%define SMALL_SIGMA_1_2 6
+
+%define SHA_MAX_ROUNDS 80
+%define SHA_ROUNDS_LESS_16 (SHA_MAX_ROUNDS - 16)
+
+%macro ROTATE_ARGS 0
+%xdefine TMP_ H
+%xdefine H G
+%xdefine G F
+%xdefine F E
+%xdefine E D
+%xdefine D C
+%xdefine C B
+%xdefine B A
+%xdefine A TMP_
+%endm
+
+;; CH(A, B, C) = (A&B) ^ (~A&C)
+;; MAJ(E, F, G) = (E&F) ^ (E&G) ^ (F&G)
+;; SIGMA0 = ROR_28 ^ ROR_34 ^ ROR_39
+;; SIGMA1 = ROR_14 ^ ROR_18 ^ ROR_41
+;; sigma0 = ROR_1 ^ ROR_8 ^ SHR_7
+;; sigma1 = ROR_19 ^ ROR_61 ^ SHR_6
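+;; (vpternlogq immediates used below: 0xCA = CH, 0xE8 = MAJ, 0x96 = three-way XOR)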
+
+;; Main processing loop per round
+;; equivalent to %macro ROUND_00_15 2
+%macro PROCESS_LOOP 2
+%define %%WT %1
+%define %%ROUND %2
+ ;; T1 = H + BIG_SIGMA_1(E) + CH(E, F, G) + Kt + Wt
+ ;; T2 = BIG_SIGMA_0(A) + MAJ(A, B, C)
+ ;; H=G, G=F, F=E, E=D+T1, D=C, C=B, B=A, A=T1+T2
+
+ ;; H becomes T2, then add T1 for A
+ ;; D becomes D + T1 for E
+
+ vpaddq T1, H, TMP3 ; T1 = H + Kt
+ vmovdqa32 TMP0, E
+ ;; compute BIG_SIGMA_1(E)
+ vprorq TMP1, E, BIG_SIGMA_1_0 ; ROR_14(E)
+ vprorq TMP2, E, BIG_SIGMA_1_1 ; ROR_18(E)
+ vprorq TMP3, E, BIG_SIGMA_1_2 ; ROR_41(E)
+ vpternlogq TMP1, TMP2, TMP3, 0x96 ; TMP1 = BIG_SIGMA_1(E)
+ vpternlogq TMP0, F, G, 0xCA ; TMP0 = CH(E,F,G)
+ vpaddq T1, T1, %%WT ; T1 = T1 + Wt
+ vpaddq T1, T1, TMP0 ; T1 = T1 + CH(E,F,G)
+ vpaddq T1, T1, TMP1 ; T1 = T1 + BIG_SIGMA_1(E)
+ vpaddq D, D, T1 ; D = D + T1
+ vprorq H, A, BIG_SIGMA_0_0 ;ROR_28(A)
+ vprorq TMP2, A, BIG_SIGMA_0_1 ;ROR_34(A)
+ vprorq TMP3, A, BIG_SIGMA_0_2 ;ROR_39(A)
+ vmovdqa32 TMP0, A
+ vpternlogq TMP0, B, C, 0xE8 ; TMP0 = MAJ(A,B,C)
+ vpternlogq H, TMP2, TMP3, 0x96 ; H(T2) = BIG_SIGMA_0(A)
+ vpaddq H, H, TMP0 ; H(T2) = BIG_SIGMA_0(A) + MAJ(A,B,C)
+ vpaddq H, H, T1 ; H(A) = H(T2) + T1
+ vmovdqa32 TMP3, [TBL + ((%%ROUND+1)*64)] ; Next Kt
+
+ ;; Rotate the args A-H (rotation of names associated with regs)
+ ROTATE_ARGS
+%endmacro
+
+%macro MSG_SCHED_ROUND_16_79 4
+%define %%WT %1
+%define %%WTp1 %2
+%define %%WTp9 %3
+%define %%WTp14 %4
+ vprorq TMP4, %%WTp14, SMALL_SIGMA_1_0 ; ROR_19(Wt-2)
+ vprorq TMP5, %%WTp14, SMALL_SIGMA_1_1 ; ROR_61(Wt-2)
+ vpsrlq TMP6, %%WTp14, SMALL_SIGMA_1_2 ; SHR_6(Wt-2)
+ vpternlogq TMP4, TMP5, TMP6, 0x96 ; TMP4 = sigma_1(Wt-2)
+
+ vpaddq %%WT, %%WT, TMP4 ; Wt = Wt-16 + sigma_1(Wt-2)
+ vpaddq %%WT, %%WT, %%WTp9 ; Wt = Wt-16 + sigma_1(Wt-2) + Wt-7
+
+ vprorq TMP4, %%WTp1, SMALL_SIGMA_0_0 ; ROR_1(Wt-15)
+ vprorq TMP5, %%WTp1, SMALL_SIGMA_0_1 ; ROR_8(Wt-15)
+ vpsrlq TMP6, %%WTp1, SMALL_SIGMA_0_2 ; SHR_7(Wt-15)
+ vpternlogq TMP4, TMP5, TMP6, 0x96 ; TMP4 = sigma_0(Wt-15)
+
+ vpaddq %%WT, %%WT, TMP4 ; Wt = Wt-16 + sigma_1(Wt-2) +
+ ; Wt-7 + sigma_0(Wt-15) +
+%endmacro
+
+section .data
+default rel
+
+align 64
+; 80 constants for SHA512
+; each replicated for all 8 lanes, thus 8*80 entries
+; a space-for-time tradeoff to aid SIMD processing
+; local to this asm file, used nowhere else
+TABLE:
+ dq 0x428a2f98d728ae22, 0x428a2f98d728ae22, 0x428a2f98d728ae22, 0x428a2f98d728ae22
+ dq 0x428a2f98d728ae22, 0x428a2f98d728ae22, 0x428a2f98d728ae22, 0x428a2f98d728ae22
+ dq 0x7137449123ef65cd, 0x7137449123ef65cd, 0x7137449123ef65cd, 0x7137449123ef65cd
+ dq 0x7137449123ef65cd, 0x7137449123ef65cd, 0x7137449123ef65cd, 0x7137449123ef65cd
+ dq 0xb5c0fbcfec4d3b2f, 0xb5c0fbcfec4d3b2f, 0xb5c0fbcfec4d3b2f, 0xb5c0fbcfec4d3b2f
+ dq 0xb5c0fbcfec4d3b2f, 0xb5c0fbcfec4d3b2f, 0xb5c0fbcfec4d3b2f, 0xb5c0fbcfec4d3b2f
+ dq 0xe9b5dba58189dbbc, 0xe9b5dba58189dbbc, 0xe9b5dba58189dbbc, 0xe9b5dba58189dbbc
+ dq 0xe9b5dba58189dbbc, 0xe9b5dba58189dbbc, 0xe9b5dba58189dbbc, 0xe9b5dba58189dbbc
+ dq 0x3956c25bf348b538, 0x3956c25bf348b538, 0x3956c25bf348b538, 0x3956c25bf348b538
+ dq 0x3956c25bf348b538, 0x3956c25bf348b538, 0x3956c25bf348b538, 0x3956c25bf348b538
+ dq 0x59f111f1b605d019, 0x59f111f1b605d019, 0x59f111f1b605d019, 0x59f111f1b605d019
+ dq 0x59f111f1b605d019, 0x59f111f1b605d019, 0x59f111f1b605d019, 0x59f111f1b605d019
+ dq 0x923f82a4af194f9b, 0x923f82a4af194f9b, 0x923f82a4af194f9b, 0x923f82a4af194f9b
+ dq 0x923f82a4af194f9b, 0x923f82a4af194f9b, 0x923f82a4af194f9b, 0x923f82a4af194f9b
+ dq 0xab1c5ed5da6d8118, 0xab1c5ed5da6d8118, 0xab1c5ed5da6d8118, 0xab1c5ed5da6d8118
+ dq 0xab1c5ed5da6d8118, 0xab1c5ed5da6d8118, 0xab1c5ed5da6d8118, 0xab1c5ed5da6d8118
+ dq 0xd807aa98a3030242, 0xd807aa98a3030242, 0xd807aa98a3030242, 0xd807aa98a3030242
+ dq 0xd807aa98a3030242, 0xd807aa98a3030242, 0xd807aa98a3030242, 0xd807aa98a3030242
+ dq 0x12835b0145706fbe, 0x12835b0145706fbe, 0x12835b0145706fbe, 0x12835b0145706fbe
+ dq 0x12835b0145706fbe, 0x12835b0145706fbe, 0x12835b0145706fbe, 0x12835b0145706fbe
+ dq 0x243185be4ee4b28c, 0x243185be4ee4b28c, 0x243185be4ee4b28c, 0x243185be4ee4b28c
+ dq 0x243185be4ee4b28c, 0x243185be4ee4b28c, 0x243185be4ee4b28c, 0x243185be4ee4b28c
+ dq 0x550c7dc3d5ffb4e2, 0x550c7dc3d5ffb4e2, 0x550c7dc3d5ffb4e2, 0x550c7dc3d5ffb4e2
+ dq 0x550c7dc3d5ffb4e2, 0x550c7dc3d5ffb4e2, 0x550c7dc3d5ffb4e2, 0x550c7dc3d5ffb4e2
+ dq 0x72be5d74f27b896f, 0x72be5d74f27b896f, 0x72be5d74f27b896f, 0x72be5d74f27b896f
+ dq 0x72be5d74f27b896f, 0x72be5d74f27b896f, 0x72be5d74f27b896f, 0x72be5d74f27b896f
+ dq 0x80deb1fe3b1696b1, 0x80deb1fe3b1696b1, 0x80deb1fe3b1696b1, 0x80deb1fe3b1696b1
+ dq 0x80deb1fe3b1696b1, 0x80deb1fe3b1696b1, 0x80deb1fe3b1696b1, 0x80deb1fe3b1696b1
+ dq 0x9bdc06a725c71235, 0x9bdc06a725c71235, 0x9bdc06a725c71235, 0x9bdc06a725c71235
+ dq 0x9bdc06a725c71235, 0x9bdc06a725c71235, 0x9bdc06a725c71235, 0x9bdc06a725c71235
+ dq 0xc19bf174cf692694, 0xc19bf174cf692694, 0xc19bf174cf692694, 0xc19bf174cf692694
+ dq 0xc19bf174cf692694, 0xc19bf174cf692694, 0xc19bf174cf692694, 0xc19bf174cf692694
+ dq 0xe49b69c19ef14ad2, 0xe49b69c19ef14ad2, 0xe49b69c19ef14ad2, 0xe49b69c19ef14ad2
+ dq 0xe49b69c19ef14ad2, 0xe49b69c19ef14ad2, 0xe49b69c19ef14ad2, 0xe49b69c19ef14ad2
+ dq 0xefbe4786384f25e3, 0xefbe4786384f25e3, 0xefbe4786384f25e3, 0xefbe4786384f25e3
+ dq 0xefbe4786384f25e3, 0xefbe4786384f25e3, 0xefbe4786384f25e3, 0xefbe4786384f25e3
+ dq 0x0fc19dc68b8cd5b5, 0x0fc19dc68b8cd5b5, 0x0fc19dc68b8cd5b5, 0x0fc19dc68b8cd5b5
+ dq 0x0fc19dc68b8cd5b5, 0x0fc19dc68b8cd5b5, 0x0fc19dc68b8cd5b5, 0x0fc19dc68b8cd5b5
+ dq 0x240ca1cc77ac9c65, 0x240ca1cc77ac9c65, 0x240ca1cc77ac9c65, 0x240ca1cc77ac9c65
+ dq 0x240ca1cc77ac9c65, 0x240ca1cc77ac9c65, 0x240ca1cc77ac9c65, 0x240ca1cc77ac9c65
+ dq 0x2de92c6f592b0275, 0x2de92c6f592b0275, 0x2de92c6f592b0275, 0x2de92c6f592b0275
+ dq 0x2de92c6f592b0275, 0x2de92c6f592b0275, 0x2de92c6f592b0275, 0x2de92c6f592b0275
+ dq 0x4a7484aa6ea6e483, 0x4a7484aa6ea6e483, 0x4a7484aa6ea6e483, 0x4a7484aa6ea6e483
+ dq 0x4a7484aa6ea6e483, 0x4a7484aa6ea6e483, 0x4a7484aa6ea6e483, 0x4a7484aa6ea6e483
+ dq 0x5cb0a9dcbd41fbd4, 0x5cb0a9dcbd41fbd4, 0x5cb0a9dcbd41fbd4, 0x5cb0a9dcbd41fbd4
+ dq 0x5cb0a9dcbd41fbd4, 0x5cb0a9dcbd41fbd4, 0x5cb0a9dcbd41fbd4, 0x5cb0a9dcbd41fbd4
+ dq 0x76f988da831153b5, 0x76f988da831153b5, 0x76f988da831153b5, 0x76f988da831153b5
+ dq 0x76f988da831153b5, 0x76f988da831153b5, 0x76f988da831153b5, 0x76f988da831153b5
+ dq 0x983e5152ee66dfab, 0x983e5152ee66dfab, 0x983e5152ee66dfab, 0x983e5152ee66dfab
+ dq 0x983e5152ee66dfab, 0x983e5152ee66dfab, 0x983e5152ee66dfab, 0x983e5152ee66dfab
+ dq 0xa831c66d2db43210, 0xa831c66d2db43210, 0xa831c66d2db43210, 0xa831c66d2db43210
+ dq 0xa831c66d2db43210, 0xa831c66d2db43210, 0xa831c66d2db43210, 0xa831c66d2db43210
+ dq 0xb00327c898fb213f, 0xb00327c898fb213f, 0xb00327c898fb213f, 0xb00327c898fb213f
+ dq 0xb00327c898fb213f, 0xb00327c898fb213f, 0xb00327c898fb213f, 0xb00327c898fb213f
+ dq 0xbf597fc7beef0ee4, 0xbf597fc7beef0ee4, 0xbf597fc7beef0ee4, 0xbf597fc7beef0ee4
+ dq 0xbf597fc7beef0ee4, 0xbf597fc7beef0ee4, 0xbf597fc7beef0ee4, 0xbf597fc7beef0ee4
+ dq 0xc6e00bf33da88fc2, 0xc6e00bf33da88fc2, 0xc6e00bf33da88fc2, 0xc6e00bf33da88fc2
+ dq 0xc6e00bf33da88fc2, 0xc6e00bf33da88fc2, 0xc6e00bf33da88fc2, 0xc6e00bf33da88fc2
+ dq 0xd5a79147930aa725, 0xd5a79147930aa725, 0xd5a79147930aa725, 0xd5a79147930aa725
+ dq 0xd5a79147930aa725, 0xd5a79147930aa725, 0xd5a79147930aa725, 0xd5a79147930aa725
+ dq 0x06ca6351e003826f, 0x06ca6351e003826f, 0x06ca6351e003826f, 0x06ca6351e003826f
+ dq 0x06ca6351e003826f, 0x06ca6351e003826f, 0x06ca6351e003826f, 0x06ca6351e003826f
+ dq 0x142929670a0e6e70, 0x142929670a0e6e70, 0x142929670a0e6e70, 0x142929670a0e6e70
+ dq 0x142929670a0e6e70, 0x142929670a0e6e70, 0x142929670a0e6e70, 0x142929670a0e6e70
+ dq 0x27b70a8546d22ffc, 0x27b70a8546d22ffc, 0x27b70a8546d22ffc, 0x27b70a8546d22ffc
+ dq 0x27b70a8546d22ffc, 0x27b70a8546d22ffc, 0x27b70a8546d22ffc, 0x27b70a8546d22ffc
+ dq 0x2e1b21385c26c926, 0x2e1b21385c26c926, 0x2e1b21385c26c926, 0x2e1b21385c26c926
+ dq 0x2e1b21385c26c926, 0x2e1b21385c26c926, 0x2e1b21385c26c926, 0x2e1b21385c26c926
+ dq 0x4d2c6dfc5ac42aed, 0x4d2c6dfc5ac42aed, 0x4d2c6dfc5ac42aed, 0x4d2c6dfc5ac42aed
+ dq 0x4d2c6dfc5ac42aed, 0x4d2c6dfc5ac42aed, 0x4d2c6dfc5ac42aed, 0x4d2c6dfc5ac42aed
+ dq 0x53380d139d95b3df, 0x53380d139d95b3df, 0x53380d139d95b3df, 0x53380d139d95b3df
+ dq 0x53380d139d95b3df, 0x53380d139d95b3df, 0x53380d139d95b3df, 0x53380d139d95b3df
+ dq 0x650a73548baf63de, 0x650a73548baf63de, 0x650a73548baf63de, 0x650a73548baf63de
+ dq 0x650a73548baf63de, 0x650a73548baf63de, 0x650a73548baf63de, 0x650a73548baf63de
+ dq 0x766a0abb3c77b2a8, 0x766a0abb3c77b2a8, 0x766a0abb3c77b2a8, 0x766a0abb3c77b2a8
+ dq 0x766a0abb3c77b2a8, 0x766a0abb3c77b2a8, 0x766a0abb3c77b2a8, 0x766a0abb3c77b2a8
+ dq 0x81c2c92e47edaee6, 0x81c2c92e47edaee6, 0x81c2c92e47edaee6, 0x81c2c92e47edaee6
+ dq 0x81c2c92e47edaee6, 0x81c2c92e47edaee6, 0x81c2c92e47edaee6, 0x81c2c92e47edaee6
+ dq 0x92722c851482353b, 0x92722c851482353b, 0x92722c851482353b, 0x92722c851482353b
+ dq 0x92722c851482353b, 0x92722c851482353b, 0x92722c851482353b, 0x92722c851482353b
+ dq 0xa2bfe8a14cf10364, 0xa2bfe8a14cf10364, 0xa2bfe8a14cf10364, 0xa2bfe8a14cf10364
+ dq 0xa2bfe8a14cf10364, 0xa2bfe8a14cf10364, 0xa2bfe8a14cf10364, 0xa2bfe8a14cf10364
+ dq 0xa81a664bbc423001, 0xa81a664bbc423001, 0xa81a664bbc423001, 0xa81a664bbc423001
+ dq 0xa81a664bbc423001, 0xa81a664bbc423001, 0xa81a664bbc423001, 0xa81a664bbc423001
+ dq 0xc24b8b70d0f89791, 0xc24b8b70d0f89791, 0xc24b8b70d0f89791, 0xc24b8b70d0f89791
+ dq 0xc24b8b70d0f89791, 0xc24b8b70d0f89791, 0xc24b8b70d0f89791, 0xc24b8b70d0f89791
+ dq 0xc76c51a30654be30, 0xc76c51a30654be30, 0xc76c51a30654be30, 0xc76c51a30654be30
+ dq 0xc76c51a30654be30, 0xc76c51a30654be30, 0xc76c51a30654be30, 0xc76c51a30654be30
+ dq 0xd192e819d6ef5218, 0xd192e819d6ef5218, 0xd192e819d6ef5218, 0xd192e819d6ef5218
+ dq 0xd192e819d6ef5218, 0xd192e819d6ef5218, 0xd192e819d6ef5218, 0xd192e819d6ef5218
+ dq 0xd69906245565a910, 0xd69906245565a910, 0xd69906245565a910, 0xd69906245565a910
+ dq 0xd69906245565a910, 0xd69906245565a910, 0xd69906245565a910, 0xd69906245565a910
+ dq 0xf40e35855771202a, 0xf40e35855771202a, 0xf40e35855771202a, 0xf40e35855771202a
+ dq 0xf40e35855771202a, 0xf40e35855771202a, 0xf40e35855771202a, 0xf40e35855771202a
+ dq 0x106aa07032bbd1b8, 0x106aa07032bbd1b8, 0x106aa07032bbd1b8, 0x106aa07032bbd1b8
+ dq 0x106aa07032bbd1b8, 0x106aa07032bbd1b8, 0x106aa07032bbd1b8, 0x106aa07032bbd1b8
+ dq 0x19a4c116b8d2d0c8, 0x19a4c116b8d2d0c8, 0x19a4c116b8d2d0c8, 0x19a4c116b8d2d0c8
+ dq 0x19a4c116b8d2d0c8, 0x19a4c116b8d2d0c8, 0x19a4c116b8d2d0c8, 0x19a4c116b8d2d0c8
+ dq 0x1e376c085141ab53, 0x1e376c085141ab53, 0x1e376c085141ab53, 0x1e376c085141ab53
+ dq 0x1e376c085141ab53, 0x1e376c085141ab53, 0x1e376c085141ab53, 0x1e376c085141ab53
+ dq 0x2748774cdf8eeb99, 0x2748774cdf8eeb99, 0x2748774cdf8eeb99, 0x2748774cdf8eeb99
+ dq 0x2748774cdf8eeb99, 0x2748774cdf8eeb99, 0x2748774cdf8eeb99, 0x2748774cdf8eeb99
+ dq 0x34b0bcb5e19b48a8, 0x34b0bcb5e19b48a8, 0x34b0bcb5e19b48a8, 0x34b0bcb5e19b48a8
+ dq 0x34b0bcb5e19b48a8, 0x34b0bcb5e19b48a8, 0x34b0bcb5e19b48a8, 0x34b0bcb5e19b48a8
+ dq 0x391c0cb3c5c95a63, 0x391c0cb3c5c95a63, 0x391c0cb3c5c95a63, 0x391c0cb3c5c95a63
+ dq 0x391c0cb3c5c95a63, 0x391c0cb3c5c95a63, 0x391c0cb3c5c95a63, 0x391c0cb3c5c95a63
+ dq 0x4ed8aa4ae3418acb, 0x4ed8aa4ae3418acb, 0x4ed8aa4ae3418acb, 0x4ed8aa4ae3418acb
+ dq 0x4ed8aa4ae3418acb, 0x4ed8aa4ae3418acb, 0x4ed8aa4ae3418acb, 0x4ed8aa4ae3418acb
+ dq 0x5b9cca4f7763e373, 0x5b9cca4f7763e373, 0x5b9cca4f7763e373, 0x5b9cca4f7763e373
+ dq 0x5b9cca4f7763e373, 0x5b9cca4f7763e373, 0x5b9cca4f7763e373, 0x5b9cca4f7763e373
+ dq 0x682e6ff3d6b2b8a3, 0x682e6ff3d6b2b8a3, 0x682e6ff3d6b2b8a3, 0x682e6ff3d6b2b8a3
+ dq 0x682e6ff3d6b2b8a3, 0x682e6ff3d6b2b8a3, 0x682e6ff3d6b2b8a3, 0x682e6ff3d6b2b8a3
+ dq 0x748f82ee5defb2fc, 0x748f82ee5defb2fc, 0x748f82ee5defb2fc, 0x748f82ee5defb2fc
+ dq 0x748f82ee5defb2fc, 0x748f82ee5defb2fc, 0x748f82ee5defb2fc, 0x748f82ee5defb2fc
+ dq 0x78a5636f43172f60, 0x78a5636f43172f60, 0x78a5636f43172f60, 0x78a5636f43172f60
+ dq 0x78a5636f43172f60, 0x78a5636f43172f60, 0x78a5636f43172f60, 0x78a5636f43172f60
+ dq 0x84c87814a1f0ab72, 0x84c87814a1f0ab72, 0x84c87814a1f0ab72, 0x84c87814a1f0ab72
+ dq 0x84c87814a1f0ab72, 0x84c87814a1f0ab72, 0x84c87814a1f0ab72, 0x84c87814a1f0ab72
+ dq 0x8cc702081a6439ec, 0x8cc702081a6439ec, 0x8cc702081a6439ec, 0x8cc702081a6439ec
+ dq 0x8cc702081a6439ec, 0x8cc702081a6439ec, 0x8cc702081a6439ec, 0x8cc702081a6439ec
+ dq 0x90befffa23631e28, 0x90befffa23631e28, 0x90befffa23631e28, 0x90befffa23631e28
+ dq 0x90befffa23631e28, 0x90befffa23631e28, 0x90befffa23631e28, 0x90befffa23631e28
+ dq 0xa4506cebde82bde9, 0xa4506cebde82bde9, 0xa4506cebde82bde9, 0xa4506cebde82bde9
+ dq 0xa4506cebde82bde9, 0xa4506cebde82bde9, 0xa4506cebde82bde9, 0xa4506cebde82bde9
+ dq 0xbef9a3f7b2c67915, 0xbef9a3f7b2c67915, 0xbef9a3f7b2c67915, 0xbef9a3f7b2c67915
+ dq 0xbef9a3f7b2c67915, 0xbef9a3f7b2c67915, 0xbef9a3f7b2c67915, 0xbef9a3f7b2c67915
+ dq 0xc67178f2e372532b, 0xc67178f2e372532b, 0xc67178f2e372532b, 0xc67178f2e372532b
+ dq 0xc67178f2e372532b, 0xc67178f2e372532b, 0xc67178f2e372532b, 0xc67178f2e372532b
+ dq 0xca273eceea26619c, 0xca273eceea26619c, 0xca273eceea26619c, 0xca273eceea26619c
+ dq 0xca273eceea26619c, 0xca273eceea26619c, 0xca273eceea26619c, 0xca273eceea26619c
+ dq 0xd186b8c721c0c207, 0xd186b8c721c0c207, 0xd186b8c721c0c207, 0xd186b8c721c0c207
+ dq 0xd186b8c721c0c207, 0xd186b8c721c0c207, 0xd186b8c721c0c207, 0xd186b8c721c0c207
+ dq 0xeada7dd6cde0eb1e, 0xeada7dd6cde0eb1e, 0xeada7dd6cde0eb1e, 0xeada7dd6cde0eb1e
+ dq 0xeada7dd6cde0eb1e, 0xeada7dd6cde0eb1e, 0xeada7dd6cde0eb1e, 0xeada7dd6cde0eb1e
+ dq 0xf57d4f7fee6ed178, 0xf57d4f7fee6ed178, 0xf57d4f7fee6ed178, 0xf57d4f7fee6ed178
+ dq 0xf57d4f7fee6ed178, 0xf57d4f7fee6ed178, 0xf57d4f7fee6ed178, 0xf57d4f7fee6ed178
+ dq 0x06f067aa72176fba, 0x06f067aa72176fba, 0x06f067aa72176fba, 0x06f067aa72176fba
+ dq 0x06f067aa72176fba, 0x06f067aa72176fba, 0x06f067aa72176fba, 0x06f067aa72176fba
+ dq 0x0a637dc5a2c898a6, 0x0a637dc5a2c898a6, 0x0a637dc5a2c898a6, 0x0a637dc5a2c898a6
+ dq 0x0a637dc5a2c898a6, 0x0a637dc5a2c898a6, 0x0a637dc5a2c898a6, 0x0a637dc5a2c898a6
+ dq 0x113f9804bef90dae, 0x113f9804bef90dae, 0x113f9804bef90dae, 0x113f9804bef90dae
+ dq 0x113f9804bef90dae, 0x113f9804bef90dae, 0x113f9804bef90dae, 0x113f9804bef90dae
+ dq 0x1b710b35131c471b, 0x1b710b35131c471b, 0x1b710b35131c471b, 0x1b710b35131c471b
+ dq 0x1b710b35131c471b, 0x1b710b35131c471b, 0x1b710b35131c471b, 0x1b710b35131c471b
+ dq 0x28db77f523047d84, 0x28db77f523047d84, 0x28db77f523047d84, 0x28db77f523047d84
+ dq 0x28db77f523047d84, 0x28db77f523047d84, 0x28db77f523047d84, 0x28db77f523047d84
+ dq 0x32caab7b40c72493, 0x32caab7b40c72493, 0x32caab7b40c72493, 0x32caab7b40c72493
+ dq 0x32caab7b40c72493, 0x32caab7b40c72493, 0x32caab7b40c72493, 0x32caab7b40c72493
+ dq 0x3c9ebe0a15c9bebc, 0x3c9ebe0a15c9bebc, 0x3c9ebe0a15c9bebc, 0x3c9ebe0a15c9bebc
+ dq 0x3c9ebe0a15c9bebc, 0x3c9ebe0a15c9bebc, 0x3c9ebe0a15c9bebc, 0x3c9ebe0a15c9bebc
+ dq 0x431d67c49c100d4c, 0x431d67c49c100d4c, 0x431d67c49c100d4c, 0x431d67c49c100d4c
+ dq 0x431d67c49c100d4c, 0x431d67c49c100d4c, 0x431d67c49c100d4c, 0x431d67c49c100d4c
+ dq 0x4cc5d4becb3e42b6, 0x4cc5d4becb3e42b6, 0x4cc5d4becb3e42b6, 0x4cc5d4becb3e42b6
+ dq 0x4cc5d4becb3e42b6, 0x4cc5d4becb3e42b6, 0x4cc5d4becb3e42b6, 0x4cc5d4becb3e42b6
+ dq 0x597f299cfc657e2a, 0x597f299cfc657e2a, 0x597f299cfc657e2a, 0x597f299cfc657e2a
+ dq 0x597f299cfc657e2a, 0x597f299cfc657e2a, 0x597f299cfc657e2a, 0x597f299cfc657e2a
+ dq 0x5fcb6fab3ad6faec, 0x5fcb6fab3ad6faec, 0x5fcb6fab3ad6faec, 0x5fcb6fab3ad6faec
+ dq 0x5fcb6fab3ad6faec, 0x5fcb6fab3ad6faec, 0x5fcb6fab3ad6faec, 0x5fcb6fab3ad6faec
+ dq 0x6c44198c4a475817, 0x6c44198c4a475817, 0x6c44198c4a475817, 0x6c44198c4a475817
+ dq 0x6c44198c4a475817, 0x6c44198c4a475817, 0x6c44198c4a475817, 0x6c44198c4a475817
+
+align 64
+; byte-swap mask: converts each 64-bit quad word of a ZMM between little-endian and big-endian byte order
+;; a ZMM shuffle operates independently on four 128-bit (XMM-sized) chunks, so the 16-byte pattern repeats four times
+PSHUFFLE_BYTE_FLIP_MASK:
+ ;ddq 0x08090a0b0c0d0e0f0001020304050607
+ dq 0x0001020304050607, 0x08090a0b0c0d0e0f
+ ;ddq 0x18191a1b1c1d1e1f1011121314151617
+ dq 0x1011121314151617, 0x18191a1b1c1d1e1f
+ ;ddq 0x28292a2b2c2d2e2f2021222324252627
+ dq 0x2021222324252627, 0x28292a2b2c2d2e2f
+ ;ddq 0x38393a3b3c3d3e3f3031323334353637
+ dq 0x3031323334353637, 0x38393a3b3c3d3e3f
+
+section .text
+
+;; void sha512_x8_avx512(void *input_data, UINT64 *digest[NUM_LANES], const int size)
+;; arg 1 : rcx : pointer to input data
+;; arg 2 : rdx : pointer to UINT64 digest[8][num_lanes]
+;; arg 3 : size in message block lengths (1 block = 128 bytes)
+MKGLOBAL(sha512_x8_avx512,function,internal)
+align 64
+sha512_x8_avx512:
+ mov rax, rsp
+ sub rsp, STACK_SPACE
+ and rsp, ~63 ; align stack to multiple of 64
+ mov [rsp + _RSP], rax
+
+	;; Initialize digests; organized as uint64 digest[8][num_lanes], so no transpose is required
+	;; (each row holds the same digest word for all 8 lanes)
+ vmovdqu32 A, [STATE + 0*SHA512_DIGEST_ROW_SIZE]
+ vmovdqu32 B, [STATE + 1*SHA512_DIGEST_ROW_SIZE]
+ vmovdqu32 C, [STATE + 2*SHA512_DIGEST_ROW_SIZE]
+ vmovdqu32 D, [STATE + 3*SHA512_DIGEST_ROW_SIZE]
+ vmovdqu32 E, [STATE + 4*SHA512_DIGEST_ROW_SIZE]
+ vmovdqu32 F, [STATE + 5*SHA512_DIGEST_ROW_SIZE]
+ vmovdqu32 G, [STATE + 6*SHA512_DIGEST_ROW_SIZE]
+ vmovdqu32 H, [STATE + 7*SHA512_DIGEST_ROW_SIZE]
+
+ lea TBL,[rel TABLE]
+ xor IDX, IDX
+	;; Read in the input data addresses, saving them in registers;
+	;; they serve as the per-lane data pointers throughout the loop
+ mov inp0, [STATE + _data_ptr_sha512 + 0*PTR_SZ]
+ mov inp1, [STATE + _data_ptr_sha512 + 1*PTR_SZ]
+ mov inp2, [STATE + _data_ptr_sha512 + 2*PTR_SZ]
+ mov inp3, [STATE + _data_ptr_sha512 + 3*PTR_SZ]
+ mov inp4, [STATE + _data_ptr_sha512 + 4*PTR_SZ]
+ mov inp5, [STATE + _data_ptr_sha512 + 5*PTR_SZ]
+ mov inp6, [STATE + _data_ptr_sha512 + 6*PTR_SZ]
+ mov inp7, [STATE + _data_ptr_sha512 + 7*PTR_SZ]
+ jmp lloop
+
+align 32
+lloop:
+ ;; Load 64-byte blocks of data into ZMM registers before
+	;; performing an 8x8 64-bit transpose.
+ ;; To speed up the transpose, data is loaded in chunks of 32 bytes,
+ ;; interleaving data between lane X and lane X+4.
+ ;; This way, final shuffles between top half and bottom half
+ ;; of the matrix are avoided.
+ TRANSPOSE8_U64_LOAD8 W0, W1, W2, W3, W4, W5, W6, W7, \
+ inp0, inp1, inp2, inp3, inp4, inp5, \
+ inp6, inp7, IDX
+
+ TRANSPOSE8_U64 W0, W1, W2, W3, W4, W5, W6, W7, TMP0, TMP1, TMP2, TMP3
+ ;; Load next 512 bytes
+ TRANSPOSE8_U64_LOAD8 W8, W9, W10, W11, W12, W13, W14, W15, \
+ inp0, inp1, inp2, inp3, inp4, inp5, \
+ inp6, inp7, IDX+SZ8
+
+ TRANSPOSE8_U64 W8, W9, W10, W11, W12, W13, W14, W15, TMP0, TMP1, TMP2, TMP3
+
+ vmovdqa32 TMP2, [rel PSHUFFLE_BYTE_FLIP_MASK]
+
+ vmovdqa32 TMP3, [TBL] ; First K
+
+ ; Save digests for later addition
+ vmovdqa32 [rsp + _DIGEST_SAVE + 64*0], A
+ vmovdqa32 [rsp + _DIGEST_SAVE + 64*1], B
+ vmovdqa32 [rsp + _DIGEST_SAVE + 64*2], C
+ vmovdqa32 [rsp + _DIGEST_SAVE + 64*3], D
+ vmovdqa32 [rsp + _DIGEST_SAVE + 64*4], E
+ vmovdqa32 [rsp + _DIGEST_SAVE + 64*5], F
+ vmovdqa32 [rsp + _DIGEST_SAVE + 64*6], G
+ vmovdqa32 [rsp + _DIGEST_SAVE + 64*7], H
+
+ add IDX, 128 ; increment by message block length in bytes
+
+%assign I 0
+%rep 16
+;;; little endian to big endian
+ vpshufb APPEND(W,I), APPEND(W,I), TMP2
+%assign I (I+1)
+%endrep
+
+ ; MSG Schedule for W0-W15 is now complete in registers
+	; Process the first (max rounds - 16) rounds
+	; Calculate the next Wt+16 once processing of Wt is complete and Wt is no longer needed
+ ; PROCESS_LOOP_00_79 APPEND(W,J), I, APPEND(W,K), APPEND(W,L), APPEND(W,M)
+
+%assign I 0
+%assign J 0
+%assign K 1
+%assign L 9
+%assign M 14
+%rep SHA_ROUNDS_LESS_16
+ PROCESS_LOOP APPEND(W,J), I
+ MSG_SCHED_ROUND_16_79 APPEND(W,J), APPEND(W,K), APPEND(W,L), APPEND(W,M)
+%assign I (I+1)
+%assign J ((J+1)% 16)
+%assign K ((K+1)% 16)
+%assign L ((L+1)% 16)
+%assign M ((M+1)% 16)
+%endrep
+	; Check if this is the last block
+ sub INP_SIZE, 1
+ je lastLoop
+
+ ; Process last 16 rounds
+ ; Read in next block msg data for use in first 16 words of msg sched
+%assign I SHA_ROUNDS_LESS_16
+%assign J 0
+%rep 16
+ PROCESS_LOOP APPEND(W,J), I
+%assign I (I+1)
+%assign J (J+1)
+%endrep
+ ; Add old digest
+ vpaddq A, A, [rsp + _DIGEST_SAVE + 64*0]
+ vpaddq B, B, [rsp + _DIGEST_SAVE + 64*1]
+ vpaddq C, C, [rsp + _DIGEST_SAVE + 64*2]
+ vpaddq D, D, [rsp + _DIGEST_SAVE + 64*3]
+ vpaddq E, E, [rsp + _DIGEST_SAVE + 64*4]
+ vpaddq F, F, [rsp + _DIGEST_SAVE + 64*5]
+ vpaddq G, G, [rsp + _DIGEST_SAVE + 64*6]
+ vpaddq H, H, [rsp + _DIGEST_SAVE + 64*7]
+
+ jmp lloop
+
+align 32
+lastLoop:
+ ; Process last 16 rounds
+%assign I SHA_ROUNDS_LESS_16
+%assign J 0
+%rep 16
+ PROCESS_LOOP APPEND(W,J), I
+%assign I (I+1)
+%assign J (J+1)
+%endrep
+
+ ; Add old digest
+ vpaddq A, A, [rsp + _DIGEST_SAVE + 64*0]
+ vpaddq B, B, [rsp + _DIGEST_SAVE + 64*1]
+ vpaddq C, C, [rsp + _DIGEST_SAVE + 64*2]
+ vpaddq D, D, [rsp + _DIGEST_SAVE + 64*3]
+ vpaddq E, E, [rsp + _DIGEST_SAVE + 64*4]
+ vpaddq F, F, [rsp + _DIGEST_SAVE + 64*5]
+ vpaddq G, G, [rsp + _DIGEST_SAVE + 64*6]
+ vpaddq H, H, [rsp + _DIGEST_SAVE + 64*7]
+
+ ; Write out digest
+ ;; results in A, B, C, D, E, F, G, H
+ vmovdqu32 [STATE + 0*SHA512_DIGEST_ROW_SIZE], A
+ vmovdqu32 [STATE + 1*SHA512_DIGEST_ROW_SIZE], B
+ vmovdqu32 [STATE + 2*SHA512_DIGEST_ROW_SIZE], C
+ vmovdqu32 [STATE + 3*SHA512_DIGEST_ROW_SIZE], D
+ vmovdqu32 [STATE + 4*SHA512_DIGEST_ROW_SIZE], E
+ vmovdqu32 [STATE + 5*SHA512_DIGEST_ROW_SIZE], F
+ vmovdqu32 [STATE + 6*SHA512_DIGEST_ROW_SIZE], G
+ vmovdqu32 [STATE + 7*SHA512_DIGEST_ROW_SIZE], H
+
+ ; update input pointers
+%assign I 0
+%rep 8
+ add [STATE + _data_ptr_sha512 + I*PTR_SZ], IDX
+%assign I (I+1)
+%endrep
+
+
+%ifdef SAFE_DATA
+ ;; Clear stack frame ((NUM_LANES*8)*64 bytes)
+ vpxorq zmm0, zmm0
+%assign i 0
+%rep (NUM_LANES*8)
+ vmovdqa64 [rsp + i*64], zmm0
+%assign i (i+1)
+%endrep
+%endif
+ mov rsp, [rsp + _RSP]
+;hash_done:
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
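
For reference, the per-lane computation that the PROCESS_LOOP and MSG_SCHED_ROUND_16_79
macros above vectorize across eight lanes follows the standard SHA-512 round and message
schedule formulas. A minimal scalar C sketch (illustrative only, not part of the library)
might look like this:

    #include <stdint.h>

    static inline uint64_t ror64(uint64_t x, unsigned n)
    {
            return (x >> n) | (x << (64 - n));
    }

    /* Scalar equivalent of one PROCESS_LOOP step for a single lane. */
    static void sha512_round(uint64_t s[8], uint64_t wt, uint64_t kt)
    {
            const uint64_t a = s[0], b = s[1], c = s[2], d = s[3];
            const uint64_t e = s[4], f = s[5], g = s[6], h = s[7];

            const uint64_t big_sigma1 = ror64(e, 14) ^ ror64(e, 18) ^ ror64(e, 41);
            const uint64_t ch = (e & f) ^ (~e & g);
            const uint64_t t1 = h + big_sigma1 + ch + kt + wt;

            const uint64_t big_sigma0 = ror64(a, 28) ^ ror64(a, 34) ^ ror64(a, 39);
            const uint64_t maj = (a & b) ^ (a & c) ^ (b & c);
            const uint64_t t2 = big_sigma0 + maj;

            /* ROTATE_ARGS: H<-G, G<-F, F<-E, E<-D+T1, D<-C, C<-B, B<-A, A<-T1+T2 */
            s[7] = g; s[6] = f; s[5] = e; s[4] = d + t1;
            s[3] = c; s[2] = b; s[1] = a; s[0] = t1 + t2;
    }

    /* Scalar equivalent of MSG_SCHED_ROUND_16_79: Wt for t >= 16. */
    static uint64_t sha512_sched(uint64_t wt_16, uint64_t wt_15,
                                 uint64_t wt_7, uint64_t wt_2)
    {
            const uint64_t s0 = ror64(wt_15, 1) ^ ror64(wt_15, 8) ^ (wt_15 >> 7);
            const uint64_t s1 = ror64(wt_2, 19) ^ ror64(wt_2, 61) ^ (wt_2 >> 6);

            return wt_16 + s0 + wt_7 + s1;
    }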
diff --git a/src/spdk/intel-ipsec-mb/const.asm b/src/spdk/intel-ipsec-mb/const.asm
new file mode 100644
index 000000000..69666039e
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/const.asm
@@ -0,0 +1,89 @@
+;;
+;; Copyright (c) 2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%include "include/os.asm"
+
+section .data
+default rel
+
+MKGLOBAL(len_shift_tab,data,internal)
+MKGLOBAL(len_mask_tab,data,internal)
+MKGLOBAL(padding_0x80_tab16,data,internal)
+MKGLOBAL(shift_tab_16,data,internal)
+
+;;; The following tables are used to insert a word into
+;;; a SIMD register and must be defined together.
+;;; If resized, update the len_tab_diff definition in the const.inc module.
+;;; Other modifications may require updates to dependent modules.
+
+;;; Table used to shuffle word to correct index
+;;; Used by macros:
+;;; - PINSRW_COMMON
+;;; - XPINSRW
+;;; - XVPINSRW
+align 16
+len_shift_tab:
+ db 0x00, 0x01, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ db 0xff, 0xff, 0x00, 0x01, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ db 0xff, 0xff, 0xff, 0xff, 0x00, 0x01, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ db 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x01, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ db 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x01, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ db 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x01, 0xff, 0xff, 0xff, 0xff,
+ db 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x01, 0xff, 0xff,
+ db 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x01
+
+;;; Table used to zero the word at a given index
+align 16
+len_mask_tab:
+ dw 0x0000, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
+ dw 0xffff, 0x0000, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
+ dw 0xffff, 0xffff, 0x0000, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
+ dw 0xffff, 0xffff, 0xffff, 0x0000, 0xffff, 0xffff, 0xffff, 0xffff,
+ dw 0xffff, 0xffff, 0xffff, 0xffff, 0x0000, 0xffff, 0xffff, 0xffff,
+ dw 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0x0000, 0xffff, 0xffff,
+ dw 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0x0000, 0xffff,
+ dw 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0x0000
+
+;;; Table to do 0x80 byte shift for padding prefix
+align 16
+padding_0x80_tab16:
+ db 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+ db 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+
+;;; Table for shifting bytes in 128 bit SIMD register
+align 16
+shift_tab_16:
+ db 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ db 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ db 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ db 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ db 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ db 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
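
The len_shift_tab and len_mask_tab rows above implement word insertion at a variable
index without a variable-index PINSRW: the shift row places the new word at the target
position (all other bytes are zeroed by the 0xff shuffle entries), the mask row clears
that word in the destination, and an OR combines the two. A rough C-intrinsics sketch,
assuming the tables were exported to C purely for illustration (the library does this in
the PINSRW_COMMON/XPINSRW/XVPINSRW asm macros):

    #include <stdint.h>
    #include <emmintrin.h>   /* SSE2 */
    #include <tmmintrin.h>   /* SSSE3: _mm_shuffle_epi8 */

    /* hypothetical C views of the asm tables above: 16 bytes / 8 words per row */
    extern const uint8_t  len_shift_tab[8][16];
    extern const uint16_t len_mask_tab[8][8];

    /* Insert 16-bit 'val' at word index 'idx' (0..7) of 'lens'. */
    static inline __m128i insert_word(__m128i lens, uint16_t val, unsigned idx)
    {
            const __m128i v     = _mm_cvtsi32_si128(val);  /* val in word 0 */
            const __m128i shuf  = _mm_loadu_si128((const __m128i *) len_shift_tab[idx]);
            const __m128i mask  = _mm_loadu_si128((const __m128i *) len_mask_tab[idx]);
            const __m128i moved = _mm_shuffle_epi8(v, shuf); /* val at word idx, rest 0 */

            return _mm_or_si128(_mm_and_si128(lens, mask), moved);
    }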
diff --git a/src/spdk/intel-ipsec-mb/constants.asm b/src/spdk/intel-ipsec-mb/constants.asm
new file mode 100644
index 000000000..6e5c5285c
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/constants.asm
@@ -0,0 +1,66 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+;;; Generic constants
+%define PTR_SZ 8
+
+;;; hash constants
+
+%define MD5_DIGEST_WORD_SIZE 4
+%define SHA1_DIGEST_WORD_SIZE 4
+%define SHA256_DIGEST_WORD_SIZE 4
+%define SHA512_DIGEST_WORD_SIZE 8
+;; AVX512 constants
+%define MAX_MD5_LANES 32
+%define MAX_SHA1_LANES 16
+%define MAX_SHA256_LANES 16
+%define MAX_SHA512_LANES 8
+
+%define NUM_MD5_DIGEST_WORDS 4
+%define NUM_SHA1_DIGEST_WORDS 5
+%define NUM_SHA256_DIGEST_WORDS 8
+%define NUM_SHA512_DIGEST_WORDS 8
+
+%define MD5_DIGEST_ROW_SIZE (MAX_MD5_LANES * MD5_DIGEST_WORD_SIZE)
+%define SHA1_DIGEST_ROW_SIZE (MAX_SHA1_LANES * SHA1_DIGEST_WORD_SIZE)
+%define SHA256_DIGEST_ROW_SIZE (MAX_SHA256_LANES * SHA256_DIGEST_WORD_SIZE)
+%define SHA512_DIGEST_ROW_SIZE (MAX_SHA512_LANES * SHA512_DIGEST_WORD_SIZE)
+
+%define MD5_DIGEST_SIZE (MD5_DIGEST_ROW_SIZE * NUM_MD5_DIGEST_WORDS)
+%define SHA1_DIGEST_SIZE (SHA1_DIGEST_ROW_SIZE * NUM_SHA1_DIGEST_WORDS)
+%define SHA256_DIGEST_SIZE (SHA256_DIGEST_ROW_SIZE * NUM_SHA256_DIGEST_WORDS)
+%define SHA512_DIGEST_SIZE (SHA512_DIGEST_ROW_SIZE * NUM_SHA512_DIGEST_WORDS)
+
+;; Used only by SHA-NI implementations
+;; Sanity checks to fail build if not satisfied
+%define SHA1NI_DIGEST_ROW_SIZE (NUM_SHA1_DIGEST_WORDS * SHA1_DIGEST_WORD_SIZE)
+%define SHA256NI_DIGEST_ROW_SIZE (NUM_SHA256_DIGEST_WORDS * SHA256_DIGEST_WORD_SIZE)
+
+%define MD5_BLK_SZ 128 ; in bytes
+%define SHA1_BLK_SZ 64 ; in bytes
+%define SHA256_BLK_SZ 64 ; in bytes
+%define SHA512_BLK_SZ 128 ; in bytes
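
The *_DIGEST_ROW_SIZE values describe a lane-interleaved digest layout: one "row" holds
the same digest word for every lane, which is why the multi-buffer code can load a whole
row straight into one SIMD register. A small C sketch of the SHA-512 case (the struct and
helper names are illustrative only):

    #include <stdint.h>

    #define MAX_SHA512_LANES        8
    #define NUM_SHA512_DIGEST_WORDS 8

    /* digest[word][lane]: row 0 is word A for all 8 lanes, row 1 is word B, ... */
    typedef struct {
            uint64_t digest[NUM_SHA512_DIGEST_WORDS][MAX_SHA512_LANES];
    } sha512_x8_digest_layout;

    static inline uint64_t
    get_digest_word(const sha512_x8_digest_layout *s,
                    unsigned word, unsigned lane)
    {
            /* byte offset = word * SHA512_DIGEST_ROW_SIZE + lane * 8 */
            return s->digest[word][lane];
    }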
diff --git a/src/spdk/intel-ipsec-mb/constants.h b/src/spdk/intel-ipsec-mb/constants.h
new file mode 100644
index 000000000..9c863b9ee
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/constants.h
@@ -0,0 +1,83 @@
+/*******************************************************************************
+ Copyright (c) 2012-2018, Intel Corporation
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*******************************************************************************/
+
+#ifndef IMB_CONSTANTS_H_
+#define IMB_CONSTANTS_H_
+
+/* define SHA1 constants */
+#define H0 0x67452301
+#define H1 0xefcdab89
+#define H2 0x98badcfe
+#define H3 0x10325476
+#define H4 0xc3d2e1f0
+#define SHA1_PAD_SIZE 8
+
+/* define SHA256 constants */
+#define SHA256_H0 0x6a09e667
+#define SHA256_H1 0xbb67ae85
+#define SHA256_H2 0x3c6ef372
+#define SHA256_H3 0xa54ff53a
+#define SHA256_H4 0x510e527f
+#define SHA256_H5 0x9b05688c
+#define SHA256_H6 0x1f83d9ab
+#define SHA256_H7 0x5be0cd19
+#define SHA256_PAD_SIZE 8
+
+/* define SHA224 constants */
+#define SHA224_H0 0xc1059ed8
+#define SHA224_H1 0x367cd507
+#define SHA224_H2 0x3070dd17
+#define SHA224_H3 0xf70e5939
+#define SHA224_H4 0xffc00b31
+#define SHA224_H5 0x68581511
+#define SHA224_H6 0x64f98fa7
+#define SHA224_H7 0xbefa4fa4
+#define SHA224_PAD_SIZE 8
+
+/* define SHA512 constants */
+#define SHA512_H0 0x6a09e667f3bcc908
+#define SHA512_H1 0xbb67ae8584caa73b
+#define SHA512_H2 0x3c6ef372fe94f82b
+#define SHA512_H3 0xa54ff53a5f1d36f1
+#define SHA512_H4 0x510e527fade682d1
+#define SHA512_H5 0x9b05688c2b3e6c1f
+#define SHA512_H6 0x1f83d9abfb41bd6b
+#define SHA512_H7 0x5be0cd19137e2179
+#define SHA512_PAD_SIZE 16
+
+/* define SHA384 constants */
+#define SHA384_H0 0xcbbb9d5dc1059ed8
+#define SHA384_H1 0x629a292a367cd507
+#define SHA384_H2 0x9159015a3070dd17
+#define SHA384_H3 0x152fecd8f70e5939
+#define SHA384_H4 0x67332667ffc00b31
+#define SHA384_H5 0x8eb44a8768581511
+#define SHA384_H6 0xdb0c2e0d64f98fa7
+#define SHA384_H7 0x47b5481dbefa4fa4
+#define SHA384_PAD_SIZE 16
+
+#endif /* IMB_CONSTANTS_H_ */
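
These initial hash values are consumed when a digest state is seeded before the first
block is processed; a minimal single-lane sketch (illustrative only):

    #include <stdint.h>
    #include "constants.h"

    static void sha512_init_state(uint64_t digest[8])
    {
            digest[0] = SHA512_H0; digest[1] = SHA512_H1;
            digest[2] = SHA512_H2; digest[3] = SHA512_H3;
            digest[4] = SHA512_H4; digest[5] = SHA512_H5;
            digest[6] = SHA512_H6; digest[7] = SHA512_H7;
    }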
diff --git a/src/spdk/intel-ipsec-mb/cpu_feature.c b/src/spdk/intel-ipsec-mb/cpu_feature.c
new file mode 100644
index 000000000..15f782794
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/cpu_feature.c
@@ -0,0 +1,230 @@
+/*******************************************************************************
+ Copyright (c) 2018, Intel Corporation
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*******************************************************************************/
+
+#include <stdint.h>
+#ifdef _WIN32
+#include <intrin.h>
+#endif
+
+#include "cpu_feature.h"
+
+struct cpuid_regs {
+ uint32_t eax;
+ uint32_t ebx;
+ uint32_t ecx;
+ uint32_t edx;
+};
+
+static struct cpuid_regs cpuid_1_0;
+static struct cpuid_regs cpuid_7_0;
+
+/*
+ * A C wrapper for CPUID opcode
+ *
+ * Parameters:
+ * [in] leaf - CPUID leaf number (EAX)
+ * [in] subleaf - CPUID sub-leaf number (ECX)
+ * [out] out - registers structure to store results of CPUID into
+ */
+static void
+__mbcpuid(const unsigned leaf, const unsigned subleaf, struct cpuid_regs *out)
+{
+#ifdef _WIN32
+ /* Windows */
+ int regs[4];
+
+ __cpuidex(regs, leaf, subleaf);
+ out->eax = regs[0];
+ out->ebx = regs[1];
+ out->ecx = regs[2];
+ out->edx = regs[3];
+#else
+ /* Linux */
+ asm volatile("mov %4, %%eax\n\t"
+ "mov %5, %%ecx\n\t"
+ "cpuid\n\t"
+ "mov %%eax, %0\n\t"
+ "mov %%ebx, %1\n\t"
+ "mov %%ecx, %2\n\t"
+ "mov %%edx, %3\n\t"
+ : "=g" (out->eax), "=g" (out->ebx), "=g" (out->ecx),
+ "=g" (out->edx)
+ : "g" (leaf), "g" (subleaf)
+ : "%eax", "%ebx", "%ecx", "%edx");
+#endif /* Linux */
+}
+
+static uint32_t detect_shani(void)
+{
+ /* Check presence of SHANI - bit 29 of EBX */
+ return (cpuid_7_0.ebx & (1 << 29));
+}
+
+static uint32_t detect_aesni(void)
+{
+ /* Check presence of AESNI - bit 25 of ECX */
+ return (cpuid_1_0.ecx & (1 << 25));
+}
+
+static uint32_t detect_pclmulqdq(void)
+{
+ /* Check presence of PCLMULQDQ - bit 1 of ECX */
+ return (cpuid_1_0.ecx & (1 << 1));
+}
+
+static uint32_t detect_cmov(void)
+{
+ /* Check presence of CMOV - bit 15 of EDX */
+ return (cpuid_1_0.edx & (1 << 15));
+}
+
+static uint32_t detect_sse42(void)
+{
+ /* Check presence of SSE4.2 - bit 20 of ECX */
+ return (cpuid_1_0.ecx & (1 << 20));
+}
+
+static uint32_t detect_avx(void)
+{
+ /* Check presence of AVX - bit 28 of ECX */
+ return (cpuid_1_0.ecx & (1 << 28));
+}
+
+static uint32_t detect_avx2(void)
+{
+ /* Check presence of AVX2 - bit 5 of EBX */
+ return (cpuid_7_0.ebx & (1 << 5));
+}
+
+static uint32_t detect_avx512f(void)
+{
+ /* Check presence of AVX512F - bit 16 of EBX */
+ return (cpuid_7_0.ebx & (1 << 16));
+}
+
+static uint32_t detect_avx512dq(void)
+{
+ /* Check presence of AVX512DQ - bit 17 of EBX */
+ return (cpuid_7_0.ebx & (1 << 17));
+}
+
+static uint32_t detect_avx512cd(void)
+{
+ /* Check presence of AVX512CD - bit 28 of EBX */
+ return (cpuid_7_0.ebx & (1 << 28));
+}
+
+static uint32_t detect_avx512bw(void)
+{
+ /* Check presence of AVX512BW - bit 30 of EBX */
+ return (cpuid_7_0.ebx & (1 << 30));
+}
+
+static uint32_t detect_avx512vl(void)
+{
+ /* Check presence of AVX512VL - bit 31 of EBX */
+ return (cpuid_7_0.ebx & (1 << 31));
+}
+
+static uint32_t detect_vaes(void)
+{
+ /* Check presence of VAES - bit 9 of ECX */
+ return (cpuid_7_0.ecx & (1 << 9));
+}
+
+static uint32_t detect_vpclmulqdq(void)
+{
+	/* Check presence of VPCLMULQDQ - bit 10 of ECX */
+ return (cpuid_7_0.ecx & (1 << 10));
+}
+
+uint64_t cpu_feature_detect(void)
+{
+ static const struct {
+ unsigned req_leaf_number;
+ uint64_t feat;
+ uint32_t (*detect_fn)(void);
+ } feat_tab[] = {
+ { 7, IMB_FEATURE_SHANI, detect_shani },
+ { 1, IMB_FEATURE_AESNI, detect_aesni },
+ { 1, IMB_FEATURE_PCLMULQDQ, detect_pclmulqdq },
+ { 1, IMB_FEATURE_CMOV, detect_cmov },
+ { 1, IMB_FEATURE_SSE4_2, detect_sse42 },
+ { 1, IMB_FEATURE_AVX, detect_avx },
+ { 7, IMB_FEATURE_AVX2, detect_avx2 },
+ { 7, IMB_FEATURE_AVX512F, detect_avx512f },
+ { 7, IMB_FEATURE_AVX512DQ, detect_avx512dq },
+ { 7, IMB_FEATURE_AVX512CD, detect_avx512cd },
+ { 7, IMB_FEATURE_AVX512BW, detect_avx512bw },
+ { 7, IMB_FEATURE_AVX512VL, detect_avx512vl },
+ { 7, IMB_FEATURE_VAES, detect_vaes },
+ { 7, IMB_FEATURE_VPCLMULQDQ, detect_vpclmulqdq },
+ };
+ struct cpuid_regs r;
+ unsigned hi_leaf_number = 0;
+ uint64_t features = 0;
+ unsigned i;
+
+ /* Get highest supported CPUID leaf number */
+ __mbcpuid(0x0, 0x0, &r);
+ hi_leaf_number = r.eax;
+
+	/* Cache the most commonly used CPUID leaves up front to speed up the detection */
+ if (hi_leaf_number >= 1)
+ __mbcpuid(0x1, 0x0, &cpuid_1_0);
+
+ if (hi_leaf_number >= 7)
+ __mbcpuid(0x7, 0x0, &cpuid_7_0);
+
+ for (i = 0; i < IMB_DIM(feat_tab); i++) {
+ if (hi_leaf_number < feat_tab[i].req_leaf_number)
+ continue;
+
+ if (feat_tab[i].detect_fn() != 0)
+ features |= feat_tab[i].feat;
+ }
+
+#ifdef SAFE_DATA
+ features |= IMB_FEATURE_SAFE_DATA;
+#endif
+#ifdef SAFE_PARAM
+ features |= IMB_FEATURE_SAFE_PARAM;
+#endif
+
+ return features;
+}
+
+uint64_t cpu_feature_adjust(const uint64_t flags, uint64_t features)
+{
+ if (flags & IMB_FLAG_SHANI_OFF)
+ features &= ~IMB_FEATURE_SHANI;
+
+ if (flags & IMB_FLAG_AESNI_OFF)
+ features &= ~IMB_FEATURE_AESNI;
+
+ return features;
+}
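
A minimal consumer sketch for the two functions above: detect features once, apply any
user-requested overrides, then pick a code path. The chosen paths and the extra include
of intel-ipsec-mb.h (for the IMB_FEATURE_*/IMB_FLAG_* definitions) are illustrative:

    #include <stdint.h>
    #include <stdio.h>
    #include "intel-ipsec-mb.h"
    #include "cpu_feature.h"

    static void pick_arch(const uint64_t flags)
    {
            uint64_t feat = cpu_feature_detect();

            /* honour IMB_FLAG_SHANI_OFF / IMB_FLAG_AESNI_OFF if requested */
            feat = cpu_feature_adjust(flags, feat);

            if (feat & IMB_FEATURE_AVX512F)
                    puts("using AVX512 code path");
            else if (feat & IMB_FEATURE_AVX2)
                    puts("using AVX2 code path");
            else
                    puts("using SSE code path");
    }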
diff --git a/src/spdk/intel-ipsec-mb/des.h b/src/spdk/intel-ipsec-mb/des.h
new file mode 100644
index 000000000..ad4d338ec
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/des.h
@@ -0,0 +1,111 @@
+/*******************************************************************************
+ Copyright (c) 2017-2018, Intel Corporation
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*******************************************************************************/
+
+#ifndef IMB_DES_H
+#define IMB_DES_H
+
+#include <stdint.h>
+
+/**
+ * @brief DES CBC encryption
+ *
+ * @param input source buffer with plain text
+ * @param output destination buffer for cipher text
+ * @param size number of bytes to encrypt (multiple of 8)
+ * @param ks pointer to key schedule structure
+ * @param ivec pointer to initialization vector
+ */
+void des_enc_cbc_basic(const void *input, void *output, const int size,
+ const uint64_t *ks, const uint64_t *ivec);
+
+/**
+ * @brief DES CBC decryption
+ *
+ * @param input source buffer with cipher text
+ * @param output destination buffer for plain text
+ * @param size number of bytes to decrypt (multiple of 8)
+ * @param ks pointer to key schedule structure
+ * @param ivec pointer to initialization vector
+ */
+void des_dec_cbc_basic(const void *input, void *output, const int size,
+ const uint64_t *ks, const uint64_t *ivec);
+
+/**
+ * @brief 3DES CBC encryption
+ *
+ * @param input source buffer with plain text
+ * @param output destination buffer for cipher text
+ * @param size number of bytes to encrypt (multiple of 8)
+ * @param ks1 pointer to key schedule 1 structure
+ * @param ks2 pointer to key schedule 2 structure
+ * @param ks3 pointer to key schedule 3 structure
+ * @param ivec pointer to initialization vector
+ */
+void des3_enc_cbc_basic(const void *input, void *output, const int size,
+ const uint64_t *ks1, const uint64_t *ks2,
+ const uint64_t *ks3, const uint64_t *ivec);
+
+/**
+ * @brief 3DES CBC decryption
+ *
+ * @param input source buffer with cipher text
+ * @param output destination buffer for plain text
+ * @param size number of bytes to decrypt (multiple of 8)
+ * @param ks1 pointer to key schedule 1 structure
+ * @param ks2 pointer to key schedule 2 structure
+ * @param ks3 pointer to key schedule 3 structure
+ * @param ivec pointer to initialization vector
+ */
+void des3_dec_cbc_basic(const void *input, void *output, const int size,
+ const uint64_t *ks1, const uint64_t *ks2,
+ const uint64_t *ks3, const uint64_t *ivec);
+
+/**
+ * @brief DOCSIS DES encryption
+ *
+ * @param input source buffer with plain text
+ * @param output destination buffer for cipher text
+ * @param size number of bytes to encrypt
+ * @param ks pointer to key schedule structure
+ * @param ivec pointer to initialization vector
+ */
+void docsis_des_enc_basic(const void *input, void *output, const int size,
+ const uint64_t *ks, const uint64_t *ivec);
+
+/**
+ * @brief DOCSIS DES decryption
+ *
+ * @param input source buffer with cipher text
+ * @param output destination buffer for plain text
+ * @param size number of bytes to decrypt
+ * @param ks pointer to key schedule structure
+ * @param ivec pointer to initialization vector
+ */
+void docsis_des_dec_basic(const void *input, void *output, const int size,
+ const uint64_t *ks, const uint64_t *ivec);
+
+#endif /* IMB_DES_H */
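
A short round-trip sketch using the CBC helpers declared above. It assumes a 16-entry DES
key schedule 'ks' has already been expanded from an 8-byte key elsewhere in the library
(key expansion is not part of this header):

    #include <stdint.h>
    #include <string.h>
    #include "des.h"

    static int des_cbc_roundtrip(const uint64_t ks[16], const uint64_t iv)
    {
            uint8_t plain[32], cipher[32], check[32];   /* 4 blocks of 8 bytes */

            memset(plain, 0xa5, sizeof(plain));

            des_enc_cbc_basic(plain, cipher, sizeof(plain), ks, &iv);
            des_dec_cbc_basic(cipher, check, sizeof(cipher), ks, &iv);

            return memcmp(plain, check, sizeof(plain)) == 0;   /* 1 on success */
    }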
diff --git a/src/spdk/intel-ipsec-mb/des_basic.c b/src/spdk/intel-ipsec-mb/des_basic.c
new file mode 100644
index 000000000..8dc28712e
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/des_basic.c
@@ -0,0 +1,750 @@
+/*******************************************************************************
+ Copyright (c) 2017-2018, Intel Corporation
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*******************************************************************************/
+
+/* basic DES implementation */
+
+#include <stdint.h>
+#include <string.h>
+
+#include "intel-ipsec-mb.h"
+#include "des.h"
+#include "des_utils.h"
+#include "include/clear_regs_mem.h"
+#include "include/constant_lookup.h"
+
+__forceinline
+void permute_operation(uint32_t *pa, uint32_t *pb,
+ const uint32_t n, const uint32_t m)
+{
+ register uint32_t t = (*pb ^ (*pa >> n)) & m;
+
+ *pb ^= t;
+ *pa ^= (t << n);
+}
+
+/* initial permutation */
+__forceinline
+void ip_z(uint32_t *pl, uint32_t *pr)
+{
+ permute_operation(pr, pl, 4, 0x0f0f0f0f);
+ permute_operation(pl, pr, 16, 0x0000ffff);
+ permute_operation(pr, pl, 2, 0x33333333);
+ permute_operation(pl, pr, 8, 0x00ff00ff);
+ permute_operation(pr, pl, 1, 0x55555555);
+}
+
+/* final permutation */
+__forceinline
+void fp_z(uint32_t *pl, uint32_t *pr)
+{
+ permute_operation(pl, pr, 1, 0x55555555);
+ permute_operation(pr, pl, 8, 0x00ff00ff);
+ permute_operation(pl, pr, 2, 0x33333333);
+ permute_operation(pr, pl, 16, 0x0000ffff);
+ permute_operation(pl, pr, 4, 0x0f0f0f0f);
+}
+
+/* 1st part of DES round
+ * - permutes and expands R (32 bits) into 48 bits
+ */
+__forceinline
+uint64_t e_phase(const uint64_t R)
+{
+ /* E phase as in FIPS46-3 and also 8x6 to 8x8 expansion.
+ *
+ * Bit selection table for this operation looks as follows:
+ * 32, 1, 2, 3, 4, 5, X, X,
+ * 4, 5, 6, 7, 8, 9, X, X,
+ * 8, 9, 10, 11, 12, 13, X, X,
+ * 12, 13, 14, 15, 16, 17, X, X,
+ * 16, 17, 18, 19, 20, 21, X, X,
+ * 20, 21, 22, 23, 24, 25, X, X,
+ * 24, 25, 26, 27, 28, 29, X, X,
+ * 28, 29, 30, 31, 32, 1, X, X
+ * where 'X' is bit value 0.
+ */
+ return ((R << 1) & UINT64_C(0x3e)) | ((R >> 31) & UINT64_C(1)) |
+ ((R << 5) & UINT64_C(0x3f00)) |
+ ((R << 9) & UINT64_C(0x3f0000)) |
+ ((R << 13) & UINT64_C(0x3f000000)) |
+ ((R << 17) & UINT64_C(0x3f00000000)) |
+ ((R << 21) & UINT64_C(0x3f0000000000)) |
+ ((R << 25) & UINT64_C(0x3f000000000000)) |
+ ((R << 29) & UINT64_C(0x1f00000000000000)) |
+ ((R & UINT64_C(1)) << 61);
+}
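
/* Reference-only sketch (not part of the original file and never called by it):
 * the same E expansion written directly from the bit-selection table in the
 * comment above, one 6-bit output group per byte.  Handy for convincing oneself
 * that the shift/mask form of e_phase() above is equivalent. */
static uint64_t e_phase_reference(const uint64_t R)
{
        static const uint8_t sel[8][6] = {
                {32,  1,  2,  3,  4,  5}, { 4,  5,  6,  7,  8,  9},
                { 8,  9, 10, 11, 12, 13}, {12, 13, 14, 15, 16, 17},
                {16, 17, 18, 19, 20, 21}, {20, 21, 22, 23, 24, 25},
                {24, 25, 26, 27, 28, 29}, {28, 29, 30, 31, 32,  1}
        };
        uint64_t out = 0;
        unsigned g, b;

        for (g = 0; g < 8; g++)
                for (b = 0; b < 6; b++) {
                        /* FIPS bit k of R is bit (k - 1) here, bit 1 = LSB */
                        const uint64_t bit = (R >> (sel[g][b] - 1)) & 1;

                        out |= bit << (g * 8 + b);
                }
        return out;
}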
+
+static const uint32_t sbox0p[64] = {
+ UINT32_C(0x00410100), UINT32_C(0x00010000),
+ UINT32_C(0x40400000), UINT32_C(0x40410100),
+ UINT32_C(0x00400000), UINT32_C(0x40010100),
+ UINT32_C(0x40010000), UINT32_C(0x40400000),
+ UINT32_C(0x40010100), UINT32_C(0x00410100),
+ UINT32_C(0x00410000), UINT32_C(0x40000100),
+ UINT32_C(0x40400100), UINT32_C(0x00400000),
+ UINT32_C(0x00000000), UINT32_C(0x40010000),
+ UINT32_C(0x00010000), UINT32_C(0x40000000),
+ UINT32_C(0x00400100), UINT32_C(0x00010100),
+ UINT32_C(0x40410100), UINT32_C(0x00410000),
+ UINT32_C(0x40000100), UINT32_C(0x00400100),
+ UINT32_C(0x40000000), UINT32_C(0x00000100),
+ UINT32_C(0x00010100), UINT32_C(0x40410000),
+ UINT32_C(0x00000100), UINT32_C(0x40400100),
+ UINT32_C(0x40410000), UINT32_C(0x00000000),
+ UINT32_C(0x00000000), UINT32_C(0x40410100),
+ UINT32_C(0x00400100), UINT32_C(0x40010000),
+ UINT32_C(0x00410100), UINT32_C(0x00010000),
+ UINT32_C(0x40000100), UINT32_C(0x00400100),
+ UINT32_C(0x40410000), UINT32_C(0x00000100),
+ UINT32_C(0x00010100), UINT32_C(0x40400000),
+ UINT32_C(0x40010100), UINT32_C(0x40000000),
+ UINT32_C(0x40400000), UINT32_C(0x00410000),
+ UINT32_C(0x40410100), UINT32_C(0x00010100),
+ UINT32_C(0x00410000), UINT32_C(0x40400100),
+ UINT32_C(0x00400000), UINT32_C(0x40000100),
+ UINT32_C(0x40010000), UINT32_C(0x00000000),
+ UINT32_C(0x00010000), UINT32_C(0x00400000),
+ UINT32_C(0x40400100), UINT32_C(0x00410100),
+ UINT32_C(0x40000000), UINT32_C(0x40410000),
+ UINT32_C(0x00000100), UINT32_C(0x40010100)
+};
+
+static const uint32_t sbox1p[64] = {
+ UINT32_C(0x08021002), UINT32_C(0x00000000),
+ UINT32_C(0x00021000), UINT32_C(0x08020000),
+ UINT32_C(0x08000002), UINT32_C(0x00001002),
+ UINT32_C(0x08001000), UINT32_C(0x00021000),
+ UINT32_C(0x00001000), UINT32_C(0x08020002),
+ UINT32_C(0x00000002), UINT32_C(0x08001000),
+ UINT32_C(0x00020002), UINT32_C(0x08021000),
+ UINT32_C(0x08020000), UINT32_C(0x00000002),
+ UINT32_C(0x00020000), UINT32_C(0x08001002),
+ UINT32_C(0x08020002), UINT32_C(0x00001000),
+ UINT32_C(0x00021002), UINT32_C(0x08000000),
+ UINT32_C(0x00000000), UINT32_C(0x00020002),
+ UINT32_C(0x08001002), UINT32_C(0x00021002),
+ UINT32_C(0x08021000), UINT32_C(0x08000002),
+ UINT32_C(0x08000000), UINT32_C(0x00020000),
+ UINT32_C(0x00001002), UINT32_C(0x08021002),
+ UINT32_C(0x00020002), UINT32_C(0x08021000),
+ UINT32_C(0x08001000), UINT32_C(0x00021002),
+ UINT32_C(0x08021002), UINT32_C(0x00020002),
+ UINT32_C(0x08000002), UINT32_C(0x00000000),
+ UINT32_C(0x08000000), UINT32_C(0x00001002),
+ UINT32_C(0x00020000), UINT32_C(0x08020002),
+ UINT32_C(0x00001000), UINT32_C(0x08000000),
+ UINT32_C(0x00021002), UINT32_C(0x08001002),
+ UINT32_C(0x08021000), UINT32_C(0x00001000),
+ UINT32_C(0x00000000), UINT32_C(0x08000002),
+ UINT32_C(0x00000002), UINT32_C(0x08021002),
+ UINT32_C(0x00021000), UINT32_C(0x08020000),
+ UINT32_C(0x08020002), UINT32_C(0x00020000),
+ UINT32_C(0x00001002), UINT32_C(0x08001000),
+ UINT32_C(0x08001002), UINT32_C(0x00000002),
+ UINT32_C(0x08020000), UINT32_C(0x00021000)
+};
+
+static const uint32_t sbox2p[64] = {
+ UINT32_C(0x20800000), UINT32_C(0x00808020),
+ UINT32_C(0x00000020), UINT32_C(0x20800020),
+ UINT32_C(0x20008000), UINT32_C(0x00800000),
+ UINT32_C(0x20800020), UINT32_C(0x00008020),
+ UINT32_C(0x00800020), UINT32_C(0x00008000),
+ UINT32_C(0x00808000), UINT32_C(0x20000000),
+ UINT32_C(0x20808020), UINT32_C(0x20000020),
+ UINT32_C(0x20000000), UINT32_C(0x20808000),
+ UINT32_C(0x00000000), UINT32_C(0x20008000),
+ UINT32_C(0x00808020), UINT32_C(0x00000020),
+ UINT32_C(0x20000020), UINT32_C(0x20808020),
+ UINT32_C(0x00008000), UINT32_C(0x20800000),
+ UINT32_C(0x20808000), UINT32_C(0x00800020),
+ UINT32_C(0x20008020), UINT32_C(0x00808000),
+ UINT32_C(0x00008020), UINT32_C(0x00000000),
+ UINT32_C(0x00800000), UINT32_C(0x20008020),
+ UINT32_C(0x00808020), UINT32_C(0x00000020),
+ UINT32_C(0x20000000), UINT32_C(0x00008000),
+ UINT32_C(0x20000020), UINT32_C(0x20008000),
+ UINT32_C(0x00808000), UINT32_C(0x20800020),
+ UINT32_C(0x00000000), UINT32_C(0x00808020),
+ UINT32_C(0x00008020), UINT32_C(0x20808000),
+ UINT32_C(0x20008000), UINT32_C(0x00800000),
+ UINT32_C(0x20808020), UINT32_C(0x20000000),
+ UINT32_C(0x20008020), UINT32_C(0x20800000),
+ UINT32_C(0x00800000), UINT32_C(0x20808020),
+ UINT32_C(0x00008000), UINT32_C(0x00800020),
+ UINT32_C(0x20800020), UINT32_C(0x00008020),
+ UINT32_C(0x00800020), UINT32_C(0x00000000),
+ UINT32_C(0x20808000), UINT32_C(0x20000020),
+ UINT32_C(0x20800000), UINT32_C(0x20008020),
+ UINT32_C(0x00000020), UINT32_C(0x00808000)
+};
+
+static const uint32_t sbox3p[64] = {
+ UINT32_C(0x00080201), UINT32_C(0x02000200),
+ UINT32_C(0x00000001), UINT32_C(0x02080201),
+ UINT32_C(0x00000000), UINT32_C(0x02080000),
+ UINT32_C(0x02000201), UINT32_C(0x00080001),
+ UINT32_C(0x02080200), UINT32_C(0x02000001),
+ UINT32_C(0x02000000), UINT32_C(0x00000201),
+ UINT32_C(0x02000001), UINT32_C(0x00080201),
+ UINT32_C(0x00080000), UINT32_C(0x02000000),
+ UINT32_C(0x02080001), UINT32_C(0x00080200),
+ UINT32_C(0x00000200), UINT32_C(0x00000001),
+ UINT32_C(0x00080200), UINT32_C(0x02000201),
+ UINT32_C(0x02080000), UINT32_C(0x00000200),
+ UINT32_C(0x00000201), UINT32_C(0x00000000),
+ UINT32_C(0x00080001), UINT32_C(0x02080200),
+ UINT32_C(0x02000200), UINT32_C(0x02080001),
+ UINT32_C(0x02080201), UINT32_C(0x00080000),
+ UINT32_C(0x02080001), UINT32_C(0x00000201),
+ UINT32_C(0x00080000), UINT32_C(0x02000001),
+ UINT32_C(0x00080200), UINT32_C(0x02000200),
+ UINT32_C(0x00000001), UINT32_C(0x02080000),
+ UINT32_C(0x02000201), UINT32_C(0x00000000),
+ UINT32_C(0x00000200), UINT32_C(0x00080001),
+ UINT32_C(0x00000000), UINT32_C(0x02080001),
+ UINT32_C(0x02080200), UINT32_C(0x00000200),
+ UINT32_C(0x02000000), UINT32_C(0x02080201),
+ UINT32_C(0x00080201), UINT32_C(0x00080000),
+ UINT32_C(0x02080201), UINT32_C(0x00000001),
+ UINT32_C(0x02000200), UINT32_C(0x00080201),
+ UINT32_C(0x00080001), UINT32_C(0x00080200),
+ UINT32_C(0x02080000), UINT32_C(0x02000201),
+ UINT32_C(0x00000201), UINT32_C(0x02000000),
+ UINT32_C(0x02000001), UINT32_C(0x02080200)
+};
+
+static const uint32_t sbox4p[64] = {
+ UINT32_C(0x01000000), UINT32_C(0x00002000),
+ UINT32_C(0x00000080), UINT32_C(0x01002084),
+ UINT32_C(0x01002004), UINT32_C(0x01000080),
+ UINT32_C(0x00002084), UINT32_C(0x01002000),
+ UINT32_C(0x00002000), UINT32_C(0x00000004),
+ UINT32_C(0x01000004), UINT32_C(0x00002080),
+ UINT32_C(0x01000084), UINT32_C(0x01002004),
+ UINT32_C(0x01002080), UINT32_C(0x00000000),
+ UINT32_C(0x00002080), UINT32_C(0x01000000),
+ UINT32_C(0x00002004), UINT32_C(0x00000084),
+ UINT32_C(0x01000080), UINT32_C(0x00002084),
+ UINT32_C(0x00000000), UINT32_C(0x01000004),
+ UINT32_C(0x00000004), UINT32_C(0x01000084),
+ UINT32_C(0x01002084), UINT32_C(0x00002004),
+ UINT32_C(0x01002000), UINT32_C(0x00000080),
+ UINT32_C(0x00000084), UINT32_C(0x01002080),
+ UINT32_C(0x01002080), UINT32_C(0x01000084),
+ UINT32_C(0x00002004), UINT32_C(0x01002000),
+ UINT32_C(0x00002000), UINT32_C(0x00000004),
+ UINT32_C(0x01000004), UINT32_C(0x01000080),
+ UINT32_C(0x01000000), UINT32_C(0x00002080),
+ UINT32_C(0x01002084), UINT32_C(0x00000000),
+ UINT32_C(0x00002084), UINT32_C(0x01000000),
+ UINT32_C(0x00000080), UINT32_C(0x00002004),
+ UINT32_C(0x01000084), UINT32_C(0x00000080),
+ UINT32_C(0x00000000), UINT32_C(0x01002084),
+ UINT32_C(0x01002004), UINT32_C(0x01002080),
+ UINT32_C(0x00000084), UINT32_C(0x00002000),
+ UINT32_C(0x00002080), UINT32_C(0x01002004),
+ UINT32_C(0x01000080), UINT32_C(0x00000084),
+ UINT32_C(0x00000004), UINT32_C(0x00002084),
+ UINT32_C(0x01002000), UINT32_C(0x01000004)
+};
+
+const uint32_t sbox5p[64] = {
+ UINT32_C(0x10000008), UINT32_C(0x00040008),
+ UINT32_C(0x00000000), UINT32_C(0x10040400),
+ UINT32_C(0x00040008), UINT32_C(0x00000400),
+ UINT32_C(0x10000408), UINT32_C(0x00040000),
+ UINT32_C(0x00000408), UINT32_C(0x10040408),
+ UINT32_C(0x00040400), UINT32_C(0x10000000),
+ UINT32_C(0x10000400), UINT32_C(0x10000008),
+ UINT32_C(0x10040000), UINT32_C(0x00040408),
+ UINT32_C(0x00040000), UINT32_C(0x10000408),
+ UINT32_C(0x10040008), UINT32_C(0x00000000),
+ UINT32_C(0x00000400), UINT32_C(0x00000008),
+ UINT32_C(0x10040400), UINT32_C(0x10040008),
+ UINT32_C(0x10040408), UINT32_C(0x10040000),
+ UINT32_C(0x10000000), UINT32_C(0x00000408),
+ UINT32_C(0x00000008), UINT32_C(0x00040400),
+ UINT32_C(0x00040408), UINT32_C(0x10000400),
+ UINT32_C(0x00000408), UINT32_C(0x10000000),
+ UINT32_C(0x10000400), UINT32_C(0x00040408),
+ UINT32_C(0x10040400), UINT32_C(0x00040008),
+ UINT32_C(0x00000000), UINT32_C(0x10000400),
+ UINT32_C(0x10000000), UINT32_C(0x00000400),
+ UINT32_C(0x10040008), UINT32_C(0x00040000),
+ UINT32_C(0x00040008), UINT32_C(0x10040408),
+ UINT32_C(0x00040400), UINT32_C(0x00000008),
+ UINT32_C(0x10040408), UINT32_C(0x00040400),
+ UINT32_C(0x00040000), UINT32_C(0x10000408),
+ UINT32_C(0x10000008), UINT32_C(0x10040000),
+ UINT32_C(0x00040408), UINT32_C(0x00000000),
+ UINT32_C(0x00000400), UINT32_C(0x10000008),
+ UINT32_C(0x10000408), UINT32_C(0x10040400),
+ UINT32_C(0x10040000), UINT32_C(0x00000408),
+ UINT32_C(0x00000008), UINT32_C(0x10040008)
+};
+
+static const uint32_t sbox6p[64] = {
+ UINT32_C(0x00000800), UINT32_C(0x00000040),
+ UINT32_C(0x00200040), UINT32_C(0x80200000),
+ UINT32_C(0x80200840), UINT32_C(0x80000800),
+ UINT32_C(0x00000840), UINT32_C(0x00000000),
+ UINT32_C(0x00200000), UINT32_C(0x80200040),
+ UINT32_C(0x80000040), UINT32_C(0x00200800),
+ UINT32_C(0x80000000), UINT32_C(0x00200840),
+ UINT32_C(0x00200800), UINT32_C(0x80000040),
+ UINT32_C(0x80200040), UINT32_C(0x00000800),
+ UINT32_C(0x80000800), UINT32_C(0x80200840),
+ UINT32_C(0x00000000), UINT32_C(0x00200040),
+ UINT32_C(0x80200000), UINT32_C(0x00000840),
+ UINT32_C(0x80200800), UINT32_C(0x80000840),
+ UINT32_C(0x00200840), UINT32_C(0x80000000),
+ UINT32_C(0x80000840), UINT32_C(0x80200800),
+ UINT32_C(0x00000040), UINT32_C(0x00200000),
+ UINT32_C(0x80000840), UINT32_C(0x00200800),
+ UINT32_C(0x80200800), UINT32_C(0x80000040),
+ UINT32_C(0x00000800), UINT32_C(0x00000040),
+ UINT32_C(0x00200000), UINT32_C(0x80200800),
+ UINT32_C(0x80200040), UINT32_C(0x80000840),
+ UINT32_C(0x00000840), UINT32_C(0x00000000),
+ UINT32_C(0x00000040), UINT32_C(0x80200000),
+ UINT32_C(0x80000000), UINT32_C(0x00200040),
+ UINT32_C(0x00000000), UINT32_C(0x80200040),
+ UINT32_C(0x00200040), UINT32_C(0x00000840),
+ UINT32_C(0x80000040), UINT32_C(0x00000800),
+ UINT32_C(0x80200840), UINT32_C(0x00200000),
+ UINT32_C(0x00200840), UINT32_C(0x80000000),
+ UINT32_C(0x80000800), UINT32_C(0x80200840),
+ UINT32_C(0x80200000), UINT32_C(0x00200840),
+ UINT32_C(0x00200800), UINT32_C(0x80000800)
+};
+
+static const uint32_t sbox7p[64] = {
+ UINT32_C(0x04100010), UINT32_C(0x04104000),
+ UINT32_C(0x00004010), UINT32_C(0x00000000),
+ UINT32_C(0x04004000), UINT32_C(0x00100010),
+ UINT32_C(0x04100000), UINT32_C(0x04104010),
+ UINT32_C(0x00000010), UINT32_C(0x04000000),
+ UINT32_C(0x00104000), UINT32_C(0x00004010),
+ UINT32_C(0x00104010), UINT32_C(0x04004010),
+ UINT32_C(0x04000010), UINT32_C(0x04100000),
+ UINT32_C(0x00004000), UINT32_C(0x00104010),
+ UINT32_C(0x00100010), UINT32_C(0x04004000),
+ UINT32_C(0x04104010), UINT32_C(0x04000010),
+ UINT32_C(0x00000000), UINT32_C(0x00104000),
+ UINT32_C(0x04000000), UINT32_C(0x00100000),
+ UINT32_C(0x04004010), UINT32_C(0x04100010),
+ UINT32_C(0x00100000), UINT32_C(0x00004000),
+ UINT32_C(0x04104000), UINT32_C(0x00000010),
+ UINT32_C(0x00100000), UINT32_C(0x00004000),
+ UINT32_C(0x04000010), UINT32_C(0x04104010),
+ UINT32_C(0x00004010), UINT32_C(0x04000000),
+ UINT32_C(0x00000000), UINT32_C(0x00104000),
+ UINT32_C(0x04100010), UINT32_C(0x04004010),
+ UINT32_C(0x04004000), UINT32_C(0x00100010),
+ UINT32_C(0x04104000), UINT32_C(0x00000010),
+ UINT32_C(0x00100010), UINT32_C(0x04004000),
+ UINT32_C(0x04104010), UINT32_C(0x00100000),
+ UINT32_C(0x04100000), UINT32_C(0x04000010),
+ UINT32_C(0x00104000), UINT32_C(0x00004010),
+ UINT32_C(0x04004010), UINT32_C(0x04100000),
+ UINT32_C(0x00000010), UINT32_C(0x04104000),
+ UINT32_C(0x00104010), UINT32_C(0x00000000),
+ UINT32_C(0x04000000), UINT32_C(0x04100010),
+ UINT32_C(0x00004000), UINT32_C(0x00104010)
+};
+
+__forceinline
+uint32_t fRK(const uint32_t R, const uint64_t K)
+{
+ uint64_t x;
+
+ /* Combined e-phase and 8x6bits to 8x8bits expansion.
+ * 32 bits -> 48 bits permutation
+ */
+ x = e_phase((uint64_t) R) ^ K;
+
+ /* Combined s-box and p-phase.
+ * s-box: 48 bits -> 32 bits
+	 * p-phase: 32 bits -> 32 bits permutation
+ */
+ return ((LOOKUP32_SSE(sbox0p, ((x >> (8 * 0)) & 0x3f),
+ sizeof(sbox0p))) |
+ (LOOKUP32_SSE(sbox1p, ((x >> (8 * 1)) & 0x3f),
+ sizeof(sbox1p))) |
+ (LOOKUP32_SSE(sbox2p, ((x >> (8 * 2)) & 0x3f),
+ sizeof(sbox2p))) |
+ (LOOKUP32_SSE(sbox3p, ((x >> (8 * 3)) & 0x3f),
+ sizeof(sbox3p))) |
+ (LOOKUP32_SSE(sbox4p, ((x >> (8 * 4)) & 0x3f),
+ sizeof(sbox4p))) |
+ (LOOKUP32_SSE(sbox5p, ((x >> (8 * 5)) & 0x3f),
+ sizeof(sbox5p))) |
+ (LOOKUP32_SSE(sbox6p, ((x >> (8 * 6)) & 0x3f),
+ sizeof(sbox6p))) |
+ (LOOKUP32_SSE(sbox7p, ((x >> (8 * 7)) & 0x3f),
+ sizeof(sbox7p))));
+}
+
+__forceinline
+uint64_t enc_dec_1(const uint64_t data, const uint64_t *ks, const int enc)
+{
+ uint32_t l, r;
+
+ r = (uint32_t) (data);
+ l = (uint32_t) (data >> 32);
+ ip_z(&r, &l);
+
+ if (enc) {
+ l ^= fRK(r, ks[0]);
+ r ^= fRK(l, ks[1]);
+ l ^= fRK(r, ks[2]);
+ r ^= fRK(l, ks[3]);
+ l ^= fRK(r, ks[4]);
+ r ^= fRK(l, ks[5]);
+ l ^= fRK(r, ks[6]);
+ r ^= fRK(l, ks[7]);
+ l ^= fRK(r, ks[8]);
+ r ^= fRK(l, ks[9]);
+ l ^= fRK(r, ks[10]);
+ r ^= fRK(l, ks[11]);
+ l ^= fRK(r, ks[12]);
+ r ^= fRK(l, ks[13]);
+ l ^= fRK(r, ks[14]);
+ r ^= fRK(l, ks[15]);
+ } else {
+ l ^= fRK(r, ks[15]); /* l: l0 -> r1/l2 */
+ r ^= fRK(l, ks[14]); /* r: r0 -> r2 */
+ l ^= fRK(r, ks[13]);
+ r ^= fRK(l, ks[12]);
+ l ^= fRK(r, ks[11]);
+ r ^= fRK(l, ks[10]);
+ l ^= fRK(r, ks[9]);
+ r ^= fRK(l, ks[8]);
+ l ^= fRK(r, ks[7]);
+ r ^= fRK(l, ks[6]);
+ l ^= fRK(r, ks[5]);
+ r ^= fRK(l, ks[4]);
+ l ^= fRK(r, ks[3]);
+ r ^= fRK(l, ks[2]);
+ l ^= fRK(r, ks[1]);
+ r ^= fRK(l, ks[0]);
+ }
+
+ fp_z(&r, &l);
+ return ((uint64_t) l) | (((uint64_t) r) << 32);
+}
+
+IMB_DLL_LOCAL
+void
+des_enc_cbc_basic(const void *input, void *output, const int size,
+ const uint64_t *ks, const uint64_t *ivec)
+{
+#ifdef SAFE_PARAM
+ if ((input == NULL) || (output == NULL) ||
+ (ks == NULL) || (ivec == NULL) || (size < 0))
+ return;
+#endif
+ const uint64_t *in = input;
+ uint64_t *out = output;
+ const int nblocks = size / 8;
+ int n;
+ uint64_t iv = *ivec;
+
+ IMB_ASSERT(size >= 0);
+ IMB_ASSERT(input != NULL);
+ IMB_ASSERT(output != NULL);
+ IMB_ASSERT(ks != NULL);
+ IMB_ASSERT(ivec != NULL);
+
+ for (n = 0; n < nblocks; n++)
+ out[n] = iv = enc_dec_1(in[n] ^ iv, ks, 1 /* encrypt */);
+
+#ifdef SAFE_DATA
+ /* *ivec = iv; */
+ clear_var(&iv, sizeof(iv));
+#endif
+}
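+
+/*
+ * Illustrative use only (not part of the library), with hypothetical buffers:
+ * the byte count is expected to be a multiple of the DES block size (8),
+ * since only size / 8 full blocks are processed and trailing bytes are ignored.
+ *
+ *     uint64_t ks[16];
+ *     uint64_t iv = 0;                        // example IV only
+ *
+ *     des_key_schedule(ks, key8);             // key8: 8-byte DES key
+ *     des_enc_cbc_basic(plain, cipher, 64, ks, &iv);   // 8 blocks
+ */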
+
+IMB_DLL_LOCAL
+void
+des_dec_cbc_basic(const void *input, void *output, const int size,
+ const uint64_t *ks, const uint64_t *ivec)
+{
+#ifdef SAFE_PARAM
+ if ((input == NULL) || (output == NULL) ||
+ (ks == NULL) || (ivec == NULL) || (size < 0))
+ return;
+#endif
+ const uint64_t *in = input;
+ uint64_t *out = output;
+ const int nblocks = size / 8;
+ int n;
+ uint64_t iv = *ivec;
+
+ IMB_ASSERT(size >= 0);
+ IMB_ASSERT(input != NULL);
+ IMB_ASSERT(output != NULL);
+ IMB_ASSERT(ks != NULL);
+ IMB_ASSERT(ivec != NULL);
+
+ for (n = 0; n < nblocks; n++) {
+ uint64_t in_block = in[n];
+
+ out[n] = enc_dec_1(in_block, ks, 0 /* decrypt */) ^ iv;
+ iv = in_block;
+ }
+
+#ifdef SAFE_DATA
+ /* *ivec = iv; */
+ clear_var(&iv, sizeof(iv));
+#endif
+}
+
+IMB_DLL_LOCAL
+void
+des3_enc_cbc_basic(const void *input, void *output, const int size,
+ const uint64_t *ks1, const uint64_t *ks2,
+ const uint64_t *ks3, const uint64_t *ivec)
+{
+#ifdef SAFE_PARAM
+ if ((input == NULL) || (output == NULL) ||
+ (ks1 == NULL) || (ks2 == NULL) || (ks3 == NULL) ||
+ (ivec == NULL) || (size < 0))
+ return;
+#endif
+ const uint64_t *in = input;
+ uint64_t *out = output;
+ const int nblocks = size / 8;
+ int n;
+ uint64_t iv = *ivec;
+
+ IMB_ASSERT(size >= 0);
+ IMB_ASSERT(input != NULL);
+ IMB_ASSERT(output != NULL);
+ IMB_ASSERT(ks1 != NULL);
+ IMB_ASSERT(ks2 != NULL);
+ IMB_ASSERT(ks3 != NULL);
+ IMB_ASSERT(ivec != NULL);
+
+ for (n = 0; n < nblocks; n++) {
+ uint64_t t = in[n] ^ iv;
+
+ t = enc_dec_1(t, ks1, 1 /* encrypt */);
+ t = enc_dec_1(t, ks2, 0 /* decrypt */);
+ t = enc_dec_1(t, ks3, 1 /* encrypt */);
+ out[n] = iv = t;
+ }
+
+#ifdef SAFE_DATA
+ /* *ivec = iv; */
+ clear_var(&iv, sizeof(iv));
+#endif
+}
+
+IMB_DLL_LOCAL
+void
+des3_dec_cbc_basic(const void *input, void *output, const int size,
+ const uint64_t *ks1, const uint64_t *ks2,
+ const uint64_t *ks3, const uint64_t *ivec)
+{
+#ifdef SAFE_PARAM
+ if ((input == NULL) || (output == NULL) ||
+ (ks1 == NULL) || (ks2 == NULL) || (ks3 == NULL) ||
+ (ivec == NULL) || (size < 0))
+ return;
+#endif
+ const uint64_t *in = input;
+ uint64_t *out = output;
+ const int nblocks = size / 8;
+ int n;
+ uint64_t iv = *ivec;
+
+ IMB_ASSERT(size >= 0);
+ IMB_ASSERT(input != NULL);
+ IMB_ASSERT(output != NULL);
+ IMB_ASSERT(ks1 != NULL);
+ IMB_ASSERT(ks2 != NULL);
+ IMB_ASSERT(ks3 != NULL);
+ IMB_ASSERT(ivec != NULL);
+
+ for (n = 0; n < nblocks; n++) {
+ uint64_t t;
+ const uint64_t next_iv = in[n];
+
+ t = enc_dec_1(next_iv, ks3, 0 /* decrypt */);
+ t = enc_dec_1(t, ks2, 1 /* encrypt */);
+ t = enc_dec_1(t, ks1, 0 /* decrypt */);
+ out[n] = t ^ iv;
+
+ iv = next_iv;
+ }
+
+#ifdef SAFE_DATA
+ /* *ivec = iv; */
+ clear_var(&iv, sizeof(iv));
+#endif
+}
+
+__forceinline
+void
+cfb_one_basic(const void *input, void *output, const int size,
+ const uint64_t *ks, const uint64_t *ivec)
+{
+#ifdef SAFE_PARAM
+ if ((input == NULL) || (output == NULL) ||
+ (ks == NULL) || (ivec == NULL) || (size < 0))
+ return;
+#endif
+ uint8_t *out = (uint8_t *) output;
+ const uint8_t *in = (const uint8_t *) input;
+ uint64_t t;
+
+ IMB_ASSERT(size <= 8 && size >= 0);
+ IMB_ASSERT(input != NULL);
+ IMB_ASSERT(output != NULL);
+ IMB_ASSERT(ks != NULL);
+ IMB_ASSERT(ivec != NULL);
+
+ t = enc_dec_1(*ivec, ks, 1 /* encrypt */);
+
+ /* XOR and copy in one go */
+ if (size & 1) {
+ *out++ = *in++ ^ ((uint8_t) t);
+ t >>= 8;
+ }
+
+ if (size & 2) {
+ uint16_t *out2 = (uint16_t *) out;
+ const uint16_t *in2 = (const uint16_t *) in;
+
+ *out2 = *in2 ^ ((uint16_t) t);
+ t >>= 16;
+ out += 2;
+ in += 2;
+ }
+
+ if (size & 4) {
+ uint32_t *out4 = (uint32_t *) out;
+ const uint32_t *in4 = (const uint32_t *) in;
+
+ *out4 = *in4 ^ ((uint32_t) t);
+ }
+
+#ifdef SAFE_DATA
+ clear_var(&t, sizeof(t));
+#endif
+}
+
+IMB_DLL_LOCAL
+void
+docsis_des_enc_basic(const void *input, void *output, const int size,
+ const uint64_t *ks, const uint64_t *ivec)
+{
+#ifdef SAFE_PARAM
+ if ((input == NULL) || (output == NULL) ||
+ (ks == NULL) || (ivec == NULL) || (size < 0))
+ return;
+#endif
+ const uint64_t *in = input;
+ uint64_t *out = output;
+ const int nblocks = size / DES_BLOCK_SIZE;
+ const int partial = size & 7;
+ int n;
+ uint64_t iv = *ivec;
+
+ IMB_ASSERT(size >= 0);
+ IMB_ASSERT(input != NULL);
+ IMB_ASSERT(output != NULL);
+ IMB_ASSERT(ks != NULL);
+ IMB_ASSERT(ivec != NULL);
+
+ for (n = 0; n < nblocks; n++)
+ out[n] = iv = enc_dec_1(in[n] ^ iv, ks, 1 /* encrypt */);
+
+ if (partial) {
+ if (nblocks)
+ cfb_one_basic(&in[nblocks], &out[nblocks], partial,
+ ks, &out[nblocks - 1]);
+ else
+ cfb_one_basic(input, output, partial, ks, ivec);
+ }
+
+#ifdef SAFE_DATA
+ /* *ivec = iv; */
+ clear_var(&iv, sizeof(iv));
+#endif
+}
+
+IMB_DLL_LOCAL
+void
+docsis_des_dec_basic(const void *input, void *output, const int size,
+ const uint64_t *ks, const uint64_t *ivec)
+{
+#ifdef SAFE_PARAM
+ if ((input == NULL) || (output == NULL) ||
+ (ks == NULL) || (ivec == NULL) || (size < 0))
+ return;
+#endif
+ const uint64_t *in = input;
+ uint64_t *out = output;
+ const int nblocks = size / DES_BLOCK_SIZE;
+ const int partial = size & 7;
+ int n;
+ uint64_t iv = *ivec;
+
+ IMB_ASSERT(size >= 0);
+ IMB_ASSERT(input != NULL);
+ IMB_ASSERT(output != NULL);
+ IMB_ASSERT(ks != NULL);
+ IMB_ASSERT(ivec != NULL);
+
+ if (partial) {
+ if (!nblocks) {
+ /* first block is the partial one */
+ cfb_one_basic(input, output, partial, ks, ivec);
+ iv = 0;
+ return;
+ }
+ /* last block is partial */
+ cfb_one_basic(&in[nblocks], &out[nblocks], partial,
+ ks, &in[nblocks - 1]);
+ }
+
+ for (n = 0; n < nblocks; n++) {
+ uint64_t in_block = in[n];
+
+ out[n] = enc_dec_1(in_block, ks, 0 /* decrypt */) ^ iv;
+ iv = in_block;
+ }
+
+#ifdef SAFE_DATA
+ /* *ivec = iv; */
+ clear_var(&iv, sizeof(iv));
+#endif
+}
diff --git a/src/spdk/intel-ipsec-mb/des_key.c b/src/spdk/intel-ipsec-mb/des_key.c
new file mode 100644
index 000000000..039572f10
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/des_key.c
@@ -0,0 +1,151 @@
+/*******************************************************************************
+ Copyright (c) 2017-2018, Intel Corporation
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*******************************************************************************/
+
+#include <stdlib.h>
+#include <stdint.h>
+
+#include "intel-ipsec-mb.h"
+#include "des.h"
+#include "des_utils.h"
+#include "include/clear_regs_mem.h"
+
+/**
+ * @brief Rotates 28-bit word
+ *
+ * Roll right of 28-bit word - used in 28-bit subkey operations
+ *
+ * @param val 28-bit word to be rotated
+ * @param nshift number of bits to rotate by
+ *
+ * @return val rotated by nshift bits
+ */
+__forceinline
+uint32_t rotate28(const uint32_t val, const unsigned nshift)
+{
+ const uint32_t mask = (UINT32_C(1) << 28) - UINT32_C(1);
+
+ IMB_ASSERT(nshift <= 28);
+ return ((val >> nshift) & mask) |
+ ((val << (28 - nshift)) & mask);
+}
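+
+/*
+ * Illustrative example (not part of the library): rotating right by one
+ * moves the least significant bit to bit 27 of the 28-bit word, e.g.
+ * rotate28(0x0000001, 1) == 0x8000000.
+ */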
+
+/**
+ * @brief Expands 8 groups of 6bits into 8 groups of 8bits
+ *
+ * @param in a 48-bit word including 8 groups of 6bits
+ *
+ * @return 64-bit word with 8 groups of 8bits
+ */
+__forceinline
+uint64_t expand_8x6_to_8x8(const uint64_t in)
+{
+ return (((in >> (6 * 0)) & UINT64_C(63)) << (8 * 0)) |
+ (((in >> (6 * 1)) & UINT64_C(63)) << (8 * 1)) |
+ (((in >> (6 * 2)) & UINT64_C(63)) << (8 * 2)) |
+ (((in >> (6 * 3)) & UINT64_C(63)) << (8 * 3)) |
+ (((in >> (6 * 4)) & UINT64_C(63)) << (8 * 4)) |
+ (((in >> (6 * 5)) & UINT64_C(63)) << (8 * 5)) |
+ (((in >> (6 * 6)) & UINT64_C(63)) << (8 * 6)) |
+ (((in >> (6 * 7)) & UINT64_C(63)) << (8 * 7));
+}
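+
+/*
+ * Illustrative example (not part of the library): each 6-bit group lands in
+ * the corresponding output byte, e.g. expand_8x6_to_8x8(0x3f) == 0x3f and
+ * expand_8x6_to_8x8(0xfc0) == 0x3f00 (second group -> second byte).
+ */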
+
+static const uint8_t pc1c_table_fips46_3[28] = {
+ 57, 49, 41, 33, 25, 17, 9,
+ 1, 58, 50, 42, 34, 26, 18,
+ 10, 2, 59, 51, 43, 35, 27,
+ 19, 11, 3, 60, 52, 44, 36
+};
+
+static const uint8_t pc1d_table_fips46_3[28] = {
+ 63, 55, 47, 39, 31, 23, 15,
+ 7, 62, 54, 46, 38, 30, 22,
+ 14, 6, 61, 53, 45, 37, 29,
+ 21, 13, 5, 28, 20, 12, 4
+};
+
+static const uint8_t pc2_table_fips46_3[48] = {
+ 14, 17, 11, 24, 1, 5,
+ 3, 28, 15, 6, 21, 10,
+ 23, 19, 12, 4, 26, 8,
+ 16, 7, 27, 20, 13, 2,
+ 41, 52, 31, 37, 47, 55,
+ 30, 40, 51, 45, 33, 48,
+ 44, 49, 39, 56, 34, 53,
+ 46, 42, 50, 36, 29, 32
+};
+
+static const uint8_t shift_tab_fips46_3[16] = {
+ 1, 1, 2, 2, 2, 2, 2, 2,
+ 1, 2, 2, 2, 2, 2, 2, 1
+};
+
+int des_key_schedule(uint64_t *ks, const void *key)
+{
+#ifdef SAFE_PARAM
+ if (key == NULL || ks == NULL)
+ return -1;
+#endif
+
+ uint64_t c, d;
+ uint64_t t = 0;
+ int n;
+
+ /* KEY: 56 bits but spread across 64 bits
+ * - MSB per byte used for parity
+	 * - load64_reflect() loads the key and swaps bits in bytes
+	 *   so that bit numbers are more suitable for an LE machine and the
+	 *   FIPS 46-3 DES tables
+ */
+ t = load64_reflect(key);
+
+ /* PC1
+ * - built from the KEY, PC1 permute tables skip KEY parity bits
+ * - c & d are both 28 bits
+ */
+ c = permute_64b(t, pc1c_table_fips46_3, IMB_DIM(pc1c_table_fips46_3));
+ d = permute_64b(t, pc1d_table_fips46_3, IMB_DIM(pc1d_table_fips46_3));
+
+ /* KS rounds */
+ for (n = 0; n < 16; n++) {
+ c = rotate28((uint32_t)c, (unsigned) shift_tab_fips46_3[n]);
+ d = rotate28((uint32_t)d, (unsigned) shift_tab_fips46_3[n]);
+
+ /* PC2 */
+ t = permute_64b(c | (d << 28), pc2_table_fips46_3,
+ IMB_DIM(pc2_table_fips46_3));
+
+ /* store KS as 6 bits per byte and keep LE */
+ ks[n] = expand_8x6_to_8x8(t);
+ }
+
+#ifdef SAFE_DATA
+ clear_var(&c, sizeof(c));
+ clear_var(&d, sizeof(d));
+ clear_var(&t, sizeof(t));
+#endif
+ return 0;
+}
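+
+/*
+ * Illustrative use only (not part of the library), with hypothetical keys
+ * k1/k2/k3: 3DES-CBC requires three independent key schedules.
+ *
+ *     uint64_t ks1[16], ks2[16], ks3[16];
+ *
+ *     if (des_key_schedule(ks1, k1) != 0 ||
+ *         des_key_schedule(ks2, k2) != 0 ||
+ *         des_key_schedule(ks3, k3) != 0)
+ *         return;  // invalid arguments
+ *
+ *     des3_enc_cbc_basic(plain, cipher, size, ks1, ks2, ks3, &iv);
+ */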
diff --git a/src/spdk/intel-ipsec-mb/gcm.c b/src/spdk/intel-ipsec-mb/gcm.c
new file mode 100644
index 000000000..2b8d706a2
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/gcm.c
@@ -0,0 +1,225 @@
+/*******************************************************************************
+ Copyright (c) 2018, Intel Corporation
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*******************************************************************************/
+
+#include <stdint.h>
+#include "intel-ipsec-mb.h"
+#include "gcm.h"
+#include "noaesni.h"
+
+/**
+ * @brief Pre-processes GCM key data
+ *
+ * Prefills the GCM key data with the round keys and
+ * the initial hash subkey used for tag computation
+ *
+ * @param key pointer to key data
+ * @param key_data GCM expanded key data
+ *
+ */
+
+void aes_gcm_pre_128_sse(const void *key, struct gcm_key_data *key_data)
+{
+#ifdef SAFE_PARAM
+ if (key == NULL || key_data == NULL)
+ return;
+#endif
+ aes_keyexp_128_enc_sse(key, key_data->expanded_keys);
+ aes_gcm_precomp_128_sse(key_data);
+}
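+
+/*
+ * Illustrative use only (not part of this file): the key is expanded once and
+ * the resulting structure is reused for any number of GCM operations with
+ * that key.
+ *
+ *     struct gcm_key_data gdata;   // assumed to be suitably aligned
+ *
+ *     aes_gcm_pre_128_sse(key, &gdata);
+ *     // gdata is then passed to the matching GCM encrypt/decrypt routines
+ */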
+
+void aes_gcm_pre_128_sse_no_aesni(const void *key,
+ struct gcm_key_data *key_data)
+{
+#ifdef SAFE_PARAM
+ if (key == NULL || key_data == NULL)
+ return;
+#endif
+ aes_keyexp_128_enc_sse_no_aesni(key, key_data->expanded_keys);
+ aes_gcm_precomp_128_sse_no_aesni(key_data);
+}
+
+void aes_gcm_pre_128_avx_gen2(const void *key, struct gcm_key_data *key_data)
+{
+#ifdef SAFE_PARAM
+ if (key == NULL || key_data == NULL)
+ return;
+#endif
+ aes_keyexp_128_enc_avx(key, key_data->expanded_keys);
+ aes_gcm_precomp_128_avx_gen2(key_data);
+}
+
+void aes_gcm_pre_128_avx_gen4(const void *key, struct gcm_key_data *key_data)
+{
+#ifdef SAFE_PARAM
+ if (key == NULL || key_data == NULL)
+ return;
+#endif
+ aes_keyexp_128_enc_avx2(key, key_data->expanded_keys);
+ aes_gcm_precomp_128_avx_gen4(key_data);
+}
+
+void aes_gcm_pre_128_avx512(const void *key, struct gcm_key_data *key_data)
+{
+#ifdef SAFE_PARAM
+ if (key == NULL || key_data == NULL)
+ return;
+#endif
+ aes_keyexp_128_enc_avx2(key, key_data->expanded_keys);
+ aes_gcm_precomp_128_avx512(key_data);
+}
+
+void aes_gcm_pre_128_vaes_avx512(const void *key, struct gcm_key_data *key_data)
+{
+#ifdef SAFE_PARAM
+ if (key == NULL || key_data == NULL)
+ return;
+#endif
+ aes_keyexp_128_enc_avx2(key, key_data->expanded_keys);
+ aes_gcm_precomp_128_vaes_avx512(key_data);
+}
+
+void aes_gcm_pre_192_sse(const void *key, struct gcm_key_data *key_data)
+{
+#ifdef SAFE_PARAM
+ if (key == NULL || key_data == NULL)
+ return;
+#endif
+ aes_keyexp_192_enc_sse(key, key_data->expanded_keys);
+ aes_gcm_precomp_192_sse(key_data);
+}
+
+void aes_gcm_pre_192_sse_no_aesni(const void *key,
+ struct gcm_key_data *key_data)
+{
+#ifdef SAFE_PARAM
+ if (key == NULL || key_data == NULL)
+ return;
+#endif
+ aes_keyexp_192_enc_sse_no_aesni(key, key_data->expanded_keys);
+ aes_gcm_precomp_192_sse_no_aesni(key_data);
+}
+
+void aes_gcm_pre_192_avx_gen2(const void *key, struct gcm_key_data *key_data)
+{
+#ifdef SAFE_PARAM
+ if (key == NULL || key_data == NULL)
+ return;
+#endif
+ aes_keyexp_192_enc_avx(key, key_data->expanded_keys);
+ aes_gcm_precomp_192_avx_gen2(key_data);
+}
+
+void aes_gcm_pre_192_avx_gen4(const void *key, struct gcm_key_data *key_data)
+{
+#ifdef SAFE_PARAM
+ if (key == NULL || key_data == NULL)
+ return;
+#endif
+ aes_keyexp_192_enc_avx2(key, key_data->expanded_keys);
+ aes_gcm_precomp_192_avx_gen4(key_data);
+}
+
+void aes_gcm_pre_192_avx512(const void *key, struct gcm_key_data *key_data)
+{
+#ifdef SAFE_PARAM
+ if (key == NULL || key_data == NULL)
+ return;
+#endif
+ aes_keyexp_192_enc_avx2(key, key_data->expanded_keys);
+ aes_gcm_precomp_192_avx512(key_data);
+}
+
+void aes_gcm_pre_192_vaes_avx512(const void *key, struct gcm_key_data *key_data)
+{
+#ifdef SAFE_PARAM
+ if (key == NULL || key_data == NULL)
+ return;
+#endif
+ aes_keyexp_192_enc_avx2(key, key_data->expanded_keys);
+ aes_gcm_precomp_192_vaes_avx512(key_data);
+}
+
+void aes_gcm_pre_256_sse(const void *key, struct gcm_key_data *key_data)
+{
+#ifdef SAFE_PARAM
+ if (key == NULL || key_data == NULL)
+ return;
+#endif
+ aes_keyexp_256_enc_sse(key, key_data->expanded_keys);
+ aes_gcm_precomp_256_sse(key_data);
+}
+
+void aes_gcm_pre_256_sse_no_aesni(const void *key,
+ struct gcm_key_data *key_data)
+{
+#ifdef SAFE_PARAM
+ if (key == NULL || key_data == NULL)
+ return;
+#endif
+ aes_keyexp_256_enc_sse_no_aesni(key, key_data->expanded_keys);
+ aes_gcm_precomp_256_sse_no_aesni(key_data);
+}
+
+void aes_gcm_pre_256_avx_gen2(const void *key, struct gcm_key_data *key_data)
+{
+#ifdef SAFE_PARAM
+ if (key == NULL || key_data == NULL)
+ return;
+#endif
+ aes_keyexp_256_enc_avx(key, key_data->expanded_keys);
+ aes_gcm_precomp_256_avx_gen2(key_data);
+}
+
+void aes_gcm_pre_256_avx_gen4(const void *key, struct gcm_key_data *key_data)
+{
+#ifdef SAFE_PARAM
+ if (key == NULL || key_data == NULL)
+ return;
+#endif
+ aes_keyexp_256_enc_avx2(key, key_data->expanded_keys);
+ aes_gcm_precomp_256_avx_gen4(key_data);
+}
+
+void aes_gcm_pre_256_avx512(const void *key, struct gcm_key_data *key_data)
+{
+#ifdef SAFE_PARAM
+ if (key == NULL || key_data == NULL)
+ return;
+#endif
+ aes_keyexp_256_enc_avx2(key, key_data->expanded_keys);
+ aes_gcm_precomp_256_avx512(key_data);
+}
+
+void aes_gcm_pre_256_vaes_avx512(const void *key, struct gcm_key_data *key_data)
+{
+#ifdef SAFE_PARAM
+ if (key == NULL || key_data == NULL)
+ return;
+#endif
+ aes_keyexp_256_enc_avx2(key, key_data->expanded_keys);
+ aes_gcm_precomp_256_vaes_avx512(key_data);
+}
diff --git a/src/spdk/intel-ipsec-mb/include/aes_common.asm b/src/spdk/intel-ipsec-mb/include/aes_common.asm
new file mode 100644
index 000000000..5c8cbb48c
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/include/aes_common.asm
@@ -0,0 +1,375 @@
+;;
+;; Copyright (c) 2019, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%ifndef _AES_COMMON_ASM_
+%define _AES_COMMON_ASM_
+
+%include "include/reg_sizes.asm"
+
+;; =============================================================================
+;; Generic macro to produce code that executes %%OPCODE instruction
+;; on a selected number of AES blocks (16 bytes each), between 0 and 16.
+;; All three operands of the instruction come from registers.
+;; Note: if 3 blocks are left at the end, the instruction is emitted for all
+;; 4 blocks (full width of a ZMM register).
+
+%macro ZMM_OPCODE3_DSTR_SRC1R_SRC2R_BLOCKS_0_16 14
+%define %%NUM_BLOCKS %1 ; [in] numerical value, number of AES blocks (0 to 16)
+%define %%OPCODE %2 ; [in] instruction name
+%define %%DST0 %3 ; [out] destination ZMM register
+%define %%DST1 %4 ; [out] destination ZMM register
+%define %%DST2 %5 ; [out] destination ZMM register
+%define %%DST3 %6 ; [out] destination ZMM register
+%define %%SRC1_0 %7 ; [in] source 1 ZMM register
+%define %%SRC1_1 %8 ; [in] source 1 ZMM register
+%define %%SRC1_2 %9 ; [in] source 1 ZMM register
+%define %%SRC1_3 %10 ; [in] source 1 ZMM register
+%define %%SRC2_0 %11 ; [in] source 2 ZMM register
+%define %%SRC2_1 %12 ; [in] source 2 ZMM register
+%define %%SRC2_2 %13 ; [in] source 2 ZMM register
+%define %%SRC2_3 %14 ; [in] source 2 ZMM register
+
+%assign reg_idx 0
+%assign blocks_left %%NUM_BLOCKS
+
+%rep (%%NUM_BLOCKS / 4)
+%xdefine %%DSTREG %%DST %+ reg_idx
+%xdefine %%SRC1REG %%SRC1_ %+ reg_idx
+%xdefine %%SRC2REG %%SRC2_ %+ reg_idx
+ %%OPCODE %%DSTREG, %%SRC1REG, %%SRC2REG
+%undef %%DSTREG
+%undef %%SRC1REG
+%undef %%SRC2REG
+%assign reg_idx (reg_idx + 1)
+%assign blocks_left (blocks_left - 4)
+%endrep
+
+%xdefine %%DSTREG %%DST %+ reg_idx
+%xdefine %%SRC1REG %%SRC1_ %+ reg_idx
+%xdefine %%SRC2REG %%SRC2_ %+ reg_idx
+
+%if blocks_left == 1
+ %%OPCODE XWORD(%%DSTREG), XWORD(%%SRC1REG), XWORD(%%SRC2REG)
+%elif blocks_left == 2
+ %%OPCODE YWORD(%%DSTREG), YWORD(%%SRC1REG), YWORD(%%SRC2REG)
+%elif blocks_left == 3
+ %%OPCODE %%DSTREG, %%SRC1REG, %%SRC2REG
+%endif
+
+%endmacro
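+
+;; Illustrative expansion (not part of the library): with 7 blocks the macro
+;; emits one full-width operation for blocks 0-3 and, since 3 blocks remain,
+;; a second full-width operation covering blocks 4-7:
+;;
+;;      ZMM_OPCODE3_DSTR_SRC1R_SRC2R_BLOCKS_0_16 7, vpxorq, \
+;;              zmm0, zmm1, zmm2, zmm3, zmm0, zmm1, zmm2, zmm3, \
+;;              zmm16, zmm16, zmm16, zmm16
+;; expands to:
+;;      vpxorq  zmm0, zmm0, zmm16
+;;      vpxorq  zmm1, zmm1, zmm16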
+
+;; =============================================================================
+;; Loads specified number of AES blocks into ZMM registers
+;; %%FLAGS are optional and only affect behavior when 3 trailing blocks are left
+;; - if %%FLAGS is not provided then exactly 3 blocks are loaded (move and insert)
+;; - if "load_4_instead_of_3" option is passed then 4 blocks are loaded
+%macro ZMM_LOAD_BLOCKS_0_16 7-8
+%define %%NUM_BLOCKS %1 ; [in] numerical value, number of AES blocks (0 to 16)
+%define %%INP %2 ; [in] input data pointer to read from
+%define %%DATA_OFFSET %3 ; [in] offset to the input pointer (GP or numerical)
+%define %%DST0 %4 ; [out] ZMM register with loaded data
+%define %%DST1 %5 ; [out] ZMM register with loaded data
+%define %%DST2 %6 ; [out] ZMM register with loaded data
+%define %%DST3 %7 ; [out] ZMM register with loaded data
+%define %%FLAGS %8 ; [in] optional "load_4_instead_of_3"
+
+%assign src_offset 0
+%assign dst_idx 0
+
+%rep (%%NUM_BLOCKS / 4)
+%xdefine %%DSTREG %%DST %+ dst_idx
+ vmovdqu8 %%DSTREG, [%%INP + %%DATA_OFFSET + src_offset]
+%undef %%DSTREG
+%assign src_offset (src_offset + 64)
+%assign dst_idx (dst_idx + 1)
+%endrep
+
+%assign blocks_left (%%NUM_BLOCKS % 4)
+%xdefine %%DSTREG %%DST %+ dst_idx
+
+%if blocks_left == 1
+ vmovdqu8 XWORD(%%DSTREG), [%%INP + %%DATA_OFFSET + src_offset]
+%elif blocks_left == 2
+ vmovdqu8 YWORD(%%DSTREG), [%%INP + %%DATA_OFFSET + src_offset]
+%elif blocks_left == 3
+%ifidn %%FLAGS, load_4_instead_of_3
+ vmovdqu8 %%DSTREG, [%%INP + %%DATA_OFFSET + src_offset]
+%else
+ vmovdqu8 YWORD(%%DSTREG), [%%INP + %%DATA_OFFSET + src_offset]
+ vinserti64x2 %%DSTREG, [%%INP + %%DATA_OFFSET + src_offset + 32], 2
+%endif
+%endif
+
+%endmacro
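+
+;; Illustrative use (not part of the library): loading 7 blocks (112 bytes)
+;; from [rax]:
+;;      ZMM_LOAD_BLOCKS_0_16 7, rax, 0, zmm0, zmm1, zmm2, zmm3
+;; loads 64 bytes into zmm0 and, for the 3 trailing blocks, 32 bytes into
+;; ymm1 followed by a vinserti64x2 of the last 16 bytes into zmm1.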
+
+;; =============================================================================
+;; Loads specified number of AES blocks into ZMM registers using mask register
+;; for the last loaded register (xmm, ymm or zmm).
+;; Loads take place at 1 byte granularity.
+%macro ZMM_LOAD_MASKED_BLOCKS_0_16 8
+%define %%NUM_BLOCKS %1 ; [in] numerical value, number of AES blocks (0 to 16)
+%define %%INP %2 ; [in] input data pointer to read from
+%define %%DATA_OFFSET %3 ; [in] offset to the input pointer (GP or numerical)
+%define %%DST0 %4 ; [out] ZMM register with loaded data
+%define %%DST1 %5 ; [out] ZMM register with loaded data
+%define %%DST2 %6 ; [out] ZMM register with loaded data
+%define %%DST3 %7 ; [out] ZMM register with loaded data
+%define %%MASK %8 ; [in] mask register
+
+%assign src_offset 0
+%assign dst_idx 0
+%assign blocks_left %%NUM_BLOCKS
+
+%if %%NUM_BLOCKS > 0
+%rep (((%%NUM_BLOCKS + 3) / 4) - 1)
+%xdefine %%DSTREG %%DST %+ dst_idx
+ vmovdqu8 %%DSTREG, [%%INP + %%DATA_OFFSET + src_offset]
+%undef %%DSTREG
+%assign src_offset (src_offset + 64)
+%assign dst_idx (dst_idx + 1)
+%assign blocks_left (blocks_left - 4)
+%endrep
+%endif ; %if %%NUM_BLOCKS > 0
+
+%xdefine %%DSTREG %%DST %+ dst_idx
+
+%if blocks_left == 1
+ vmovdqu8 XWORD(%%DSTREG){%%MASK}{z}, [%%INP + %%DATA_OFFSET + src_offset]
+%elif blocks_left == 2
+ vmovdqu8 YWORD(%%DSTREG){%%MASK}{z}, [%%INP + %%DATA_OFFSET + src_offset]
+%elif (blocks_left == 3 || blocks_left == 4)
+ vmovdqu8 %%DSTREG{%%MASK}{z}, [%%INP + %%DATA_OFFSET + src_offset]
+%endif
+
+%endmacro
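+
+;; Illustrative use (not part of the library): loading 5 blocks where the
+;; last one may be partial, with the byte mask already set up in k1:
+;;      ZMM_LOAD_MASKED_BLOCKS_0_16 5, rax, 0, zmm0, zmm1, zmm2, zmm3, k1
+;; loads 64 bytes into zmm0 and then performs a masked 16-byte load into xmm1.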
+
+;; =============================================================================
+;; Stores specified number of AES blocks from ZMM registers
+%macro ZMM_STORE_BLOCKS_0_16 7
+%define %%NUM_BLOCKS %1 ; [in] numerical value, number of AES blocks (0 to 16)
+%define %%OUTP %2 ; [in] output data pointer to write to
+%define %%DATA_OFFSET %3 ; [in] offset to the output pointer (GP or numerical)
+%define %%SRC0 %4 ; [in] ZMM register with data to store
+%define %%SRC1 %5 ; [in] ZMM register with data to store
+%define %%SRC2 %6 ; [in] ZMM register with data to store
+%define %%SRC3 %7 ; [in] ZMM register with data to store
+
+%assign dst_offset 0
+%assign src_idx 0
+
+%rep (%%NUM_BLOCKS / 4)
+%xdefine %%SRCREG %%SRC %+ src_idx
+ vmovdqu8 [%%OUTP + %%DATA_OFFSET + dst_offset], %%SRCREG
+%undef %%SRCREG
+%assign dst_offset (dst_offset + 64)
+%assign src_idx (src_idx + 1)
+%endrep
+
+%assign blocks_left (%%NUM_BLOCKS % 4)
+%xdefine %%SRCREG %%SRC %+ src_idx
+
+%if blocks_left == 1
+ vmovdqu8 [%%OUTP + %%DATA_OFFSET + dst_offset], XWORD(%%SRCREG)
+%elif blocks_left == 2
+ vmovdqu8 [%%OUTP + %%DATA_OFFSET + dst_offset], YWORD(%%SRCREG)
+%elif blocks_left == 3
+ vmovdqu8 [%%OUTP + %%DATA_OFFSET + dst_offset], YWORD(%%SRCREG)
+ vextracti32x4 [%%OUTP + %%DATA_OFFSET + dst_offset + 32], %%SRCREG, 2
+%endif
+
+%endmacro
+
+;; =============================================================================
+;; Stores specified number of AES blocks from ZMM registers with mask register
+;; for the last stored register (xmm, ymm or zmm).
+;; Stores take place at 1 byte granularity.
+%macro ZMM_STORE_MASKED_BLOCKS_0_16 8
+%define %%NUM_BLOCKS %1 ; [in] numerical value, number of AES blocks (0 to 16)
+%define %%OUTP %2 ; [in] output data pointer to write to
+%define %%DATA_OFFSET %3 ; [in] offset to the output pointer (GP or numerical)
+%define %%SRC0 %4 ; [in] ZMM register with data to store
+%define %%SRC1 %5 ; [in] ZMM register with data to store
+%define %%SRC2 %6 ; [in] ZMM register with data to store
+%define %%SRC3 %7 ; [in] ZMM register with data to store
+%define %%MASK %8 ; [in] mask register
+
+%assign dst_offset 0
+%assign src_idx 0
+%assign blocks_left %%NUM_BLOCKS
+
+%if %%NUM_BLOCKS > 0
+%rep (((%%NUM_BLOCKS + 3) / 4) - 1)
+%xdefine %%SRCREG %%SRC %+ src_idx
+ vmovdqu8 [%%OUTP + %%DATA_OFFSET + dst_offset], %%SRCREG
+%undef %%SRCREG
+%assign dst_offset (dst_offset + 64)
+%assign src_idx (src_idx + 1)
+%assign blocks_left (blocks_left - 4)
+%endrep
+%endif ; %if %%NUM_BLOCKS > 0
+
+%xdefine %%SRCREG %%SRC %+ src_idx
+
+%if blocks_left == 1
+ vmovdqu8 [%%OUTP + %%DATA_OFFSET + dst_offset]{%%MASK}, XWORD(%%SRCREG)
+%elif blocks_left == 2
+ vmovdqu8 [%%OUTP + %%DATA_OFFSET + dst_offset]{%%MASK}, YWORD(%%SRCREG)
+%elif (blocks_left == 3 || blocks_left == 4)
+ vmovdqu8 [%%OUTP + %%DATA_OFFSET + dst_offset]{%%MASK}, %%SRCREG
+%endif
+
+%endmacro
+
+;;; ===========================================================================
+;;; Handles AES encryption rounds
+;;; It handles the special cases of the first and last rounds.
+;;; Optionally, it performs XOR with data after the last AES round.
+;;; Uses the NROUNDS parameter to check what needs to be done for the current round.
+;;; If 3 blocks are trailing then operation on whole ZMM is performed (4 blocks).
+%macro ZMM_AESENC_ROUND_BLOCKS_0_16 12
+%define %%L0B0_3 %1 ; [in/out] zmm; blocks 0 to 3
+%define %%L0B4_7 %2 ; [in/out] zmm; blocks 4 to 7
+%define %%L0B8_11 %3 ; [in/out] zmm; blocks 8 to 11
+%define %%L0B12_15 %4 ; [in/out] zmm; blocks 12 to 15
+%define %%KEY %5 ; [in] zmm containing round key
+%define %%ROUND %6 ; [in] round number
+%define %%D0_3 %7 ; [in] zmm or no_data; plain/cipher text blocks 0-3
+%define %%D4_7 %8 ; [in] zmm or no_data; plain/cipher text blocks 4-7
+%define %%D8_11 %9 ; [in] zmm or no_data; plain/cipher text blocks 8-11
+%define %%D12_15 %10 ; [in] zmm or no_data; plain/cipher text blocks 12-15
+%define %%NUMBL %11 ; [in] number of blocks; numerical value
+%define %%NROUNDS %12 ; [in] number of rounds; numerical value
+
+;;; === first AES round
+%if (%%ROUND < 1)
+ ;; round 0
+ ZMM_OPCODE3_DSTR_SRC1R_SRC2R_BLOCKS_0_16 %%NUMBL, vpxorq, \
+ %%L0B0_3, %%L0B4_7, %%L0B8_11, %%L0B12_15, \
+ %%L0B0_3, %%L0B4_7, %%L0B8_11, %%L0B12_15, \
+ %%KEY, %%KEY, %%KEY, %%KEY
+%endif ; ROUND 0
+
+;;; === middle AES rounds
+%if (%%ROUND >= 1 && %%ROUND <= %%NROUNDS)
+ ;; rounds 1 to 9/11/13
+ ZMM_OPCODE3_DSTR_SRC1R_SRC2R_BLOCKS_0_16 %%NUMBL, vaesenc, \
+ %%L0B0_3, %%L0B4_7, %%L0B8_11, %%L0B12_15, \
+ %%L0B0_3, %%L0B4_7, %%L0B8_11, %%L0B12_15, \
+ %%KEY, %%KEY, %%KEY, %%KEY
+%endif ; rounds 1 to 9/11/13
+
+;;; === last AES round
+%if (%%ROUND > %%NROUNDS)
+ ;; the last round - mix enclast with text xor's
+ ZMM_OPCODE3_DSTR_SRC1R_SRC2R_BLOCKS_0_16 %%NUMBL, vaesenclast, \
+ %%L0B0_3, %%L0B4_7, %%L0B8_11, %%L0B12_15, \
+ %%L0B0_3, %%L0B4_7, %%L0B8_11, %%L0B12_15, \
+ %%KEY, %%KEY, %%KEY, %%KEY
+
+;;; === XOR with data
+%ifnidn %%D0_3, no_data
+%ifnidn %%D4_7, no_data
+%ifnidn %%D8_11, no_data
+%ifnidn %%D12_15, no_data
+ ZMM_OPCODE3_DSTR_SRC1R_SRC2R_BLOCKS_0_16 %%NUMBL, vpxorq, \
+ %%L0B0_3, %%L0B4_7, %%L0B8_11, %%L0B12_15, \
+ %%L0B0_3, %%L0B4_7, %%L0B8_11, %%L0B12_15, \
+ %%D0_3, %%D4_7, %%D8_11, %%D12_15
+%endif ; !no_data
+%endif ; !no_data
+%endif ; !no_data
+%endif ; !no_data
+
+%endif ; The last round
+
+%endmacro
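+
+;; Illustrative use (not part of the library): for AES-128 (NROUNDS = 9) the
+;; caller invokes the macro with ROUND = 0..10; e.g. the final round over 16
+;; blocks, without the optional data XOR, could look like:
+;;      ZMM_AESENC_ROUND_BLOCKS_0_16 zmm0, zmm1, zmm2, zmm3, zmm26, 10, \
+;;              no_data, no_data, no_data, no_data, 16, 9
+;; where zmm26 is assumed to hold the broadcast last round key.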
+
+;;; ===========================================================================
+;;; Handles AES decryption rounds
+;;; It handles the special cases of the first and last rounds.
+;;; Optionally, it performs XOR with data after the last AES round.
+;;; Uses NROUNDS parameter to check what needs to be done for the current round.
+;;; If 3 blocks are trailing then operation on whole ZMM is performed (4 blocks).
+%macro ZMM_AESDEC_ROUND_BLOCKS_0_16 12
+%define %%L0B0_3 %1 ; [in/out] zmm; blocks 0 to 3
+%define %%L0B4_7 %2 ; [in/out] zmm; blocks 4 to 7
+%define %%L0B8_11 %3 ; [in/out] zmm; blocks 8 to 11
+%define %%L0B12_15 %4 ; [in/out] zmm; blocks 12 to 15
+%define %%KEY %5 ; [in] zmm containing round key
+%define %%ROUND %6 ; [in] round number
+%define %%D0_3 %7 ; [in] zmm or no_data; cipher text blocks 0-3
+%define %%D4_7 %8 ; [in] zmm or no_data; cipher text blocks 4-7
+%define %%D8_11 %9 ; [in] zmm or no_data; cipher text blocks 8-11
+%define %%D12_15 %10 ; [in] zmm or no_data; cipher text blocks 12-15
+%define %%NUMBL %11 ; [in] number of blocks; numerical value
+%define %%NROUNDS %12 ; [in] number of rounds; numerical value
+
+;;; === first AES round
+%if (%%ROUND < 1)
+ ;; round 0
+ ZMM_OPCODE3_DSTR_SRC1R_SRC2R_BLOCKS_0_16 %%NUMBL, vpxorq, \
+ %%L0B0_3, %%L0B4_7, %%L0B8_11, %%L0B12_15, \
+ %%L0B0_3, %%L0B4_7, %%L0B8_11, %%L0B12_15, \
+ %%KEY, %%KEY, %%KEY, %%KEY
+%endif ; ROUND 0
+
+;;; === middle AES rounds
+%if (%%ROUND >= 1 && %%ROUND <= %%NROUNDS)
+ ;; rounds 1 to 9/11/13
+ ZMM_OPCODE3_DSTR_SRC1R_SRC2R_BLOCKS_0_16 %%NUMBL, vaesdec, \
+ %%L0B0_3, %%L0B4_7, %%L0B8_11, %%L0B12_15, \
+ %%L0B0_3, %%L0B4_7, %%L0B8_11, %%L0B12_15, \
+ %%KEY, %%KEY, %%KEY, %%KEY
+%endif ; rounds 1 to 9/11/13
+
+;;; === last AES round
+%if (%%ROUND > %%NROUNDS)
+	;; the last round - mix declast with text xor's
+ ZMM_OPCODE3_DSTR_SRC1R_SRC2R_BLOCKS_0_16 %%NUMBL, vaesdeclast, \
+ %%L0B0_3, %%L0B4_7, %%L0B8_11, %%L0B12_15, \
+ %%L0B0_3, %%L0B4_7, %%L0B8_11, %%L0B12_15, \
+ %%KEY, %%KEY, %%KEY, %%KEY
+
+;;; === XOR with data
+%ifnidn %%D0_3, no_data
+%ifnidn %%D4_7, no_data
+%ifnidn %%D8_11, no_data
+%ifnidn %%D12_15, no_data
+ ZMM_OPCODE3_DSTR_SRC1R_SRC2R_BLOCKS_0_16 %%NUMBL, vpxorq, \
+ %%L0B0_3, %%L0B4_7, %%L0B8_11, %%L0B12_15, \
+ %%L0B0_3, %%L0B4_7, %%L0B8_11, %%L0B12_15, \
+ %%D0_3, %%D4_7, %%D8_11, %%D12_15
+%endif ; !no_data
+%endif ; !no_data
+%endif ; !no_data
+%endif ; !no_data
+
+%endif ; The last round
+
+%endmacro
+
+%endif ;; _AES_COMMON_ASM
diff --git a/src/spdk/intel-ipsec-mb/include/aesni_emu.h b/src/spdk/intel-ipsec-mb/include/aesni_emu.h
new file mode 100644
index 000000000..575fada22
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/include/aesni_emu.h
@@ -0,0 +1,120 @@
+/*******************************************************************************
+ Copyright (c) 2018, Intel Corporation
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*******************************************************************************/
+
+#ifndef _AESNI_EMU_H_
+#define _AESNI_EMU_H_
+#include <stdint.h>
+
+/* Interface to AESNI emulation routines */
+
+/* XMM type definitions and constants */
+
+#define MAX_BYTES_PER_XMM 16
+#define MAX_WORDS_PER_XMM 8
+#define MAX_DWORDS_PER_XMM 4
+#define MAX_QWORDS_PER_XMM 2
+
+union xmm_reg {
+ uint8_t byte[MAX_BYTES_PER_XMM];
+ uint16_t word[MAX_WORDS_PER_XMM];
+ uint32_t dword[MAX_DWORDS_PER_XMM];
+ uint64_t qword[MAX_QWORDS_PER_XMM];
+};
+
+/* AESNI emulation API */
+
+/**
+ * @brief AESKEYGENASIST instruction emulation function
+ *
+ * Assist in AES round key generation using an 8-bit round constant
+ * (RCON) specified in \a imm8, operating on 128 bits of data
+ *
+ * @param dst pointer to 128 bit buffer to store generated key
+ * @param src pointer to 128 bit src key
+ * @param imm8 round constant used to generate key
+ */
+IMB_DLL_LOCAL void emulate_AESKEYGENASSIST(union xmm_reg *dst,
+ const union xmm_reg *src,
+ const uint32_t imm8);
+
+/**
+ * @brief AESENC instruction emulation function
+ *
+ * Perform one round of an AES encryption flow
+ *
+ * @param dst pointer to 128 bit data (state) to operate on
+ * @param src pointer to 128 bit round key
+ */
+IMB_DLL_LOCAL void emulate_AESENC(union xmm_reg *dst,
+ const union xmm_reg *src);
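+
+/*
+ * Illustrative use only (not part of this header): one emulated AES
+ * encryption round on a 128-bit state.
+ *
+ *     union xmm_reg state, round_key;
+ *     // ... fill state and round_key ...
+ *     emulate_AESENC(&state, &round_key);  // state is updated in place
+ */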
+
+/**
+ * @brief AESENCLAST instruction emulation function
+ *
+ * Perform last round of an AES encryption flow
+ *
+ * @param dst pointer to 128 bit data (state) to operate on
+ * @param src pointer to 128 bit round key
+ */
+IMB_DLL_LOCAL void emulate_AESENCLAST(union xmm_reg *dst,
+ const union xmm_reg *src);
+
+/**
+ * @brief AESDEC instruction emulation function
+ *
+ * Perform one round of an AES decryption flow
+ *
+ * @param dst pointer to 128 bit data (state) to operate on
+ * @param src pointer to 128 bit round key
+ */
+IMB_DLL_LOCAL void emulate_AESDEC(union xmm_reg *dst,
+ const union xmm_reg *src);
+
+/**
+ * @brief AESDECLAST instruction emulation function
+ *
+ * Perform last round of an AES decryption flow
+ *
+ * @param dst pointer to 128 bit data (state) to operate on
+ * @param src pointer to 128 bit round key
+ */
+IMB_DLL_LOCAL void emulate_AESDECLAST(union xmm_reg *dst,
+ const union xmm_reg *src);
+
+/**
+ * @brief AESIMC instruction emulation function
+ *
+ * Perform the InvMixColumn transformation on
+ * a 128 bit round key
+ *
+ * @param dst pointer to 128 bit buffer to store result
+ * @param src pointer to 128 bit round key
+ */
+IMB_DLL_LOCAL void emulate_AESIMC(union xmm_reg *dst,
+ const union xmm_reg *src);
+
+#endif /* _AESNI_EMU_H_ */
diff --git a/src/spdk/intel-ipsec-mb/include/aesni_emu.inc b/src/spdk/intel-ipsec-mb/include/aesni_emu.inc
new file mode 100644
index 000000000..5a40180c8
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/include/aesni_emu.inc
@@ -0,0 +1,247 @@
+;;
+;; Copyright (c) 2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%ifndef _AESNI_EMU_INC_
+%define _AESNI_EMU_INC_
+
+%include "include/reg_sizes.asm"
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; Utility macros and defines to assist AESNI translation macros
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+%define GP0 rax
+%define GP1 rbx
+%define GP2 rcx
+%define GP3 rdx
+%define GP4 rbp
+%define GP5 rsi
+%define GP6 rdi
+%define GP7 r8
+%define GP8 r9
+%define GP9 r10
+%define GP10 r11
+%define GP11 r12
+%define GP12 r13
+%define GP13 r14
+%define GP14 r15
+%define NUM_GP_REGS 15
+%define NUM_XMM_REGS 16
+
+%define GP_SZ 8
+%define XMM_SZ 16
+%define ARG_SZ 16
+
+;; 8 extra bytes added to align to 16 bytes
+%define XMM_OFFSET ((NUM_GP_REGS + 1) * GP_SZ)
+;; ARG1 placed in the stack after all GP and XMM registers
+%define ARG1_OFFSET (XMM_OFFSET + (NUM_XMM_REGS * XMM_SZ))
+;; ARG2 placed in the stack after all GP and XMM registers and ARG1
+%define ARG2_OFFSET (ARG1_OFFSET + ARG_SZ)
+
+%define GP(x) GP %+ x
+%define XMM(x) xmm %+ x
+
+;; Reserve enough stack space to store all GP and XMM
+;; registers and emulation function arguments
+;; e.g. void emulate_AESXXX(xmm_reg *dst, xmm_reg *src);
+%define RES_STACK_SZ (ARG2_OFFSET + ARG_SZ)
+
+;; Allocate stack space and save GP registers
+%macro SAVE_GP_REGS 0
+ push rax
+ mov rax, rsp
+ sub rsp, RES_STACK_SZ
+ and rsp, -16
+%assign gp_regs_i 0
+%rep NUM_GP_REGS
+ mov [rsp + 8*gp_regs_i], GP(gp_regs_i)
+%assign gp_regs_i gp_regs_i+1
+%endrep
+%endmacro
+
+;; Restore GP registers and stack pointer
+%macro RESTORE_GP_REGS 0
+%assign gp_regs_i 0
+%rep NUM_GP_REGS
+ mov GP(gp_regs_i), [rsp + 8*gp_regs_i]
+%assign gp_regs_i gp_regs_i+1
+%endrep
+ mov rsp, rax
+ pop rax
+%endmacro
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; Generic macro to translate AESNI instructions to AESNI emulation functions
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+%macro EMULATE_AESNI 4
+%define %%func %1
+%define %%src_dst %2
+%define %%key %3
+%define %%imm %4
+
+%ifdef LINUX
+%define %%arg1 rdi
+%define %%arg2 rsi
+%define %%arg3 rdx
+%else
+%define %%arg1 rcx
+%define %%arg2 rdx
+%define %%arg3 r8
+%endif
+
+;; Check if key is reg or ptr
+%assign IS_REG 0
+%assign x 0
+%rep NUM_XMM_REGS
+%ifidni %%key, XMM(x)
+ %assign IS_REG 1
+ %exitrep
+%endif
+%assign x x+1
+%endrep
+ ;; save GP registers to stack
+ SAVE_GP_REGS
+
+ ;; move function args onto stack before function call
+ movdqa [rsp + ARG1_OFFSET], %%src_dst
+%if IS_REG
+ movdqa [rsp + ARG2_OFFSET], %%key
+%else
+ movdqu %%src_dst, %%key
+ movdqa [rsp + ARG2_OFFSET], %%src_dst
+%endif
+ lea %%arg1, [rsp + ARG1_OFFSET]
+ lea %%arg2, [rsp + ARG2_OFFSET]
+
+ ;; move 8 bit imm rcon for aeskeygenassist
+%ifnum %%imm
+ mov BYTE(%%arg3), %%imm
+%endif
+
+;; save XMM registers to stack, as some compilers may use them in "func"
+%assign reg_idx 0
+%rep NUM_XMM_REGS
+ movdqa [rsp + XMM_OFFSET + (reg_idx * XMM_SZ)], XMM(reg_idx)
+%assign reg_idx reg_idx + 1
+%endrep
+
+;; reserve shadow space on the stack for up to 4 arguments (Windows only)
+%ifndef LINUX
+ sub rsp, 32
+%endif
+ ;; call emulation function
+ call %%func
+%ifndef LINUX
+ add rsp, 32
+%endif
+
+;; restore XMM registers from stack
+%assign reg_idx 0
+%rep NUM_XMM_REGS
+ movdqa XMM(reg_idx), [rsp + XMM_OFFSET + (reg_idx * XMM_SZ)]
+%assign reg_idx reg_idx + 1
+%endrep
+
+ ;; Destination XMM gets overwritten with result from func
+ movdqa %%src_dst, [rsp + ARG1_OFFSET]
+
+ RESTORE_GP_REGS
+%endmacro
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; Macros to translate AESNI instructions to AESNI emulation functions
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; AESENC translation macro
+%macro EMULATE_AESENC 2
+%define %%src_dst %1
+%define %%key %2
+ EMULATE_AESNI emulate_AESENC, %%src_dst, %%key, ""
+%endmacro
+
+;; AESENCLAST translation macro
+%macro EMULATE_AESENCLAST 2
+%define %%src_dst %1
+%define %%key %2
+ EMULATE_AESNI emulate_AESENCLAST, %%src_dst, %%key, ""
+%endmacro
+
+;; AESDEC translation macro
+%macro EMULATE_AESDEC 2
+%define %%src_dst %1
+%define %%key %2
+ EMULATE_AESNI emulate_AESDEC, %%src_dst, %%key, ""
+%endmacro
+
+;; AESDECLAST translation macro
+%macro EMULATE_AESDECLAST 2
+%define %%src_dst %1
+%define %%key %2
+ EMULATE_AESNI emulate_AESDECLAST, %%src_dst, %%key, ""
+%endmacro
+
+;; AESIMC translation macro
+%macro EMULATE_AESIMC 2
+%define %%src_dst %1
+%define %%key %2
+ EMULATE_AESNI emulate_AESIMC, %%src_dst, %%key, ""
+%endmacro
+
+;; AESKEYGENASSIST translation macro
+%macro EMULATE_AESKEYGENASSIST 3
+%define %%src_dst %1
+%define %%key %2
+%define %%imm %3
+ EMULATE_AESNI emulate_AESKEYGENASSIST, %%src_dst, %%key, %%imm
+%endmacro
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; AESNI defines
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+%ifndef NO_AESNI_RENAME
+%define aesenc EMULATE_AESENC
+%define aesenclast EMULATE_AESENCLAST
+%define aesdec EMULATE_AESDEC
+%define aesdeclast EMULATE_AESDECLAST
+%define aesimc EMULATE_AESIMC
+%define aeskeygenassist EMULATE_AESKEYGENASSIST
+%endif
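+
+;; Illustrative effect of the renaming above (not part of the library): with
+;; NO_AESNI_RENAME undefined, code written as
+;;      aesenc          xmm1, xmm2
+;;      aeskeygenassist xmm3, xmm1, 0x1
+;; assembles into calls to emulate_AESENC and emulate_AESKEYGENASSIST instead
+;; of emitting the AES-NI instructions.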
+
+extern emulate_AESENC
+extern emulate_AESENCLAST
+extern emulate_AESDEC
+extern emulate_AESDECLAST
+extern emulate_AESIMC
+extern emulate_AESKEYGENASSIST
+
+%endif ; end ifndef _AESNI_EMU_INC_
diff --git a/src/spdk/intel-ipsec-mb/include/clear_regs.asm b/src/spdk/intel-ipsec-mb/include/clear_regs.asm
new file mode 100644
index 000000000..6cb48c49e
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/include/clear_regs.asm
@@ -0,0 +1,196 @@
+;;
+;; Copyright (c) 2019, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%ifndef _CLEAR_REGS_ASM_
+%define _CLEAR_REGS_ASM_
+
+%include "include/os.asm"
+
+;
+; This macro clears any GP registers passed
+;
+%macro clear_gps 1-16
+%define %%NUM_REGS %0
+%rep %%NUM_REGS
+ xor %1, %1
+%rotate 1
+%endrep
+%endmacro
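+
+;; Illustrative use (not part of the library):
+;;      clear_gps r10, r11
+;; expands to:
+;;      xor r10, r10
+;;      xor r11, r11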
+
+;
+; This macro clears any XMM registers passed on SSE
+;
+%macro clear_xmms_sse 1-16
+%define %%NUM_REGS %0
+%rep %%NUM_REGS
+ pxor %1, %1
+%rotate 1
+%endrep
+%endmacro
+
+;
+; This macro clears any XMM registers passed on AVX
+;
+%macro clear_xmms_avx 1-16
+%define %%NUM_REGS %0
+%rep %%NUM_REGS
+ vpxor %1, %1
+%rotate 1
+%endrep
+%endmacro
+
+;
+; This macro clears any YMM registers passed
+;
+%macro clear_ymms 1-16
+%define %%NUM_REGS %0
+%rep %%NUM_REGS
+ vpxor %1, %1
+%rotate 1
+%endrep
+%endmacro
+
+;
+; This macro clears any ZMM registers passed
+;
+%macro clear_zmms 1-32
+%define %%NUM_REGS %0
+%rep %%NUM_REGS
+ vpxorq %1, %1
+%rotate 1
+%endrep
+%endmacro
+
+;
+; This macro clears all scratch GP registers
+; for Windows or Linux
+;
+%macro clear_scratch_gps_asm 0
+ clear_gps rax, rcx, rdx, r8, r9, r10, r11
+%ifdef LINUX
+ clear_gps rdi, rsi
+%endif
+%endmacro
+
+;
+; This macro clears all scratch XMM registers on SSE
+;
+%macro clear_scratch_xmms_sse_asm 0
+%ifdef LINUX
+%assign i 0
+%rep 16
+ pxor xmm %+ i, xmm %+ i
+%assign i (i+1)
+%endrep
+; On Windows, XMM0-XMM5 registers are scratch registers
+%else
+%assign i 0
+%rep 6
+ pxor xmm %+ i, xmm %+ i
+%assign i (i+1)
+%endrep
+%endif ; LINUX
+%endmacro
+
+;
+; This macro clears all scratch XMM registers on AVX
+;
+%macro clear_scratch_xmms_avx_asm 0
+%ifdef LINUX
+ vzeroall
+; On Windows, XMM0-XMM5 registers are scratch registers
+%else
+%assign i 0
+%rep 6
+ vpxor xmm %+ i, xmm %+ i
+%assign i (i+1)
+%endrep
+%endif ; LINUX
+%endmacro
+
+;
+; This macro clears all scratch YMM registers
+;
+; It should be called before restoring the XMM registers
+; for Windows (XMM6-XMM15)
+;
+%macro clear_scratch_ymms_asm 0
+; On Linux, all YMM registers are scratch registers
+%ifdef LINUX
+ vzeroall
+; On Windows, YMM0-YMM5 registers are scratch registers.
+; The upper 128 bits of YMM6-YMM15 are scratch too, but
+; the lower 128 bits are to be restored after calling this macro;
+; restoring them clears the upper bits as well.
+%else
+%assign i 0
+%rep 6
+ vpxor ymm %+ i, ymm %+ i
+%assign i (i+1)
+%endrep
+%endif ; LINUX
+%endmacro
+
+;
+; This macro clears all scratch ZMM registers
+;
+; It should be called before restoring the XMM registers
+; for Windows (XMM6-XMM15). YMM registers are used
+; on purpose, since XOR'ing YMM registers is faster
+; than XOR'ing ZMM registers, and the operation also
+; clears the upper 256 bits.
+;
+%macro clear_scratch_zmms_asm 0
+; On Linux, all ZMM registers are scratch registers
+%ifdef LINUX
+ vzeroall
+ ;; vzeroall only clears the first 16 ZMM registers
+%assign i 16
+%rep 16
+ vpxorq ymm %+ i, ymm %+ i
+%assign i (i+1)
+%endrep
+; On Windows, ZMM0-ZMM5 and ZMM16-ZMM31 registers are scratch registers.
+; The upper 384 bits of ZMM6-ZMM15 are scratch too, but
+; the lower 128 bits are to be restored after calling this macro;
+; restoring them clears the upper bits as well.
+%else
+%assign i 0
+%rep 6
+ vpxorq ymm %+ i, ymm %+ i
+%assign i (i+1)
+%endrep
+
+%assign i 16
+%rep 16
+ vpxorq ymm %+ i, ymm %+ i
+%assign i (i+1)
+%endrep
+%endif ; LINUX
+%endmacro
+
+%endif ;; _CLEAR_REGS_ASM
diff --git a/src/spdk/intel-ipsec-mb/include/clear_regs_mem.h b/src/spdk/intel-ipsec-mb/include/clear_regs_mem.h
new file mode 100644
index 000000000..40f888ec4
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/include/clear_regs_mem.h
@@ -0,0 +1,53 @@
+/*******************************************************************************
+ Copyright (c) 2019, Intel Corporation
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*******************************************************************************/
+
+#ifndef CLEAR_REGS_H
+#define CLEAR_REGS_H
+
+#include <stddef.h> /* size_t */
+
+#define CLEAR_SCRATCH_GPS clear_scratch_gps
+
+void force_memset_zero(void *mem, const size_t size);
+
+static inline void
+clear_mem(void *mem, const size_t size)
+{
+ force_memset_zero(mem, size);
+}
+
+static inline void
+clear_var(void *var, const size_t size)
+{
+ force_memset_zero(var, size);
+}
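+
+/*
+ * Illustrative use only (not part of this header): wiping sensitive data,
+ * e.g. a temporary key buffer, before it goes out of scope.
+ *
+ *     uint8_t tmp_key[16];
+ *     // ... use tmp_key ...
+ *     clear_mem(tmp_key, sizeof(tmp_key));
+ */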
+
+void clear_scratch_gps(void);
+void clear_scratch_xmms_sse(void);
+void clear_scratch_xmms_avx(void);
+void clear_scratch_ymms(void);
+void clear_scratch_zmms(void);
+
+#endif /* CLEAR_REGS_H */
diff --git a/src/spdk/intel-ipsec-mb/include/clear_regs_mem_fns.asm b/src/spdk/intel-ipsec-mb/include/clear_regs_mem_fns.asm
new file mode 100644
index 000000000..4fd6f7edb
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/include/clear_regs_mem_fns.asm
@@ -0,0 +1,124 @@
+;;
+;; Copyright (c) 2019, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%include "include/os.asm"
+%include "include/clear_regs.asm"
+
+section .text
+;
+; This function clears all scratch GP registers
+;
+; void clear_scratch_gps(void)
+MKGLOBAL(clear_scratch_gps,function,internal)
+clear_scratch_gps:
+
+ clear_scratch_gps_asm
+
+ ret
+
+;
+; This function clears all scratch XMM registers
+;
+; void clear_scratch_xmms_sse(void)
+MKGLOBAL(clear_scratch_xmms_sse,function,internal)
+clear_scratch_xmms_sse:
+
+ clear_scratch_xmms_sse_asm
+
+ ret
+
+;
+; This function clears all scratch XMM registers
+;
+; It should be called before restoring the XMM registers
+; for Windows (XMM6-XMM15)
+;
+; void clear_scratch_xmms_avx(void)
+MKGLOBAL(clear_scratch_xmms_avx,function,internal)
+clear_scratch_xmms_avx:
+
+ clear_scratch_xmms_avx_asm
+
+ ret
+
+;
+; This function clears all scratch YMM registers
+;
+; It should be called before restoring the XMM registers
+; for Windows (XMM6-XMM15)
+;
+; void clear_scratch_ymms(void)
+MKGLOBAL(clear_scratch_ymms,function,internal)
+clear_scratch_ymms:
+
+ clear_scratch_ymms_asm
+
+ ret
+
+;
+; This function clears all scratch ZMM registers
+;
+; It should be called before restoring the XMM registers
+; for Windows (XMM6-XMM15). YMM registers are used
+; on purpose, since XOR'ing YMM registers is faster
+; than XOR'ing ZMM registers, and the operation also
+; clears the upper 256 bits
+;
+; void clear_scratch_zmms(void)
+MKGLOBAL(clear_scratch_zmms,function,internal)
+clear_scratch_zmms:
+
+ clear_scratch_zmms_asm
+
+ ret
+
+;
+; This function zeroes the memory passed to it
+;
+; void force_memset_zero(void *mem, const size_t size)
+MKGLOBAL(force_memset_zero,function,internal)
+force_memset_zero:
+
+%ifdef LINUX
+ mov rcx, rsi
+%else
+ push rdi
+ mov rdi, rcx
+ mov rcx, rdx
+%endif
+ xor eax, eax
+ cld
+ rep stosb
+
+%ifndef LINUX
+ pop rdi
+%endif
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
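
The routine above backs force_memset_zero(): on Linux it moves the size from rsi into rcx, on Windows it saves rdi and loads destination and count from rcx/rdx, then zeroes the buffer with rep stosb. A plain memset() on memory that is about to go out of scope may be removed by the compiler as a dead store, which is exactly what this function prevents. A portable C sketch of the same intent (not the library's implementation) could use volatile stores:

    #include <stddef.h>
    #include <stdint.h>

    /* Sketch only: writes go through a volatile pointer, so the compiler must
     * perform them even when the memory is never read again, mirroring the
     * guarantee the rep stosb routine above provides. */
    static void force_memset_zero_c(void *mem, const size_t size)
    {
            volatile uint8_t *p = (volatile uint8_t *) mem;
            size_t i;

            for (i = 0; i < size; i++)
                    p[i] = 0;
    }
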
diff --git a/src/spdk/intel-ipsec-mb/include/const.inc b/src/spdk/intel-ipsec-mb/include/const.inc
new file mode 100644
index 000000000..e77e80d2e
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/include/const.inc
@@ -0,0 +1,163 @@
+;;
+;; Copyright (c) 2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%ifndef _CONST_INC_
+%define _CONST_INC_
+
+;;; Tables used to insert word into a SIMD register
+extern len_shift_tab
+extern len_mask_tab
+extern shift_tab_16
+
+;;; Table to do 0x80 byte shift for padding prefix
+extern padding_0x80_tab16
+
+;;; Size of len_shift_tab defined in const.asm module
+%define len_tab_diff 128
+
+; PINSRW_COMMON insert word into 128 bit SIMD register
+%macro PINSRW_COMMON 7
+
+%define %%type %1 ; instruction type - sse or avx
+%define %%dest %2 ; dest XMM reg to insert word
+%define %%tmp_simd %3 ; XMM reg to clobber
+%define %%tmp_gp %4 ; GP reg to clobber
+%define %%idx %5 ; word index to insert value into XMM
+%define %%val %6 ; word value to insert into idx
+%define %%scale_idx %7 ; flag to set if index is to be scaled x16
+
+%ifidn %%scale_idx, scale_x16
+ shl %%idx, 4 ; scale idx up x16
+%endif
+%ifnum %%val
+ ;; immediate value passed on
+ mov DWORD(%%tmp_gp), %%val
+%ifidn %%type, sse
+ movd %%tmp_simd, DWORD(%%tmp_gp)
+%else
+ vmovd %%tmp_simd, DWORD(%%tmp_gp)
+%endif
+%else
+ ;; register name passed on
+%ifidn %%type, sse
+ movd %%tmp_simd, DWORD(%%val)
+%else
+ vmovd %%tmp_simd, DWORD(%%val)
+%endif
+%endif
+ lea %%tmp_gp, [rel len_shift_tab]
+ ;; check type - SSE or AVX
+%ifidn %%type, sse
+ pshufb %%tmp_simd, [%%tmp_gp + %%idx]
+ pand %%dest, [%%tmp_gp + len_tab_diff + %%idx]
+ por %%dest, %%tmp_simd
+%else
+ vpshufb %%tmp_simd, [%%tmp_gp + %%idx]
+ vpand %%dest, [%%tmp_gp + len_tab_diff + %%idx]
+ vpor %%dest, %%tmp_simd
+%endif
+%ifidn %%scale_idx, scale_x16
+ shr %%idx, 4 ; reset idx
+%endif
+%endmacro
+
+;;; Call SSE macro
+%define XPINSRW PINSRW_COMMON sse,
+
+;;; Call AVX macro
+%define XVPINSRW PINSRW_COMMON avx,
+
+
+;;; VPINSRW_M256 insert word into 32 byte memory range
+%macro VPINSRW_M256 8
+
+%define %%mem_addr %1 ; 16 byte aligned memory address to insert word
+%define %%tmp_simd1 %2 ; XMM reg to clobber
+%define %%tmp_simd2 %3 ; XMM reg to clobber
+%define %%tmp_gp %4 ; GP reg to clobber
+%define %%offset %5 ; GP reg used to store offset
+%define %%idx %6 ; word index to insert value
+%define %%val %7 ; word value to insert into idx
+%define %%scale_idx %8 ; flag to set if index is to be scaled x16
+
+ mov %%offset, %%idx
+ and %%offset, 0x8 ; set offset 0 or 8
+ and %%idx, 0x7 ; remove offset from idx
+ vmovdqa %%tmp_simd1, [%%mem_addr + %%offset*2]
+ XVPINSRW %%tmp_simd1, %%tmp_simd2, %%tmp_gp, %%idx, %%val, %%scale_idx
+ vmovdqa [%%mem_addr + %%offset*2], %%tmp_simd1
+ or %%idx, %%offset ; reset offset
+%endmacro
+
+;;; PSLB_COMMON shift bytes in a 128-bit SIMD register
+%macro PSLB_COMMON 6
+
+%define %%type %1 ; [in] instruction type - sse or avx
+%define %%dir %2 ; [in] shift direction - left or right
+%define %%reg %3 ; [in/out] XMM reg to shift bytes
+%define %%num %4 ; [in] GP reg containing number of bytes to shift
+%define %%shuf_tab %5 ; [out] XMM reg to store shuffle table
+%define %%tmp_gp %6 ; [clobbered] GP reg to clobber
+
+ ;; load shift table into %%shuf_tab
+ lea %%tmp_gp, [rel shift_tab_16 + 16]
+%ifidn %%dir, left
+ sub %%tmp_gp, %%num
+%else
+ add %%tmp_gp, %%num
+%endif
+
+%ifidn %%type, sse
+ movdqu %%shuf_tab, [%%tmp_gp]
+ pshufb %%reg, %%shuf_tab
+%else
+ vmovdqu %%shuf_tab, [%%tmp_gp]
+ vpshufb %%reg, %%shuf_tab
+%endif
+%endmacro
+
+;;; Call SSE left shift macro
+%macro XPSLLB 4
+ PSLB_COMMON sse, left, %1,%2,%3,%4
+%endm
+
+;;; Call SSE right shift macro
+%macro XPSRLB 4
+ PSLB_COMMON sse, right, %1,%2,%3,%4
+%endm
+
+;;; Call AVX left shift macro
+%macro XVPSLLB 4
+ PSLB_COMMON avx, left, %1,%2,%3,%4
+%endm
+
+;;; Call AVX right shift macro
+%macro XVPSRLB 4
+ PSLB_COMMON avx, right, %1,%2,%3,%4
+%endm
+
+%endif ; end ifndef _CONST_INC_
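
The PINSRW_COMMON macro above emulates a variable-index word insert: PINSRW itself only accepts an immediate lane index, so the macro instead shuffles the new value into the target lane with PSHUFB and len_shift_tab, masks that lane out of the destination with PAND and the mask table len_tab_diff bytes further on, and ORs the two together. A scalar C model of the effect (names here are illustrative only) might look like this:

    #include <stdint.h>

    /* Model of XPINSRW/XVPINSRW: replace 16-bit lane 'idx' of an 8-lane vector
     * with 'val' using the same mask-and-merge idea as the macro. */
    static void xpinsrw_model(uint16_t dest[8], const unsigned idx,
                              const uint16_t val)
    {
            unsigned i;

            for (i = 0; i < 8; i++) {
                    /* PAND with the mask table: keep every lane except 'idx' */
                    const uint16_t keep = (i == idx) ? 0 : dest[i];
                    /* PSHUFB with len_shift_tab: 'val' lands in lane 'idx' only */
                    const uint16_t ins = (i == idx) ? val : 0;

                    dest[i] = keep | ins;     /* POR merges the two partial vectors */
            }
    }
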
diff --git a/src/spdk/intel-ipsec-mb/include/constant_lookup.asm b/src/spdk/intel-ipsec-mb/include/constant_lookup.asm
new file mode 100644
index 000000000..a3c81dc75
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/include/constant_lookup.asm
@@ -0,0 +1,561 @@
+;;
+;; Copyright (c) 2019, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%include "include/os.asm"
+%include "include/reg_sizes.asm"
+
+section .data
+default rel
+
+align 16
+idx_tab8:
+ db 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
+ db 0x8, 0x9, 0xA, 0xB, 0xC, 0xD, 0xE, 0xF,
+
+align 16
+add_16:
+ db 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
+ db 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10
+
+align 16
+idx_tab16:
+ dw 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7
+
+align 16
+add_8:
+ dw 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8
+
+align 16
+idx_tab32:
+ dd 0x0, 0x1, 0x2, 0x3
+
+align 16
+add_4:
+ dd 0x4, 0x4, 0x4, 0x4
+
+align 16
+idx_tab64:
+ dq 0x0, 0x1
+
+add_2:
+ dq 0x2, 0x2
+
+align 16
+bcast_mask:
+ db 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01,
+ db 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01
+
+section .text
+
+%ifdef LINUX
+ %define arg1 rdi
+ %define arg2 rsi
+ %define arg3 rdx
+%else
+ %define arg1 rcx
+ %define arg2 rdx
+ %define arg3 r8
+%endif
+
+%define bcast_idx xmm0
+%define xadd xmm1
+%define accum_val xmm2
+%define xindices xmm3
+%define xtmp xmm4
+%define xtmp2 xmm5
+%define tmp r9
+%define offset r10
+
+%define table arg1
+%define idx arg2
+%define size arg3
+
+; uint8_t lookup_8bit_sse(const void *table, const uint32_t idx, const uint32_t size);
+; arg 1 : pointer to table to look up
+; arg 2 : index to look up
+; arg 3 : size of table to look up (multiple of 16 bytes)
+MKGLOBAL(lookup_8bit_sse,function,internal)
+lookup_8bit_sse:
+
+ ;; Number of loop iters = matrix size / 16 (number of values in XMM)
+ shr size, 4
+ je exit8_sse
+
+ xor offset, offset
+
+ ;; Broadcast idx to look up
+ movd bcast_idx, DWORD(idx)
+ pxor xtmp, xtmp
+ pxor accum_val, accum_val
+ pshufb bcast_idx, xtmp
+
+ movdqa xadd, [rel add_16]
+ movdqa xindices, [rel idx_tab8]
+
+loop8_sse:
+ movdqa xtmp, xindices
+
+ ;; Compare indices with idx
+ ;; This generates a mask with all 0s except for the position where idx matches (all 1s here)
+ pcmpeqb xtmp, bcast_idx
+
+ ;; Load next 16 values
+ movdqa xtmp2, [table + offset]
+
+ ;; This generates data with all 0s except the value we are looking for in the index to look up
+ pand xtmp2, xtmp
+
+ por accum_val, xtmp2
+
+ ;; Get next 16 indices
+ paddb xindices, xadd
+
+ add offset, 16
+ dec size
+
+ jne loop8_sse
+
+ ;; Extract value from XMM register
+ movdqa xtmp, accum_val
+ pslldq xtmp, 8 ; shift left by 64 bits
+ por accum_val, xtmp
+
+ movdqa xtmp, accum_val
+ pslldq xtmp, 4 ; shift left by 32 bits
+ por accum_val, xtmp
+
+ movdqa xtmp, accum_val
+ pslldq xtmp, 2 ; shift left by 16 bits
+ por accum_val, xtmp
+
+ movdqa xtmp, accum_val
+ pslldq xtmp, 1 ; shift left by 8 bits
+ por accum_val, xtmp
+
+ pextrb rax, accum_val, 15
+
+exit8_sse:
+ ret
+
+; uint8_t lookup_8bit_avx(const void *table, const uint32_t idx, const uint32_t size);
+; arg 1 : pointer to table to look up
+; arg 2 : index to look up
+; arg 3 : size of table to look up (multiple of 16 bytes)
+MKGLOBAL(lookup_8bit_avx,function,internal)
+lookup_8bit_avx:
+ ;; Number of loop iters = matrix size / 16 (number of values in XMM)
+ shr size, 4
+ je exit8_avx
+
+ xor offset, offset
+
+ ;; Broadcast idx to look up
+ vmovd bcast_idx, DWORD(idx)
+ vpxor xtmp, xtmp
+ vpxor accum_val, accum_val
+ vpshufb bcast_idx, xtmp
+
+ vmovdqa xadd, [rel add_16]
+ vmovdqa xindices, [rel idx_tab8]
+
+loop8_avx:
+ ;; Compare indices with idx
+ ;; This generates a mask with all 0s except for the position where idx matches (all 1s here)
+ vpcmpeqb xtmp, xindices, bcast_idx
+
+ ;; Load next 16 values
+ vmovdqa xtmp2, [table + offset]
+
+ ;; This generates data with all 0s except the value we are looking for in the index to look up
+ vpand xtmp2, xtmp
+
+ vpor accum_val, xtmp2
+
+ ;; Get next 16 indices
+ vpaddb xindices, xadd
+
+ add offset, 16
+ dec size
+
+ jne loop8_avx
+
+ ;; Extract value from XMM register
+ vpslldq xtmp, accum_val, 8 ; shift left by 64 bits
+ vpor accum_val, xtmp
+
+ vpslldq xtmp, accum_val, 4 ; shift left by 32 bits
+ vpor accum_val, xtmp
+
+ vpslldq xtmp, accum_val, 2 ; shift left by 16 bits
+ vpor accum_val, xtmp
+
+ vpslldq xtmp, accum_val, 1 ; shift left by 8 bits
+ vpor accum_val, xtmp
+
+ vpextrb rax, accum_val, 15
+
+exit8_avx:
+
+ ret
+
+; uint16_t lookup_16bit_sse(const void *table, const uint32_t idx, const uint32_t size);
+; arg 1 : pointer to table to look up
+; arg 2 : index to look up
+; arg 3 : size of table to look up
+MKGLOBAL(lookup_16bit_sse,function,internal)
+lookup_16bit_sse:
+
+ ;; Number of loop iters = matrix size / 8 (number of values in XMM)
+ shr size, 3
+ je exit16_sse
+
+ xor offset, offset
+
+ ;; Broadcast idx to look up
+ movd bcast_idx, DWORD(idx)
+ movdqa xtmp, [rel bcast_mask]
+ pxor accum_val, accum_val
+ pshufb bcast_idx, xtmp
+
+ movdqa xadd, [rel add_8]
+ movdqa xindices, [rel idx_tab16]
+
+loop16_sse:
+
+ movdqa xtmp, xindices
+
+ ;; Compare indices with idx
+ ;; This generates a mask with all 0s except for the position where idx matches (all 1s here)
+ pcmpeqw xtmp, bcast_idx
+
+ ;; Load next 8 values
+ movdqa xtmp2, [table + offset]
+
+ ;; This generates data with all 0s except the value we are looking for in the index to look up
+ pand xtmp2, xtmp
+
+ por accum_val, xtmp2
+
+ ;; Get next 8 indices
+ paddw xindices, xadd
+ add offset, 16
+ dec size
+
+ jne loop16_sse
+
+ ;; Extract value from XMM register
+ movdqa xtmp, accum_val
+ pslldq xtmp, 8 ; shift left by 64 bits
+ por accum_val, xtmp
+
+ movdqa xtmp, accum_val
+ pslldq xtmp, 4 ; shift left by 32 bits
+ por accum_val, xtmp
+
+ movdqa xtmp, accum_val
+ pslldq xtmp, 2 ; shift left by 16 bits
+ por accum_val, xtmp
+
+ pextrw rax, accum_val, 7
+
+exit16_sse:
+ ret
+
+; uint16_t lookup_16bit_avx(const void *table, const uint32_t idx, const uint32_t size);
+; arg 1 : pointer to table to look up
+; arg 2 : index to look up
+; arg 3 : size of table to look up
+MKGLOBAL(lookup_16bit_avx,function,internal)
+lookup_16bit_avx:
+
+ ;; Number of loop iters = matrix size / 8 (number of values in XMM)
+ shr size, 3
+ je exit16_avx
+
+ xor offset, offset
+
+ ;; Broadcast idx to look up
+ vmovd bcast_idx, DWORD(idx)
+ vmovdqa xtmp, [rel bcast_mask]
+ vpxor accum_val, accum_val
+ vpshufb bcast_idx, xtmp
+
+ vmovdqa xadd, [rel add_8]
+ vmovdqa xindices, [rel idx_tab16]
+
+loop16_avx:
+
+ ;; Compare indices with idx
+ ;; This generates a mask with all 0s except for the position where idx matches (all 1s here)
+ vpcmpeqw xtmp, xindices, bcast_idx
+
+ ;; Load next 8 values
+ vmovdqa xtmp2, [table + offset]
+
+ ;; This generates data with all 0s except the value we are looking for in the index to look up
+ vpand xtmp2, xtmp
+
+ vpor accum_val, xtmp2
+
+ ;; Get next 8 indices
+ vpaddw xindices, xadd
+ add offset, 16
+ dec size
+
+ jne loop16_avx
+
+ ;; Extract value from XMM register
+ vpslldq xtmp, accum_val, 8 ; shift left by 64 bits
+ vpor accum_val, xtmp
+
+ vpslldq xtmp, accum_val, 4 ; shift left by 32 bits
+ vpor accum_val, xtmp
+
+ vpslldq xtmp, accum_val, 2 ; shift left by 16 bits
+ vpor accum_val, xtmp
+
+ vpextrw rax, accum_val, 7
+
+exit16_avx:
+ ret
+
+; uint32_t lookup_32bit_sse(const void *table, const uint32_t idx, const uint32_t size);
+; arg 1 : pointer to table to look up
+; arg 2 : index to look up
+; arg 3 : size of table to look up
+MKGLOBAL(lookup_32bit_sse,function,internal)
+lookup_32bit_sse:
+
+ ;; Number of loop iters = matrix size / 4 (number of values in XMM)
+ shr size, 2
+ je exit32_sse
+
+ xor offset, offset
+
+ ;; Broadcast idx to look up
+ movd bcast_idx, DWORD(idx)
+ pxor accum_val, accum_val
+ pshufd bcast_idx, bcast_idx, 0
+
+ movdqa xadd, [rel add_4]
+ movdqa xindices, [rel idx_tab32]
+
+loop32_sse:
+ movdqa xtmp, xindices
+
+ ;; Compare indices with idx
+ ;; This generates a mask with all 0s except for the position where idx matches (all 1s here)
+ pcmpeqd xtmp, bcast_idx
+
+ ;; Load next 4 values
+ movdqa xtmp2, [table + offset]
+
+ ;; This generates data with all 0s except the value we are looking for in the index to look up
+ pand xtmp2, xtmp
+
+ por accum_val, xtmp2
+
+ ;; Get next 4 indices
+ paddd xindices, xadd
+ add offset, 16
+ dec size
+
+ jne loop32_sse
+
+ ;; Extract value from XMM register
+ movdqa xtmp, accum_val
+ psrldq xtmp, 8 ; shift right by 64 bits
+ por accum_val, xtmp
+
+ movdqa xtmp, accum_val
+ psrldq xtmp, 4 ; shift right by 32 bits
+ por accum_val, xtmp
+
+ movd eax, accum_val
+
+exit32_sse:
+ ret
+
+
+; uint32_t lookup_32bit_avx(const void *table, const uint32_t idx, const uint32_t size);
+; arg 1 : pointer to table to look up
+; arg 2 : index to look up
+; arg 3 : size of table to look up
+MKGLOBAL(lookup_32bit_avx,function,internal)
+lookup_32bit_avx:
+ ;; Number of loop iters = matrix size / 4 (number of values in XMM)
+ shr size, 2
+ je exit32_avx
+
+ xor offset, offset
+
+ ;; Broadcast idx to look up
+ vmovd bcast_idx, DWORD(idx)
+ vpxor accum_val, accum_val
+ vpshufd bcast_idx, bcast_idx, 0
+
+ vmovdqa xadd, [rel add_4]
+ vmovdqa xindices, [rel idx_tab32]
+
+loop32_avx:
+ ;; Compare indices with idx
+ ;; This generates a mask with all 0s except for the position where idx matches (all 1s here)
+ vpcmpeqd xtmp, xindices, bcast_idx
+
+ ;; Load next 4 values
+ vmovdqa xtmp2, [table + offset]
+
+ ;; This generates data with all 0s except the value we are looking for in the index to look up
+ vpand xtmp2, xtmp
+
+ vpor accum_val, xtmp2
+
+ ;; Get next 4 indices
+ vpaddd xindices, xadd
+ add offset, 16
+ dec size
+
+ jne loop32_avx
+
+ ;; Extract value from XMM register
+ vpsrldq xtmp, accum_val, 8 ; shift right by 64 bits
+ vpor accum_val, xtmp
+
+ vpsrldq xtmp, accum_val, 4 ; shift right by 32 bits
+ vpor accum_val, xtmp
+
+ vmovd eax, accum_val
+
+exit32_avx:
+ ret
+
+
+; uint64_t lookup_64bit_sse(const void *table, const uint32_t idx, const uint32_t size);
+; arg 1 : pointer to table to look up
+; arg 2 : index to look up
+; arg 3 : size of table to look up
+MKGLOBAL(lookup_64bit_sse,function,internal)
+lookup_64bit_sse:
+ ;; Number of loop iters = matrix size / 2 (number of values in XMM)
+ shr size, 1
+ je exit64_sse
+
+ xor offset, offset
+
+ ;; Broadcast idx to look up
+ movq bcast_idx, idx
+ pxor accum_val, accum_val
+ pinsrq bcast_idx, idx, 1
+
+ movdqa xadd, [rel add_2]
+ movdqa xindices, [rel idx_tab64]
+
+loop64_sse:
+ movdqa xtmp, xindices
+
+ ;; Compare indices with idx
+ ;; This generates a mask with all 0s except for the position where idx matches (all 1s here)
+ pcmpeqq xtmp, bcast_idx
+
+ ;; Load next 2 values
+ movdqa xtmp2, [table + offset]
+
+ ;; This generates data with all 0s except the value we are looking for in the index to look up
+ pand xtmp2, xtmp
+
+ por accum_val, xtmp2
+
+ ;; Get next 2 indices
+ paddq xindices, xadd
+ add offset, 16
+ dec size
+
+ jne loop64_sse
+
+ ;; Extract value from XMM register
+ movdqa xtmp, accum_val
+ psrldq xtmp, 8 ; shift right by 64 bits
+ por accum_val, xtmp
+
+ movq rax, accum_val
+
+exit64_sse:
+ ret
+
+
+; uint64_t lookup_64bit_avx(const void *table, const uint32_t idx, const uint32_t size);
+; arg 1 : pointer to table to look up
+; arg 2 : index to look up
+; arg 3 : size of table to look up
+MKGLOBAL(lookup_64bit_avx,function,internal)
+lookup_64bit_avx:
+ ;; Number of loop iters = matrix size / 2 (number of values in XMM)
+ shr size, 1
+ je exit64_avx
+
+ xor offset, offset
+
+ vmovq bcast_idx, idx
+ vpxor accum_val, accum_val
+ vpinsrq bcast_idx, idx, 1
+
+ vmovdqa xadd, [rel add_2]
+ vmovdqa xindices, [rel idx_tab64]
+
+loop64_avx:
+ ;; Compare indices with idx
+ ;; This generates a mask with all 0s except for the position where idx matches (all 1s here)
+ vpcmpeqq xtmp, xindices, bcast_idx
+
+ ;; Load next 2 values
+ vmovdqa xtmp2, [table + offset]
+
+ ;; This generates data with all 0s except the value we are looking for in the index to look up
+ vpand xtmp2, xtmp
+
+ vpor accum_val, xtmp2
+
+ ;; Get next 2 indices
+ vpaddq xindices, xadd
+ add offset, 16
+ dec size
+
+ jne loop64_avx
+
+ ;; Extract value from XMM register
+ vpsrldq xtmp, accum_val, 8 ; shift right by 64 bits
+ vpor accum_val, xtmp
+
+ vmovq rax, accum_val
+
+exit64_avx:
+ ret
+
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
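
All of the lookup routines above follow the same pattern: broadcast the index, compare it against a running vector of element indices to build an all-ones mask in the matching lane, AND that mask with the table data and OR the result into an accumulator, so every part of the table is read regardless of which index was requested. A scalar C sketch of the 32-bit variant captures the idea:

    #include <stdint.h>

    /* Scalar model of lookup_32bit_sse/avx: every entry is read once and the
     * wanted one is selected by a data-independent mask, so the memory access
     * pattern does not leak 'idx' the way a plain table[idx] read can. */
    static uint32_t lookup_32bit_model(const uint32_t *table, const uint32_t idx,
                                       const uint32_t size)
    {
            uint32_t i, result = 0;

            for (i = 0; i < size; i++) {
                    /* all-ones when i == idx, zero otherwise (PCMPEQD equivalent) */
                    const uint32_t mask = (uint32_t)0 - (uint32_t)(i == idx);

                    result |= table[i] & mask;     /* PAND + POR equivalent */
            }
            return result;
    }
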
diff --git a/src/spdk/intel-ipsec-mb/include/constant_lookup.h b/src/spdk/intel-ipsec-mb/include/constant_lookup.h
new file mode 100644
index 000000000..bd56a24d2
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/include/constant_lookup.h
@@ -0,0 +1,173 @@
+/*******************************************************************************
+ Copyright (c) 2019, Intel Corporation
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*******************************************************************************/
+
+#ifndef CONSTANT_LOOKUP_H
+#define CONSTANT_LOOKUP_H
+
+#include "intel-ipsec-mb.h"
+
+#ifdef SAFE_LOOKUP
+#define LOOKUP8_SSE(_table, _idx, _size) \
+ lookup_8bit_sse(_table, _idx, _size)
+#define LOOKUP8_AVX(_table, _idx, _size) \
+ lookup_8bit_avx(_table, _idx, _size)
+#define LOOKUP16_SSE(_table, _idx, _size) \
+ lookup_16bit_sse(_table, _idx, _size)
+#define LOOKUP16_AVX(_table, _idx, _size) \
+ lookup_16bit_avx(_table, _idx, _size)
+#define LOOKUP32_SSE(_table, _idx, _size) \
+ lookup_32bit_sse(_table, _idx, _size)
+#define LOOKUP32_AVX(_table, _idx, _size) \
+ lookup_32bit_avx(_table, _idx, _size)
+#define LOOKUP64_SSE(_table, _idx, _size) \
+ lookup_64bit_sse(_table, _idx, _size)
+#define LOOKUP64_AVX(_table, _idx, _size) \
+ lookup_64bit_avx(_table, _idx, _size)
+#else
+#define LOOKUP8_SSE(_table, _idx, _size) \
+ _table[_idx]
+#define LOOKUP8_AVX(_table, _idx, _size) \
+ _table[_idx]
+#define LOOKUP16_SSE(_table, _idx, _size) \
+ _table[_idx]
+#define LOOKUP16_AVX(_table, _idx, _size) \
+ _table[_idx]
+#define LOOKUP32_SSE(_table, _idx, _size) \
+ _table[_idx]
+#define LOOKUP32_AVX(_table, _idx, _size) \
+ _table[_idx]
+#define LOOKUP64_SSE(_table, _idx, _size) \
+ _table[_idx]
+#define LOOKUP64_AVX(_table, _idx, _size) \
+ _table[_idx]
+#endif
+
+/*
+ * @brief Constant time SSE lookup function on variable size table
+ * with 8-bit values
+ *
+ * @param[in] table Pointer to the table to look up (16-byte aligned)
+ * @param[in] idx Index to look up
+ * @param[in] size Number of 8 bit elements in the table (multiple of 16)
+ *
+ * @return Value looked up from the table
+ */
+uint8_t
+lookup_8bit_sse(const void *table, const uint32_t idx, const uint32_t size);
+
+/*
+ * @brief Constant time AVX lookup function on variable size table
+ * with 8-bit values
+ *
+ * @param[in] table Pointer to the table to look up (16-byte aligned)
+ * @param[in] idx Index to look up
+ * @param[in] size Number of 8 bit elements in the table (multiple of 16)
+ *
+ * @return Value looked up from the table
+ */
+uint8_t
+lookup_8bit_avx(const void *table, const uint32_t idx, const uint32_t size);
+
+/*
+ * @brief Constant time SSE lookup function on variable size table
+ * with 16-bit values
+ *
+ * @param[in] table Pointer to the table to look up (16-byte aligned)
+ * @param[in] idx Index to look up
+ * @param[in] size Number of 16 bit elements in the table (multiple of 8)
+ *
+ * @return Value looked up from the table
+ */
+uint16_t
+lookup_16bit_sse(const void *table, const uint32_t idx, const uint32_t size);
+
+/*
+ * @brief Constant time AVX lookup function on variable size table
+ * with 16-bit values
+ *
+ * @param[in] table Pointer to the table to look up (16-byte aligned)
+ * @param[in] idx Index to look up
+ * @param[in] size Number of 16 bit elements in the table (multiple of 8)
+ *
+ * @return Value looked up from the table
+ */
+uint16_t
+lookup_16bit_avx(const void *table, const uint32_t idx, const uint32_t size);
+
+/*
+ * @brief Constant time SSE lookup function on
+ * variable size table with 32-bit values
+ *
+ * @param[in] table Pointer to the table to look up (16-byte aligned)
+ * @param[in] idx Index to look up
+ * @param[in] size Number of 32 bit elements in the table (multiple of 4)
+ *
+ * @return Value looked up from the table
+ */
+uint32_t
+lookup_32bit_sse(const void *table, const uint32_t idx, const uint32_t size);
+
+/*
+ * @brief Constant time AVX lookup function on
+ * variable size table with 32-bit values
+ *
+ * @param[in] table Pointer to the table to look up (16-byte aligned)
+ * @param[in] idx Index to look up
+ * @param[in] size Number of 32 bit elements in the table (multiple of 4)
+ *
+ * @return Value looked up from the table
+ */
+uint32_t
+lookup_32bit_avx(const void *table, const uint32_t idx, const uint32_t size);
+
+/*
+ * @brief Constant time SSE lookup function on
+ * variable size table with 64-bit values
+ *
+ * @param[in] table Pointer to the table to look up (16-byte aligned)
+ * @param[in] idx Index to look up
+ * @param[in] size Number of 64 bit elements in the table (multiple of 2)
+ *
+ * @return Value looked up from the table
+ */
+uint64_t
+lookup_64bit_sse(const void *table, const uint32_t idx, const uint32_t size);
+
+/*
+ * @brief Constant time AVX lookup function on
+ * variable size table with 64-bit values
+ *
+ * @param[in] table Pointer to the table to look up (16-byte aligned)
+ * @param[in] idx Index to look up
+ * @param[in] size Number of 64 bit elements in the table (multiple of 2)
+ *
+ * @return Value looked up from the table
+ */
+uint64_t
+lookup_64bit_avx(const void *table, const uint32_t idx, const uint32_t size);
+
+#endif /* CONSTANT_LOOKUP_H */
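
A short usage sketch of the macros above: with SAFE_LOOKUP defined, an S-box read goes through the constant-time assembly routine; without it, the macro collapses to an ordinary array access. The sbox table and sbox_lookup() wrapper here are hypothetical and only stand in for any 16-byte aligned lookup table.

    #include <stdint.h>
    #include "constant_lookup.h"

    /* Hypothetical 256-entry, 16-byte aligned substitution table. */
    extern const uint8_t sbox[256];

    static uint8_t sbox_lookup(const uint8_t idx)
    {
            /* constant-time when SAFE_LOOKUP is defined, plain sbox[idx] otherwise */
            return LOOKUP8_SSE(sbox, idx, 256);
    }
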
diff --git a/src/spdk/intel-ipsec-mb/include/cpu_feature.h b/src/spdk/intel-ipsec-mb/include/cpu_feature.h
new file mode 100644
index 000000000..1347094a7
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/include/cpu_feature.h
@@ -0,0 +1,52 @@
+/*******************************************************************************
+ Copyright (c) 2018, Intel Corporation
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*******************************************************************************/
+
+#include "intel-ipsec-mb.h"
+
+#ifndef CPU_FEATURE_H
+#define CPU_FEATURE_H
+
+/**
+ * @brief Detects hardware features and returns their status
+ *
+ * @return Bitmask representing presence of CPU features/extensions,
+ * see intel-ipsec-mb.h IMB_FEATURE_xyz definitions for details.
+ */
+IMB_DLL_LOCAL uint64_t cpu_feature_detect(void);
+
+/**
+ * @brief Modifies CPU \a features mask based on requested \a flags
+ *
+ * @param flags bitmask describing CPU feature adjustments
+ * @param features bitmask describing present CPU features
+ *
+ * @return \a features with applied modifications on them via \a flags
+ */
+IMB_DLL_LOCAL uint64_t
+cpu_feature_adjust(const uint64_t flags, uint64_t features);
+
+#endif /* CPU_FEATURE_H */
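
A sketch of how the two internal helpers above are intended to combine: detect what the CPU supports, then let caller-supplied flags adjust that mask (for example to force a feature off for testing). IMB_FEATURE_AESNI is a feature bit defined in intel-ipsec-mb.h; the init_features() wrapper itself is hypothetical.

    #include <stdint.h>
    #include "cpu_feature.h"

    static uint64_t init_features(const uint64_t adjust_flags)
    {
            /* raw feature mask straight from CPUID-based detection */
            uint64_t features = cpu_feature_detect();

            /* apply caller-requested overrides to the detected mask */
            features = cpu_feature_adjust(adjust_flags, features);

            if (features & IMB_FEATURE_AESNI) {
                    /* AES-NI accelerated code paths may be selected here */
            }
            return features;
    }
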
diff --git a/src/spdk/intel-ipsec-mb/include/datastruct.asm b/src/spdk/intel-ipsec-mb/include/datastruct.asm
new file mode 100644
index 000000000..0ab1113ab
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/include/datastruct.asm
@@ -0,0 +1,235 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+; Macros for defining data structures
+
+; Usage example
+
+;START_FIELDS ; JOB_AES
+;;; name size align
+;FIELD _plaintext, 8, 8 ; pointer to plaintext
+;FIELD _ciphertext, 8, 8 ; pointer to ciphertext
+;FIELD _IV, 16, 8 ; IV
+;FIELD _keys, 8, 8 ; pointer to keys
+;FIELD _len, 4, 4 ; length in bytes
+;FIELD _status, 4, 4 ; status enumeration
+;FIELD _user_data, 8, 8 ; pointer to user data
+;UNION _union, size1, align1, \
+; size2, align2, \
+; size3, align3, \
+; ...
+;END_FIELDS
+;%assign _JOB_AES_size _FIELD_OFFSET
+;%assign _JOB_AES_align _STRUCT_ALIGN
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+; Alternate "struc-like" syntax:
+; STRUCT job_aes2
+; RES_Q .plaintext, 1
+; RES_Q .ciphertext, 1
+; RES_DQ .IV, 1
+; RES_B .nested, _JOB_AES_SIZE, _JOB_AES_ALIGN
+; RES_U .union, size1, align1, \
+; size2, align2, \
+; ...
+; ENDSTRUCT
+; ; Following only needed if nesting
+; %assign job_aes2_size _FIELD_OFFSET
+; %assign job_aes2_align _STRUCT_ALIGN
+;
+; RES_* macros take a name, a count and an optional alignment.
+; The count is in terms of the base size of the macro, and the
+; default alignment is the base size.
+; The macros are:
+; Macro Base size
+; RES_B 1
+; RES_W 2
+; RES_D 4
+; RES_Q 8
+; RES_DQ 16
+; RES_Y 32
+; RES_Z 64
+;
+; RES_U defines a union. Its arguments are a name and two or more
+; pairs of "size, alignment"
+;
+; The two assigns are only needed if this structure is being nested
+; within another. Even if the assigns are not done, one can still use
+; STRUCT_NAME_size as the size of the structure.
+;
+; Note that for nesting, you still need to assign to STRUCT_NAME_size.
+;
+; The differences between this and using "struc" directly are that each
+; type is implicitly aligned to its natural length (although this can be
+; overridden with an explicit third parameter), and that the structure
+; is padded at the end to its overall alignment.
+;
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+%ifndef _DATASTRUCT_ASM_
+%define _DATASTRUCT_ASM_
+
+;; START_FIELDS
+%macro START_FIELDS 0
+%assign _FIELD_OFFSET 0
+%assign _STRUCT_ALIGN 0
+%endm
+
+;; FIELD name size align
+%macro FIELD 3
+%define %%name %1
+%define %%size %2
+%define %%align %3
+
+%assign _FIELD_OFFSET (_FIELD_OFFSET + (%%align) - 1) & (~ ((%%align)-1))
+%%name equ _FIELD_OFFSET
+%assign _FIELD_OFFSET _FIELD_OFFSET + (%%size)
+%if (%%align > _STRUCT_ALIGN)
+%assign _STRUCT_ALIGN %%align
+%endif
+%endm
+
+;; END_FIELDS
+%macro END_FIELDS 0
+%assign _FIELD_OFFSET (_FIELD_OFFSET + _STRUCT_ALIGN-1) & (~ (_STRUCT_ALIGN-1))
+%endm
+
+%macro UNION 5-*
+%if (0 == (%0 & 1))
+ %error EVEN number of parameters to UNION Macro
+ %err
+%endif
+%rotate 1
+ %assign _UNION_SIZE %1
+ %assign _UNION_ALIGN %2
+%rep (%0 - 3)/2
+ %rotate 2
+ %if (%1 > _UNION_SIZE)
+ %assign _UNION_SIZE %1
+ %endif
+ %if (%2 > _UNION_ALIGN)
+ %assign _UNION_ALIGN %2
+ %endif
+%endrep
+%rotate 2
+FIELD %1, _UNION_SIZE, _UNION_ALIGN
+%endm
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+%macro STRUCT 1
+START_FIELDS
+struc %1
+%endm
+
+%macro ENDSTRUCT 0
+%assign %%tmp _FIELD_OFFSET
+END_FIELDS
+%assign %%tmp (_FIELD_OFFSET - %%tmp)
+%if (%%tmp > 0)
+ resb %%tmp
+%endif
+endstruc
+%endm
+
+;; RES_int name size align
+%macro RES_int 3
+%define %%name %1
+%define %%size %2
+%define %%align %3
+
+%assign _FIELD_OFFSET (_FIELD_OFFSET + (%%align) - 1) & (~ ((%%align)-1))
+align %%align
+%%name resb %%size
+%assign _FIELD_OFFSET _FIELD_OFFSET + (%%size)
+%if (%%align > _STRUCT_ALIGN)
+%assign _STRUCT_ALIGN %%align
+%endif
+%endm
+
+
+
+; macro RES_B name, size [, align]
+%macro RES_B 2-3 1
+RES_int %1, %2, %3
+%endm
+
+; macro RES_W name, size [, align]
+%macro RES_W 2-3 2
+RES_int %1, 2*(%2), %3
+%endm
+
+; macro RES_D name, size [, align]
+%macro RES_D 2-3 4
+RES_int %1, 4*(%2), %3
+%endm
+
+; macro RES_Q name, size [, align]
+%macro RES_Q 2-3 8
+RES_int %1, 8*(%2), %3
+%endm
+
+; macro RES_DQ name, size [, align]
+%macro RES_DQ 2-3 16
+RES_int %1, 16*(%2), %3
+%endm
+
+; macro RES_Y name, size [, align]
+%macro RES_Y 2-3 32
+RES_int %1, 32*(%2), %3
+%endm
+
+; macro RES_Z name, size [, align]
+%macro RES_Z 2-3 64
+RES_int %1, 64*(%2), %3
+%endm
+
+
+%macro RES_U 5-*
+%if (0 == (%0 & 1))
+ %error EVEN number of parameters to RES_U Macro
+ %err
+%endif
+%rotate 1
+ %assign _UNION_SIZE %1
+ %assign _UNION_ALIGN %2
+%rep (%0 - 3)/2
+ %rotate 2
+ %if (%1 > _UNION_SIZE)
+ %assign _UNION_SIZE %1
+ %endif
+ %if (%2 > _UNION_ALIGN)
+ %assign _UNION_ALIGN %2
+ %endif
+%endrep
+%rotate 2
+RES_int %1, _UNION_SIZE, _UNION_ALIGN
+%endm
+
+%endif ; end ifdef _DATASTRUCT_ASM_
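
Both FIELD and RES_int above advance the running offset with the standard power-of-two align-up expression before placing a field, and END_FIELDS applies the same rounding to the overall structure size. The equivalent computation in C, for reference:

    #include <stdint.h>

    /* (_FIELD_OFFSET + align - 1) & ~(align - 1): round 'offset' up to the next
     * multiple of 'align', which must be a power of two. E.g. offset 13 with an
     * 8-byte field yields 16. */
    static uint64_t align_up(const uint64_t offset, const uint64_t align)
    {
            return (offset + align - 1) & ~(align - 1);
    }
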
diff --git a/src/spdk/intel-ipsec-mb/include/dbgprint.asm b/src/spdk/intel-ipsec-mb/include/dbgprint.asm
new file mode 100644
index 000000000..d14eb0ebc
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/include/dbgprint.asm
@@ -0,0 +1,413 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+; Macros for "printing" for debug purposes from within asm code
+;
+; The basic macros are:
+; DBGPRINT16, DBGPRINT32, DBGPRINT64, DBGPRINT_XMM, DBGPRINT_YMM, DBGPRINT_ZMM
+; These are called with 1 or more arguments, all of which are of the
+; size/type as specified in the name. E.g.
+; DBGPRINT64 reg1, reg2, reg3, ...
+;
+; There is also a macro DBGPRINTL that takes one argument, a string. E.g.
+; DBGPRINTL "hit this point in the code"
+;
+; There are also variations on these with the "DBGPRINT" suffixed with "L", e.g.
+; DBGPRINTL64. These take two or more arguments, where the first is a string,
+; and the rest are of the specified type, e.g.
+; DBGPRINTL64 "Rindex", Rindex
+; Essentially, this is the same as a DBGPRINTL followed by DBGPRINT64.
+;
+; If DO_DBGPRINT is defined, then the macros write the debug information into
+; a buffer. If DO_DBGPRINT is *not* defined, then the macros expand to nothing.
+;
+; CAVEAT: The macros need a GPR. Currently, they use R15. If the first register
+; argument is R15, then it will use R14. This means that if you try
+; DBGPRINTL64 "text", rax, r15
+; you will not get the proper value of r15.
+; One way to avoid this issue is to not use multiple registers on the same line
+; if the register types are GPR (i.e. this is not an issue for printing XMM
+; registers). E.g the above could be done with:
+; DBGPRINTL64 "test", rax
+; DBGPRINT64 r15
+;
+; Note also that the macros only check for r15. Thus if you tried something
+; like (after token expansion):
+; DBGPRINT32 r15d
+; you won't get the right results. If you want to display r15d, you should
+; print it as the 64-bit r15.
+;
+; To actually print the data, from your C code include the file
+; "dbgprint.h". The default buffer size is 16kB. If you want to change
+; that, #define DBG_BUFFER_SIZE before including "dbgprint.h".
+;
+; Then, after your asm routine(s) have returned, call
+; print_debug() or print_debug(file pointer)
+; If you do not specify a file pointer, it defaults to stdout.
+;
+; Printing the debug data also resets the write pointer to the beginning,
+; effectively "deleting" the previous messages.
+;
+%ifndef DBGPRINT_ASM_INCLUDED
+%define DBGPRINT_ASM_INCLUDED
+
+;%define DO_DBGPRINT
+%ifdef DO_DBGPRINT
+extern pDebugBuffer
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; DBGPRINT_INT size, param, ...
+%macro DBGPRINT_INT 2-*
+%ifidni %2,r15
+%xdefine %%reg r14
+%else
+%xdefine %%reg r15
+%endif
+%xdefine %%size %1
+%rotate 1
+ push %%reg
+ mov %%reg, [pDebugBuffer]
+%rep %0 - 1
+ mov byte [%%reg], %%size
+ %if (%%size == 2)
+ mov word [%%reg+1], %1
+ %elif (%%size == 4)
+ mov dword [%%reg+1], %1
+ %elif (%%size == 8)
+ mov qword [%%reg+1], %1
+ %elif (%%size == 16)
+ movdqu oword [%%reg+1], %1
+ %elif (%%size == 32)
+ vmovdqu [%%reg+1], %1
+ %elif (%%size == 64)
+ vmovdqu32 [%%reg+1], %1
+ %else
+ %error invalid size %%size
+ %endif
+ add %%reg, %%size+1
+%rotate 1
+%endrep
+ mov [pDebugBuffer], %%reg
+ pop %%reg
+%endmacro
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; DBGPRINTL_INT size, label, param, ...
+%macro DBGPRINTL_INT 3-*
+%ifidni %3,r15
+%xdefine %%reg r14
+%else
+%xdefine %%reg r15
+%endif
+%xdefine %%size %1
+%rotate 1
+ push %%reg
+ mov %%reg, [pDebugBuffer]
+
+ mov byte [%%reg], 0x57
+section .data
+%%lab: db %1, 0
+section .text
+ mov qword [%%reg+1], %%lab
+ add %%reg, 8+1
+%rotate 1
+
+%rep %0 - 2
+ mov byte [%%reg], %%size
+%if (%%size == 2)
+ mov word [%%reg+1], %1
+%elif (%%size == 4)
+ mov dword [%%reg+1], %1
+%elif (%%size == 8)
+ mov qword [%%reg+1], %1
+%elif (%%size == 16)
+ movdqu oword [%%reg+1], %1
+%elif (%%size == 32)
+ vmovdqu [%%reg+1], %1
+%elif (%%size == 64)
+ vmovdqu32 [%%reg+1], %1
+%else
+%error invalid size %%size
+%endif
+ add %%reg, %%size+1
+%rotate 1
+%endrep
+ mov [pDebugBuffer], %%reg
+ pop %%reg
+%endmacro
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; DBGPRINTL* data, ...
+%macro DBGPRINT16 1+
+ DBGPRINT_INT 2, %1
+%endmacro
+%macro DBGPRINT32 1+
+ DBGPRINT_INT 4, %1
+%endmacro
+%macro DBGPRINT64 1+
+ DBGPRINT_INT 8, %1
+%endmacro
+%macro DBGPRINT_XMM 1+
+ DBGPRINT_INT 16, %1
+%endmacro
+%macro DBGPRINT_YMM 1+
+ DBGPRINT_INT 32, %1
+%endmacro
+%macro DBGPRINT_ZMM 1+
+ DBGPRINT_INT 64, %1
+%endmacro
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; DBGPRINTL* label, data, ...
+%macro DBGPRINTL16 2+
+ DBGPRINTL_INT 2, %1, %2
+%endmacro
+%macro DBGPRINTL32 2+
+ DBGPRINTL_INT 4, %1, %2
+%endmacro
+%macro DBGPRINTL64 2+
+ DBGPRINTL_INT 8, %1, %2
+%endmacro
+%macro DBGPRINTL_XMM 2+
+ DBGPRINTL_INT 16, %1, %2
+%endmacro
+%macro DBGPRINTL_YMM 2+
+ DBGPRINTL_INT 32, %1, %2
+%endmacro
+%macro DBGPRINTL_ZMM 2+
+ DBGPRINTL_INT 64, %1, %2
+%endmacro
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+%macro DBGPRINTL 1
+ push r15
+ mov r15, [pDebugBuffer]
+
+ mov byte [r15], 0x57
+section .data
+%%lab: db %1, 0
+section .text
+ mov qword [r15+1], %%lab
+ add r15, 8+1
+
+ mov [pDebugBuffer], r15
+ pop r15
+%endmacro
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+%else
+%macro DBGPRINT16 1+
+%endmacro
+%macro DBGPRINT32 1+
+%endmacro
+%macro DBGPRINT64 1+
+%endmacro
+%macro DBGPRINT_XMM 1+
+%endmacro
+%macro DBGPRINT_YMM 1+
+%endmacro
+%macro DBGPRINT_ZMM 1+
+%endmacro
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+%macro DBGPRINTL16 2+
+%endmacro
+%macro DBGPRINTL32 2+
+%endmacro
+%macro DBGPRINTL64 2+
+%endmacro
+%macro DBGPRINTL_XMM 2+
+%endmacro
+%macro DBGPRINTL_YMM 2+
+%endmacro
+%macro DBGPRINTL_ZMM 2+
+%endmacro
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+%macro DBGPRINTL 1
+%endmacro
+%endif
+
+
+
+%if 0 ; OLD
+%macro DBGPRINTL_ZMM 2-*
+ push rax
+ mov rax, [pDebugBuffer]
+
+ mov byte [rax], 0x57
+section .data
+%%lab: db %1, 0
+section .text
+ mov qword [rax+1], %%lab
+ add rax, 8+1
+%rotate 1
+
+%rep %0 - 1
+ mov byte [rax], 64
+ vmovdqu32 [rax+1], %1
+%rotate 1
+ add rax, 64+1
+%endrep
+ mov [pDebugBuffer], rax
+ pop rax
+%endmacro
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+%macro DBGPRINT_ZMM 1-*
+ push rax
+ mov rax, [pDebugBuffer]
+%rep %0
+ mov byte [rax], 64
+ vmovdqu32 [rax+1], %1
+%rotate 1
+ add rax, 64+1
+%endrep
+ mov [pDebugBuffer], rax
+ pop rax
+%endmacro
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+%macro DBGPRINT_YMM 1-*
+ push rax
+ mov rax, [pDebugBuffer]
+%rep %0
+ mov byte [rax], 32
+ vmovdqu [rax+1], %1
+%rotate 1
+ add rax, 32+1
+%endrep
+ mov [pDebugBuffer], rax
+ pop rax
+%endmacro
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+%macro DBGPRINT_XMM 1-*
+ push rax
+ mov rax, [pDebugBuffer]
+%rep %0
+ mov byte [rax], 16
+ vmovdqu oword [rax+1], %1
+%rotate 1
+ add rax, 16+1
+%endrep
+ mov [pDebugBuffer], rax
+ pop rax
+%endmacro
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+%macro DBGPRINTL64 2-*
+ push rax
+ mov rax, [pDebugBuffer]
+
+ mov byte [rax], 0x57
+section .data
+%%lab: db %1, 0
+section .text
+ mov qword [rax+1], %%lab
+ add rax, 8+1
+%rotate 1
+
+%rep %0 - 1
+ mov byte [rax], 8
+ mov qword [rax+1], %1
+%rotate 1
+ add rax, 8+1
+%endrep
+ mov [pDebugBuffer], rax
+ pop rax
+%endmacro
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+%macro DBGPRINT64 1-*
+ push rax
+ mov rax, [pDebugBuffer]
+%rep %0
+ mov byte [rax], 8
+ mov qword [rax+1], %1
+%rotate 1
+ add rax, 8+1
+%endrep
+ mov [pDebugBuffer], rax
+ pop rax
+%endmacro
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+%macro DBGPRINT32 1-*
+ push rax
+ mov rax, [pDebugBuffer]
+%rep %0
+ mov byte [rax], 4
+ mov dword [rax+1], %1
+%rotate 1
+ add rax, 4+1
+%endrep
+ mov [pDebugBuffer], rax
+ pop rax
+%endmacro
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+%macro DBGPRINT16 1-*
+ push rax
+ mov rax, [pDebugBuffer]
+%rep %0
+ mov byte [rax], 2
+ mov word [rax+1], %1
+%rotate 1
+ add rax, 2+1
+%endrep
+ mov [pDebugBuffer], rax
+ pop rax
+%endmacro
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+%macro DBGPRINT_LAB 1
+ push rax
+ mov rax, [pDebugBuffer]
+
+ mov byte [rax], 0x57
+section .data
+%%lab: db %1, 0
+section .text
+ mov qword [rax+1], %%lab
+ add rax, 8+1
+
+ mov [pDebugBuffer], rax
+ pop rax
+%endmacro
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+%macro DBGHIST 2
+ inc dword [%1 + 4 * %2]
+%endmacro
+%macro DBGPRINT_ZMM 1-*
+%endmacro
+%macro DBGPRINT_YMM 1-*
+%endmacro
+%macro DBGPRINT_XMM 1-*
+%endmacro
+%macro DBGPRINT64 1-*
+%endmacro
+%macro DBGPRINT32 1-*
+%endmacro
+%macro DBGPRINT16 1-*
+%endmacro
+%macro DBGHIST 2
+%endmacro
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+%endif ; ifdef 0 ; OLD
+
+%endif ; DBGPRINT_ASM_INCLUDED
diff --git a/src/spdk/intel-ipsec-mb/include/des_utils.h b/src/spdk/intel-ipsec-mb/include/des_utils.h
new file mode 100644
index 000000000..4358132d0
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/include/des_utils.h
@@ -0,0 +1,134 @@
+/*******************************************************************************
+ Copyright (c) 2017-2018, Intel Corporation
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*******************************************************************************/
+
+/* DES utility functions and macros */
+
+#ifndef DES_UTILS_H
+#define DES_UTILS_H
+
+#include <stdint.h>
+#include "intel-ipsec-mb.h"
+
+/**
+ * @brief Gets selected bit value out of a 64-bit word
+ *
+ * @param val 64-bit word
+ * @param n bit number (0 to 63) to get value of
+ *
+ * @return n-th bit value (0 or 1 value only)
+ */
+__forceinline
+uint64_t bit_get64b(const uint64_t val, const unsigned n)
+{
+ IMB_ASSERT(n < 64);
+ return (val >> n) & UINT64_C(1);
+}
+
+/**
+ * @brief Sets selected bit in a 64-bit word
+ *
+ * @param val 64-bit word
+ * @param n bit number (0 to 63) to set value of
+ * @param b bit value (0 or 1)
+ *
+ * @return val with n-th bit set to value b
+ */
+__forceinline
+uint64_t bit_set64b(const uint64_t val, const unsigned n, const uint64_t b)
+{
+ const uint64_t m = UINT64_C(1) << n;
+
+ IMB_ASSERT(n < 64);
+ return (val & (~m)) | (b << n);
+}
+
+/**
+ * @brief Permutes bits in a 64-bit word as described by pattern
+ *
+ * The function goes through the pattern array from index 0 to 'size' - 1 (max 63).
+ * It sets output bit number 'index' to the value of
+ * bit number 'pattern[index] - 1' from 'in'.
+ *
+ * @param in 64-bit word to be permuted
+ * @param pattern pointer to array defining the permutation
+ * @param size size of the permutation pattern
+ *
+ * @return 'in' permuted as described by the pattern
+ */
+__forceinline
+uint64_t permute_64b(const uint64_t in, const uint8_t *pattern, const int size)
+{
+ uint64_t out = 0;
+ int n = 0;
+
+ IMB_ASSERT(size <= 64);
+
+ for (n = 0; n < size; n++) {
+ /* '-1' is required as bit numbers in FIPS start with 1 not 0 */
+ const int m = ((int) pattern[n]) - 1;
+ const uint64_t bit_val = bit_get64b(in, m);
+
+ out = bit_set64b(out, n, bit_val);
+ }
+
+ return out;
+}
+
+static const uint8_t reflect_tab[16] = {
+ /* [ 0] 0000 => 0000 */ 0, /* [ 1] 0001 => 1000 */ 8,
+ /* [ 2] 0010 => 0100 */ 4, /* [ 3] 0011 => 1100 */ 12,
+ /* [ 4] 0100 => 0010 */ 2, /* [ 5] 0101 => 1010 */ 10,
+ /* [ 6] 0110 => 0110 */ 6, /* [ 7] 0111 => 1110 */ 14,
+ /* [ 8] 1000 => 0001 */ 1, /* [ 9] 1001 => 1001 */ 9,
+ /* [10] 1010 => 0101 */ 5, /* [11] 1011 => 1101 */ 13,
+ /* [12] 1100 => 0011 */ 3, /* [13] 1101 => 1011 */ 11,
+ /* [14] 1110 => 0111 */ 7, /* [15] 1111 => 1111 */ 15
+};
+
+__forceinline
+uint8_t reflect_8b(const uint8_t pb)
+{
+ return reflect_tab[pb >> 4] | (reflect_tab[pb & 15] << 4);
+}
+
+__forceinline
+uint64_t load64_reflect(const void *key)
+{
+ const uint8_t *kb = (const uint8_t *) key;
+
+ return ((uint64_t) reflect_8b(kb[0])) |
+ ((uint64_t) reflect_8b(kb[1])) << 8 |
+ ((uint64_t) reflect_8b(kb[2])) << 16 |
+ ((uint64_t) reflect_8b(kb[3])) << 24 |
+ ((uint64_t) reflect_8b(kb[4])) << 32 |
+ ((uint64_t) reflect_8b(kb[5])) << 40 |
+ ((uint64_t) reflect_8b(kb[6])) << 48 |
+ ((uint64_t) reflect_8b(kb[7])) << 56;
+}
+
+
+#endif /* DES_UTILS_H */
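
A tiny self-check sketch for the helpers above (the function name is hypothetical): reflect_8b() mirrors the bit order within a byte, and permute_64b() with the identity pattern {1, 2, ..., n} returns the low n input bits unchanged, since FIPS-style patterns number bits from 1.

    #include <assert.h>
    #include <stdint.h>
    #include "des_utils.h"

    static void des_utils_selfcheck(void)
    {
            static const uint8_t identity4[4] = {1, 2, 3, 4};

            assert(reflect_8b(0x01) == 0x80);    /* bit 0 moves to bit 7 */
            assert(reflect_8b(0xF0) == 0x0F);    /* high nibble mirrors to low */
            assert(permute_64b(0xFULL, identity4, 4) == 0xFULL);
    }
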
diff --git a/src/spdk/intel-ipsec-mb/include/gcm.h b/src/spdk/intel-ipsec-mb/include/gcm.h
new file mode 100644
index 000000000..bcc13cb3a
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/include/gcm.h
@@ -0,0 +1,428 @@
+/*******************************************************************************
+ Copyright (c) 2018-2019, Intel Corporation
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*******************************************************************************/
+
+#include "intel-ipsec-mb.h"
+
+#ifndef NO_GCM
+
+#ifndef _GCM_H_
+#define _GCM_H_
+
+/*
+ * AVX512+VAES+VPCLMULQDQ GCM API
+ * - intentionally this is not exposed in intel-ipsec-mb.h
+ * - available through IMB_GCM_xxx() macros from intel-ipsec-mb.h
+ */
+IMB_DLL_EXPORT void
+aes_gcm_enc_128_vaes_avx512(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *out, uint8_t const *in, uint64_t len,
+ const uint8_t *iv,
+ uint8_t const *aad, uint64_t aad_len,
+ uint8_t *auth_tag, uint64_t auth_tag_len);
+IMB_DLL_EXPORT void
+aes_gcm_enc_192_vaes_avx512(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *out, uint8_t const *in, uint64_t len,
+ const uint8_t *iv,
+ uint8_t const *aad, uint64_t aad_len,
+ uint8_t *auth_tag, uint64_t auth_tag_len);
+IMB_DLL_EXPORT void
+aes_gcm_enc_256_vaes_avx512(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *out, uint8_t const *in, uint64_t len,
+ const uint8_t *iv,
+ uint8_t const *aad, uint64_t aad_len,
+ uint8_t *auth_tag, uint64_t auth_tag_len);
+IMB_DLL_EXPORT void
+aes_gcm_dec_128_vaes_avx512(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *out, uint8_t const *in, uint64_t len,
+ const uint8_t *iv,
+ uint8_t const *aad, uint64_t aad_len,
+ uint8_t *auth_tag, uint64_t auth_tag_len);
+IMB_DLL_EXPORT void
+aes_gcm_dec_192_vaes_avx512(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *out, uint8_t const *in, uint64_t len,
+ const uint8_t *iv,
+ uint8_t const *aad, uint64_t aad_len,
+ uint8_t *auth_tag, uint64_t auth_tag_len);
+IMB_DLL_EXPORT void
+aes_gcm_dec_256_vaes_avx512(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *out, uint8_t const *in, uint64_t len,
+ const uint8_t *iv,
+ uint8_t const *aad, uint64_t aad_len,
+ uint8_t *auth_tag, uint64_t auth_tag_len);
+
+IMB_DLL_EXPORT void
+aes_gcm_init_128_vaes_avx512(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ const uint8_t *iv, uint8_t const *aad,
+ uint64_t aad_len);
+IMB_DLL_EXPORT void
+aes_gcm_init_192_vaes_avx512(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ const uint8_t *iv, uint8_t const *aad,
+ uint64_t aad_len);
+IMB_DLL_EXPORT void
+aes_gcm_init_256_vaes_avx512(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ const uint8_t *iv, uint8_t const *aad,
+ uint64_t aad_len);
+IMB_DLL_EXPORT void
+aes_gcm_enc_128_update_vaes_avx512(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *out, const uint8_t *in,
+ uint64_t len);
+IMB_DLL_EXPORT void
+aes_gcm_enc_192_update_vaes_avx512(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *out, const uint8_t *in,
+ uint64_t len);
+IMB_DLL_EXPORT void
+aes_gcm_enc_256_update_vaes_avx512(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *out, const uint8_t *in,
+ uint64_t len);
+IMB_DLL_EXPORT void
+aes_gcm_dec_128_update_vaes_avx512(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *out, const uint8_t *in,
+ uint64_t len);
+IMB_DLL_EXPORT void
+aes_gcm_dec_192_update_vaes_avx512(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *out, const uint8_t *in,
+ uint64_t len);
+IMB_DLL_EXPORT void
+aes_gcm_dec_256_update_vaes_avx512(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *out, const uint8_t *in,
+ uint64_t len);
+IMB_DLL_EXPORT void
+aes_gcm_enc_128_finalize_vaes_avx512(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *auth_tag, uint64_t auth_tag_len);
+IMB_DLL_EXPORT void
+aes_gcm_enc_192_finalize_vaes_avx512(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *auth_tag, uint64_t auth_tag_len);
+IMB_DLL_EXPORT void
+aes_gcm_enc_256_finalize_vaes_avx512(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *auth_tag, uint64_t auth_tag_len);
+IMB_DLL_EXPORT void
+aes_gcm_dec_128_finalize_vaes_avx512(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *auth_tag, uint64_t auth_tag_len);
+IMB_DLL_EXPORT void
+aes_gcm_dec_192_finalize_vaes_avx512(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *auth_tag, uint64_t auth_tag_len);
+IMB_DLL_EXPORT void
+aes_gcm_dec_256_finalize_vaes_avx512(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *auth_tag, uint64_t auth_tag_len);
+IMB_DLL_EXPORT void
+aes_gcm_precomp_128_vaes_avx512(struct gcm_key_data *key_data);
+IMB_DLL_EXPORT void
+aes_gcm_precomp_192_vaes_avx512(struct gcm_key_data *key_data);
+IMB_DLL_EXPORT void
+aes_gcm_precomp_256_vaes_avx512(struct gcm_key_data *key_data);
+
+IMB_DLL_EXPORT void
+aes_gcm_pre_128_vaes_avx512(const void *key, struct gcm_key_data *key_data);
+IMB_DLL_EXPORT void
+aes_gcm_pre_192_vaes_avx512(const void *key, struct gcm_key_data *key_data);
+IMB_DLL_EXPORT void
+aes_gcm_pre_256_vaes_avx512(const void *key, struct gcm_key_data *key_data);
+
+/*
+ * AVX512 GCM API
+ * - this is intentionally not exposed in intel-ipsec-mb.h
+ * - available through IMB_GCM_xxx() macros from intel-ipsec-mb.h
+ */
+IMB_DLL_EXPORT void
+aes_gcm_enc_128_avx512(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *out, uint8_t const *in, uint64_t len,
+ const uint8_t *iv,
+ uint8_t const *aad, uint64_t aad_len,
+ uint8_t *auth_tag, uint64_t auth_tag_len);
+IMB_DLL_EXPORT void
+aes_gcm_enc_192_avx512(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *out, uint8_t const *in, uint64_t len,
+ const uint8_t *iv,
+ uint8_t const *aad, uint64_t aad_len,
+ uint8_t *auth_tag, uint64_t auth_tag_len);
+IMB_DLL_EXPORT void
+aes_gcm_enc_256_avx512(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *out, uint8_t const *in, uint64_t len,
+ const uint8_t *iv,
+ uint8_t const *aad, uint64_t aad_len,
+ uint8_t *auth_tag, uint64_t auth_tag_len);
+IMB_DLL_EXPORT void
+aes_gcm_dec_128_avx512(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *out, uint8_t const *in, uint64_t len,
+ const uint8_t *iv,
+ uint8_t const *aad, uint64_t aad_len,
+ uint8_t *auth_tag, uint64_t auth_tag_len);
+IMB_DLL_EXPORT void
+aes_gcm_dec_192_avx512(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *out, uint8_t const *in, uint64_t len,
+ const uint8_t *iv,
+ uint8_t const *aad, uint64_t aad_len,
+ uint8_t *auth_tag, uint64_t auth_tag_len);
+IMB_DLL_EXPORT void
+aes_gcm_dec_256_avx512(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *out, uint8_t const *in, uint64_t len,
+ const uint8_t *iv,
+ uint8_t const *aad, uint64_t aad_len,
+ uint8_t *auth_tag, uint64_t auth_tag_len);
+
+IMB_DLL_EXPORT void
+aes_gcm_init_128_avx512(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ const uint8_t *iv, uint8_t const *aad,
+ uint64_t aad_len);
+IMB_DLL_EXPORT void
+aes_gcm_init_192_avx512(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ const uint8_t *iv, uint8_t const *aad,
+ uint64_t aad_len);
+IMB_DLL_EXPORT void
+aes_gcm_init_256_avx512(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ const uint8_t *iv, uint8_t const *aad,
+ uint64_t aad_len);
+IMB_DLL_EXPORT void
+aes_gcm_enc_128_update_avx512(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *out, const uint8_t *in,
+ uint64_t len);
+IMB_DLL_EXPORT void
+aes_gcm_enc_192_update_avx512(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *out, const uint8_t *in,
+ uint64_t len);
+IMB_DLL_EXPORT void
+aes_gcm_enc_256_update_avx512(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *out, const uint8_t *in,
+ uint64_t len);
+IMB_DLL_EXPORT void
+aes_gcm_dec_128_update_avx512(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *out, const uint8_t *in,
+ uint64_t len);
+IMB_DLL_EXPORT void
+aes_gcm_dec_192_update_avx512(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *out, const uint8_t *in,
+ uint64_t len);
+IMB_DLL_EXPORT void
+aes_gcm_dec_256_update_avx512(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *out, const uint8_t *in,
+ uint64_t len);
+IMB_DLL_EXPORT void
+aes_gcm_enc_128_finalize_avx512(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *auth_tag, uint64_t auth_tag_len);
+IMB_DLL_EXPORT void
+aes_gcm_enc_192_finalize_avx512(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *auth_tag, uint64_t auth_tag_len);
+IMB_DLL_EXPORT void
+aes_gcm_enc_256_finalize_avx512(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *auth_tag, uint64_t auth_tag_len);
+IMB_DLL_EXPORT void
+aes_gcm_dec_128_finalize_avx512(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *auth_tag, uint64_t auth_tag_len);
+IMB_DLL_EXPORT void
+aes_gcm_dec_192_finalize_avx512(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *auth_tag, uint64_t auth_tag_len);
+IMB_DLL_EXPORT void
+aes_gcm_dec_256_finalize_avx512(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *auth_tag, uint64_t auth_tag_len);
+IMB_DLL_EXPORT void
+aes_gcm_precomp_128_avx512(struct gcm_key_data *key_data);
+IMB_DLL_EXPORT void
+aes_gcm_precomp_192_avx512(struct gcm_key_data *key_data);
+IMB_DLL_EXPORT void
+aes_gcm_precomp_256_avx512(struct gcm_key_data *key_data);
+
+IMB_DLL_EXPORT void
+aes_gcm_pre_128_avx512(const void *key, struct gcm_key_data *key_data);
+IMB_DLL_EXPORT void
+aes_gcm_pre_192_avx512(const void *key, struct gcm_key_data *key_data);
+IMB_DLL_EXPORT void
+aes_gcm_pre_256_avx512(const void *key, struct gcm_key_data *key_data);
+
+/*
+ * AESNI emulation GCM API (based on SSE architecture)
+ * - this is intentionally not exposed in intel-ipsec-mb.h
+ * - available through IMB_GCM_xxx() macros from intel-ipsec-mb.h
+ */
+IMB_DLL_EXPORT void
+aes_gcm_enc_128_sse_no_aesni(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *out, uint8_t const *in, uint64_t len,
+ const uint8_t *iv, uint8_t const *aad,
+ uint64_t aad_len, uint8_t *auth_tag,
+ uint64_t auth_tag_len);
+IMB_DLL_EXPORT void
+aes_gcm_enc_192_sse_no_aesni(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *out, uint8_t const *in, uint64_t len,
+ const uint8_t *iv, uint8_t const *aad,
+ uint64_t aad_len, uint8_t *auth_tag,
+ uint64_t auth_tag_len);
+IMB_DLL_EXPORT void
+aes_gcm_enc_256_sse_no_aesni(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *out, uint8_t const *in, uint64_t len,
+ const uint8_t *iv,
+ uint8_t const *aad, uint64_t aad_len,
+ uint8_t *auth_tag, uint64_t auth_tag_len);
+IMB_DLL_EXPORT void
+aes_gcm_dec_128_sse_no_aesni(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *out, uint8_t const *in, uint64_t len,
+ const uint8_t *iv, uint8_t const *aad,
+ uint64_t aad_len, uint8_t *auth_tag,
+ uint64_t auth_tag_len);
+IMB_DLL_EXPORT void
+aes_gcm_dec_192_sse_no_aesni(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *out, uint8_t const *in, uint64_t len,
+ const uint8_t *iv, uint8_t const *aad,
+ uint64_t aad_len, uint8_t *auth_tag,
+ uint64_t auth_tag_len);
+IMB_DLL_EXPORT void
+aes_gcm_dec_256_sse_no_aesni(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *out, uint8_t const *in, uint64_t len,
+ const uint8_t *iv, uint8_t const *aad,
+ uint64_t aad_len, uint8_t *auth_tag,
+ uint64_t auth_tag_len);
+IMB_DLL_EXPORT void
+aes_gcm_init_128_sse_no_aesni(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ const uint8_t *iv, uint8_t const *aad,
+ uint64_t aad_len);
+IMB_DLL_EXPORT void
+aes_gcm_init_192_sse_no_aesni(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ const uint8_t *iv, uint8_t const *aad,
+ uint64_t aad_len);
+IMB_DLL_EXPORT void
+aes_gcm_init_256_sse_no_aesni(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ const uint8_t *iv, uint8_t const *aad,
+ uint64_t aad_len);
+IMB_DLL_EXPORT void
+aes_gcm_enc_128_update_sse_no_aesni(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *out, const uint8_t *in,
+ uint64_t len);
+IMB_DLL_EXPORT void
+aes_gcm_enc_192_update_sse_no_aesni(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *out, const uint8_t *in,
+ uint64_t len);
+IMB_DLL_EXPORT void
+aes_gcm_enc_256_update_sse_no_aesni(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *out, const uint8_t *in,
+ uint64_t len);
+IMB_DLL_EXPORT void
+aes_gcm_dec_128_update_sse_no_aesni(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *out, const uint8_t *in,
+ uint64_t len);
+IMB_DLL_EXPORT void
+aes_gcm_dec_192_update_sse_no_aesni(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *out, const uint8_t *in,
+ uint64_t len);
+IMB_DLL_EXPORT void
+aes_gcm_dec_256_update_sse_no_aesni(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *out, const uint8_t *in,
+ uint64_t len);
+IMB_DLL_EXPORT void
+aes_gcm_enc_128_finalize_sse_no_aesni(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *auth_tag, uint64_t auth_tag_len);
+IMB_DLL_EXPORT void
+aes_gcm_enc_192_finalize_sse_no_aesni(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *auth_tag, uint64_t auth_tag_len);
+IMB_DLL_EXPORT void
+aes_gcm_enc_256_finalize_sse_no_aesni(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *auth_tag, uint64_t auth_tag_len);
+IMB_DLL_EXPORT void
+aes_gcm_dec_128_finalize_sse_no_aesni(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *auth_tag, uint64_t auth_tag_len);
+IMB_DLL_EXPORT void
+aes_gcm_dec_192_finalize_sse_no_aesni(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *auth_tag, uint64_t auth_tag_len);
+IMB_DLL_EXPORT void
+aes_gcm_dec_256_finalize_sse_no_aesni(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *auth_tag, uint64_t auth_tag_len);
+IMB_DLL_EXPORT void
+aes_gcm_precomp_128_sse_no_aesni(struct gcm_key_data *key_data);
+IMB_DLL_EXPORT void
+aes_gcm_precomp_192_sse_no_aesni(struct gcm_key_data *key_data);
+IMB_DLL_EXPORT void
+aes_gcm_precomp_256_sse_no_aesni(struct gcm_key_data *key_data);
+
+IMB_DLL_EXPORT void
+aes_gcm_pre_128_sse_no_aesni(const void *key, struct gcm_key_data *key_data);
+IMB_DLL_EXPORT void
+aes_gcm_pre_192_sse_no_aesni(const void *key, struct gcm_key_data *key_data);
+IMB_DLL_EXPORT void
+aes_gcm_pre_256_sse_no_aesni(const void *key, struct gcm_key_data *key_data);
+
+#endif /* _GCM_H_ */
+#endif /* NO_GCM */
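For orientation, a minimal usage sketch of the single-shot and streaming entry points declared above. These are normally reached via the IMB_GCM_xxx() macros and the multi-buffer manager rather than called directly; the 12-byte IV, 16-byte tag, the "gcm.h" include path and the direct call into the VAES/AVX512 variant are illustrative assumptions, and the CPU must actually support VAES and VPCLMULQDQ:

    #include <stdint.h>
    #include "intel-ipsec-mb.h"
    #include "gcm.h"              /* this internal header (path assumed) */

    static void gcm128_sketch(const uint8_t key[16], const uint8_t iv[12],
                              const uint8_t *aad, uint64_t aad_len,
                              const uint8_t *pt, uint64_t pt_len,
                              uint8_t *ct, uint8_t tag[16])
    {
            struct gcm_key_data kd;
            struct gcm_context_data ctx;

            /* Expand the AES key and precompute the GHASH key material. */
            aes_gcm_pre_128_vaes_avx512(key, &kd);

            /* One-shot encrypt + authenticate. */
            aes_gcm_enc_128_vaes_avx512(&kd, &ctx, ct, pt, pt_len,
                                        iv, aad, aad_len, tag, 16);

            /* Equivalent streaming form: init, one or more updates, finalize. */
            aes_gcm_init_128_vaes_avx512(&kd, &ctx, iv, aad, aad_len);
            aes_gcm_enc_128_update_vaes_avx512(&kd, &ctx, ct, pt, pt_len);
            aes_gcm_enc_128_finalize_vaes_avx512(&kd, &ctx, tag, 16);
    }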
diff --git a/src/spdk/intel-ipsec-mb/include/gcm_defines.asm b/src/spdk/intel-ipsec-mb/include/gcm_defines.asm
new file mode 100644
index 000000000..31a961729
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/include/gcm_defines.asm
@@ -0,0 +1,272 @@
+;;
+;; Copyright (c) 2012-2019, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%ifndef GCM_DEFINES_ASM_INCLUDED
+%define GCM_DEFINES_ASM_INCLUDED
+
+;
+; Authors:
+; Erdinc Ozturk
+; Vinodh Gopal
+; James Guilford
+
+section .data
+default rel
+
+align 16
+POLY: dq 0x0000000000000001, 0xC200000000000000
+
+align 64
+POLY2:
+ dq 0x00000001C2000000, 0xC200000000000000
+ dq 0x00000001C2000000, 0xC200000000000000
+ dq 0x00000001C2000000, 0xC200000000000000
+ dq 0x00000001C2000000, 0xC200000000000000
+
+align 16
+TWOONE: dq 0x0000000000000001, 0x0000000100000000
+
+;;; @note Order of these constants should not change.
+;;; More specifically, ALL_F should follow SHIFT_MASK, and ZERO should follow ALL_F
+align 64
+SHUF_MASK:
+ dq 0x08090A0B0C0D0E0F, 0x0001020304050607
+ dq 0x08090A0B0C0D0E0F, 0x0001020304050607
+ dq 0x08090A0B0C0D0E0F, 0x0001020304050607
+ dq 0x08090A0B0C0D0E0F, 0x0001020304050607
+
+align 16
+SHIFT_MASK:
+ dq 0x0706050403020100, 0x0f0e0d0c0b0a0908
+
+ALL_F:
+ dq 0xffffffffffffffff, 0xffffffffffffffff
+
+ZERO:
+ dq 0x0000000000000000, 0x0000000000000000
+
+align 16
+ONE:
+ dq 0x0000000000000001, 0x0000000000000000
+
+align 16
+TWO:
+ dq 0x0000000000000002, 0x0000000000000000
+
+align 16
+ONEf:
+ dq 0x0000000000000000, 0x0100000000000000
+
+align 16
+TWOf:
+ dq 0x0000000000000000, 0x0200000000000000
+
+align 64
+ddq_add_1234:
+ dq 0x0000000000000001, 0x0000000000000000
+ dq 0x0000000000000002, 0x0000000000000000
+ dq 0x0000000000000003, 0x0000000000000000
+ dq 0x0000000000000004, 0x0000000000000000
+
+align 64
+ddq_add_5678:
+ dq 0x0000000000000005, 0x0000000000000000
+ dq 0x0000000000000006, 0x0000000000000000
+ dq 0x0000000000000007, 0x0000000000000000
+ dq 0x0000000000000008, 0x0000000000000000
+
+align 64
+ddq_add_4444:
+ dq 0x0000000000000004, 0x0000000000000000
+ dq 0x0000000000000004, 0x0000000000000000
+ dq 0x0000000000000004, 0x0000000000000000
+ dq 0x0000000000000004, 0x0000000000000000
+
+align 64
+ddq_add_8888:
+ dq 0x0000000000000008, 0x0000000000000000
+ dq 0x0000000000000008, 0x0000000000000000
+ dq 0x0000000000000008, 0x0000000000000000
+ dq 0x0000000000000008, 0x0000000000000000
+
+align 64
+ddq_addbe_1234:
+ dq 0x0000000000000000, 0x0100000000000000
+ dq 0x0000000000000000, 0x0200000000000000
+ dq 0x0000000000000000, 0x0300000000000000
+ dq 0x0000000000000000, 0x0400000000000000
+
+align 64
+ddq_addbe_5678:
+ dq 0x0000000000000000, 0x0500000000000000
+ dq 0x0000000000000000, 0x0600000000000000
+ dq 0x0000000000000000, 0x0700000000000000
+ dq 0x0000000000000000, 0x0800000000000000
+
+align 64
+ddq_addbe_4444:
+ dq 0x0000000000000000, 0x0400000000000000
+ dq 0x0000000000000000, 0x0400000000000000
+ dq 0x0000000000000000, 0x0400000000000000
+ dq 0x0000000000000000, 0x0400000000000000
+
+align 64
+ddq_addbe_8888:
+ dq 0x0000000000000000, 0x0800000000000000
+ dq 0x0000000000000000, 0x0800000000000000
+ dq 0x0000000000000000, 0x0800000000000000
+ dq 0x0000000000000000, 0x0800000000000000
+
+align 64
+byte_len_to_mask_table:
+ dw 0x0000, 0x0001, 0x0003, 0x0007,
+ dw 0x000f, 0x001f, 0x003f, 0x007f,
+ dw 0x00ff, 0x01ff, 0x03ff, 0x07ff,
+ dw 0x0fff, 0x1fff, 0x3fff, 0x7fff,
+ dw 0xffff
+
+align 64
+byte64_len_to_mask_table:
+ dq 0x0000000000000000, 0x0000000000000001
+ dq 0x0000000000000003, 0x0000000000000007
+ dq 0x000000000000000f, 0x000000000000001f
+ dq 0x000000000000003f, 0x000000000000007f
+ dq 0x00000000000000ff, 0x00000000000001ff
+ dq 0x00000000000003ff, 0x00000000000007ff
+ dq 0x0000000000000fff, 0x0000000000001fff
+ dq 0x0000000000003fff, 0x0000000000007fff
+ dq 0x000000000000ffff, 0x000000000001ffff
+ dq 0x000000000003ffff, 0x000000000007ffff
+ dq 0x00000000000fffff, 0x00000000001fffff
+ dq 0x00000000003fffff, 0x00000000007fffff
+ dq 0x0000000000ffffff, 0x0000000001ffffff
+ dq 0x0000000003ffffff, 0x0000000007ffffff
+ dq 0x000000000fffffff, 0x000000001fffffff
+ dq 0x000000003fffffff, 0x000000007fffffff
+ dq 0x00000000ffffffff, 0x00000001ffffffff
+ dq 0x00000003ffffffff, 0x00000007ffffffff
+ dq 0x0000000fffffffff, 0x0000001fffffffff
+ dq 0x0000003fffffffff, 0x0000007fffffffff
+ dq 0x000000ffffffffff, 0x000001ffffffffff
+ dq 0x000003ffffffffff, 0x000007ffffffffff
+ dq 0x00000fffffffffff, 0x00001fffffffffff
+ dq 0x00003fffffffffff, 0x00007fffffffffff
+ dq 0x0000ffffffffffff, 0x0001ffffffffffff
+ dq 0x0003ffffffffffff, 0x0007ffffffffffff
+ dq 0x000fffffffffffff, 0x001fffffffffffff
+ dq 0x003fffffffffffff, 0x007fffffffffffff
+ dq 0x00ffffffffffffff, 0x01ffffffffffffff
+ dq 0x03ffffffffffffff, 0x07ffffffffffffff
+ dq 0x0fffffffffffffff, 0x1fffffffffffffff
+ dq 0x3fffffffffffffff, 0x7fffffffffffffff
+ dq 0xffffffffffffffff
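The two length-to-mask tables above are precomputed forms of mask = (1 << len) - 1, indexed by the number of remaining bytes, so a trailing partial block can be moved with a masked load/store instead of a byte loop. A hedged C equivalent for the 16-byte case; the intrinsic and its AVX512VL/AVX512BW requirement are assumptions for illustration, not code from this library:

    #include <stdint.h>
    #include <immintrin.h>

    static inline __m128i load_partial_block(const uint8_t *p, uint64_t len)
    {
            /* len in 0..15: same value byte_len_to_mask_table would supply */
            const __mmask16 m = (__mmask16)((1u << len) - 1u);

            return _mm_maskz_loadu_epi8(m, p);
    }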
+
+align 64
+mask_out_top_block:
+ dq 0xffffffffffffffff, 0xffffffffffffffff
+ dq 0xffffffffffffffff, 0xffffffffffffffff
+ dq 0xffffffffffffffff, 0xffffffffffffffff
+ dq 0x0000000000000000, 0x0000000000000000
+
+section .text
+
+;; Define the fields of gcm_context_data struct
+;; struct gcm_context_data {
+;; // init, update and finalize context data
+;; uint8_t aad_hash[GCM_BLOCK_LEN];
+;; uint64_t aad_length;
+;; uint64_t in_length;
+;; uint8_t partial_block_enc_key[GCM_BLOCK_LEN];
+;; uint8_t orig_IV[GCM_BLOCK_LEN];
+;; uint8_t current_counter[GCM_BLOCK_LEN];
+;; uint64_t partial_block_length;
+;; };
+
+%define AadHash (16*0) ; store current Hash of data which has been input
+%define AadLen (16*1) ; store length of input data which will not be encrypted or decrypted
+%define InLen ((16*1)+8); store length of input data which will be encrypted or decrypted
+%define PBlockEncKey (16*2) ; encryption key for the partial block at the end of the previous update
+%define OrigIV (16*3) ; input IV
+%define CurCount (16*4) ; Current counter for generation of encryption key
+%define PBlockLen (16*5) ; length of partial block at the end of the previous update
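A hedged cross-check of the offsets above against the struct sketched in the comment; the field names come from that comment, while GCM_BLOCK_LEN being 16 and the absence of padding are assumptions, so treat it as illustrative rather than part of the library:

    #include <stddef.h>
    #include "intel-ipsec-mb.h"

    _Static_assert(offsetof(struct gcm_context_data, aad_length) == (16 * 1),
                   "AadLen define matches aad_length offset");
    _Static_assert(offsetof(struct gcm_context_data, in_length) == (16 * 1) + 8,
                   "InLen define matches in_length offset");
    _Static_assert(offsetof(struct gcm_context_data, partial_block_length) == (16 * 5),
                   "PBlockLen define matches partial_block_length offset");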
+
+%define reg(q) xmm %+ q
+%define regy(q) ymm %+ q
+%define regz(q) zmm %+ q
+
+%ifdef WIN_ABI
+ %xdefine arg1 rcx
+ %xdefine arg2 rdx
+ %xdefine arg3 r8
+ %xdefine arg4 r9
+ %xdefine arg5 qword [r14 + STACK_OFFSET + 8*5]
+ %xdefine arg6 qword [r14 + STACK_OFFSET + 8*6]
+ %xdefine arg7 qword [r14 + STACK_OFFSET + 8*7]
+ %xdefine arg8 qword [r14 + STACK_OFFSET + 8*8]
+ %xdefine arg9 qword [r14 + STACK_OFFSET + 8*9]
+ %xdefine arg10 qword [r14 + STACK_OFFSET + 8*10]
+%else
+ %xdefine arg1 rdi
+ %xdefine arg2 rsi
+ %xdefine arg3 rdx
+ %xdefine arg4 rcx
+ %xdefine arg5 r8
+ %xdefine arg6 r9
+ %xdefine arg7 qword [r14 + STACK_OFFSET + 8*1]
+ %xdefine arg8 qword [r14 + STACK_OFFSET + 8*2]
+ %xdefine arg9 qword [r14 + STACK_OFFSET + 8*3]
+ %xdefine arg10 qword [r14 + STACK_OFFSET + 8*4]
+%endif
+
+%ifdef NT_LDST
+ %define NT_LD
+ %define NT_ST
+%endif
+
+;;; Use non-temporal loads
+%ifdef NT_LD
+ %define XLDR movntdqa
+ %define VXLDR vmovntdqa
+ %define VX512LDR vmovntdqa
+%else
+ %define XLDR movdqu
+ %define VXLDR vmovdqu
+ %define VX512LDR vmovdqu8
+%endif
+
+;;; Use non-temporal stores
+%ifdef NT_ST
+ %define XSTR movntdq
+ %define VXSTR vmovntdq
+ %define VX512STR vmovntdq
+%else
+ %define XSTR movdqu
+ %define VXSTR vmovdqu
+ %define VX512STR vmovdqu8
+%endif
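The NT_LD/NT_ST switches above select streaming (non-temporal) moves for XLDR/XSTR and their AVX/AVX512 counterparts; otherwise ordinary unaligned moves are used. A hedged intrinsic-level illustration of the same choice (the helper names are made up for this sketch, and movntdqa requires a 16-byte aligned source):

    #include <immintrin.h>

    static inline __m128i xldr(const void *p, int non_temporal)
    {
            return non_temporal ? _mm_stream_load_si128((__m128i *)p)   /* movntdqa */
                                : _mm_loadu_si128((const __m128i *)p);  /* movdqu   */
    }

    static inline void xstr(void *p, __m128i v, int non_temporal)
    {
            if (non_temporal)
                    _mm_stream_si128((__m128i *)p, v);                  /* movntdq  */
            else
                    _mm_storeu_si128((__m128i *)p, v);                  /* movdqu   */
    }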
+
+%endif ; GCM_DEFINES_ASM_INCLUDED
diff --git a/src/spdk/intel-ipsec-mb/include/gcm_keys_avx2_avx512.asm b/src/spdk/intel-ipsec-mb/include/gcm_keys_avx2_avx512.asm
new file mode 100644
index 000000000..d812e53bd
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/include/gcm_keys_avx2_avx512.asm
@@ -0,0 +1,52 @@
+;;
+;; Copyright (c) 2019, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%ifndef GCM_KEYS_AVX2_AVX512_INCLUDED
+%define GCM_KEYS_AVX2_AVX512_INCLUDED
+
+;; Define the fields of gcm_key_data struct:
+;; uint8_t expanded_keys[GCM_ENC_KEY_LEN * GCM_KEY_SETS];
+;; uint8_t shifted_hkey_8[GCM_ENC_KEY_LEN]; // HashKey^8 <<1 mod poly
+;; uint8_t shifted_hkey_7[GCM_ENC_KEY_LEN]; // HashKey^7 <<1 mod poly
+;; uint8_t shifted_hkey_6[GCM_ENC_KEY_LEN]; // HashKey^6 <<1 mod poly
+;; uint8_t shifted_hkey_5[GCM_ENC_KEY_LEN]; // HashKey^5 <<1 mod poly
+;; uint8_t shifted_hkey_4[GCM_ENC_KEY_LEN]; // HashKey^4 <<1 mod poly
+;; uint8_t shifted_hkey_3[GCM_ENC_KEY_LEN]; // HashKey^3 <<1 mod poly
+;; uint8_t shifted_hkey_2[GCM_ENC_KEY_LEN]; // HashKey^2 <<1 mod poly
+;; uint8_t shifted_hkey_1[GCM_ENC_KEY_LEN]; // HashKey <<1 mod poly
+
+%define HashKey_8 (16*15) ; HashKey^8 <<1 mod poly
+%define HashKey_7 (16*16) ; HashKey^7 <<1 mod poly
+%define HashKey_6 (16*17) ; HashKey^6 <<1 mod poly
+%define HashKey_5 (16*18) ; HashKey^5 <<1 mod poly
+%define HashKey_4 (16*19) ; HashKey^4 <<1 mod poly
+%define HashKey_3 (16*20) ; HashKey^3 <<1 mod poly
+%define HashKey_2 (16*21) ; HashKey^2 <<1 mod poly
+%define HashKey_1 (16*22) ; HashKey <<1 mod poly
+%define HashKey (16*22) ; HashKey <<1 mod poly
+
+%endif ; GCM_KEYS_AVX2_AVX512_INCLUDED
diff --git a/src/spdk/intel-ipsec-mb/include/gcm_keys_sse_avx.asm b/src/spdk/intel-ipsec-mb/include/gcm_keys_sse_avx.asm
new file mode 100644
index 000000000..f7531e5a7
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/include/gcm_keys_sse_avx.asm
@@ -0,0 +1,73 @@
+;;
+;; Copyright (c) 2019, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%ifndef GCM_KEYS_SSE_AVX_INCLUDED
+%define GCM_KEYS_SSE_AVX_INCLUDED
+
+;; Define the fields of gcm_key_data struct:
+;; uint8_t expanded_keys[GCM_ENC_KEY_LEN * GCM_KEY_SETS];
+;; uint8_t shifted_hkey_8[GCM_ENC_KEY_LEN]; // HashKey^8 <<1 mod poly
+;; uint8_t shifted_hkey_7[GCM_ENC_KEY_LEN]; // HashKey^7 <<1 mod poly
+;; uint8_t shifted_hkey_6[GCM_ENC_KEY_LEN]; // HashKey^6 <<1 mod poly
+;; uint8_t shifted_hkey_5[GCM_ENC_KEY_LEN]; // HashKey^5 <<1 mod poly
+;; uint8_t shifted_hkey_4[GCM_ENC_KEY_LEN]; // HashKey^4 <<1 mod poly
+;; uint8_t shifted_hkey_3[GCM_ENC_KEY_LEN]; // HashKey^3 <<1 mod poly
+;; uint8_t shifted_hkey_2[GCM_ENC_KEY_LEN]; // HashKey^2 <<1 mod poly
+;; uint8_t shifted_hkey_1[GCM_ENC_KEY_LEN]; // HashKey <<1 mod poly
+;; uint8_t shifted_hkey_1_k[GCM_ENC_KEY_LEN]; // XOR of High and Low 64 bits of HashKey <<1 mod poly (Karatsuba)
+;; uint8_t shifted_hkey_2_k[GCM_ENC_KEY_LEN]; // XOR of High and Low 64 bits of HashKey^2 <<1 mod poly (Karatsuba)
+;; uint8_t shifted_hkey_3_k[GCM_ENC_KEY_LEN]; // XOR of High and Low 64 bits of HashKey^3 <<1 mod poly (Karatsuba)
+;; uint8_t shifted_hkey_4_k[GCM_ENC_KEY_LEN]; // XOR of High and Low 64 bits of HashKey^4 <<1 mod poly (Karatsuba)
+;; uint8_t shifted_hkey_5_k[GCM_ENC_KEY_LEN]; // XOR of High and Low 64 bits of HashKey^5 <<1 mod poly (Karatsuba)
+;; uint8_t shifted_hkey_6_k[GCM_ENC_KEY_LEN]; // XOR of High and Low 64 bits of HashKey^6 <<1 mod poly (Karatsuba)
+;; uint8_t shifted_hkey_7_k[GCM_ENC_KEY_LEN]; // XOR of High and Low 64 bits of HashKey^7 <<1 mod poly (Karatsuba)
+;; uint8_t shifted_hkey_8_k[GCM_ENC_KEY_LEN]; // XOR of High and Low 64 bits of HashKey^8 <<1 mod poly (Karatsuba)
+
+;;
+;; Key structure holds up to 8 ghash keys
+;;
+%define HashKey_8 (16*15) ; HashKey^8 <<1 mod poly
+%define HashKey_7 (16*16) ; HashKey^7 <<1 mod poly
+%define HashKey_6 (16*17) ; HashKey^6 <<1 mod poly
+%define HashKey_5 (16*18) ; HashKey^5 <<1 mod poly
+%define HashKey_4 (16*19) ; HashKey^4 <<1 mod poly
+%define HashKey_3 (16*20) ; HashKey^3 <<1 mod poly
+%define HashKey_2 (16*21) ; HashKey^2 <<1 mod poly
+%define HashKey_1 (16*22) ; HashKey <<1 mod poly
+%define HashKey (16*22) ; HashKey <<1 mod poly
+;; ghash keys for Karatsuba multiply
+%define HashKey_k (16*23) ; XOR of High 64 bits and Low 64 bits of HashKey <<1 mod poly
+%define HashKey_1_k (16*23) ; XOR of High 64 bits and Low 64 bits of HashKey <<1 mod poly
+%define HashKey_2_k (16*24) ; XOR of High 64 bits and Low 64 bits of HashKey^2 <<1 mod poly
+%define HashKey_3_k (16*25) ; XOR of High 64 bits and Low 64 bits of HashKey^3 <<1 mod poly
+%define HashKey_4_k (16*26) ; XOR of High 64 bits and Low 64 bits of HashKey^4 <<1 mod poly
+%define HashKey_5_k (16*27) ; XOR of High 64 bits and Low 64 bits of HashKey^5 <<1 mod poly
+%define HashKey_6_k (16*28) ; XOR of High 64 bits and Low 64 bits of HashKey^6 <<1 mod poly
+%define HashKey_7_k (16*29) ; XOR of High 64 bits and Low 64 bits of HashKey^7 <<1 mod poly
+%define HashKey_8_k (16*30) ; XOR of High 64 bits and Low 64 bits of HashKey^8 <<1 mod poly
+
+%endif ; GCM_KEYS_SSE_AVX_INCLUDED
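Per the comments above, each shifted_hkey_N_k slot holds the XOR of the high and low 64-bit halves of the corresponding shifted hash key power, which is the extra operand a Karatsuba multiply needs. A hedged sketch of that precompute; the struct below is illustrative, not the library's layout:

    #include <stdint.h>

    struct hkey128 {
            uint64_t lo;    /* low  64 bits of HashKey^N << 1 mod poly */
            uint64_t hi;    /* high 64 bits of HashKey^N << 1 mod poly */
    };

    static inline uint64_t karatsuba_helper(const struct hkey128 *h)
    {
            return h->hi ^ h->lo;   /* value stored in shifted_hkey_N_k */
    }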
diff --git a/src/spdk/intel-ipsec-mb/include/gcm_keys_vaes_avx512.asm b/src/spdk/intel-ipsec-mb/include/gcm_keys_vaes_avx512.asm
new file mode 100644
index 000000000..4aea2f5c9
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/include/gcm_keys_vaes_avx512.asm
@@ -0,0 +1,231 @@
+;;
+;; Copyright (c) 2019, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%ifndef GCM_KEYS_VAES_AVX512_INCLUDED
+%define GCM_KEYS_VAES_AVX512_INCLUDED
+
+;; Define the fields of gcm_key_data struct:
+;; uint8_t expanded_keys[GCM_ENC_KEY_LEN * GCM_KEY_SETS];
+;; uint8_t shifted_hkey_9_128[GCM_ENC_KEY_LEN * (128 - 8)];
+;; uint8_t shifted_hkey_8[GCM_ENC_KEY_LEN]; // HashKey^8 <<1 mod poly
+;; uint8_t shifted_hkey_7[GCM_ENC_KEY_LEN]; // HashKey^7 <<1 mod poly
+;; uint8_t shifted_hkey_6[GCM_ENC_KEY_LEN]; // HashKey^6 <<1 mod poly
+;; uint8_t shifted_hkey_5[GCM_ENC_KEY_LEN]; // HashKey^5 <<1 mod poly
+;; uint8_t shifted_hkey_4[GCM_ENC_KEY_LEN]; // HashKey^4 <<1 mod poly
+;; uint8_t shifted_hkey_3[GCM_ENC_KEY_LEN]; // HashKey^3 <<1 mod poly
+;; uint8_t shifted_hkey_2[GCM_ENC_KEY_LEN]; // HashKey^2 <<1 mod poly
+;; uint8_t shifted_hkey_1[GCM_ENC_KEY_LEN]; // HashKey <<1 mod poly
+
+%ifdef GCM_BIG_DATA
+;;
+;; Key structure holds up to 128 ghash keys
+;;
+%define HashKey_128 (16*15) ; HashKey^128 <<1 mod poly
+%define HashKey_127 (16*16) ; HashKey^127 <<1 mod poly
+%define HashKey_126 (16*17) ; HashKey^126 <<1 mod poly
+%define HashKey_125 (16*18) ; HashKey^125 <<1 mod poly
+%define HashKey_124 (16*19) ; HashKey^124 <<1 mod poly
+%define HashKey_123 (16*20) ; HashKey^123 <<1 mod poly
+%define HashKey_122 (16*21) ; HashKey^122 <<1 mod poly
+%define HashKey_121 (16*22) ; HashKey^121 <<1 mod poly
+%define HashKey_120 (16*23) ; HashKey^120 <<1 mod poly
+%define HashKey_119 (16*24) ; HashKey^119 <<1 mod poly
+%define HashKey_118 (16*25) ; HashKey^118 <<1 mod poly
+%define HashKey_117 (16*26) ; HashKey^117 <<1 mod poly
+%define HashKey_116 (16*27) ; HashKey^116 <<1 mod poly
+%define HashKey_115 (16*28) ; HashKey^115 <<1 mod poly
+%define HashKey_114 (16*29) ; HashKey^114 <<1 mod poly
+%define HashKey_113 (16*30) ; HashKey^113 <<1 mod poly
+%define HashKey_112 (16*31) ; HashKey^112 <<1 mod poly
+%define HashKey_111 (16*32) ; HashKey^111 <<1 mod poly
+%define HashKey_110 (16*33) ; HashKey^110 <<1 mod poly
+%define HashKey_109 (16*34) ; HashKey^109 <<1 mod poly
+%define HashKey_108 (16*35) ; HashKey^108 <<1 mod poly
+%define HashKey_107 (16*36) ; HashKey^107 <<1 mod poly
+%define HashKey_106 (16*37) ; HashKey^106 <<1 mod poly
+%define HashKey_105 (16*38) ; HashKey^105 <<1 mod poly
+%define HashKey_104 (16*39) ; HashKey^104 <<1 mod poly
+%define HashKey_103 (16*40) ; HashKey^103 <<1 mod poly
+%define HashKey_102 (16*41) ; HashKey^102 <<1 mod poly
+%define HashKey_101 (16*42) ; HashKey^101 <<1 mod poly
+%define HashKey_100 (16*43) ; HashKey^100 <<1 mod poly
+%define HashKey_99 (16*44) ; HashKey^99 <<1 mod poly
+%define HashKey_98 (16*45) ; HashKey^98 <<1 mod poly
+%define HashKey_97 (16*46) ; HashKey^97 <<1 mod poly
+%define HashKey_96 (16*47) ; HashKey^96 <<1 mod poly
+%define HashKey_95 (16*48) ; HashKey^95 <<1 mod poly
+%define HashKey_94 (16*49) ; HashKey^94 <<1 mod poly
+%define HashKey_93 (16*50) ; HashKey^93 <<1 mod poly
+%define HashKey_92 (16*51) ; HashKey^92 <<1 mod poly
+%define HashKey_91 (16*52) ; HashKey^91 <<1 mod poly
+%define HashKey_90 (16*53) ; HashKey^90 <<1 mod poly
+%define HashKey_89 (16*54) ; HashKey^89 <<1 mod poly
+%define HashKey_88 (16*55) ; HashKey^88 <<1 mod poly
+%define HashKey_87 (16*56) ; HashKey^87 <<1 mod poly
+%define HashKey_86 (16*57) ; HashKey^86 <<1 mod poly
+%define HashKey_85 (16*58) ; HashKey^85 <<1 mod poly
+%define HashKey_84 (16*59) ; HashKey^84 <<1 mod poly
+%define HashKey_83 (16*60) ; HashKey^83 <<1 mod poly
+%define HashKey_82 (16*61) ; HashKey^82 <<1 mod poly
+%define HashKey_81 (16*62) ; HashKey^81 <<1 mod poly
+%define HashKey_80 (16*63) ; HashKey^80 <<1 mod poly
+%define HashKey_79 (16*64) ; HashKey^79 <<1 mod poly
+%define HashKey_78 (16*65) ; HashKey^78 <<1 mod poly
+%define HashKey_77 (16*66) ; HashKey^77 <<1 mod poly
+%define HashKey_76 (16*67) ; HashKey^76 <<1 mod poly
+%define HashKey_75 (16*68) ; HashKey^75 <<1 mod poly
+%define HashKey_74 (16*69) ; HashKey^74 <<1 mod poly
+%define HashKey_73 (16*70) ; HashKey^73 <<1 mod poly
+%define HashKey_72 (16*71) ; HashKey^72 <<1 mod poly
+%define HashKey_71 (16*72) ; HashKey^71 <<1 mod poly
+%define HashKey_70 (16*73) ; HashKey^70 <<1 mod poly
+%define HashKey_69 (16*74) ; HashKey^69 <<1 mod poly
+%define HashKey_68 (16*75) ; HashKey^68 <<1 mod poly
+%define HashKey_67 (16*76) ; HashKey^67 <<1 mod poly
+%define HashKey_66 (16*77) ; HashKey^66 <<1 mod poly
+%define HashKey_65 (16*78) ; HashKey^65 <<1 mod poly
+%define HashKey_64 (16*79) ; HashKey^64 <<1 mod poly
+%define HashKey_63 (16*80) ; HashKey^63 <<1 mod poly
+%define HashKey_62 (16*81) ; HashKey^62 <<1 mod poly
+%define HashKey_61 (16*82) ; HashKey^61 <<1 mod poly
+%define HashKey_60 (16*83) ; HashKey^60 <<1 mod poly
+%define HashKey_59 (16*84) ; HashKey^59 <<1 mod poly
+%define HashKey_58 (16*85) ; HashKey^58 <<1 mod poly
+%define HashKey_57 (16*86) ; HashKey^57 <<1 mod poly
+%define HashKey_56 (16*87) ; HashKey^56 <<1 mod poly
+%define HashKey_55 (16*88) ; HashKey^55 <<1 mod poly
+%define HashKey_54 (16*89) ; HashKey^54 <<1 mod poly
+%define HashKey_53 (16*90) ; HashKey^53 <<1 mod poly
+%define HashKey_52 (16*91) ; HashKey^52 <<1 mod poly
+%define HashKey_51 (16*92) ; HashKey^51 <<1 mod poly
+%define HashKey_50 (16*93) ; HashKey^50 <<1 mod poly
+%define HashKey_49 (16*94) ; HashKey^49 <<1 mod poly
+%define HashKey_48 (16*95) ; HashKey^48 <<1 mod poly
+%define HashKey_47 (16*96) ; HashKey^47 <<1 mod poly
+%define HashKey_46 (16*97) ; HashKey^46 <<1 mod poly
+%define HashKey_45 (16*98) ; HashKey^45 <<1 mod poly
+%define HashKey_44 (16*99) ; HashKey^44 <<1 mod poly
+%define HashKey_43 (16*100) ; HashKey^43 <<1 mod poly
+%define HashKey_42 (16*101) ; HashKey^42 <<1 mod poly
+%define HashKey_41 (16*102) ; HashKey^41 <<1 mod poly
+%define HashKey_40 (16*103) ; HashKey^40 <<1 mod poly
+%define HashKey_39 (16*104) ; HashKey^39 <<1 mod poly
+%define HashKey_38 (16*105) ; HashKey^38 <<1 mod poly
+%define HashKey_37 (16*106) ; HashKey^37 <<1 mod poly
+%define HashKey_36 (16*107) ; HashKey^36 <<1 mod poly
+%define HashKey_35 (16*108) ; HashKey^35 <<1 mod poly
+%define HashKey_34 (16*109) ; HashKey^34 <<1 mod poly
+%define HashKey_33 (16*110) ; HashKey^33 <<1 mod poly
+%define HashKey_32 (16*111) ; HashKey^32 <<1 mod poly
+%define HashKey_31 (16*112) ; HashKey^31 <<1 mod poly
+%define HashKey_30 (16*113) ; HashKey^30 <<1 mod poly
+%define HashKey_29 (16*114) ; HashKey^29 <<1 mod poly
+%define HashKey_28 (16*115) ; HashKey^28 <<1 mod poly
+%define HashKey_27 (16*116) ; HashKey^27 <<1 mod poly
+%define HashKey_26 (16*117) ; HashKey^26 <<1 mod poly
+%define HashKey_25 (16*118) ; HashKey^25 <<1 mod poly
+%define HashKey_24 (16*119) ; HashKey^24 <<1 mod poly
+%define HashKey_23 (16*120) ; HashKey^23 <<1 mod poly
+%define HashKey_22 (16*121) ; HashKey^22 <<1 mod poly
+%define HashKey_21 (16*122) ; HashKey^21 <<1 mod poly
+%define HashKey_20 (16*123) ; HashKey^20 <<1 mod poly
+%define HashKey_19 (16*124) ; HashKey^19 <<1 mod poly
+%define HashKey_18 (16*125) ; HashKey^18 <<1 mod poly
+%define HashKey_17 (16*126) ; HashKey^17 <<1 mod poly
+%define HashKey_16 (16*127) ; HashKey^16 <<1 mod poly
+%define HashKey_15 (16*128) ; HashKey^15 <<1 mod poly
+%define HashKey_14 (16*129) ; HashKey^14 <<1 mod poly
+%define HashKey_13 (16*130) ; HashKey^13 <<1 mod poly
+%define HashKey_12 (16*131) ; HashKey^12 <<1 mod poly
+%define HashKey_11 (16*132) ; HashKey^11 <<1 mod poly
+%define HashKey_10 (16*133) ; HashKey^10 <<1 mod poly
+%define HashKey_9 (16*134) ; HashKey^9 <<1 mod poly
+%define HashKey_8 (16*135) ; HashKey^8 <<1 mod poly
+%define HashKey_7 (16*136) ; HashKey^7 <<1 mod poly
+%define HashKey_6 (16*137) ; HashKey^6 <<1 mod poly
+%define HashKey_5 (16*138) ; HashKey^5 <<1 mod poly
+%define HashKey_4 (16*139) ; HashKey^4 <<1 mod poly
+%define HashKey_3 (16*140) ; HashKey^3 <<1 mod poly
+%define HashKey_2 (16*141) ; HashKey^2 <<1 mod poly
+%define HashKey_1 (16*142) ; HashKey <<1 mod poly
+%define HashKey (16*142) ; HashKey <<1 mod poly
+%else
+;;
+;; Key structure holds up to 48 ghash keys
+;;
+%define HashKey_48 (16*15) ; HashKey^48 <<1 mod poly
+%define HashKey_47 (16*16) ; HashKey^47 <<1 mod poly
+%define HashKey_46 (16*17) ; HashKey^46 <<1 mod poly
+%define HashKey_45 (16*18) ; HashKey^45 <<1 mod poly
+%define HashKey_44 (16*19) ; HashKey^44 <<1 mod poly
+%define HashKey_43 (16*20) ; HashKey^43 <<1 mod poly
+%define HashKey_42 (16*21) ; HashKey^42 <<1 mod poly
+%define HashKey_41 (16*22) ; HashKey^41 <<1 mod poly
+%define HashKey_40 (16*23) ; HashKey^40 <<1 mod poly
+%define HashKey_39 (16*24) ; HashKey^39 <<1 mod poly
+%define HashKey_38 (16*25) ; HashKey^38 <<1 mod poly
+%define HashKey_37 (16*26) ; HashKey^37 <<1 mod poly
+%define HashKey_36 (16*27) ; HashKey^36 <<1 mod poly
+%define HashKey_35 (16*28) ; HashKey^35 <<1 mod poly
+%define HashKey_34 (16*29) ; HashKey^34 <<1 mod poly
+%define HashKey_33 (16*30) ; HashKey^33 <<1 mod poly
+%define HashKey_32 (16*31) ; HashKey^32 <<1 mod poly
+%define HashKey_31 (16*32) ; HashKey^31 <<1 mod poly
+%define HashKey_30 (16*33) ; HashKey^30 <<1 mod poly
+%define HashKey_29 (16*34) ; HashKey^29 <<1 mod poly
+%define HashKey_28 (16*35) ; HashKey^28 <<1 mod poly
+%define HashKey_27 (16*36) ; HashKey^27 <<1 mod poly
+%define HashKey_26 (16*37) ; HashKey^26 <<1 mod poly
+%define HashKey_25 (16*38) ; HashKey^25 <<1 mod poly
+%define HashKey_24 (16*39) ; HashKey^24 <<1 mod poly
+%define HashKey_23 (16*40) ; HashKey^23 <<1 mod poly
+%define HashKey_22 (16*41) ; HashKey^22 <<1 mod poly
+%define HashKey_21 (16*42) ; HashKey^21 <<1 mod poly
+%define HashKey_20 (16*43) ; HashKey^20 <<1 mod poly
+%define HashKey_19 (16*44) ; HashKey^19 <<1 mod poly
+%define HashKey_18 (16*45) ; HashKey^18 <<1 mod poly
+%define HashKey_17 (16*46) ; HashKey^17 <<1 mod poly
+%define HashKey_16 (16*47) ; HashKey^16 <<1 mod poly
+%define HashKey_15 (16*48) ; HashKey^15 <<1 mod poly
+%define HashKey_14 (16*49) ; HashKey^14 <<1 mod poly
+%define HashKey_13 (16*50) ; HashKey^13 <<1 mod poly
+%define HashKey_12 (16*51) ; HashKey^12 <<1 mod poly
+%define HashKey_11 (16*52) ; HashKey^11 <<1 mod poly
+%define HashKey_10 (16*53) ; HashKey^10 <<1 mod poly
+%define HashKey_9 (16*54) ; HashKey^9 <<1 mod poly
+%define HashKey_8 (16*55) ; HashKey^8 <<1 mod poly
+%define HashKey_7 (16*56) ; HashKey^7 <<1 mod poly
+%define HashKey_6 (16*57) ; HashKey^6 <<1 mod poly
+%define HashKey_5 (16*58) ; HashKey^5 <<1 mod poly
+%define HashKey_4 (16*59) ; HashKey^4 <<1 mod poly
+%define HashKey_3 (16*60) ; HashKey^3 <<1 mod poly
+%define HashKey_2 (16*61) ; HashKey^2 <<1 mod poly
+%define HashKey_1 (16*62) ; HashKey <<1 mod poly
+%define HashKey (16*62) ; HashKey <<1 mod poly
+%endif ; !GCM_BIG_DATA
+
+%endif ; GCM_KEYS_VAES_AVX512_INCLUDED
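The %define tables above (and the 8-key variants in the earlier gcm_keys_*.asm headers) all follow one pattern: with N stored GHASH key powers, HashKey^i sits at byte offset 16*(15 + N - i), immediately after the 15 expanded AES round-key slots. A hedged restatement of that formula, not an exported library API:

    #include <stdint.h>

    /* N = 8 (SSE/AVX/AVX2/AVX512), 48 (VAES/AVX512), or 128 with GCM_BIG_DATA */
    static inline uint64_t hashkey_offset(unsigned int n_keys, unsigned int i)
    {
            /* valid for 1 <= i <= n_keys; e.g. n_keys = 48, i = 1 -> 16*62 */
            return 16u * (15u + n_keys - i);
    }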
diff --git a/src/spdk/intel-ipsec-mb/include/kasumi_internal.h b/src/spdk/intel-ipsec-mb/include/kasumi_internal.h
new file mode 100755
index 000000000..87b114d88
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/include/kasumi_internal.h
@@ -0,0 +1,1853 @@
+/*******************************************************************************
+ Copyright (c) 2009-2019, Intel Corporation
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*******************************************************************************/
+
+
+/*---------------------------------------------------------
+* Kasumi_internal.h
+*---------------------------------------------------------*/
+
+#ifndef _KASUMI_INTERNAL_H_
+#define _KASUMI_INTERNAL_H_
+
+#include <sys/types.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "intel-ipsec-mb.h"
+#include "wireless_common.h"
+#include "include/clear_regs_mem.h"
+#include "include/constant_lookup.h"
+
+/*---------------------------------------------------------------------
+* Kasumi Inner S-Boxes
+*---------------------------------------------------------------------*/
+
+/* Table version based on a small table, no cache thrashing */
+static const uint16_t sso_kasumi_S7e[] = {
+ 0x6c00, 0x6601, 0x7802, 0x7603, 0x2404, 0x4e05, 0xb006, 0xce07,
+ 0x5c08, 0x1e09, 0x6a0a, 0xac0b, 0x1c0c, 0x3e0d, 0xea0e, 0x5c0f,
+ 0x4e10, 0xc011, 0x6a12, 0xc213, 0x0214, 0xac15, 0xae16, 0x3617,
+ 0x6e18, 0xa019, 0x681a, 0x001b, 0x0a1c, 0xe41d, 0xc41e, 0x9c1f,
+ 0x2a20, 0x5021, 0xb622, 0xd823, 0x2024, 0x3225, 0x3826, 0x2e27,
+ 0x9a28, 0xac29, 0x042a, 0xa62b, 0x882c, 0xd62d, 0xd22e, 0x082f,
+ 0x4830, 0x9631, 0xf432, 0x1c33, 0x4634, 0xb035, 0x7636, 0xa637,
+ 0xea38, 0x7039, 0x543a, 0x783b, 0xdc3c, 0x6e3d, 0xae3e, 0xba3f,
+ 0x6a40, 0x6a41, 0x1c42, 0x9043, 0x3a44, 0x5e45, 0x8c46, 0x7447,
+ 0x7c48, 0x5449, 0x384a, 0x1c4b, 0xa44c, 0xe84d, 0x604e, 0x304f,
+ 0x4050, 0xc451, 0x8652, 0xac53, 0x1654, 0xb655, 0x1856, 0x0657,
+ 0x0658, 0xa259, 0xf25a, 0x785b, 0xf85c, 0x785d, 0x845e, 0x3a5f,
+ 0x0c60, 0xfc61, 0xf062, 0x9c63, 0x5e64, 0xc265, 0x6666, 0x7667,
+ 0x9a68, 0x4669, 0x746a, 0xb46b, 0x506c, 0xe06d, 0x3a6e, 0x866f,
+ 0x6070, 0x3471, 0x3c72, 0xd673, 0x3474, 0x4c75, 0xa476, 0x7277,
+ 0xa478, 0xd479, 0xea7a, 0xa47b, 0x487c, 0x147d, 0x8a7e, 0xf87f,
+ 0x6c00, 0x6601, 0x7802, 0x7603, 0x2404, 0x4e05, 0xb006, 0xce07,
+ 0x5c08, 0x1e09, 0x6a0a, 0xac0b, 0x1c0c, 0x3e0d, 0xea0e, 0x5c0f,
+ 0x4e10, 0xc011, 0x6a12, 0xc213, 0x0214, 0xac15, 0xae16, 0x3617,
+ 0x6e18, 0xa019, 0x681a, 0x001b, 0x0a1c, 0xe41d, 0xc41e, 0x9c1f,
+ 0x2a20, 0x5021, 0xb622, 0xd823, 0x2024, 0x3225, 0x3826, 0x2e27,
+ 0x9a28, 0xac29, 0x042a, 0xa62b, 0x882c, 0xd62d, 0xd22e, 0x082f,
+ 0x4830, 0x9631, 0xf432, 0x1c33, 0x4634, 0xb035, 0x7636, 0xa637,
+ 0xea38, 0x7039, 0x543a, 0x783b, 0xdc3c, 0x6e3d, 0xae3e, 0xba3f,
+ 0x6a40, 0x6a41, 0x1c42, 0x9043, 0x3a44, 0x5e45, 0x8c46, 0x7447,
+ 0x7c48, 0x5449, 0x384a, 0x1c4b, 0xa44c, 0xe84d, 0x604e, 0x304f,
+ 0x4050, 0xc451, 0x8652, 0xac53, 0x1654, 0xb655, 0x1856, 0x0657,
+ 0x0658, 0xa259, 0xf25a, 0x785b, 0xf85c, 0x785d, 0x845e, 0x3a5f,
+ 0x0c60, 0xfc61, 0xf062, 0x9c63, 0x5e64, 0xc265, 0x6666, 0x7667,
+ 0x9a68, 0x4669, 0x746a, 0xb46b, 0x506c, 0xe06d, 0x3a6e, 0x866f,
+ 0x6070, 0x3471, 0x3c72, 0xd673, 0x3474, 0x4c75, 0xa476, 0x7277,
+ 0xa478, 0xd479, 0xea7a, 0xa47b, 0x487c, 0x147d, 0x8a7e, 0xf87f
+};
+
+static const uint16_t sso_kasumi_S9e[] = {
+ 0x4ea7, 0xdeef, 0x42a1, 0xf77b, 0x0f87, 0x9d4e, 0x1209, 0xa552,
+ 0x4c26, 0xc4e2, 0x6030, 0xcd66, 0x89c4, 0x0381, 0xb45a, 0x1b8d,
+ 0x6eb7, 0xfafd, 0x2693, 0x974b, 0x3f9f, 0xa954, 0x6633, 0xd56a,
+ 0x6532, 0xe9f4, 0x0d06, 0xa452, 0xb0d8, 0x3e9f, 0xc964, 0x62b1,
+ 0x5eaf, 0xe2f1, 0xd3e9, 0x4a25, 0x9cce, 0x2211, 0x0000, 0x9b4d,
+ 0x582c, 0xfcfe, 0xf57a, 0x743a, 0x1e8f, 0xb8dc, 0xa251, 0x2190,
+ 0xbe5f, 0x0603, 0x773b, 0xeaf5, 0x6c36, 0xd6eb, 0xb4da, 0x2b95,
+ 0xb1d8, 0x1108, 0x58ac, 0xddee, 0xe773, 0x4522, 0x1f8f, 0x984c,
+ 0x4aa5, 0x8ac5, 0x178b, 0xf279, 0x0301, 0xc1e0, 0x4fa7, 0xa8d4,
+ 0xe0f0, 0x381c, 0x9dce, 0x60b0, 0x2d96, 0xf7fb, 0x4120, 0xbedf,
+ 0xebf5, 0x2f97, 0xf2f9, 0x1309, 0xb259, 0x74ba, 0xbadd, 0x59ac,
+ 0x48a4, 0x944a, 0x71b8, 0x88c4, 0x95ca, 0x4ba5, 0xbd5e, 0x46a3,
+ 0xd0e8, 0x3c9e, 0x0c86, 0xc562, 0x1a0d, 0xf4fa, 0xd7eb, 0x1c8e,
+ 0x7ebf, 0x8a45, 0x82c1, 0x53a9, 0x3098, 0xc6e3, 0xdd6e, 0x0e87,
+ 0xb158, 0x592c, 0x2914, 0xe4f2, 0x6bb5, 0x8140, 0xe271, 0x2d16,
+ 0x160b, 0xe6f3, 0xae57, 0x7b3d, 0x4824, 0xba5d, 0xe1f0, 0x361b,
+ 0xcfe7, 0x7dbe, 0xc5e2, 0x5229, 0x8844, 0x389c, 0x93c9, 0x0683,
+ 0x8d46, 0x2793, 0xa753, 0x2814, 0x4e27, 0xe673, 0x75ba, 0xf87c,
+ 0xb7db, 0x0180, 0xf9fc, 0x6a35, 0xe070, 0x54aa, 0xbfdf, 0x2e97,
+ 0xfc7e, 0x52a9, 0x9249, 0x190c, 0x2f17, 0x8341, 0x50a8, 0xd96c,
+ 0xd76b, 0x4924, 0x5c2e, 0xe7f3, 0x1389, 0x8f47, 0x8944, 0x3018,
+ 0x91c8, 0x170b, 0x3a9d, 0x99cc, 0xd1e8, 0x55aa, 0x6b35, 0xcae5,
+ 0x6fb7, 0xf5fa, 0xa0d0, 0x1f0f, 0xbb5d, 0x2391, 0x65b2, 0xd8ec,
+ 0x2010, 0xa2d1, 0xcf67, 0x6834, 0x7038, 0xf078, 0x8ec7, 0x2b15,
+ 0xa3d1, 0x41a0, 0xf8fc, 0x3f1f, 0xecf6, 0x0c06, 0xa653, 0x6331,
+ 0x49a4, 0xb359, 0x3299, 0xedf6, 0x8241, 0x7a3d, 0xe8f4, 0x351a,
+ 0x5aad, 0xbcde, 0x45a2, 0x8643, 0x0582, 0xe170, 0x0b05, 0xca65,
+ 0xb9dc, 0x4723, 0x86c3, 0x5dae, 0x6231, 0x9e4f, 0x4ca6, 0x954a,
+ 0x3118, 0xff7f, 0xeb75, 0x0080, 0xfd7e, 0x3198, 0x369b, 0xdfef,
+ 0xdf6f, 0x0984, 0x2512, 0xd66b, 0x97cb, 0x43a1, 0x7c3e, 0x8dc6,
+ 0x0884, 0xc2e1, 0x96cb, 0x793c, 0xd4ea, 0x1c0e, 0x5b2d, 0xb65b,
+ 0xeff7, 0x3d1e, 0x51a8, 0xa6d3, 0xb75b, 0x6733, 0x188c, 0xed76,
+ 0x4623, 0xce67, 0xfa7d, 0x57ab, 0x2613, 0xacd6, 0x8bc5, 0x2492,
+ 0xe5f2, 0x753a, 0x79bc, 0xcce6, 0x0100, 0x9349, 0x8cc6, 0x3b1d,
+ 0x6432, 0xe874, 0x9c4e, 0x359a, 0x140a, 0x9acd, 0xfdfe, 0x56ab,
+ 0xcee7, 0x5a2d, 0x168b, 0xa7d3, 0x3a1d, 0xac56, 0xf3f9, 0x4020,
+ 0x9048, 0x341a, 0xad56, 0x2c96, 0x7339, 0xd5ea, 0x5faf, 0xdcee,
+ 0x379b, 0x8b45, 0x2a95, 0xb3d9, 0x5028, 0xee77, 0x5cae, 0xc763,
+ 0x72b9, 0xd2e9, 0x0b85, 0x8e47, 0x81c0, 0x2311, 0xe974, 0x6e37,
+ 0xdc6e, 0x64b2, 0x8542, 0x180c, 0xabd5, 0x1188, 0xe371, 0x7cbe,
+ 0x0201, 0xda6d, 0xef77, 0x1289, 0x6ab5, 0xb058, 0x964b, 0x6934,
+ 0x0904, 0xc9e4, 0xc462, 0x2110, 0xe572, 0x2713, 0x399c, 0xde6f,
+ 0xa150, 0x7d3e, 0x0804, 0xf1f8, 0xd9ec, 0x0703, 0x6130, 0x9a4d,
+ 0xa351, 0x67b3, 0x2a15, 0xcb65, 0x5f2f, 0x994c, 0xc7e3, 0x2412,
+ 0x5e2f, 0xaa55, 0x3219, 0xe3f1, 0xb5da, 0x4321, 0xc864, 0x1b0d,
+ 0x5128, 0xbdde, 0x1d0e, 0xd46a, 0x3e1f, 0xd068, 0x63b1, 0xa854,
+ 0x3d9e, 0xcde6, 0x158a, 0xc060, 0xc663, 0x349a, 0xffff, 0x2894,
+ 0x3b9d, 0xd369, 0x3399, 0xfeff, 0x44a2, 0xaed7, 0x5d2e, 0x92c9,
+ 0x150a, 0xbf5f, 0xaf57, 0x2090, 0x73b9, 0xdb6d, 0xd86c, 0x552a,
+ 0xf6fb, 0x4422, 0x6cb6, 0xfbfd, 0x148a, 0xa4d2, 0x9f4f, 0x0a85,
+ 0x6f37, 0xc160, 0x9148, 0x1a8d, 0x198c, 0xb55a, 0xf67b, 0x7f3f,
+ 0x85c2, 0x3319, 0x5bad, 0xc8e4, 0x77bb, 0xc3e1, 0xb85c, 0x2994,
+ 0xcbe5, 0x4da6, 0xf0f8, 0x5329, 0x2e17, 0xaad5, 0x0482, 0xa5d2,
+ 0x2c16, 0xb2d9, 0x371b, 0x8c46, 0x4d26, 0xd168, 0x47a3, 0xfe7f,
+ 0x7138, 0xf379, 0x0e07, 0xa9d4, 0x84c2, 0x0402, 0xea75, 0x4f27,
+ 0x9fcf, 0x0502, 0xc0e0, 0x7fbf, 0xeef7, 0x76bb, 0xa050, 0x1d8e,
+ 0x391c, 0xc361, 0xd269, 0x0d86, 0x572b, 0xafd7, 0xadd6, 0x70b8,
+ 0x7239, 0x90c8, 0xb95c, 0x7e3f, 0x98cc, 0x78bc, 0x4221, 0x87c3,
+ 0xc261, 0x3c1e, 0x6d36, 0xb6db, 0xbc5e, 0x40a0, 0x0281, 0xdbed,
+ 0x8040, 0x66b3, 0x0f07, 0xcc66, 0x7abd, 0x9ecf, 0xe472, 0x2592,
+ 0x6db6, 0xbbdd, 0x0783, 0xf47a, 0x80c0, 0x542a, 0xfb7d, 0x0a05,
+ 0x2291, 0xec76, 0x68b4, 0x83c1, 0x4b25, 0x8743, 0x1088, 0xf97c,
+ 0x562b, 0x8442, 0x783c, 0x8fc7, 0xab55, 0x7bbd, 0x94ca, 0x61b0,
+ 0x1008, 0xdaed, 0x1e0f, 0xf178, 0x69b4, 0xa1d0, 0x763b, 0x9bcd
+};
+
+/* Range of input data for KASUMI is from 1 to 20000 bits */
+#define KASUMI_MIN_LEN 1
+#define KASUMI_MAX_LEN 20000
+
+/* KASUMI cipher definitions */
+#define NUM_KASUMI_ROUNDS (8) /* 8 rounds in the kasumi spec */
+#define QWORDSIZEINBITS (64)
+#define QWORDSIZEINBYTES (8)
+#define LAST_PADDING_BIT (1)
+
+#define BYTESIZE (8)
+#define BITSIZE(x) ((int)(sizeof(x)*BYTESIZE))
+
+/*----- a 64-bit structure to help with kasumi endian issues -----*/
+typedef union _ku64 {
+ uint64_t b64[1];
+ uint32_t b32[2];
+ uint16_t b16[4];
+ uint8_t b8[8];
+} kasumi_union_t;
+
+typedef union SafeBuffer {
+ uint64_t b64;
+ uint32_t b32[2];
+ uint8_t b8[KASUMI_BLOCK_SIZE];
+} SafeBuf;
+
+/*---------------------------------------------------------------------
+* Inline 16-bit left rotation
+*---------------------------------------------------------------------*/
+
+#define ROL16(a,b) (uint16_t)((a<<b)|(a>>(16-b)))
+
+#define FIp1(data, key1, key2, key3) \
+ do { \
+ uint16_t datal, datah; \
+ \
+ (data) ^= (key1); \
+ datal = LOOKUP16_SSE(sso_kasumi_S7e, (uint8_t)(data), 256); \
+ datah = LOOKUP16_SSE(sso_kasumi_S9e, (data) >> 7, 512); \
+ (data) = datal ^ datah; \
+ (data) ^= (key2); \
+ datal = LOOKUP16_SSE(sso_kasumi_S7e, (data) >> 9, 256); \
+ datah = LOOKUP16_SSE(sso_kasumi_S9e, (data) & 0x1FF, 512); \
+ (data) = datal ^ datah; \
+ (data) ^= (key3); \
+ } while (0)
+
+#define FIp2(data1, data2, key1, key2, key3, key4) \
+ do { \
+ FIp1(data1, key1, key2, key3); \
+ FIp1(data2, key1, key2, key4); \
+ } while (0)
+
+#define FLpi(key1, key2, res_h, res_l) \
+ do { \
+ uint16_t l, r; \
+ r = (res_l) & (key1); \
+ r = (res_h) ^ ROL16(r, 1); \
+ l = r | (key2); \
+ (res_h) = (res_l) ^ ROL16(l, 1); \
+ (res_l) = r; \
+ } while (0)
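For readability, the FLpi step above restated as a plain C function (a hedged, illustrative equivalent; the library keeps the macro form so it can update several block halves in place):

    #include <stdint.h>

    static inline uint16_t rol16(uint16_t a, unsigned int b)
    {
            return (uint16_t)((a << b) | (a >> (16 - b)));
    }

    static inline void kasumi_fl(uint16_t ka, uint16_t kb,
                                 uint16_t *res_h, uint16_t *res_l)
    {
            uint16_t r = (uint16_t)(*res_l & ka);

            r = (uint16_t)(*res_h ^ rol16(r, 1));
            *res_h = (uint16_t)(*res_l ^ rol16((uint16_t)(r | kb), 1));
            *res_l = r;
    }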
+
+#define FLp1(index, h, l) \
+ do { \
+ uint16_t ka = *(index + 0); \
+ uint16_t kb = *(index + 1); \
+ FLpi(ka, kb, h, l); \
+ } while (0)
+
+#define FLp2(index, h1, l1, h2, l2) \
+ do { \
+ uint16_t ka = *(index + 0); \
+ uint16_t kb = *(index + 1); \
+ FLpi(ka, kb, h1, l1); \
+ FLpi(ka, kb, h2, l2); \
+ } while (0)
+
+#define FLp3(index, h1, l1, h2, l2, h3, l3) \
+ do { \
+ uint16_t ka = *(index + 0); \
+ uint16_t kb = *(index + 1); \
+ FLpi(ka, kb, h1, l1); \
+ FLpi(ka, kb, h2, l2); \
+ FLpi(ka, kb, h3, l3); \
+ } while (0)
+
+#define FLp4(index, h1, l1, h2, l2, h3, l3, h4, l4) \
+ do { \
+ FLp2(index, h1, l1, h2, l2); \
+ FLp2(index, h3, l3, h4, l4); \
+ } while (0)
+
+#define FOp1(index, h, l) \
+ do { \
+ FIp1(h, *(index + 2), *(index + 3), l); \
+ FIp1(l, *(index + 4), *(index + 5), h); \
+ FIp1(h, *(index + 6), *(index + 7), l); \
+ } while (0)
+
+#define FOp2(index, h1, l1, h2, l2) \
+ do { \
+ uint16_t ka = *(index + 2); \
+ uint16_t kb = *(index + 3); \
+ FIp2(h1, h2, ka, kb, l1, l2); \
+ ka = *(index + 4); \
+ kb = *(index + 5); \
+ FIp2(l1, l2, ka, kb, h1, h2); \
+ ka = *(index + 6); \
+ kb = *(index + 7); \
+ FIp2(h1, h2, ka, kb, l1, l2); \
+ } while (0)
+
+#define FOp3(index, h1, l1, h2, l2, h3, l3) \
+ do { \
+ uint16_t ka = *(index + 2); \
+ uint16_t kb = *(index + 3); \
+ FIp2(h1, h2, ka, kb, l1, l2); \
+ FIp1(h3, ka, kb, l3); \
+ ka = *(index + 4); \
+ kb = *(index + 5); \
+ FIp2(l1, l2, ka, kb, h1, h2); \
+ FIp1(l3, ka, kb, h3); \
+ ka = *(index + 6); \
+ kb = *(index + 7); \
+ FIp2(h1, h2, ka, kb, l1, l2); \
+ FIp1(h3, ka, kb, l3); \
+ } while (0)
+
+#define FOp4(index, h1, l1, h2, l2, h3, l3, h4, l4) \
+ do { \
+ uint16_t ka = *(index + 2); \
+ uint16_t kb = *(index + 3); \
+ FIp2(h1, h2, ka, kb, l1, l2); \
+ FIp2(h3, h4, ka, kb, l3, l4); \
+ ka = *(index + 4); \
+ kb = *(index + 5); \
+ FIp2(l1, l2, ka, kb, h1, h2); \
+ FIp2(l3, l4, ka, kb, h3, h4); \
+ ka = *(index + 6); \
+ kb = *(index + 7); \
+ FIp2(h1, h2, ka, kb, l1, l2); \
+ FIp2(h3, h4, ka, kb, l3, l4); \
+ } while (0)
+
+/**
+ *******************************************************************************
+ * @description
+ * This function performs the Kasumi operation on the given block using the key
+ * that is already scheduled in the context
+ *
+ * @param[in] pContext Context where the scheduled keys are stored
+ * @param[in/out] pData Block to be enc/dec
+ *
+ ******************************************************************************/
+static void kasumi_1_block(const uint16_t *context, uint16_t *data)
+{
+ const uint16_t *end = context + KASUMI_KEY_SCHEDULE_SIZE;
+ uint16_t temp_l, temp_h;
+
+ /* 4 iterations odd/even */
+ do {
+ temp_l = data[3];
+ temp_h = data[2];
+ FLp1(context, temp_h, temp_l);
+ FOp1(context, temp_h, temp_l);
+ context += 8;
+ data[1] ^= temp_l;
+ data[0] ^= temp_h;
+
+ temp_h = data[1];
+ temp_l = data[0];
+ FOp1(context, temp_h, temp_l);
+ FLp1(context, temp_h, temp_l);
+ context += 8;
+ data[3] ^= temp_h;
+ data[2] ^= temp_l;
+ } while (context < end);
+}
+
+/**
+ ******************************************************************************
+ * @description
+ * This function performs the Kasumi operation on the given blocks using the key
+ * that is already scheduled in the context
+ *
+ * @param[in] pContext Context where the scheduled keys are stored
+ * @param[in/out] pData1 First block to be enc/dec
+ * @param[in/out] pData2 Second block to be enc/dec
+ *
+ ******************************************************************************/
+static void
+kasumi_2_blocks(const uint16_t *context, uint16_t *data1, uint16_t *data2)
+{
+ const uint16_t *end = context + KASUMI_KEY_SCHEDULE_SIZE;
+ uint16_t temp1_l, temp1_h;
+ uint16_t temp2_l, temp2_h;
+
+ /* 4 iterations odd/even, with fine-grained interleaving */
+ do {
+ /* even */
+ temp1_l = data1[3];
+ temp1_h = data1[2];
+ temp2_l = data2[3];
+ temp2_h = data2[2];
+ FLp2(context, temp1_h, temp1_l, temp2_h, temp2_l);
+ FOp2(context, temp1_h, temp1_l, temp2_h, temp2_l);
+ context += 8;
+ data1[1] ^= temp1_l;
+ data1[0] ^= temp1_h;
+ data2[1] ^= temp2_l;
+ data2[0] ^= temp2_h;
+
+ /* odd */
+ temp1_h = data1[1];
+ temp1_l = data1[0];
+ temp2_h = data2[1];
+ temp2_l = data2[0];
+ FOp2(context, temp1_h, temp1_l, temp2_h, temp2_l);
+ FLp2(context, temp1_h, temp1_l, temp2_h, temp2_l);
+ context += 8;
+ data1[3] ^= temp1_h;
+ data1[2] ^= temp1_l;
+ data2[3] ^= temp2_h;
+ data2[2] ^= temp2_l;
+ } while (context < end);
+}
+
+
+/**
+ *******************************************************************************
+ * @description
+ * This function performs the Kasumi operation on the given blocks using the key
+ * that is already scheduled in the context
+ *
+ * @param[in] pContext Context where the scheduled keys are stored
+ * @param[in/out] pData1 First block to be enc/dec
+ * @param[in/out] pData2 Second block to be enc/dec
+ * @param[in/out] pData3 Third block to be enc/dec
+ *
+ ******************************************************************************/
+static void
+kasumi_3_blocks(const uint16_t *context, uint16_t *data1,
+ uint16_t *data2, uint16_t *data3)
+{
+ /* Case when the compiler is able to interleave efficiently */
+ const uint16_t *end = context + KASUMI_KEY_SCHEDULE_SIZE;
+ uint16_t temp1_l, temp1_h;
+ uint16_t temp2_l, temp2_h;
+ uint16_t temp3_l, temp3_h;
+
+ /* 4 iterations odd/even, with fine-grained interleaving */
+ do {
+ temp1_l = data1[3];
+ temp1_h = data1[2];
+ temp2_l = data2[3];
+ temp2_h = data2[2];
+ temp3_l = data3[3];
+ temp3_h = data3[2];
+ FLp3(context, temp1_h, temp1_l, temp2_h, temp2_l, temp3_h,
+ temp3_l);
+ FOp3(context, temp1_h, temp1_l, temp2_h, temp2_l, temp3_h,
+ temp3_l);
+ context += 8;
+ data1[1] ^= temp1_l;
+ data1[0] ^= temp1_h;
+ data2[1] ^= temp2_l;
+ data2[0] ^= temp2_h;
+ data3[1] ^= temp3_l;
+ data3[0] ^= temp3_h;
+
+ temp1_h = data1[1];
+ temp1_l = data1[0];
+ temp2_h = data2[1];
+ temp2_l = data2[0];
+ temp3_h = data3[1];
+ temp3_l = data3[0];
+ FOp3(context, temp1_h, temp1_l, temp2_h, temp2_l, temp3_h,
+ temp3_l);
+ FLp3(context, temp1_h, temp1_l, temp2_h, temp2_l, temp3_h,
+ temp3_l);
+ context += 8;
+ data1[3] ^= temp1_h;
+ data1[2] ^= temp1_l;
+ data2[3] ^= temp2_h;
+ data2[2] ^= temp2_l;
+ data3[3] ^= temp3_h;
+ data3[2] ^= temp3_l;
+ } while (context < end);
+}
+
+/**
+ *******************************************************************************
+ * @description
+ * This function performs the Kasumi operation on the given blocks using the key
+ * that is already scheduled in the context
+ *
+ * @param[in] pContext Context where the scheduled keys are stored
+ * @param[in] ppData Pointer to an array of addresses of blocks
+ *
+ ******************************************************************************/
+static void
+kasumi_4_blocks(const uint16_t *context, uint16_t **ppData)
+{
+ /* Case when the compiler is unable to interleave efficiently */
+ kasumi_2_blocks (context, ppData[0], ppData[1]);
+ kasumi_2_blocks (context, ppData[2], ppData[3]);
+}
+
+/**
+ ******************************************************************************
+ * @description
+ * This function performs the Kasumi operation on the given blocks using the key
+ * that is already scheduled in the context
+ *
+ * @param[in] pContext Context where the scheduled keys are stored
+ * @param[in] ppData Pointer to an array of addresses of blocks
+ *
+ ******************************************************************************/
+static void
+kasumi_8_blocks(const uint16_t *context, uint16_t **ppData)
+{
+ kasumi_4_blocks (context, &ppData[0]);
+ kasumi_4_blocks (context, &ppData[4]);
+}
+
+/******************************************************************************
+* @description
+* Multiple wrappers for the Kasumi rounds on up to 16 blocks of 64 bits at a
+* time.
+*
+* Depending on the variable packet lengths, different wrappers get called.
+* It has been measured that 1 packet is faster than 2, 2 packets are faster
+* than 3, 3 packets are faster than 4, and so on.
+* It has also been measured that 6 = 4+2 packets are faster than 8, and that
+* 7 packets are processed faster than 8 packets.
+*
+* If these assumptions do not hold, it is easy to implement
+* the right function and reference it in kasumiWrapperArray.
+*
+*******************************************************************************/
+static void
+kasumi_f8_1_buffer_wrapper(const uint16_t *context, uint16_t **data)
+{
+ kasumi_1_block(context, data[0]);
+}
+
+static void
+kasumi_f8_2_buffer_wrapper(const uint16_t *context, uint16_t **data)
+{
+ kasumi_2_blocks(context, data[0], data[1]);
+}
+
+static void
+kasumi_f8_3_buffer_wrapper(const uint16_t *context, uint16_t **data)
+{
+ kasumi_3_blocks(context, data[0], data[1], data[2]);
+}
+
+static void
+kasumi_f8_5_buffer_wrapper(const uint16_t *context, uint16_t **data)
+{
+ kasumi_4_blocks(context, &data[0]);
+ kasumi_1_block(context, data[4]);
+}
+
+static void
+kasumi_f8_6_buffer_wrapper(const uint16_t *context, uint16_t **data)
+{
+ /* It is also assumed that 6 = 4+2 packets are faster than 8 */
+ kasumi_4_blocks(context, &data[0]);
+ kasumi_2_blocks(context, data[4], data[5]);
+}
+
+static void
+kasumi_f8_7_buffer_wrapper(const uint16_t *context, uint16_t **data)
+{
+ kasumi_4_blocks(context, &data[0]);
+ kasumi_3_blocks(context, data[4], data[5], data[6]);
+}
+
+static void
+kasumi_f8_9_buffer_wrapper(const uint16_t *context, uint16_t **data)
+{
+
+ kasumi_8_blocks(context, &data[0]);
+ kasumi_1_block(context, data[8]);
+}
+
+static void
+kasumi_f8_10_buffer_wrapper(const uint16_t *context, uint16_t **data)
+{
+ kasumi_8_blocks(context, &data[0]);
+ kasumi_2_blocks(context, data[8], data[9]);
+}
+
+static void
+kasumi_f8_11_buffer_wrapper(const uint16_t *context, uint16_t **data)
+{
+ kasumi_8_blocks(context, &data[0]);
+ kasumi_3_blocks(context, data[8], data[9], data[10]);
+}
+
+static void
+kasumi_f8_12_buffer_wrapper(const uint16_t *context, uint16_t **data)
+{
+ kasumi_8_blocks(context, &data[0]);
+ kasumi_4_blocks(context, &data[8]);
+}
+
+static void
+kasumi_f8_13_buffer_wrapper(const uint16_t *context, uint16_t **data)
+{
+
+ kasumi_8_blocks(context, &data[0]);
+ kasumi_4_blocks(context, &data[8]);
+ kasumi_1_block(context, data[12]);
+}
+
+static void
+kasumi_f8_14_buffer_wrapper(const uint16_t *context, uint16_t **data)
+{
+ kasumi_8_blocks(context, &data[0]);
+ kasumi_4_blocks(context, &data[8]);
+ kasumi_2_blocks(context, data[12], data[13]);
+}
+
+static void
+kasumi_f8_15_buffer_wrapper(const uint16_t *context, uint16_t **data)
+{
+ kasumi_8_blocks(context, &data[0]);
+ kasumi_4_blocks(context, &data[8]);
+ kasumi_3_blocks(context, data[12], data[13], data[14]);
+}
+
+static void
+kasumi_f8_16_buffer_wrapper(const uint16_t *context, uint16_t **data)
+{
+ kasumi_8_blocks(context, &data[0]);
+ kasumi_8_blocks(context, &data[8]);
+}
+
+typedef void (*kasumi_wrapper_t)(const uint16_t *, uint16_t **);
+
+static kasumi_wrapper_t kasumiWrapperArray[] = {
+ NULL,
+ kasumi_f8_1_buffer_wrapper,
+ kasumi_f8_2_buffer_wrapper,
+ kasumi_f8_3_buffer_wrapper,
+ kasumi_4_blocks,
+ kasumi_f8_5_buffer_wrapper,
+ kasumi_f8_6_buffer_wrapper,
+ kasumi_f8_7_buffer_wrapper,
+ kasumi_8_blocks,
+ kasumi_f8_9_buffer_wrapper,
+ kasumi_f8_10_buffer_wrapper,
+ kasumi_f8_11_buffer_wrapper,
+ kasumi_f8_12_buffer_wrapper,
+ kasumi_f8_13_buffer_wrapper,
+ kasumi_f8_14_buffer_wrapper,
+ kasumi_f8_15_buffer_wrapper,
+ kasumi_f8_16_buffer_wrapper};
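+
+/*
+ * Dispatch sketch (illustrative only; the helper name below is made up):
+ * the per-count wrappers are reached through the table above, mirroring
+ * the calls made from kasumi_f8_n_buffer() further down. Index 0 is NULL,
+ * so a caller must guarantee 1 <= count <= 16.
+ */
+static inline void
+kasumi_example_dispatch(const uint16_t *context, uint16_t **blocks,
+ const uint32_t count)
+{
+ if (count >= 1 && count <= 16)
+ kasumiWrapperArray[count](context, blocks);
+}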
+
+/*---------------------------------------------------------------------
+* kasumi_key_schedule_sk()
+* Build the key schedule. Most "key" operations use 16-bit words.
+*
+* Context is a flat array of 64 uint16_t values. The context is built in the
+* same order it will be used.
+*---------------------------------------------------------------------*/
+static inline void
+kasumi_key_schedule_sk(uint16_t *context, const void *pKey)
+{
+
+ /* Kasumi constants*/
+ static const uint16_t C[] = {0x0123, 0x4567, 0x89AB, 0xCDEF,
+ 0xFEDC, 0xBA98, 0x7654, 0x3210};
+
+ uint16_t k[8], kprime[8], n;
+ const uint8_t *pk = (const uint8_t *) pKey;
+
+ /* Build K[] and K'[] keys */
+ for (n = 0; n < 8; n++, pk += 2) {
+ k[n] = (pk[0] << 8) + pk[1];
+ kprime[n] = k[n] ^ C[n];
+ }
+
+ /*
+ * Finally construct the various sub keys (Kli1, KlO, ...) in the right
+ * order for easy usage at run-time
+ */
+ for (n = 0; n < 8; n++) {
+ context[0] = ROL16(k[n], 1);
+ context[1] = kprime[(n + 2) & 0x7];
+ context[2] = ROL16(k[(n + 1) & 0x7], 5);
+ context[3] = kprime[(n + 4) & 0x7];
+ context[4] = ROL16(k[(n + 5) & 0x7], 8);
+ context[5] = kprime[(n + 3) & 0x7];
+ context[6] = ROL16(k[(n + 6) & 0x7], 13);
+ context[7] = kprime[(n + 7) & 0x7];
+ context += 8;
+ }
+#ifdef SAFE_DATA
+ clear_mem(k, sizeof(k));
+ clear_mem(kprime, sizeof(kprime));
+#endif
+}
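+
+/*
+ * Layout sketch (illustrative; the helper name is made up): the flat
+ * 64-entry context built above is consumed 8 words at a time by
+ * kasumi_1_block() and friends, so round r (0..7) reads entries
+ * context[8*r] to context[8*r + 7].
+ */
+static inline const uint16_t *
+kasumi_example_round_keys(const uint16_t *context, const unsigned int round)
+{
+ return context + 8 * round;
+}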
+
+/*---------------------------------------------------------------------
+* kasumi_compute_sched()
+* Generic kasumi key sched init function.
+*
+*---------------------------------------------------------------------*/
+static inline int
+kasumi_compute_sched(const uint8_t modifier,
+ const void *const pKey, void *pCtx)
+{
+#ifdef SAFE_PARAM
+ /* Check for NULL pointers */
+ if (pKey == NULL || pCtx == NULL)
+ return -1;
+#endif
+ uint32_t i = 0;
+ const uint8_t *const key = (const uint8_t * const)pKey;
+ uint8_t ModKey[KASUMI_KEY_SIZE] = {0}; /* Modified key */
+ kasumi_key_sched_t *pLocalCtx = (kasumi_key_sched_t *)pCtx;
+
+ /* Construct the modified key*/
+ for (i = 0; i < KASUMI_KEY_SIZE; i++)
+ ModKey[i] = (uint8_t)key[i] ^ modifier;
+
+ kasumi_key_schedule_sk(pLocalCtx->sk16, pKey);
+ kasumi_key_schedule_sk(pLocalCtx->msk16, ModKey);
+
+#ifdef SAFE_DATA
+ clear_mem(ModKey, sizeof(ModKey));
+ CLEAR_SCRATCH_GPS();
+ CLEAR_SCRATCH_SIMD_REGS();
+#endif
+ return 0;
+}
+
+/*---------------------------------------------------------------------
+* kasumi_key_sched_size()
+* Get the size of a kasumi key sched context.
+*
+*---------------------------------------------------------------------*/
+static inline size_t
+kasumi_key_sched_size(void)
+{
+ /*
+ * There are two keys that need to be scheduled: the original one and
+ * the modified one (xored with the relevant modifier)
+ */
+ return sizeof(kasumi_key_sched_t);
+}
+
+/*---------------------------------------------------------------------
+* kasumi_init_f8_key_sched()
+* Compute the kasumi f8 key schedule.
+*
+*---------------------------------------------------------------------*/
+
+static inline int
+kasumi_init_f8_key_sched(const void *const pKey,
+ kasumi_key_sched_t *pCtx)
+{
+ return kasumi_compute_sched(0x55, pKey, pCtx);
+}
+
+/*---------------------------------------------------------------------
+* kasumi_init_f9_key_sched()
+* Compute the kasumi f9 key schedule.
+*
+*---------------------------------------------------------------------*/
+
+static inline int
+kasumi_init_f9_key_sched(const void *const pKey,
+ kasumi_key_sched_t *pCtx)
+{
+ return kasumi_compute_sched(0xAA, pKey, pCtx);
+}
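+
+/*
+ * Usage sketch (illustrative only; the helper name and the assumption that
+ * the caller supplies a KASUMI_KEY_SIZE byte key are part of the example):
+ * schedule a key and run one 64-bit block through the cipher.
+ */
+static inline int
+kasumi_example_single_block(const void *key, uint16_t block[4])
+{
+ kasumi_key_sched_t sched;
+
+ if (kasumi_init_f8_key_sched(key, &sched) != 0)
+ return -1;
+
+ /* sk16 holds the schedule for the key itself,
+ * msk16 the schedule for the key XOR'd with the f8 modifier */
+ kasumi_1_block(sched.sk16, block);
+ return 0;
+}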
+
+size_t
+kasumi_key_sched_size_sse(void);
+
+int
+kasumi_init_f8_key_sched_sse(const void *pKey, kasumi_key_sched_t *pCtx);
+
+int
+kasumi_init_f9_key_sched_sse(const void *pKey, kasumi_key_sched_t *pCtx);
+
+size_t
+kasumi_key_sched_size_avx(void);
+
+int
+kasumi_init_f8_key_sched_avx(const void *pKey, kasumi_key_sched_t *pCtx);
+
+int
+kasumi_init_f9_key_sched_avx(const void *pKey, kasumi_key_sched_t *pCtx);
+
+
+static inline void
+kasumi_f8_1_buffer(const kasumi_key_sched_t *pCtx, const uint64_t IV,
+ const void *pIn, void *pOut,
+ const uint32_t length)
+{
+ uint32_t blkcnt;
+ kasumi_union_t a, b; /* the modifier */
+ SafeBuf safeInBuf;
+ const uint8_t *pBufferIn = (const uint8_t *) pIn;
+ uint8_t *pBufferOut = (uint8_t *) pOut;
+ uint32_t lengthInBytes = length;
+
+ /* IV Endianity */
+ a.b64[0] = BSWAP64(IV);
+
+ /* First encryption to create modifier */
+ kasumi_1_block(pCtx->msk16, a.b16);
+
+ /* Final initialisation steps */
+ blkcnt = 0;
+ b.b64[0] = a.b64[0];
+
+ /* Now run the block cipher */
+ while (lengthInBytes) {
+ /* KASUMI it to produce the next block of keystream */
+ kasumi_1_block(pCtx->sk16, b.b16);
+
+ if (lengthInBytes > KASUMI_BLOCK_SIZE) {
+ pBufferIn = xor_keystrm_rev(pBufferOut, pBufferIn,
+ b.b64[0]);
+ pBufferOut += KASUMI_BLOCK_SIZE;
+ /* loop variant */
+ /* done another 64 bits */
+ lengthInBytes -= KASUMI_BLOCK_SIZE;
+
+ /* apply the modifier and update the block count */
+ b.b64[0] ^= a.b64[0];
+ b.b16[0] ^= (uint16_t)++blkcnt;
+ } else if (lengthInBytes < KASUMI_BLOCK_SIZE) {
+ /* end of the loop, handle the last bytes */
+ memcpy_keystrm(safeInBuf.b8, pBufferIn,
+ lengthInBytes);
+ xor_keystrm_rev(b.b8, safeInBuf.b8, b.b64[0]);
+ memcpy_keystrm(pBufferOut, b.b8, lengthInBytes);
+ lengthInBytes = 0;
+ /* lengthInBytes == KASUMI_BLOCK_SIZE */
+ } else {
+ xor_keystrm_rev(pBufferOut, pBufferIn, b.b64[0]);
+ lengthInBytes = 0;
+ }
+ }
+#ifdef SAFE_DATA
+ /* Clear sensitive data in stack */
+ clear_mem(&a, sizeof(a));
+ clear_mem(&b, sizeof(b));
+ clear_mem(&safeInBuf, sizeof(safeInBuf));
+#endif
+}
+
+static inline void
+preserve_bits(kasumi_union_t *c,
+ const uint8_t *pcBufferOut, const uint8_t *pcBufferIn,
+ SafeBuf *safeOutBuf, SafeBuf *safeInBuf,
+ const uint8_t bit_len, const uint8_t byte_len)
+{
+ const uint64_t mask = UINT64_MAX << (KASUMI_BLOCK_SIZE * 8 - bit_len);
+
+ /* Clear the last bits of the keystream and the input
+ * (input only in out-of-place case) */
+ c->b64[0] &= mask;
+ if (pcBufferIn != pcBufferOut) {
+ const uint64_t swapMask = BSWAP64(mask);
+
+ safeInBuf->b64 &= swapMask;
+
+ /*
+ * Merge the last bits from the output (which must be preserved)
+ * into the keystream, which is then XOR'd with the input
+ * (whose last bits are 0, so the output bits are maintained)
+ */
+ memcpy_keystrm(safeOutBuf->b8, pcBufferOut, byte_len);
+ c->b64[0] |= BSWAP64(safeOutBuf->b64 & ~swapMask);
+ }
+}
+
+static inline void
+kasumi_f8_1_buffer_bit(const kasumi_key_sched_t *pCtx, const uint64_t IV,
+ const void *pIn, void *pOut,
+ const uint32_t lengthInBits,
+ const uint32_t offsetInBits)
+{
+ const uint8_t *pBufferIn = (const uint8_t *) pIn;
+ uint8_t *pBufferOut = (uint8_t *) pOut;
+ uint32_t cipherLengthInBits = lengthInBits;
+ uint32_t blkcnt;
+ uint64_t shiftrem = 0;
+ kasumi_union_t a, b, c; /* the modifier */
+ const uint8_t *pcBufferIn = pBufferIn + (offsetInBits / 8);
+ uint8_t *pcBufferOut = pBufferOut + (offsetInBits / 8);
+ /* Offset into the first byte (0 - 7 bits) */
+ uint32_t remainOffset = offsetInBits % 8;
+ uint32_t byteLength = (cipherLengthInBits + 7) / 8;
+ SafeBuf safeOutBuf;
+ SafeBuf safeInBuf;
+
+ /* IV Endianity */
+ a.b64[0] = BSWAP64(IV);
+
+ /* First encryption to create modifier */
+ kasumi_1_block(pCtx->msk16, a.b16);
+
+ /* Final initialisation steps */
+ blkcnt = 0;
+ b.b64[0] = a.b64[0];
+ /* Now run the block cipher */
+
+ /* Start with potential partial block (due to offset and length) */
+ kasumi_1_block(pCtx->sk16, b.b16);
+ c.b64[0] = b.b64[0] >> remainOffset;
+ /* Only one block to encrypt */
+ if (cipherLengthInBits < (64 - remainOffset)) {
+ byteLength = (cipherLengthInBits + 7) / 8;
+ memcpy_keystrm(safeInBuf.b8, pcBufferIn, byteLength);
+ /*
+ * If operation is Out-of-place and there is offset
+ * to be applied, "remainOffset" bits from the output buffer
+ * need to be preserved (only applicable to first byte,
+ * since remainOffset is up to 7 bits)
+ */
+ if ((pIn != pOut) && remainOffset) {
+ const uint8_t mask8 =
+ (const uint8_t)(1 << (8 - remainOffset)) - 1;
+
+ safeInBuf.b8[0] = (safeInBuf.b8[0] & mask8) |
+ (pcBufferOut[0] & ~mask8);
+ }
+
+ /* If last byte is a partial byte, the last bits of the output
+ * need to be preserved */
+ const uint8_t bitlen_with_off = remainOffset +
+ cipherLengthInBits;
+
+ if ((bitlen_with_off & 0x7) != 0) {
+ preserve_bits(&c, pcBufferOut, pcBufferIn, &safeOutBuf,
+ &safeInBuf, bitlen_with_off, byteLength);
+ }
+ xor_keystrm_rev(safeOutBuf.b8, safeInBuf.b8, c.b64[0]);
+ memcpy_keystrm(pcBufferOut, safeOutBuf.b8, byteLength);
+ return;
+ }
+
+ /*
+ * If operation is Out-of-place and there is offset
+ * to be applied, "remainOffset" bits from the output buffer
+ * need to be preserved (only applicable to first byte,
+ * since remainOffset is up to 7 bits)
+ */
+ if ((pIn != pOut) && remainOffset) {
+ const uint8_t mask8 =
+ (const uint8_t)(1 << (8 - remainOffset)) - 1;
+
+ memcpy_keystrm(safeInBuf.b8, pcBufferIn, 8);
+ safeInBuf.b8[0] = (safeInBuf.b8[0] & mask8) |
+ (pcBufferOut[0] & ~mask8);
+ xor_keystrm_rev(pcBufferOut, safeInBuf.b8, c.b64[0]);
+ pcBufferIn += KASUMI_BLOCK_SIZE;
+ } else {
+ /* At least 64 bits to produce (including offset) */
+ pcBufferIn = xor_keystrm_rev(pcBufferOut, pcBufferIn, c.b64[0]);
+ }
+
+ if (remainOffset != 0)
+ shiftrem = b.b64[0] << (64 - remainOffset);
+ cipherLengthInBits -= KASUMI_BLOCK_SIZE * 8 - remainOffset;
+ pcBufferOut += KASUMI_BLOCK_SIZE;
+ /* apply the modifier and update the block count */
+ b.b64[0] ^= a.b64[0];
+ b.b16[0] ^= (uint16_t)++blkcnt;
+
+ while (cipherLengthInBits) {
+ /* KASUMI it to produce the next block of keystream */
+ kasumi_1_block(pCtx->sk16, b.b16);
+ c.b64[0] = (b.b64[0] >> remainOffset) | shiftrem;
+ if (remainOffset != 0)
+ shiftrem = b.b64[0] << (64 - remainOffset);
+ if (cipherLengthInBits >= KASUMI_BLOCK_SIZE * 8) {
+ pcBufferIn = xor_keystrm_rev(pcBufferOut,
+ pcBufferIn, c.b64[0]);
+ cipherLengthInBits -= KASUMI_BLOCK_SIZE * 8;
+ pcBufferOut += KASUMI_BLOCK_SIZE;
+ /* loop variant */
+
+ /* apply the modifier and update the block count */
+ b.b64[0] ^= a.b64[0];
+ b.b16[0] ^= (uint16_t)++blkcnt;
+ } else {
+ /* end of the loop, handle the last bytes */
+ byteLength = (cipherLengthInBits + 7) / 8;
+ memcpy_keystrm(safeInBuf.b8, pcBufferIn,
+ byteLength);
+
+ /* If last byte is a partial byte, the last bits
+ * of the output need to be preserved */
+ if ((cipherLengthInBits & 0x7) != 0)
+ preserve_bits(&c, pcBufferOut, pcBufferIn,
+ &safeOutBuf, &safeInBuf,
+ cipherLengthInBits, byteLength);
+ xor_keystrm_rev(safeOutBuf.b8, safeInBuf.b8, c.b64[0]);
+ memcpy_keystrm(pcBufferOut, safeOutBuf.b8, byteLength);
+ cipherLengthInBits = 0;
+ }
+ }
+#ifdef SAFE_DATA
+ /* Clear sensitive data in stack */
+ clear_mem(&a, sizeof(a));
+ clear_mem(&b, sizeof(b));
+ clear_mem(&c, sizeof(c));
+ clear_mem(&safeInBuf, sizeof(safeInBuf));
+ clear_mem(&safeOutBuf, sizeof(safeOutBuf));
+#endif
+}
+
+static inline void
+kasumi_f8_2_buffer(const kasumi_key_sched_t *pCtx,
+ const uint64_t IV1, const uint64_t IV2,
+ const void *pIn1, void *pOut1,
+ const uint32_t length1,
+ const void *pIn2, void *pOut2,
+ const uint32_t length2)
+{
+ const uint8_t *pBufferIn1 = (const uint8_t *) pIn1;
+ uint8_t *pBufferOut1 = (uint8_t *) pOut1;
+ uint32_t lengthInBytes1 = length1;
+ const uint8_t *pBufferIn2 = (const uint8_t *) pIn2;
+ uint8_t *pBufferOut2 = (uint8_t *) pOut2;
+ uint32_t lengthInBytes2 = length2;
+ uint32_t blkcnt, length;
+ kasumi_union_t a1, b1; /* the modifier */
+ kasumi_union_t a2, b2; /* the modifier */
+ SafeBuf safeInBuf;
+
+ kasumi_union_t temp;
+
+ /* IV Endianity */
+ a1.b64[0] = BSWAP64(IV1);
+ a2.b64[0] = BSWAP64(IV2);
+
+ kasumi_2_blocks(pCtx->msk16, a1.b16, a2.b16);
+
+ /* Final initialisation steps */
+ blkcnt = 0;
+ b1.b64[0] = a1.b64[0];
+ b2.b64[0] = a2.b64[0];
+
+ /* check which packet is longer and save "common" shortest length */
+ if (lengthInBytes1 > lengthInBytes2)
+ length = lengthInBytes2;
+ else
+ length = lengthInBytes1;
+
+ /* Round down to a whole number of qwords (mask off QWORDLENGTHINBYTES - 1) */
+ length &= ~7;
+ lengthInBytes1 -= length;
+ lengthInBytes2 -= length;
+
+ /* Now run the block cipher for common packet length, a whole number of
+ * blocks */
+ while (length) {
+ /* KASUMI it to produce the next block of keystream for both
+ * packets */
+ kasumi_2_blocks(pCtx->sk16, b1.b16, b2.b16);
+
+ /* xor and write keystream */
+ pBufferIn1 =
+ xor_keystrm_rev(pBufferOut1, pBufferIn1, b1.b64[0]);
+ pBufferOut1 += KASUMI_BLOCK_SIZE;
+ pBufferIn2 =
+ xor_keystrm_rev(pBufferOut2, pBufferIn2, b2.b64[0]);
+ pBufferOut2 += KASUMI_BLOCK_SIZE;
+ /* loop variant */
+ length -= KASUMI_BLOCK_SIZE; /* done another 64 bits */
+
+ /* apply the modifier and update the block count */
+ b1.b64[0] ^= a1.b64[0];
+ b1.b16[0] ^= (uint16_t)++blkcnt;
+ b2.b64[0] ^= a2.b64[0];
+ b2.b16[0] ^= (uint16_t)blkcnt;
+ }
+
+ /*
+ * Process common part at end of first packet and second packet.
+ * One of the packets has a length less than 8 bytes.
+ */
+ if (lengthInBytes1 > 0 && lengthInBytes2 > 0) {
+ /* final round for 1 of the packets */
+ kasumi_2_blocks(pCtx->sk16, b1.b16, b2.b16);
+ if (lengthInBytes1 > KASUMI_BLOCK_SIZE) {
+ pBufferIn1 = xor_keystrm_rev(pBufferOut1,
+ pBufferIn1, b1.b64[0]);
+ pBufferOut1 += KASUMI_BLOCK_SIZE;
+ b1.b64[0] ^= a1.b64[0];
+ b1.b16[0] ^= (uint16_t)++blkcnt;
+ lengthInBytes1 -= KASUMI_BLOCK_SIZE;
+ } else if (lengthInBytes1 < KASUMI_BLOCK_SIZE) {
+ memcpy_keystrm(safeInBuf.b8, pBufferIn1,
+ lengthInBytes1);
+ xor_keystrm_rev(temp.b8, safeInBuf.b8, b1.b64[0]);
+ memcpy_keystrm(pBufferOut1, temp.b8,
+ lengthInBytes1);
+ lengthInBytes1 = 0;
+ /* lengthInBytes1 == KASUMI_BLOCK_SIZE */
+ } else {
+ xor_keystrm_rev(pBufferOut1, pBufferIn1, b1.b64[0]);
+ lengthInBytes1 = 0;
+ }
+ if (lengthInBytes2 > KASUMI_BLOCK_SIZE) {
+ pBufferIn2 = xor_keystrm_rev(pBufferOut2,
+ pBufferIn2, b2.b64[0]);
+ pBufferOut2 += KASUMI_BLOCK_SIZE;
+ b2.b64[0] ^= a2.b64[0];
+ b2.b16[0] ^= (uint16_t)++blkcnt;
+ lengthInBytes2 -= KASUMI_BLOCK_SIZE;
+ } else if (lengthInBytes2 < KASUMI_BLOCK_SIZE) {
+ memcpy_keystrm(safeInBuf.b8, pBufferIn2,
+ lengthInBytes2);
+ xor_keystrm_rev(temp.b8, safeInBuf.b8, b2.b64[0]);
+ memcpy_keystrm(pBufferOut2, temp.b8,
+ lengthInBytes2);
+ lengthInBytes2 = 0;
+ /* lengthInBytes2 == KASUMI_BLOCK_SIZE */
+ } else {
+ xor_keystrm_rev(pBufferOut2, pBufferIn2, b2.b64[0]);
+ lengthInBytes2 = 0;
+ }
+ }
+
+ if (lengthInBytes1 < lengthInBytes2) {
+ /* packet 2 is not completed since lengthInBytes2 > 0
+ * packet 1 has less than 8 bytes.
+ */
+ if (lengthInBytes1) {
+ kasumi_1_block(pCtx->sk16, b1.b16);
+ xor_keystrm_rev(pBufferOut1, pBufferIn1, b1.b64[0]);
+ }
+ /* move pointers to right variables for packet 1 */
+ lengthInBytes1 = lengthInBytes2;
+ b1.b64[0] = b2.b64[0];
+ a1.b64[0] = a2.b64[0];
+ pBufferIn1 = pBufferIn2;
+ pBufferOut1 = pBufferOut2;
+ } else { /* lengthInBytes1 >= lengthInBytes2 */
+ if (!lengthInBytes1)
+ /* both packets are completed */
+ return;
+ /* process the remainder of packet 2 */
+ if (lengthInBytes2) {
+ kasumi_1_block(pCtx->sk16, b2.b16);
+ xor_keystrm_rev(pBufferOut2, pBufferIn2, b2.b64[0]);
+ }
+ /* packet 1 is not completed */
+ }
+
+ /* process the length difference between pkt 1 and pkt 2 */
+ while (lengthInBytes1) {
+ /* KASUMI it to produce the next block of keystream */
+ kasumi_1_block(pCtx->sk16, b1.b16);
+
+ if (lengthInBytes1 > KASUMI_BLOCK_SIZE) {
+ pBufferIn1 = xor_keystrm_rev(pBufferOut1,
+ pBufferIn1, b1.b64[0]);
+ pBufferOut1 += KASUMI_BLOCK_SIZE;
+ /* loop variant */
+ lengthInBytes1 -= KASUMI_BLOCK_SIZE;
+
+ /* apply the modifier and update the block count */
+ b1.b64[0] ^= a1.b64[0];
+ b1.b16[0] ^= (uint16_t)++blkcnt;
+ } else if (lengthInBytes1 < KASUMI_BLOCK_SIZE) {
+ /* end of the loop, handle the last bytes */
+ memcpy_keystrm(safeInBuf.b8, pBufferIn1,
+ lengthInBytes1);
+ xor_keystrm_rev(temp.b8, safeInBuf.b8, b1.b64[0]);
+ memcpy_keystrm(pBufferOut1, temp.b8,
+ lengthInBytes1);
+ lengthInBytes1 = 0;
+ /* lengthInBytes1 == KASUMI_BLOCK_SIZE */
+ } else {
+ xor_keystrm_rev(pBufferOut1, pBufferIn1, b1.b64[0]);
+ lengthInBytes1 = 0;
+ }
+ }
+#ifdef SAFE_DATA
+ /* Clear sensitive data in stack */
+ clear_mem(&a1, sizeof(a1));
+ clear_mem(&b1, sizeof(b1));
+ clear_mem(&a2, sizeof(a2));
+ clear_mem(&b2, sizeof(b2));
+ clear_mem(&temp, sizeof(temp));
+ clear_mem(&safeInBuf, sizeof(safeInBuf));
+#endif
+}
+
+static inline void
+kasumi_f8_3_buffer(const kasumi_key_sched_t *pCtx,
+ const uint64_t IV1, const uint64_t IV2, const uint64_t IV3,
+ const void *pIn1, void *pOut1,
+ const void *pIn2, void *pOut2,
+ const void *pIn3, void *pOut3,
+ const uint32_t length)
+{
+ const uint8_t *pBufferIn1 = (const uint8_t *) pIn1;
+ uint8_t *pBufferOut1 = (uint8_t *) pOut1;
+ const uint8_t *pBufferIn2 = (const uint8_t *) pIn2;
+ uint8_t *pBufferOut2 = (uint8_t *) pOut2;
+ const uint8_t *pBufferIn3 = (const uint8_t *) pIn3;
+ uint8_t *pBufferOut3 = (uint8_t *) pOut3;
+ uint32_t lengthInBytes = length;
+ uint32_t blkcnt;
+ kasumi_union_t a1, b1; /* the modifier */
+ kasumi_union_t a2, b2; /* the modifier */
+ kasumi_union_t a3, b3; /* the modifier */
+ SafeBuf safeInBuf1, safeInBuf2, safeInBuf3;
+
+ /* IV Endianity */
+ a1.b64[0] = BSWAP64(IV1);
+ a2.b64[0] = BSWAP64(IV2);
+ a3.b64[0] = BSWAP64(IV3);
+
+ kasumi_3_blocks(pCtx->msk16, a1.b16, a2.b16, a3.b16);
+
+ /* Final initialisation steps */
+ blkcnt = 0;
+ b1.b64[0] = a1.b64[0];
+ b2.b64[0] = a2.b64[0];
+ b3.b64[0] = a3.b64[0];
+
+ /* Now run the block cipher for common packet lengthInBytes, a whole
+ * number of blocks */
+ while (lengthInBytes) {
+ /* KASUMI it to produce the next block of keystream for all the
+ * packets */
+ kasumi_3_blocks(pCtx->sk16, b1.b16, b2.b16, b3.b16);
+
+ if (lengthInBytes > KASUMI_BLOCK_SIZE) {
+ /* xor and write keystream */
+ pBufferIn1 = xor_keystrm_rev(pBufferOut1,
+ pBufferIn1, b1.b64[0]);
+ pBufferOut1 += KASUMI_BLOCK_SIZE;
+ pBufferIn2 = xor_keystrm_rev(pBufferOut2,
+ pBufferIn2, b2.b64[0]);
+ pBufferOut2 += KASUMI_BLOCK_SIZE;
+ pBufferIn3 = xor_keystrm_rev(pBufferOut3,
+ pBufferIn3, b3.b64[0]);
+ pBufferOut3 += KASUMI_BLOCK_SIZE;
+ /* loop variant */
+ lengthInBytes -= KASUMI_BLOCK_SIZE;
+
+ /* apply the modifier and update the block count */
+ b1.b64[0] ^= a1.b64[0];
+ b1.b16[0] ^= (uint16_t)++blkcnt;
+ b2.b64[0] ^= a2.b64[0];
+ b2.b16[0] ^= (uint16_t)blkcnt;
+ b3.b64[0] ^= a3.b64[0];
+ b3.b16[0] ^= (uint16_t)blkcnt;
+ } else if (lengthInBytes < KASUMI_BLOCK_SIZE) {
+ /* end of the loop, handle the last bytes */
+ memcpy_keystrm(safeInBuf1.b8, pBufferIn1,
+ lengthInBytes);
+ xor_keystrm_rev(b1.b8, safeInBuf1.b8, b1.b64[0]);
+ memcpy_keystrm(pBufferOut1, b1.b8, lengthInBytes);
+
+ memcpy_keystrm(safeInBuf2.b8, pBufferIn2,
+ lengthInBytes);
+ xor_keystrm_rev(b2.b8, safeInBuf2.b8, b2.b64[0]);
+ memcpy_keystrm(pBufferOut2, b2.b8, lengthInBytes);
+
+ memcpy_keystrm(safeInBuf3.b8, pBufferIn3,
+ lengthInBytes);
+ xor_keystrm_rev(b3.b8, safeInBuf3.b8, b3.b64[0]);
+ memcpy_keystrm(pBufferOut3, b3.b8, lengthInBytes);
+ lengthInBytes = 0;
+ /* lengthInBytes == KASUMI_BLOCK_SIZE */
+ } else {
+ xor_keystrm_rev(pBufferOut1, pBufferIn1, b1.b64[0]);
+ xor_keystrm_rev(pBufferOut2, pBufferIn2, b2.b64[0]);
+ xor_keystrm_rev(pBufferOut3, pBufferIn3, b3.b64[0]);
+ lengthInBytes = 0;
+ }
+ }
+#ifdef SAFE_DATA
+ /* Clear sensitive data in stack */
+ clear_mem(&a1, sizeof(a1));
+ clear_mem(&b1, sizeof(b1));
+ clear_mem(&a2, sizeof(a2));
+ clear_mem(&b2, sizeof(b2));
+ clear_mem(&a3, sizeof(a3));
+ clear_mem(&b3, sizeof(b3));
+ clear_mem(&safeInBuf1, sizeof(safeInBuf1));
+ clear_mem(&safeInBuf2, sizeof(safeInBuf2));
+ clear_mem(&safeInBuf3, sizeof(safeInBuf3));
+#endif
+}
+
+/*---------------------------------------------------------
+* @description
+* Kasumi F8 4 packet:
+* Four packets enc/dec with the same key schedule.
+* The 4 IVs are independent and are passed as individual values.
+* The packets are separate, the data length is common.
+*---------------------------------------------------------*/
+
+static inline void
+kasumi_f8_4_buffer(const kasumi_key_sched_t *pCtx, const uint64_t IV1,
+ const uint64_t IV2, const uint64_t IV3, const uint64_t IV4,
+ const void *pIn1, void *pOut1,
+ const void *pIn2, void *pOut2,
+ const void *pIn3, void *pOut3,
+ const void *pIn4, void *pOut4,
+ const uint32_t length)
+{
+ const uint8_t *pBufferIn1 = (const uint8_t *) pIn1;
+ uint8_t *pBufferOut1 = (uint8_t *) pOut1;
+ const uint8_t *pBufferIn2 = (const uint8_t *) pIn2;
+ uint8_t *pBufferOut2 = (uint8_t *) pOut2;
+ const uint8_t *pBufferIn3 = (const uint8_t *) pIn3;
+ uint8_t *pBufferOut3 = (uint8_t *) pOut3;
+ const uint8_t *pBufferIn4 = (const uint8_t *) pIn4;
+ uint8_t *pBufferOut4 = (uint8_t *) pOut4;
+ uint32_t lengthInBytes = length;
+ uint32_t blkcnt;
+ kasumi_union_t a1, b1; /* the modifier */
+ kasumi_union_t a2, b2; /* the modifier */
+ kasumi_union_t a3, b3; /* the modifier */
+ kasumi_union_t a4, b4; /* the modifier */
+ uint16_t *pTemp[4] = {b1.b16, b2.b16, b3.b16, b4.b16};
+ SafeBuf safeInBuf1, safeInBuf2, safeInBuf3, safeInBuf4;
+
+ /* IV Endianity */
+ b1.b64[0] = BSWAP64(IV1);
+ b2.b64[0] = BSWAP64(IV2);
+ b3.b64[0] = BSWAP64(IV3);
+ b4.b64[0] = BSWAP64(IV4);
+
+ kasumi_4_blocks(pCtx->msk16, pTemp);
+
+ /* Final initialisation steps */
+ blkcnt = 0;
+ a1.b64[0] = b1.b64[0];
+ a2.b64[0] = b2.b64[0];
+ a3.b64[0] = b3.b64[0];
+ a4.b64[0] = b4.b64[0];
+
+ /* Now run the block cipher for common packet lengthInBytes, a whole
+ * number of blocks */
+ while (lengthInBytes) {
+ /* KASUMI it to produce the next block of keystream for all the
+ * packets */
+ kasumi_4_blocks(pCtx->sk16, pTemp);
+
+ if (lengthInBytes > KASUMI_BLOCK_SIZE) {
+ /* xor and write keystream */
+ pBufferIn1 = xor_keystrm_rev(pBufferOut1,
+ pBufferIn1, b1.b64[0]);
+ pBufferOut1 += KASUMI_BLOCK_SIZE;
+ pBufferIn2 = xor_keystrm_rev(pBufferOut2,
+ pBufferIn2, b2.b64[0]);
+ pBufferOut2 += KASUMI_BLOCK_SIZE;
+ pBufferIn3 = xor_keystrm_rev(pBufferOut3,
+ pBufferIn3, b3.b64[0]);
+ pBufferOut3 += KASUMI_BLOCK_SIZE;
+ pBufferIn4 = xor_keystrm_rev(pBufferOut4,
+ pBufferIn4, b4.b64[0]);
+ pBufferOut4 += KASUMI_BLOCK_SIZE;
+ /* loop variant */
+ lengthInBytes -= KASUMI_BLOCK_SIZE;
+
+ /* apply the modifier and update the block count */
+ b1.b64[0] ^= a1.b64[0];
+ b1.b16[0] ^= (uint16_t)++blkcnt;
+ b2.b64[0] ^= a2.b64[0];
+ b2.b16[0] ^= (uint16_t)blkcnt;
+ b3.b64[0] ^= a3.b64[0];
+ b3.b16[0] ^= (uint16_t)blkcnt;
+ b4.b64[0] ^= a4.b64[0];
+ b4.b16[0] ^= (uint16_t)blkcnt;
+ } else if (lengthInBytes < KASUMI_BLOCK_SIZE) {
+ /* end of the loop, handle the last bytes */
+ memcpy_keystrm(safeInBuf1.b8, pBufferIn1,
+ lengthInBytes);
+ xor_keystrm_rev(b1.b8, safeInBuf1.b8, b1.b64[0]);
+ memcpy_keystrm(pBufferOut1, b1.b8, lengthInBytes);
+
+ memcpy_keystrm(safeInBuf2.b8, pBufferIn2,
+ lengthInBytes);
+ xor_keystrm_rev(b2.b8, safeInBuf2.b8, b2.b64[0]);
+ memcpy_keystrm(pBufferOut2, b2.b8, lengthInBytes);
+
+ memcpy_keystrm(safeInBuf3.b8, pBufferIn3,
+ lengthInBytes);
+ xor_keystrm_rev(b3.b8, safeInBuf3.b8, b3.b64[0]);
+ memcpy_keystrm(pBufferOut3, b3.b8, lengthInBytes);
+
+ memcpy_keystrm(safeInBuf4.b8, pBufferIn4,
+ lengthInBytes);
+ xor_keystrm_rev(b4.b8, safeInBuf4.b8, b4.b64[0]);
+ memcpy_keystrm(pBufferOut4, b4.b8, lengthInBytes);
+ lengthInBytes = 0;
+ /* lengthInBytes == KASUMI_BLOCK_SIZE */
+ } else {
+ xor_keystrm_rev(pBufferOut1, pBufferIn1, b1.b64[0]);
+ xor_keystrm_rev(pBufferOut2, pBufferIn2, b2.b64[0]);
+ xor_keystrm_rev(pBufferOut3, pBufferIn3, b3.b64[0]);
+ xor_keystrm_rev(pBufferOut4, pBufferIn4, b4.b64[0]);
+ lengthInBytes = 0;
+ }
+ }
+#ifdef SAFE_DATA
+ /* Clear sensitive data in stack */
+ clear_mem(&a1, sizeof(a1));
+ clear_mem(&b1, sizeof(b1));
+ clear_mem(&a2, sizeof(a2));
+ clear_mem(&b2, sizeof(b2));
+ clear_mem(&a3, sizeof(a3));
+ clear_mem(&b3, sizeof(b3));
+ clear_mem(&a4, sizeof(a4));
+ clear_mem(&b4, sizeof(b4));
+ clear_mem(&safeInBuf1, sizeof(safeInBuf1));
+ clear_mem(&safeInBuf2, sizeof(safeInBuf2));
+ clear_mem(&safeInBuf3, sizeof(safeInBuf3));
+ clear_mem(&safeInBuf4, sizeof(safeInBuf4));
+#endif
+}
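+
+/*
+ * Example call shape (illustrative; the helper and parameter names are
+ * placeholders): all four packets share the key schedule and the byte
+ * length, and each packet carries its own IV.
+ */
+static inline void
+kasumi_example_f8_4(const kasumi_key_sched_t *sched, const uint64_t iv[4],
+ const void *in[4], void *out[4], const uint32_t len_bytes)
+{
+ kasumi_f8_4_buffer(sched, iv[0], iv[1], iv[2], iv[3],
+ in[0], out[0], in[1], out[1],
+ in[2], out[2], in[3], out[3], len_bytes);
+}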
+
+/*---------------------------------------------------------
+* @description
+* Kasumi F8 2 packet:
+* Two packets enc/dec with the same key schedule.
+* The 2 IVs are independent and are passed as individual values.
+* The packets are separate and their lengths may differ.
+*---------------------------------------------------------*/
+/******************************************************************************
+* @description
+* Kasumi F8 n packet:
+* Performs F8 enc/dec on [n] packets. The operation is performed in-place.
+* The input IV's are passed in Big Endian format.
+* The KeySchedule is in Little Endian format.
+*******************************************************************************/
+
+static inline void
+kasumi_f8_n_buffer(const kasumi_key_sched_t *pKeySchedule, const uint64_t IV[],
+ const void * const pIn[], void *pOut[],
+ const uint32_t lengths[], const uint32_t bufCount)
+{
+ if (bufCount > 16) {
+ pOut[0] = NULL;
+ printf("dataCount too high (%d)\n", bufCount);
+ return;
+ }
+
+ uint32_t dataCount = bufCount;
+ kasumi_union_t A[NUM_PACKETS_16], temp[NUM_PACKETS_16], tempSort;
+ uint16_t *data[NUM_PACKETS_16];
+ uint32_t dataLen[NUM_PACKETS_16];
+ uint8_t *pDataOut[NUM_PACKETS_16] = {NULL};
+ const uint8_t *pDataIn[NUM_PACKETS_16] = {NULL};
+ const uint8_t *srctempbuff;
+ uint8_t *dsttempbuff;
+ uint32_t blkcnt = 0;
+ uint32_t len = 0;
+ uint32_t packet_idx, inner_idx, same_size_blocks;
+ int sortNeeded = 0, tempLen = 0;
+ SafeBuf safeInBuf;
+
+ memcpy((void *)dataLen, lengths, dataCount * sizeof(uint32_t));
+ memcpy((void *)pDataIn, pIn, dataCount * sizeof(void *));
+ memcpy((void *)pDataOut, pOut, dataCount * sizeof(void *));
+
+ /* save the IV to A for each packet */
+ packet_idx = dataCount;
+ while (packet_idx--) {
+ /*copy IV in reverse endian order as input IV is BE */
+ temp[packet_idx].b64[0] = BSWAP64(IV[packet_idx]);
+
+ /* set LE IV pointers */
+ data[packet_idx] = temp[packet_idx].b16;
+
+ /* check if all packets are sorted by decreasing length */
+ if (packet_idx > 0 &&
+ dataLen[packet_idx - 1] < dataLen[packet_idx])
+ /* this packet array is not correctly sorted */
+ sortNeeded = 1;
+ }
+
+ /* do 1st kasumi block on A with modified key, this overwrites A */
+ kasumiWrapperArray[dataCount](pKeySchedule->msk16, data);
+
+ if (sortNeeded) {
+ /* Sort packets in decreasing buffer size from [0] to the [n]th
+ * packet, so that buffer[0] contains the longest buffer and
+ * buffer[n] contains the shortest buffer.
+ * 4 arrays are swapped:
+ * - pointers to input buffers
+ * - pointers to output buffers
+ * - input IV values
+ * - input buffer lengths
+ */
+ packet_idx = dataCount;
+ while (packet_idx--) {
+ inner_idx = packet_idx;
+ while (inner_idx--) {
+ if (dataLen[packet_idx] > dataLen[inner_idx]) {
+
+ /* swap buffers to arrange in descending
+ * order from [0]. */
+ srctempbuff = pDataIn[packet_idx];
+ dsttempbuff = pDataOut[packet_idx];
+ tempSort = temp[packet_idx];
+ tempLen = dataLen[packet_idx];
+
+ pDataIn[packet_idx] =
+ pDataIn[inner_idx];
+ pDataOut[packet_idx] =
+ pDataOut[inner_idx];
+ temp[packet_idx] = temp[inner_idx];
+ dataLen[packet_idx] =
+ dataLen[inner_idx];
+
+ pDataIn[inner_idx] = srctempbuff;
+ pDataOut[inner_idx] = dsttempbuff;
+ temp[inner_idx] = tempSort;
+ dataLen[inner_idx] = tempLen;
+ }
+ } /* for inner packet idx (inner bubble-sort) */
+ } /* for outer packet idx (outer bubble-sort) */
+ } /* if sortNeeded */
+
+ packet_idx = dataCount;
+ while (packet_idx--)
+ /* copy the modifier for each packet */
+ A[packet_idx].b64[0] = temp[packet_idx].b64[0];
+
+ while (dataCount > 0) {
+ /* max number of blocks left depends on roundUp(smallest packet).
+ * The shortest stream to process is always stored at location
+ * [dataCount - 1]
+ */
+ same_size_blocks =
+ ((dataLen[dataCount - 1] + KASUMI_BLOCK_SIZE - 1) /
+ KASUMI_BLOCK_SIZE) -
+ blkcnt;
+
+ /* process streams of complete blocks */
+ while (same_size_blocks-- > 1) {
+ /* do kasumi block encryption */
+ kasumiWrapperArray[dataCount](pKeySchedule->sk16,
+ data);
+
+ packet_idx = dataCount;
+ while (packet_idx--)
+ xor_keystrm_rev(pDataOut[packet_idx] + len,
+ pDataIn[packet_idx] + len,
+ temp[packet_idx].b64[0]);
+
+ /* length already done since the start of the packets */
+ len += KASUMI_BLOCK_SIZE;
+
+ /* block idx is incremented and rewritten in the
+ * keystream */
+ blkcnt += 1;
+ packet_idx = dataCount;
+ while (packet_idx--) {
+ temp[packet_idx].b64[0] ^= A[packet_idx].b64[0];
+ temp[packet_idx].b16[0] ^= (uint16_t)blkcnt;
+ } /* for packet_idx */
+
+ } /* while same_size_blocks (iteration on multiple blocks) */
+
+ /* keystream for last block of all packets */
+ kasumiWrapperArray[dataCount](pKeySchedule->sk16, data);
+
+ /* process incomplete blocks without overwriting past the buffer
+ * end */
+ while ((dataCount > 0) &&
+ (dataLen[dataCount - 1] < (len + KASUMI_BLOCK_SIZE))) {
+
+ dataCount--;
+ /* incomplete block is copied into a temp buffer */
+ memcpy_keystrm(safeInBuf.b8, pDataIn[dataCount] + len,
+ dataLen[dataCount] - len);
+ xor_keystrm_rev(temp[dataCount].b8,
+ safeInBuf.b8,
+ temp[dataCount].b64[0]);
+
+ memcpy_keystrm(pDataOut[dataCount] + len,
+ temp[dataCount].b8,
+ dataLen[dataCount] - len);
+ } /* while dataCount */
+
+ /* Process last blocks: this can be the last complete block of the
+ * packets or, if KASUMI_SAFE_BUFFER is defined, the last block
+ * (complete or not) of the packets */
+ while ((dataCount > 0) &&
+ (dataLen[dataCount - 1] <= (len + KASUMI_BLOCK_SIZE))) {
+
+ dataCount--;
+ xor_keystrm_rev(pDataOut[dataCount] + len,
+ pDataIn[dataCount] + len,
+ temp[dataCount].b64[0]);
+ } /* while dataCount */
+ /* block idx is incremented and rewritten in the keystream */
+ blkcnt += 1;
+
+ /* for the following packets, this block is not the last one:
+ dataCount is not decremented */
+ packet_idx = dataCount;
+ while (packet_idx--) {
+
+ xor_keystrm_rev(pDataOut[packet_idx] + len,
+ pDataIn[packet_idx] + len,
+ temp[packet_idx].b64[0]);
+ temp[packet_idx].b64[0] ^= A[packet_idx].b64[0];
+ temp[packet_idx].b16[0] ^= (uint16_t)blkcnt;
+ } /* while packet_idx */
+
+ /* length already done since the start of the packets */
+ len += KASUMI_BLOCK_SIZE;
+
+ /* the remaining packets, if any, have now at least one valid
+ block, which might be complete or not */
+
+ } /* while (dataCount) */
+#ifdef SAFE_DATA
+ uint32_t i;
+
+ /* Clear sensitive data in stack */
+ for (i = 0; i < dataCount; i++) {
+ clear_mem(&A[i], sizeof(A[i]));
+ clear_mem(&temp[i], sizeof(temp[i]));
+ }
+ clear_mem(&tempSort, sizeof(tempSort));
+ clear_mem(&safeInBuf, sizeof(safeInBuf));
+#endif
+}
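+
+/*
+ * Usage sketch (illustrative; the helper name, lengths and IV values are
+ * invented): ciphering three packets of different lengths in one call.
+ * The IVs are passed big-endian, as noted above, and the buffers do not
+ * have to be sorted by length.
+ */
+static inline void
+kasumi_example_f8_n(const kasumi_key_sched_t *sched,
+ const void *in[3], void *out[3])
+{
+ const uint64_t ivs[3] = {0x0123456789ABCDEFULL,
+ 0x1122334455667788ULL,
+ 0x99AABBCCDDEEFF00ULL};
+ const uint32_t lens[3] = {64, 40, 9}; /* lengths in bytes */
+
+ kasumi_f8_n_buffer(sched, ivs, in, out, lens, 3);
+}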
+
+static inline void
+kasumi_f9_1_buffer(const kasumi_key_sched_t *pCtx, const void *dataIn,
+ const uint32_t length, void *pDigest)
+{
+ kasumi_union_t a, b, mask;
+ const uint64_t *pIn = (const uint64_t *)dataIn;
+ uint32_t lengthInBytes = length;
+ SafeBuf safeBuf;
+
+ /* Init */
+ a.b64[0] = 0;
+ b.b64[0] = 0;
+ mask.b64[0] = -1;
+
+ /* Now run kasumi for all 8 byte blocks */
+ while (lengthInBytes >= 8) {
+
+ a.b64[0] ^= BSWAP64(*(pIn++));
+
+ /* KASUMI it */
+ kasumi_1_block(pCtx->sk16, a.b16);
+
+ /* loop variant */
+ lengthInBytes -= 8; /* done another 64 bits */
+
+ /* update */
+ b.b64[0] ^= a.b64[0];
+ }
+
+ if (lengthInBytes) {
+ /* Not a whole 8 byte block remaining */
+ mask.b64[0] = ~(mask.b64[0] >> (BYTESIZE * lengthInBytes));
+ memcpy(&safeBuf.b64, pIn, lengthInBytes);
+ mask.b64[0] &= BSWAP64(safeBuf.b64);
+ a.b64[0] ^= mask.b64[0];
+
+ /* KASUMI it */
+ kasumi_1_block(pCtx->sk16, a.b16);
+
+ /* update */
+ b.b64[0] ^= a.b64[0];
+ }
+
+ /* Kasumi b */
+ kasumi_1_block(pCtx->msk16, b.b16);
+
+ /* swap result */
+ *(uint32_t *)pDigest = bswap4(b.b32[1]);
+#ifdef SAFE_DATA
+ /* Clear sensitive data in stack */
+ clear_mem(&a, sizeof(a));
+ clear_mem(&b, sizeof(b));
+ clear_mem(&mask, sizeof(mask));
+ clear_mem(&safeBuf, sizeof(safeBuf));
+#endif
+}
+
+/*---------------------------------------------------------
+* @description
+* Kasumi F9 1 packet with user config:
+* Single packet digest with user defined IV, and precomputed key schedule.
+*
+* IV = swap32(count) << 32 | swap32(fresh)
+*
+*---------------------------------------------------------*/
+
+static inline void
+kasumi_f9_1_buffer_user(const kasumi_key_sched_t *pCtx, const uint64_t IV,
+ const void *pDataIn, const uint32_t length,
+ void *pDigest, const uint32_t direction)
+{
+ kasumi_union_t a, b, mask, message, temp;
+ uint32_t lengthInBits = length;
+ const uint64_t *pIn = (const uint64_t *)pDataIn;
+ kasumi_union_t safebuff;
+
+ a.b64[0] = 0;
+ b.b64[0] = 0;
+
+ /* Use the count and fresh for first round */
+ a.b64[0] = BSWAP64(IV);
+ /* KASUMI it */
+ kasumi_1_block(pCtx->sk16, a.b16);
+ /* update */
+ b.b64[0] = a.b64[0];
+
+ /* Now run kasumi for all 8 byte blocks */
+ while (lengthInBits >= QWORDSIZEINBITS) {
+ a.b64[0] ^= BSWAP64(*(pIn++));
+ /* KASUMI it */
+ kasumi_1_block(pCtx->sk16, a.b16);
+ /* loop variant */
+ lengthInBits -= 64; /* done another 64 bits */
+ /* update */
+ b.b64[0] ^= a.b64[0];
+ }
+
+ /* Are there any non 8 byte blocks remaining? */
+ if (lengthInBits == 0) {
+ /* last block is : direct + 1 + 62 0's */
+ a.b64[0] ^= ((uint64_t)direction + direction + LAST_PADDING_BIT)
+ << (QWORDSIZEINBITS - 2);
+ kasumi_1_block(pCtx->sk16, a.b16);
+ /* update */
+ b.b64[0] ^= a.b64[0];
+ } else if (lengthInBits <= (QWORDSIZEINBITS - 2)) {
+ /* last block is : message + direction + LAST_PADDING_BITS(1) +
+ * less than 62 0's */
+ mask.b64[0] = -1;
+ temp.b64[0] = 0;
+ message.b64[0] = 0;
+ mask.b64[0] = ~(mask.b64[0] >> lengthInBits);
+ /*round up and copy last lengthInBits */
+ memcpy(&safebuff.b64[0], pIn, (lengthInBits + 7) / 8);
+ message.b64[0] = BSWAP64(safebuff.b64[0]);
+ temp.b64[0] = mask.b64[0] & message.b64[0];
+ temp.b64[0] |=
+ ((uint64_t)direction + direction + LAST_PADDING_BIT)
+ << ((QWORDSIZEINBITS - 2) - lengthInBits);
+ a.b64[0] ^= temp.b64[0];
+ /* KASUMI it */
+ kasumi_1_block(pCtx->sk16, a.b16);
+
+ /* update */
+ b.b64[0] ^= a.b64[0];
+ } else if (lengthInBits == (QWORDSIZEINBITS - 1)) {
+ /* next block is : message + direct */
+ /* last block is : 1 + 63 0's */
+ a.b64[0] ^= direction | (~1 & BSWAP64(*(pIn++)));
+ /* KASUMI it */
+ kasumi_1_block(pCtx->sk16, a.b16);
+ /* update */
+ b.b64[0] ^= a.b64[0];
+ a.b8[QWORDSIZEINBYTES - 1] ^= (LAST_PADDING_BIT)
+ << (QWORDSIZEINBYTES - 1);
+ /* KASUMI it */
+ kasumi_1_block(pCtx->sk16, a.b16);
+ /* update */
+ b.b64[0] ^= a.b64[0];
+ }
+ /* Kasumi b */
+ kasumi_1_block(pCtx->msk16, b.b16);
+
+ /* swap result */
+ *(uint32_t *)pDigest = bswap4(b.b32[1]);
+#ifdef SAFE_DATA
+ /* Clear sensitive data in stack */
+ clear_mem(&a, sizeof(a));
+ clear_mem(&b, sizeof(b));
+ clear_mem(&mask, sizeof(mask));
+ clear_mem(&message, sizeof(message));
+ clear_mem(&temp, sizeof(temp));
+ clear_mem(&safebuff, sizeof(safebuff));
+#endif
+}
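+
+/*
+ * Usage sketch (illustrative; the helper and parameter names are made up,
+ * and swap32() in the note above is assumed to be the 32-bit byte swap
+ * bswap4() used elsewhere in this file): build the IV from COUNT and FRESH
+ * as IV = swap32(count) << 32 | swap32(fresh), then compute the digest.
+ */
+static inline void
+kasumi_example_f9_user(const kasumi_key_sched_t *sched,
+ const void *msg, const uint32_t len_bits,
+ const uint32_t count, const uint32_t fresh,
+ const uint32_t direction, void *digest)
+{
+ const uint64_t iv = ((uint64_t)bswap4(count) << 32) | bswap4(fresh);
+
+ kasumi_f9_1_buffer_user(sched, iv, msg, len_bits, digest, direction);
+}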
+
+void kasumi_f8_1_buffer_sse(const kasumi_key_sched_t *pCtx, const uint64_t IV,
+ const void *pBufferIn, void *pBufferOut,
+ const uint32_t cipherLengthInBytes);
+
+void kasumi_f8_1_buffer_bit_sse(const kasumi_key_sched_t *pCtx,
+ const uint64_t IV,
+ const void *pBufferIn, void *pBufferOut,
+ const uint32_t cipherLengthInBits,
+ const uint32_t offsetInBits);
+
+void kasumi_f8_2_buffer_sse(const kasumi_key_sched_t *pCtx,
+ const uint64_t IV1, const uint64_t IV2,
+ const void *pBufferIn1, void *pBufferOut1,
+ const uint32_t lengthInBytes1,
+ const void *pBufferIn2, void *pBufferOut2,
+ const uint32_t lengthInBytes2);
+
+void kasumi_f8_3_buffer_sse(const kasumi_key_sched_t *pCtx, const uint64_t IV1,
+ const uint64_t IV2, const uint64_t IV3,
+ const void *pBufferIn1, void *pBufferOut1,
+ const void *pBufferIn2, void *pBufferOut2,
+ const void *pBufferIn3, void *pBufferOut3,
+ const uint32_t lengthInBytes);
+
+void kasumi_f8_4_buffer_sse(const kasumi_key_sched_t *pCtx,
+ const uint64_t IV1, const uint64_t IV2,
+ const uint64_t IV3, const uint64_t IV4,
+ const void *pBufferIn1, void *pBufferOut1,
+ const void *pBufferIn2, void *pBufferOut2,
+ const void *pBufferIn3, void *pBufferOut3,
+ const void *pBufferIn4, void *pBufferOut4,
+ const uint32_t lengthInBytes);
+
+void kasumi_f8_n_buffer_sse(const kasumi_key_sched_t *pKeySchedule,
+ const uint64_t IV[],
+ const void * const pDataIn[], void *pDataOut[],
+ const uint32_t dataLen[], const uint32_t dataCount);
+
+void kasumi_f9_1_buffer_sse(const kasumi_key_sched_t *pCtx,
+ const void *pBufferIn,
+ const uint32_t lengthInBytes, void *pDigest);
+
+void kasumi_f9_1_buffer_user_sse(const kasumi_key_sched_t *pCtx,
+ const uint64_t IV, const void *pBufferIn,
+ const uint32_t lengthInBits,
+ void *pDigest, const uint32_t direction);
+
+
+void kasumi_f8_1_buffer_avx(const kasumi_key_sched_t *pCtx, const uint64_t IV,
+ const void *pBufferIn, void *pBufferOut,
+ const uint32_t cipherLengthInBytes);
+void kasumi_f8_1_buffer_bit_avx(const kasumi_key_sched_t *pCtx,
+ const uint64_t IV,
+ const void *pBufferIn, void *pBufferOut,
+ const uint32_t cipherLengthInBits,
+ const uint32_t offsetInBits);
+void kasumi_f8_2_buffer_avx(const kasumi_key_sched_t *pCtx,
+ const uint64_t IV1, const uint64_t IV2,
+ const void *pBufferIn1, void *pBufferOut1,
+ const uint32_t lengthInBytes1,
+ const void *pBufferIn2, void *pBufferOut2,
+ const uint32_t lengthInBytes2);
+void kasumi_f8_3_buffer_avx(const kasumi_key_sched_t *pCtx, const uint64_t IV1,
+ const uint64_t IV2, const uint64_t IV3,
+ const void *pBufferIn1, void *pBufferOut1,
+ const void *pBufferIn2, void *pBufferOut2,
+ const void *pBufferIn3, void *pBufferOut3,
+ const uint32_t lengthInBytes);
+void kasumi_f8_4_buffer_avx(const kasumi_key_sched_t *pCtx,
+ const uint64_t IV1, const uint64_t IV2,
+ const uint64_t IV3, const uint64_t IV4,
+ const void *pBufferIn1, void *pBufferOut1,
+ const void *pBufferIn2, void *pBufferOut2,
+ const void *pBufferIn3, void *pBufferOut3,
+ const void *pBufferIn4, void *pBufferOut4,
+ const uint32_t lengthInBytes);
+void kasumi_f8_n_buffer_avx(const kasumi_key_sched_t *pKeySchedule,
+ const uint64_t IV[],
+ const void * const pDataIn[], void *pDataOut[],
+ const uint32_t dataLen[], const uint32_t dataCount);
+
+void kasumi_f9_1_buffer_avx(const kasumi_key_sched_t *pCtx,
+ const void *pBufferIn,
+ const uint32_t lengthInBytes, void *pDigest);
+
+void kasumi_f9_1_buffer_user_avx(const kasumi_key_sched_t *pCtx,
+ const uint64_t IV, const void *pBufferIn,
+ const uint32_t lengthInBits,
+ void *pDigest, const uint32_t direction);
+#endif /*_KASUMI_INTERNAL_H_*/
+
diff --git a/src/spdk/intel-ipsec-mb/include/memcpy.asm b/src/spdk/intel-ipsec-mb/include/memcpy.asm
new file mode 100644
index 000000000..82e4f2cb2
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/include/memcpy.asm
@@ -0,0 +1,613 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%ifndef __MEMCPY_ASM__
+%define __MEMCPY_ASM__
+
+%include "include/reg_sizes.asm"
+
+
+; This section defines a series of macros to copy small to medium amounts
+; of data from memory to memory, where the size is variable but limited.
+;
+; The macros are all called as:
+; memcpy DST, SRC, SIZE, TMP0, TMP1, XTMP0, XTMP1, XTMP2, XTMP3
+; with the parameters defined as:
+; DST : register: pointer to dst (not modified)
+; SRC : register: pointer to src (not modified)
+; SIZE : register: length in bytes (not modified)
+; TMP0 : 64-bit temp GPR (clobbered)
+; TMP1 : 64-bit temp GPR (clobbered)
+; XTMP0 : temp XMM (clobbered)
+; XTMP1 : temp XMM (clobbered)
+; XTMP2 : temp XMM (clobbered)
+; XTMP3 : temp XMM (clobbered)
+;
+; The name indicates the options. The name is of the form:
+; memcpy_<VEC>_<SZ><ZERO><RET>
+; where:
+; <VEC> is either "sse" or "avx" or "avx2"
+; <SZ> is either "64" or "128" and defines largest value of SIZE
+; <ZERO> is blank or "_1". If "_1" then the min SIZE is 1 (otherwise 0)
+; <RET> is blank or "_ret". If blank, the code falls through. If "_ret",
+; it does a "ret" at the end
+;
+; For the avx2 versions, the temp XMM registers need to be YMM registers
+; If the SZ is 64, then only two YMM temps are needed, i.e. it is called as:
+; memcpy_avx2_64 DST, SRC, SIZE, TMP0, TMP1, YTMP0, YTMP1
+; memcpy_avx2_128 DST, SRC, SIZE, TMP0, TMP1, YTMP0, YTMP1, YTMP2, YTMP3
+;
+; For example:
+; memcpy_sse_64 : SSE, 0 <= size < 64, falls through
+; memcpy_avx_64_1 : AVX1, 1 <= size < 64, falls through
+; memcpy_sse_128_ret : SSE, 0 <= size < 128, ends with ret
+; memcpy_avx_128_1_ret : AVX1, 1 <= size < 128, ends with ret
+;
+
+%macro memcpy_sse_64 9
+ __memcpy_int %1,%2,%3,%4,%5,%6,%7,%8,%9, 0, 64, 0, 0
+%endm
+
+%macro memcpy_sse_64_1 9
+ __memcpy_int %1,%2,%3,%4,%5,%6,%7,%8,%9, 1, 64, 0, 0
+%endm
+
+%macro memcpy_sse_128 9
+ __memcpy_int %1,%2,%3,%4,%5,%6,%7,%8,%9, 0, 128, 0, 0
+%endm
+
+%macro memcpy_sse_128_1 9
+ __memcpy_int %1,%2,%3,%4,%5,%6,%7,%8,%9, 1, 128, 0, 0
+%endm
+
+%macro memcpy_sse_64_ret 9
+ __memcpy_int %1,%2,%3,%4,%5,%6,%7,%8,%9, 0, 64, 1, 0
+%endm
+
+%macro memcpy_sse_64_1_ret 9
+ __memcpy_int %1,%2,%3,%4,%5,%6,%7,%8,%9, 1, 64, 1, 0
+%endm
+
+%macro memcpy_sse_128_ret 9
+ __memcpy_int %1,%2,%3,%4,%5,%6,%7,%8,%9, 0, 128, 1, 0
+%endm
+
+%macro memcpy_sse_128_1_ret 9
+ __memcpy_int %1,%2,%3,%4,%5,%6,%7,%8,%9, 1, 128, 1, 0
+%endm
+
+
+%macro memcpy_sse_16 5
+ __memcpy_int %1,%2,%3,%4,%5,,,,, 0, 16, 0, 0
+%endm
+
+%macro memcpy_sse_16_1 5
+ __memcpy_int %1,%2,%3,%4,%5,,,,, 1, 16, 0, 0
+%endm
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+%macro memcpy_avx_64 9
+ __memcpy_int %1,%2,%3,%4,%5,%6,%7,%8,%9, 0, 64, 0, 1
+%endm
+
+%macro memcpy_avx_64_1 9
+ __memcpy_int %1,%2,%3,%4,%5,%6,%7,%8,%9, 1, 64, 0, 1
+%endm
+
+%macro memcpy_avx_128 9
+ __memcpy_int %1,%2,%3,%4,%5,%6,%7,%8,%9, 0, 128, 0, 1
+%endm
+
+%macro memcpy_avx_128_1 9
+ __memcpy_int %1,%2,%3,%4,%5,%6,%7,%8,%9, 1, 128, 0, 1
+%endm
+
+%macro memcpy_avx_64_ret 9
+ __memcpy_int %1,%2,%3,%4,%5,%6,%7,%8,%9, 0, 64, 1, 1
+%endm
+
+%macro memcpy_avx_64_1_ret 9
+ __memcpy_int %1,%2,%3,%4,%5,%6,%7,%8,%9, 1, 64, 1, 1
+%endm
+
+%macro memcpy_avx_128_ret 9
+ __memcpy_int %1,%2,%3,%4,%5,%6,%7,%8,%9, 0, 128, 1, 1
+%endm
+
+%macro memcpy_avx_128_1_ret 9
+ __memcpy_int %1,%2,%3,%4,%5,%6,%7,%8,%9, 1, 128, 1, 1
+%endm
+
+
+%macro memcpy_avx_16 5
+ __memcpy_int %1,%2,%3,%4,%5,,,,, 0, 16, 0, 1
+%endm
+
+%macro memcpy_avx_16_1 5
+ __memcpy_int %1,%2,%3,%4,%5,,,,, 1, 16, 0, 1
+%endm
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+%macro memcpy_avx2_64 7
+ __memcpy_int %1,%2,%3,%4,%5,%6,%7,--,--, 0, 64, 0, 2
+%endm
+
+%macro memcpy_avx2_64_1 7
+ __memcpy_int %1,%2,%3,%4,%5,%6,%7,--,--, 1, 64, 0, 2
+%endm
+
+%macro memcpy_avx2_128 9
+ __memcpy_int %1,%2,%3,%4,%5,%6,%7, %8, %9, 0, 128, 0, 2
+%endm
+
+%macro memcpy_avx2_128_1 9
+ __memcpy_int %1,%2,%3,%4,%5,%6,%7, %8, %9, 1, 128, 0, 2
+%endm
+
+%macro memcpy_avx2_64_ret 7
+ __memcpy_int %1,%2,%3,%4,%5,%6,%7,--,--, 0, 64, 1, 2
+%endm
+
+%macro memcpy_avx2_64_1_ret 7
+ __memcpy_int %1,%2,%3,%4,%5,%6,%7,--,--, 1, 64, 1, 2
+%endm
+
+%macro memcpy_avx2_128_ret 9
+ __memcpy_int %1,%2,%3,%4,%5,%6,%7,%8,%9, 0, 128, 1, 2
+%endm
+
+%macro memcpy_avx2_128_1_ret 9
+ __memcpy_int %1,%2,%3,%4,%5,%6,%7,%8,%9, 1, 128, 1, 2
+%endm
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+
+%macro __memcpy_int 13
+%define %%DST %1 ; register: pointer to dst (not modified)
+%define %%SRC %2 ; register: pointer to src (not modified)
+%define %%SIZE %3 ; register: length in bytes (not modified)
+%define %%TMP0 %4 ; 64-bit temp GPR (clobbered)
+%define %%TMP1 %5 ; 64-bit temp GPR (clobbered)
+%define %%XTMP0 %6 ; temp XMM (clobbered)
+%define %%XTMP1 %7 ; temp XMM (clobbered)
+%define %%XTMP2 %8 ; temp XMM (clobbered)
+%define %%XTMP3 %9 ; temp XMM (clobbered)
+%define %%NOT0 %10 ; if not 0, then assume size cannot be zero
+%define %%MAXSIZE %11 ; 128, 64, etc
+%define %%USERET %12 ; if not 0, use "ret" at end
+%define %%USEAVX %13 ; 0 = SSE, 1 = AVX1, 2 = AVX2
+
+%if (%%USERET != 0)
+ %define %%DONE ret
+%else
+ %define %%DONE jmp %%end
+%endif
+
+%if (%%USEAVX != 0)
+ %define %%MOVDQU vmovdqu
+%else
+ %define %%MOVDQU movdqu
+%endif
+
+%if (%%MAXSIZE >= 128)
+ test %%SIZE, 64
+ jz %%lt64
+ %if (%%USEAVX >= 2)
+ %%MOVDQU %%XTMP0, [%%SRC + 0*32]
+ %%MOVDQU %%XTMP1, [%%SRC + 1*32]
+ %%MOVDQU %%XTMP2, [%%SRC + %%SIZE - 2*32]
+ %%MOVDQU %%XTMP3, [%%SRC + %%SIZE - 1*32]
+
+ %%MOVDQU [%%DST + 0*32], %%XTMP0
+ %%MOVDQU [%%DST + 1*32], %%XTMP1
+ %%MOVDQU [%%DST + %%SIZE - 2*32], %%XTMP2
+ %%MOVDQU [%%DST + %%SIZE - 1*32], %%XTMP3
+ %else
+ %%MOVDQU %%XTMP0, [%%SRC + 0*16]
+ %%MOVDQU %%XTMP1, [%%SRC + 1*16]
+ %%MOVDQU %%XTMP2, [%%SRC + 2*16]
+ %%MOVDQU %%XTMP3, [%%SRC + 3*16]
+ %%MOVDQU [%%DST + 0*16], %%XTMP0
+ %%MOVDQU [%%DST + 1*16], %%XTMP1
+ %%MOVDQU [%%DST + 2*16], %%XTMP2
+ %%MOVDQU [%%DST + 3*16], %%XTMP3
+
+ %%MOVDQU %%XTMP0, [%%SRC + %%SIZE - 4*16]
+ %%MOVDQU %%XTMP1, [%%SRC + %%SIZE - 3*16]
+ %%MOVDQU %%XTMP2, [%%SRC + %%SIZE - 2*16]
+ %%MOVDQU %%XTMP3, [%%SRC + %%SIZE - 1*16]
+ %%MOVDQU [%%DST + %%SIZE - 4*16], %%XTMP0
+ %%MOVDQU [%%DST + %%SIZE - 3*16], %%XTMP1
+ %%MOVDQU [%%DST + %%SIZE - 2*16], %%XTMP2
+ %%MOVDQU [%%DST + %%SIZE - 1*16], %%XTMP3
+ %endif
+ %%DONE
+%endif
+
+%if (%%MAXSIZE >= 64)
+%%lt64:
+ test %%SIZE, 32
+ jz %%lt32
+ %if (%%USEAVX >= 2)
+ %%MOVDQU %%XTMP0, [%%SRC + 0*32]
+ %%MOVDQU %%XTMP1, [%%SRC + %%SIZE - 1*32]
+ %%MOVDQU [%%DST + 0*32], %%XTMP0
+ %%MOVDQU [%%DST + %%SIZE - 1*32], %%XTMP1
+ %else
+ %%MOVDQU %%XTMP0, [%%SRC + 0*16]
+ %%MOVDQU %%XTMP1, [%%SRC + 1*16]
+ %%MOVDQU %%XTMP2, [%%SRC + %%SIZE - 2*16]
+ %%MOVDQU %%XTMP3, [%%SRC + %%SIZE - 1*16]
+ %%MOVDQU [%%DST + 0*16], %%XTMP0
+ %%MOVDQU [%%DST + 1*16], %%XTMP1
+ %%MOVDQU [%%DST + %%SIZE - 2*16], %%XTMP2
+ %%MOVDQU [%%DST + %%SIZE - 1*16], %%XTMP3
+ %endif
+ %%DONE
+%endif
+
+%if (%%MAXSIZE >= 32)
+%%lt32:
+ test %%SIZE, 16
+ jz %%lt16
+ %if (%%USEAVX >= 2)
+ %%MOVDQU XWORD(%%XTMP0), [%%SRC + 0*16]
+ %%MOVDQU XWORD(%%XTMP1), [%%SRC + %%SIZE - 1*16]
+ %%MOVDQU [%%DST + 0*16], XWORD(%%XTMP0)
+ %%MOVDQU [%%DST + %%SIZE - 1*16], XWORD(%%XTMP1)
+ %else
+ %%MOVDQU %%XTMP0, [%%SRC + 0*16]
+ %%MOVDQU %%XTMP1, [%%SRC + %%SIZE - 1*16]
+ %%MOVDQU [%%DST + 0*16], %%XTMP0
+ %%MOVDQU [%%DST + %%SIZE - 1*16], %%XTMP1
+ %endif
+ %%DONE
+%endif
+
+%if (%%MAXSIZE >= 16)
+%%lt16:
+ test %%SIZE, 8
+ jz %%lt8
+ mov %%TMP0, [%%SRC]
+ mov %%TMP1, [%%SRC + %%SIZE - 8]
+ mov [%%DST], %%TMP0
+ mov [%%DST + %%SIZE - 8], %%TMP1
+ %%DONE
+%endif
+
+%if (%%MAXSIZE >= 8)
+%%lt8:
+ test %%SIZE, 4
+ jz %%lt4
+ mov DWORD(%%TMP0), [%%SRC]
+ mov DWORD(%%TMP1), [%%SRC + %%SIZE - 4]
+ mov [%%DST], DWORD(%%TMP0)
+ mov [%%DST + %%SIZE - 4], DWORD(%%TMP1)
+ %%DONE
+%endif
+
+%if (%%MAXSIZE >= 4)
+%%lt4:
+ test %%SIZE, 2
+ jz %%lt2
+ movzx DWORD(%%TMP0), word [%%SRC]
+ movzx DWORD(%%TMP1), byte [%%SRC + %%SIZE - 1]
+ mov [%%DST], WORD(%%TMP0)
+ mov [%%DST + %%SIZE - 1], BYTE(%%TMP1)
+ %%DONE
+%endif
+
+%%lt2:
+%if (%%NOT0 == 0)
+ test %%SIZE, 1
+ jz %%end
+%endif
+ movzx DWORD(%%TMP0), byte [%%SRC]
+ mov [%%DST], BYTE(%%TMP0)
+%%end:
+%if (%%USERET != 0)
+ ret
+%endif
+%endm
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; Utility macro to assist with SIMD shifting
+%macro _PSRLDQ 3
+%define %%VEC %1
+%define %%REG %2
+%define %%IMM %3
+
+%ifidn %%VEC, SSE
+ psrldq %%REG, %%IMM
+%else
+ vpsrldq %%REG, %%REG, %%IMM
+%endif
+%endm
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+; This section defines a series of macros to store small to medium amounts
+; of data from SIMD registers to memory, where the size is variable but limited.
+;
+; The macros are all called as:
+;      simd_store DST, SRC, SIZE, TMP, IDX
+; with the parameters defined as:
+; DST : register: pointer to dst (not modified)
+; SRC : register: src data (clobbered)
+; SIZE : register: length in bytes (not modified)
+; TMP : 64-bit temp GPR (clobbered)
+; IDX : 64-bit GPR to store dst index/offset (clobbered)
+;
+; The name indicates the options. The name is of the form:
+; simd_store_<VEC>
+; where <VEC> is the SIMD instruction type e.g. "sse" or "avx"
+
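+; Illustrative usage (a sketch; the registers chosen here are arbitrary):
+;      ;; store rax (0-16) bytes from xmm0 to [rdi], r10/r11 used as scratch
+;      simd_store_sse  rdi, xmm0, rax, r10, r11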
+
+%macro simd_store_sse 5
+ __simd_store %1,%2,%3,%4,%5,SSE
+%endm
+
+%macro simd_store_avx 5
+ __simd_store %1,%2,%3,%4,%5,AVX
+%endm
+
+%macro simd_store_sse_15 5
+ __simd_store %1,%2,%3,%4,%5,SSE,15
+%endm
+
+%macro simd_store_avx_15 5
+ __simd_store %1,%2,%3,%4,%5,AVX,15
+%endm
+
+%macro __simd_store 6-7
+%define %%DST %1 ; register: pointer to dst (not modified)
+%define %%SRC %2 ; register: src data (clobbered)
+%define %%SIZE %3 ; register: length in bytes (not modified)
+%define %%TMP %4 ; 64-bit temp GPR (clobbered)
+%define %%IDX %5 ; 64-bit temp GPR to store dst idx (clobbered)
+%define %%SIMDTYPE %6 ; "SSE" or "AVX"
+%define %%MAX_LEN %7 ; [optional] maximum length to be stored, default 16
+
+%define %%PSRLDQ _PSRLDQ %%SIMDTYPE,
+
+%ifidn %%SIMDTYPE, SSE
+ %define %%MOVDQU movdqu
+ %define %%MOVQ movq
+%else
+ %define %%MOVDQU vmovdqu
+ %define %%MOVQ vmovq
+%endif
+
+;; determine max byte size for store operation
+%if %0 > 6
+%assign max_length_to_store %%MAX_LEN
+%else
+%assign max_length_to_store 16
+%endif
+
+%if max_length_to_store > 16
+%error "__simd_store macro invoked with MAX_LEN bigger than 16!"
+%endif
+
+ xor %%IDX, %%IDX ; zero idx
+
+%if max_length_to_store == 16
+ test %%SIZE, 16
+ jz %%lt16
+ %%MOVDQU [%%DST], %%SRC
+ jmp %%end
+%%lt16:
+%endif
+
+%if max_length_to_store >= 8
+ test %%SIZE, 8
+ jz %%lt8
+ %%MOVQ [%%DST + %%IDX], %%SRC
+ %%PSRLDQ %%SRC, 8
+ add %%IDX, 8
+%%lt8:
+%endif
+
+ %%MOVQ %%TMP, %%SRC ; use GPR from now on
+
+%if max_length_to_store >= 4
+ test %%SIZE, 4
+ jz %%lt4
+ mov [%%DST + %%IDX], DWORD(%%TMP)
+ shr %%TMP, 32
+ add %%IDX, 4
+%%lt4:
+%endif
+
+ test %%SIZE, 2
+ jz %%lt2
+ mov [%%DST + %%IDX], WORD(%%TMP)
+ shr %%TMP, 16
+ add %%IDX, 2
+%%lt2:
+ test %%SIZE, 1
+ jz %%end
+ mov [%%DST + %%IDX], BYTE(%%TMP)
+%%end:
+%endm
+
+; This section defines a series of macros to load small to medium amounts
+; (from 0 to 16 bytes) of data from memory to SIMD registers,
+; where the size is variable but limited.
+;
+; The macros are all called as:
+; simd_load DST, SRC, SIZE
+; with the parameters defined as:
+; DST : register: destination XMM register
+; SRC : register: pointer to src data (not modified)
+; SIZE : register: length in bytes (not modified)
+;
+; The name indicates the options. The name is of the form:
+; simd_load_<VEC>_<SZ><ZERO>
+; where:
+; <VEC> is either "sse" or "avx"
+; <SZ> is either "15" or "16" and defines largest value of SIZE
+; <ZERO> is blank or "_1". If "_1" then the min SIZE is 1 (otherwise 0)
+;
+; For example:
+; simd_load_sse_16 : SSE, 0 <= size <= 16
+; simd_load_avx_15_1 : AVX, 1 <= size <= 15
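+;
+; Illustrative usage (a sketch; the registers chosen here are arbitrary):
+;      ;; load rdx (1-16) bytes from [rsi] into xmm2
+;      simd_load_sse_16_1 xmm2, rsi, rdx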
+
+%macro simd_load_sse_15_1 3
+ __simd_load %1,%2,%3,0,0,SSE
+%endm
+%macro simd_load_sse_15 3
+ __simd_load %1,%2,%3,1,0,SSE
+%endm
+%macro simd_load_sse_16_1 3
+ __simd_load %1,%2,%3,0,1,SSE
+%endm
+%macro simd_load_sse_16 3
+ __simd_load %1,%2,%3,1,1,SSE
+%endm
+
+%macro simd_load_avx_15_1 3
+ __simd_load %1,%2,%3,0,0,AVX
+%endm
+%macro simd_load_avx_15 3
+ __simd_load %1,%2,%3,1,0,AVX
+%endm
+%macro simd_load_avx_16_1 3
+ __simd_load %1,%2,%3,0,1,AVX
+%endm
+%macro simd_load_avx_16 3
+ __simd_load %1,%2,%3,1,1,AVX
+%endm
+
+%macro __simd_load 6
+%define %%DST %1 ; [out] destination XMM register
+%define %%SRC %2 ; [in] pointer to src data
+%define %%SIZE %3 ; [in] length in bytes (0-16 bytes)
+%define %%ACCEPT_0 %4 ; 0 = min length = 1, 1 = min length = 0
+%define %%ACCEPT_16 %5 ; 0 = max length = 15 , 1 = max length = 16
+%define %%SIMDTYPE %6 ; "SSE" or "AVX"
+
+%ifidn %%SIMDTYPE, SSE
+ %define %%MOVDQU movdqu
+ %define %%PINSRB pinsrb
+ %define %%PINSRQ pinsrq
+ %define %%PXOR pxor
+%else
+ %define %%MOVDQU vmovdqu
+ %define %%PINSRB vpinsrb
+ %define %%PINSRQ vpinsrq
+ %define %%PXOR vpxor
+%endif
+
+%if (%%ACCEPT_16 != 0)
+ test %%SIZE, 16
+ jz %%_skip_16
+ %%MOVDQU %%DST, [%%SRC]
+ jmp %%end_load
+
+%%_skip_16:
+%endif
+ %%PXOR %%DST, %%DST ; clear XMM register
+%if (%%ACCEPT_0 != 0)
+ or %%SIZE, %%SIZE
+ je %%end_load
+%endif
+ cmp %%SIZE, 1
+ je %%_size_1
+ cmp %%SIZE, 2
+ je %%_size_2
+ cmp %%SIZE, 3
+ je %%_size_3
+ cmp %%SIZE, 4
+ je %%_size_4
+ cmp %%SIZE, 5
+ je %%_size_5
+ cmp %%SIZE, 6
+ je %%_size_6
+ cmp %%SIZE, 7
+ je %%_size_7
+ cmp %%SIZE, 8
+ je %%_size_8
+ cmp %%SIZE, 9
+ je %%_size_9
+ cmp %%SIZE, 10
+ je %%_size_10
+ cmp %%SIZE, 11
+ je %%_size_11
+ cmp %%SIZE, 12
+ je %%_size_12
+ cmp %%SIZE, 13
+ je %%_size_13
+ cmp %%SIZE, 14
+ je %%_size_14
+
+%%_size_15:
+ %%PINSRB %%DST, [%%SRC + 14], 14
+%%_size_14:
+ %%PINSRB %%DST, [%%SRC + 13], 13
+%%_size_13:
+ %%PINSRB %%DST, [%%SRC + 12], 12
+%%_size_12:
+ %%PINSRB %%DST, [%%SRC + 11], 11
+%%_size_11:
+ %%PINSRB %%DST, [%%SRC + 10], 10
+%%_size_10:
+ %%PINSRB %%DST, [%%SRC + 9], 9
+%%_size_9:
+ %%PINSRB %%DST, [%%SRC + 8], 8
+%%_size_8:
+ %%PINSRQ %%DST, [%%SRC], 0
+ jmp %%end_load
+%%_size_7:
+ %%PINSRB %%DST, [%%SRC + 6], 6
+%%_size_6:
+ %%PINSRB %%DST, [%%SRC + 5], 5
+%%_size_5:
+ %%PINSRB %%DST, [%%SRC + 4], 4
+%%_size_4:
+ %%PINSRB %%DST, [%%SRC + 3], 3
+%%_size_3:
+ %%PINSRB %%DST, [%%SRC + 2], 2
+%%_size_2:
+ %%PINSRB %%DST, [%%SRC + 1], 1
+%%_size_1:
+ %%PINSRB %%DST, [%%SRC + 0], 0
+%%end_load:
+%endm
+%endif ; ifndef __MEMCPY_ASM__
diff --git a/src/spdk/intel-ipsec-mb/include/noaesni.h b/src/spdk/intel-ipsec-mb/include/noaesni.h
new file mode 100644
index 000000000..30d970edf
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/include/noaesni.h
@@ -0,0 +1,65 @@
+/*******************************************************************************
+ Copyright (c) 2018, Intel Corporation
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*******************************************************************************/
+
+#include "intel-ipsec-mb.h"
+
+#ifndef NOAESNI_H
+#define NOAESNI_H
+
+IMB_DLL_EXPORT void init_mb_mgr_sse_no_aesni(MB_MGR *state);
+IMB_DLL_EXPORT JOB_AES_HMAC *submit_job_sse_no_aesni(MB_MGR *state);
+IMB_DLL_EXPORT JOB_AES_HMAC *submit_job_nocheck_sse_no_aesni(MB_MGR *state);
+IMB_DLL_EXPORT JOB_AES_HMAC *flush_job_sse_no_aesni(MB_MGR *state);
+IMB_DLL_EXPORT uint32_t queue_size_sse_no_aesni(MB_MGR *state);
+IMB_DLL_EXPORT JOB_AES_HMAC *get_completed_job_sse_no_aesni(MB_MGR *state);
+IMB_DLL_EXPORT JOB_AES_HMAC *get_next_job_sse_no_aesni(MB_MGR *state);
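+
+/*
+ * Illustrative call sequence for the job API above (a sketch, not part of
+ * the original header).  alloc_mb_mgr()/free_mb_mgr() are declared in
+ * intel-ipsec-mb.h:
+ *
+ *     MB_MGR *mgr = alloc_mb_mgr(0);
+ *     JOB_AES_HMAC *job;
+ *
+ *     init_mb_mgr_sse_no_aesni(mgr);
+ *     job = get_next_job_sse_no_aesni(mgr);
+ *     ... fill in the cipher/hash fields of *job ...
+ *     job = submit_job_sse_no_aesni(mgr);  (may return a completed job)
+ *     while ((job = flush_job_sse_no_aesni(mgr)) != NULL)
+ *             ;  (process the remaining completed jobs)
+ *     free_mb_mgr(mgr);
+ */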
+
+IMB_DLL_EXPORT void
+aes_keyexp_128_sse_no_aesni(const void *key, void *enc_exp_keys,
+ void *dec_exp_keys);
+IMB_DLL_EXPORT void
+aes_keyexp_192_sse_no_aesni(const void *key, void *enc_exp_keys,
+ void *dec_exp_keys);
+IMB_DLL_EXPORT void
+aes_keyexp_256_sse_no_aesni(const void *key, void *enc_exp_keys,
+ void *dec_exp_keys);
+IMB_DLL_EXPORT void
+aes_xcbc_expand_key_sse_no_aesni(const void *key, void *k1_exp, void *k2,
+ void *k3);
+IMB_DLL_EXPORT void
+aes_keyexp_128_enc_sse_no_aesni(const void *key, void *enc_exp_keys);
+IMB_DLL_EXPORT void
+aes_keyexp_192_enc_sse_no_aesni(const void *key, void *enc_exp_keys);
+IMB_DLL_EXPORT void
+aes_keyexp_256_enc_sse_no_aesni(const void *key, void *enc_exp_keys);
+IMB_DLL_EXPORT void
+aes_cmac_subkey_gen_sse_no_aesni(const void *key_exp, void *key1, void *key2);
+IMB_DLL_EXPORT void
+aes_cfb_128_one_sse_no_aesni(void *out, const void *in, const void *iv,
+ const void *keys, uint64_t len);
+
+#endif /* NOAESNI_H */
diff --git a/src/spdk/intel-ipsec-mb/include/os.asm b/src/spdk/intel-ipsec-mb/include/os.asm
new file mode 100644
index 000000000..f54043ed2
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/include/os.asm
@@ -0,0 +1,58 @@
+;;
+;; Copyright (c) 2017-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+%ifndef OS_ASM_FILE
+%define OS_ASM_FILE
+
+%ifndef WIN_ABI
+%ifidn __OUTPUT_FORMAT__, win64
+%define WIN_ABI
+%endif
+%endif
+
+%ifndef LINUX
+%ifidn __OUTPUT_FORMAT__, elf64
+%define LINUX
+%endif
+%endif
+
+%ifdef LINUX
+;;; macro to declare global symbols
+;;; - name : symbol name
+;;; - type : function or data
+;;; - scope : internal, private, default
+%define MKGLOBAL(name,type,scope) global name %+ : %+ type scope
+%endif ; LINUX
+
+%ifdef WIN_ABI
+;;; macro to declare global symbols
+;;; - name : symbol name
+;;; - type : function or data
+;;; - scope : internal, private, default (ignored in win64 coff format)
+%define MKGLOBAL(name,type,scope) global name
+%endif ; WIN_ABI
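+
+;;; Illustrative use (see e.g. save_xmms.asm):
+;;;     MKGLOBAL(save_xmms,function,internal)
+;;; save_xmms:
+;;;     ...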
+
+%endif ; OS_ASM_FILE
diff --git a/src/spdk/intel-ipsec-mb/include/reg_sizes.asm b/src/spdk/intel-ipsec-mb/include/reg_sizes.asm
new file mode 100644
index 000000000..c9f9f8cd2
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/include/reg_sizes.asm
@@ -0,0 +1,300 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+; define d and w variants for registers
+
+%ifndef _REG_SIZES_ASM_
+%define _REG_SIZES_ASM_
+
+%define raxd eax
+%define raxw ax
+%define raxb al
+
+%define rbxd ebx
+%define rbxw bx
+%define rbxb bl
+
+%define rcxd ecx
+%define rcxw cx
+%define rcxb cl
+
+%define rdxd edx
+%define rdxw dx
+%define rdxb dl
+
+%define rsid esi
+%define rsiw si
+%define rsib sil
+
+%define rdid edi
+%define rdiw di
+%define rdib dil
+
+%define rbpd ebp
+%define rbpw bp
+%define rbpb bpl
+
+%define zmm0x xmm0
+%define zmm1x xmm1
+%define zmm2x xmm2
+%define zmm3x xmm3
+%define zmm4x xmm4
+%define zmm5x xmm5
+%define zmm6x xmm6
+%define zmm7x xmm7
+%define zmm8x xmm8
+%define zmm9x xmm9
+%define zmm10x xmm10
+%define zmm11x xmm11
+%define zmm12x xmm12
+%define zmm13x xmm13
+%define zmm14x xmm14
+%define zmm15x xmm15
+%define zmm16x xmm16
+%define zmm17x xmm17
+%define zmm18x xmm18
+%define zmm19x xmm19
+%define zmm20x xmm20
+%define zmm21x xmm21
+%define zmm22x xmm22
+%define zmm23x xmm23
+%define zmm24x xmm24
+%define zmm25x xmm25
+%define zmm26x xmm26
+%define zmm27x xmm27
+%define zmm28x xmm28
+%define zmm29x xmm29
+%define zmm30x xmm30
+%define zmm31x xmm31
+
+%define ymm0x xmm0
+%define ymm1x xmm1
+%define ymm2x xmm2
+%define ymm3x xmm3
+%define ymm4x xmm4
+%define ymm5x xmm5
+%define ymm6x xmm6
+%define ymm7x xmm7
+%define ymm8x xmm8
+%define ymm9x xmm9
+%define ymm10x xmm10
+%define ymm11x xmm11
+%define ymm12x xmm12
+%define ymm13x xmm13
+%define ymm14x xmm14
+%define ymm15x xmm15
+%define ymm16x xmm16
+%define ymm17x xmm17
+%define ymm18x xmm18
+%define ymm19x xmm19
+%define ymm20x xmm20
+%define ymm21x xmm21
+%define ymm22x xmm22
+%define ymm23x xmm23
+%define ymm24x xmm24
+%define ymm25x xmm25
+%define ymm26x xmm26
+%define ymm27x xmm27
+%define ymm28x xmm28
+%define ymm29x xmm29
+%define ymm30x xmm30
+%define ymm31x xmm31
+
+%define xmm0x xmm0
+%define xmm1x xmm1
+%define xmm2x xmm2
+%define xmm3x xmm3
+%define xmm4x xmm4
+%define xmm5x xmm5
+%define xmm6x xmm6
+%define xmm7x xmm7
+%define xmm8x xmm8
+%define xmm9x xmm9
+%define xmm10x xmm10
+%define xmm11x xmm11
+%define xmm12x xmm12
+%define xmm13x xmm13
+%define xmm14x xmm14
+%define xmm15x xmm15
+%define xmm16x xmm16
+%define xmm17x xmm17
+%define xmm18x xmm18
+%define xmm19x xmm19
+%define xmm20x xmm20
+%define xmm21x xmm21
+%define xmm22x xmm22
+%define xmm23x xmm23
+%define xmm24x xmm24
+%define xmm25x xmm25
+%define xmm26x xmm26
+%define xmm27x xmm27
+%define xmm28x xmm28
+%define xmm29x xmm29
+%define xmm30x xmm30
+%define xmm31x xmm31
+
+%define zmm0y ymm0
+%define zmm1y ymm1
+%define zmm2y ymm2
+%define zmm3y ymm3
+%define zmm4y ymm4
+%define zmm5y ymm5
+%define zmm6y ymm6
+%define zmm7y ymm7
+%define zmm8y ymm8
+%define zmm9y ymm9
+%define zmm10y ymm10
+%define zmm11y ymm11
+%define zmm12y ymm12
+%define zmm13y ymm13
+%define zmm14y ymm14
+%define zmm15y ymm15
+%define zmm16y ymm16
+%define zmm17y ymm17
+%define zmm18y ymm18
+%define zmm19y ymm19
+%define zmm20y ymm20
+%define zmm21y ymm21
+%define zmm22y ymm22
+%define zmm23y ymm23
+%define zmm24y ymm24
+%define zmm25y ymm25
+%define zmm26y ymm26
+%define zmm27y ymm27
+%define zmm28y ymm28
+%define zmm29y ymm29
+%define zmm30y ymm30
+%define zmm31y ymm31
+
+%define xmm0y ymm0
+%define xmm1y ymm1
+%define xmm2y ymm2
+%define xmm3y ymm3
+%define xmm4y ymm4
+%define xmm5y ymm5
+%define xmm6y ymm6
+%define xmm7y ymm7
+%define xmm8y ymm8
+%define xmm9y ymm9
+%define xmm10y ymm10
+%define xmm11y ymm11
+%define xmm12y ymm12
+%define xmm13y ymm13
+%define xmm14y ymm14
+%define xmm15y ymm15
+%define xmm16y ymm16
+%define xmm17y ymm17
+%define xmm18y ymm18
+%define xmm19y ymm19
+%define xmm20y ymm20
+%define xmm21y ymm21
+%define xmm22y ymm22
+%define xmm23y ymm23
+%define xmm24y ymm24
+%define xmm25y ymm25
+%define xmm26y ymm26
+%define xmm27y ymm27
+%define xmm28y ymm28
+%define xmm29y ymm29
+%define xmm30y ymm30
+%define xmm31y ymm31
+
+%define xmm0z zmm0
+%define xmm1z zmm1
+%define xmm2z zmm2
+%define xmm3z zmm3
+%define xmm4z zmm4
+%define xmm5z zmm5
+%define xmm6z zmm6
+%define xmm7z zmm7
+%define xmm8z zmm8
+%define xmm9z zmm9
+%define xmm10z zmm10
+%define xmm11z zmm11
+%define xmm12z zmm12
+%define xmm13z zmm13
+%define xmm14z zmm14
+%define xmm15z zmm15
+%define xmm16z zmm16
+%define xmm17z zmm17
+%define xmm18z zmm18
+%define xmm19z zmm19
+%define xmm20z zmm20
+%define xmm21z zmm21
+%define xmm22z zmm22
+%define xmm23z zmm23
+%define xmm24z zmm24
+%define xmm25z zmm25
+%define xmm26z zmm26
+%define xmm27z zmm27
+%define xmm28z zmm28
+%define xmm29z zmm29
+%define xmm30z zmm30
+%define xmm31z zmm31
+
+%define ymm0z zmm0
+%define ymm1z zmm1
+%define ymm2z zmm2
+%define ymm3z zmm3
+%define ymm4z zmm4
+%define ymm5z zmm5
+%define ymm6z zmm6
+%define ymm7z zmm7
+%define ymm8z zmm8
+%define ymm9z zmm9
+%define ymm10z zmm10
+%define ymm11z zmm11
+%define ymm12z zmm12
+%define ymm13z zmm13
+%define ymm14z zmm14
+%define ymm15z zmm15
+%define ymm16z zmm16
+%define ymm17z zmm17
+%define ymm18z zmm18
+%define ymm19z zmm19
+%define ymm20z zmm20
+%define ymm21z zmm21
+%define ymm22z zmm22
+%define ymm23z zmm23
+%define ymm24z zmm24
+%define ymm25z zmm25
+%define ymm26z zmm26
+%define ymm27z zmm27
+%define ymm28z zmm28
+%define ymm29z zmm29
+%define ymm30z zmm30
+%define ymm31z zmm31
+
+%define DWORD(reg) reg %+ d
+%define WORD(reg) reg %+ w
+%define BYTE(reg) reg %+ b
+
+%define XWORD(reg) reg %+ x
+%define YWORD(reg) reg %+ y
+%define ZWORD(reg) reg %+ z
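+
+;; For example, DWORD(rax) expands to raxd and therefore eax, while
+;; XWORD(zmm3) expands to zmm3x and therefore xmm3.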
+
+%endif ;; _REG_SIZES_ASM_
diff --git a/src/spdk/intel-ipsec-mb/include/save_xmms.asm b/src/spdk/intel-ipsec-mb/include/save_xmms.asm
new file mode 100644
index 000000000..c9fd67eb5
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/include/save_xmms.asm
@@ -0,0 +1,132 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%include "include/os.asm"
+
+%ifdef LINUX
+%define ARG1 rdi
+%else
+%define ARG1 rcx
+%endif
+
+section .text
+; void save_xmms(UINT128 array[10])
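+; (xmm6-xmm15 are the non-volatile XMM registers in the Windows x64 ABI,
+;  hence the 10-entry array)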
+MKGLOBAL(save_xmms,function,internal)
+save_xmms:
+ movdqa [ARG1 + 0*16], xmm6
+ movdqa [ARG1 + 1*16], xmm7
+ movdqa [ARG1 + 2*16], xmm8
+ movdqa [ARG1 + 3*16], xmm9
+ movdqa [ARG1 + 4*16], xmm10
+ movdqa [ARG1 + 5*16], xmm11
+ movdqa [ARG1 + 6*16], xmm12
+ movdqa [ARG1 + 7*16], xmm13
+ movdqa [ARG1 + 8*16], xmm14
+ movdqa [ARG1 + 9*16], xmm15
+ ret
+
+
+; void restore_xmms(UINT128 array[10])
+MKGLOBAL(restore_xmms,function,internal)
+restore_xmms:
+ movdqa xmm6, [ARG1 + 0*16]
+ movdqa xmm7, [ARG1 + 1*16]
+ movdqa xmm8, [ARG1 + 2*16]
+ movdqa xmm9, [ARG1 + 3*16]
+ movdqa xmm10, [ARG1 + 4*16]
+ movdqa xmm11, [ARG1 + 5*16]
+ movdqa xmm12, [ARG1 + 6*16]
+ movdqa xmm13, [ARG1 + 7*16]
+ movdqa xmm14, [ARG1 + 8*16]
+ movdqa xmm15, [ARG1 + 9*16]
+%ifdef SAFE_DATA
+        ;; Clear potentially sensitive data stored on the stack
+ pxor xmm0, xmm0
+ movdqa [ARG1 + 0 * 16], xmm0
+ movdqa [ARG1 + 1 * 16], xmm0
+ movdqa [ARG1 + 2 * 16], xmm0
+ movdqa [ARG1 + 3 * 16], xmm0
+ movdqa [ARG1 + 4 * 16], xmm0
+ movdqa [ARG1 + 5 * 16], xmm0
+ movdqa [ARG1 + 6 * 16], xmm0
+ movdqa [ARG1 + 7 * 16], xmm0
+ movdqa [ARG1 + 8 * 16], xmm0
+ movdqa [ARG1 + 9 * 16], xmm0
+%endif
+
+ ret
+
+
+; void save_xmms_avx(UINT128 array[10])
+MKGLOBAL(save_xmms_avx,function,internal)
+save_xmms_avx:
+ vmovdqa [ARG1 + 0*16], xmm6
+ vmovdqa [ARG1 + 1*16], xmm7
+ vmovdqa [ARG1 + 2*16], xmm8
+ vmovdqa [ARG1 + 3*16], xmm9
+ vmovdqa [ARG1 + 4*16], xmm10
+ vmovdqa [ARG1 + 5*16], xmm11
+ vmovdqa [ARG1 + 6*16], xmm12
+ vmovdqa [ARG1 + 7*16], xmm13
+ vmovdqa [ARG1 + 8*16], xmm14
+ vmovdqa [ARG1 + 9*16], xmm15
+ ret
+
+
+; void restore_xmms_avx(UINT128 array[10])
+MKGLOBAL(restore_xmms_avx,function,internal)
+restore_xmms_avx:
+ vmovdqa xmm6, [ARG1 + 0*16]
+ vmovdqa xmm7, [ARG1 + 1*16]
+ vmovdqa xmm8, [ARG1 + 2*16]
+ vmovdqa xmm9, [ARG1 + 3*16]
+ vmovdqa xmm10, [ARG1 + 4*16]
+ vmovdqa xmm11, [ARG1 + 5*16]
+ vmovdqa xmm12, [ARG1 + 6*16]
+ vmovdqa xmm13, [ARG1 + 7*16]
+ vmovdqa xmm14, [ARG1 + 8*16]
+ vmovdqa xmm15, [ARG1 + 9*16]
+
+%ifdef SAFE_DATA
+        ;; Clear potentially sensitive data stored on the stack
+ vpxor xmm0, xmm0
+ vmovdqa [ARG1 + 0 * 16], xmm0
+ vmovdqa [ARG1 + 1 * 16], xmm0
+ vmovdqa [ARG1 + 2 * 16], xmm0
+ vmovdqa [ARG1 + 3 * 16], xmm0
+ vmovdqa [ARG1 + 4 * 16], xmm0
+ vmovdqa [ARG1 + 5 * 16], xmm0
+ vmovdqa [ARG1 + 6 * 16], xmm0
+ vmovdqa [ARG1 + 7 * 16], xmm0
+ vmovdqa [ARG1 + 8 * 16], xmm0
+ vmovdqa [ARG1 + 9 * 16], xmm0
+%endif
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/include/save_xmms.h b/src/spdk/intel-ipsec-mb/include/save_xmms.h
new file mode 100644
index 000000000..e711958da
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/include/save_xmms.h
@@ -0,0 +1,39 @@
+/*******************************************************************************
+ Copyright (c) 2012-2018, Intel Corporation
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*******************************************************************************/
+
+#ifndef SAVE_XMMS_H
+#define SAVE_XMMS_H
+
+#include "intel-ipsec-mb.h"
+
+void save_xmms(uint128_t array[10]);
+void restore_xmms(uint128_t array[10]);
+
+void save_xmms_avx(uint128_t array[10]);
+void restore_xmms_avx(uint128_t array[10]);
+
+#endif /* SAVE_XMMS_H */
diff --git a/src/spdk/intel-ipsec-mb/include/snow3g.h b/src/spdk/intel-ipsec-mb/include/snow3g.h
new file mode 100644
index 000000000..520a4b41f
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/include/snow3g.h
@@ -0,0 +1,511 @@
+/*******************************************************************************
+ Copyright (c) 2009-2019, Intel Corporation
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*******************************************************************************/
+
+#ifndef _SNOW3G_H_
+#define _SNOW3G_H_
+
+/*******************************************************************************
+ * SSE
+ ******************************************************************************/
+void
+snow3g_f8_1_buffer_bit_sse(const snow3g_key_schedule_t *pCtx,
+ const void *pIV,
+ const void *pBufferIn,
+ void *pBufferOut,
+ const uint32_t cipherLengthInBits,
+ const uint32_t offsetInBits);
+
+void
+snow3g_f8_1_buffer_sse(const snow3g_key_schedule_t *pCtx,
+ const void *pIV,
+ const void *pBufferIn,
+ void *pBufferOut,
+ const uint32_t lengthInBytes);
+
+void
+snow3g_f8_2_buffer_sse(const snow3g_key_schedule_t *pCtx,
+ const void *pIV1,
+ const void *pIV2,
+ const void *pBufferIn1,
+ void *pBufferOut1,
+ const uint32_t lengthInBytes1,
+ const void *pBufferIn2,
+ void *pBufferOut2,
+ const uint32_t lengthInBytes2);
+
+void
+snow3g_f8_4_buffer_sse(const snow3g_key_schedule_t *pCtx,
+ const void *pIV1,
+ const void *pIV2,
+ const void *pIV3,
+ const void *pIV4,
+ const void *pBufferIn1,
+ void *pBufferOut1,
+ const uint32_t lengthInBytes1,
+ const void *pBufferIn2,
+ void *pBufferOut2,
+ const uint32_t lengthInBytes2,
+ const void *pBufferIn3,
+ void *pBufferOut3,
+ const uint32_t lengthInBytes3,
+ const void *pBufferIn4,
+ void *pBufferOut4,
+ const uint32_t lengthInBytes4);
+
+void
+snow3g_f8_8_buffer_sse(const snow3g_key_schedule_t *pCtx,
+ const void *pIV1,
+ const void *pIV2,
+ const void *pIV3,
+ const void *pIV4,
+ const void *pIV5,
+ const void *pIV6,
+ const void *pIV7,
+ const void *pIV8,
+ const void *pBufferIn1,
+ void *pBufferOut1,
+ const uint32_t lengthInBytes1,
+ const void *pBufferIn2,
+ void *pBufferOut2,
+ const uint32_t lengthInBytes2,
+ const void *pBufferIn3,
+ void *pBufferOut3,
+ const uint32_t lengthInBytes3,
+ const void *pBufferIn4,
+ void *pBufferOut4,
+ const uint32_t lengthInBytes4,
+ const void *pBufferIn5,
+ void *pBufferOut5,
+ const uint32_t lengthInBytes5,
+ const void *pBufferIn6,
+ void *pBufferOut6,
+ const uint32_t lengthInBytes6,
+ const void *pBufferIn7,
+ void *pBufferOut7,
+ const uint32_t lengthInBytes7,
+ const void *pBufferIn8,
+ void *pBufferOut8,
+ const uint32_t lengthInBytes8);
+
+void
+snow3g_f8_8_buffer_multikey_sse(const snow3g_key_schedule_t * const pCtx[],
+ const void * const pIV[],
+ const void * const pBufferIn[],
+ void *pBufferOut[],
+ const uint32_t lengthInBytes[]);
+
+void
+snow3g_f8_n_buffer_sse(const snow3g_key_schedule_t *pCtx,
+ const void * const IV[],
+ const void * const pBufferIn[],
+ void *pBufferOut[],
+ const uint32_t bufferLenInBytes[],
+ const uint32_t bufferCount);
+
+void
+snow3g_f8_n_buffer_multikey_sse(const snow3g_key_schedule_t * const pCtx[],
+ const void * const IV[],
+ const void * const pBufferIn[],
+ void *pBufferOut[],
+ const uint32_t bufferLenInBytes[],
+ const uint32_t bufferCount);
+
+void
+snow3g_f9_1_buffer_sse(const snow3g_key_schedule_t *pCtx,
+ const void *pIV,
+ const void *pBufferIn,
+ const uint64_t lengthInBits,
+ void *pDigest);
+
+size_t
+snow3g_key_sched_size_sse(void);
+
+int
+snow3g_init_key_sched_sse(const void *pKey, snow3g_key_schedule_t *pCtx);
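+
+/*
+ * Illustrative usage of the SSE variant (a sketch, not part of the original
+ * header); it assumes a zero return from the key schedule init means success,
+ * and that key, iv, in, out and len_in_bytes are provided by the caller:
+ *
+ *     snow3g_key_schedule_t ctx;
+ *
+ *     if (snow3g_init_key_sched_sse(key, &ctx) == 0)
+ *             snow3g_f8_1_buffer_sse(&ctx, iv, in, out, len_in_bytes);
+ */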
+
+/*******************************************************************************
+ * SSE NO-AESNI
+ ******************************************************************************/
+void
+snow3g_f8_1_buffer_bit_sse_no_aesni(const snow3g_key_schedule_t *pCtx,
+ const void *pIV,
+ const void *pBufferIn,
+ void *pBufferOut,
+ const uint32_t cipherLengthInBits,
+ const uint32_t offsetInBits);
+
+void
+snow3g_f8_1_buffer_sse_no_aesni(const snow3g_key_schedule_t *pCtx,
+ const void *pIV,
+ const void *pBufferIn,
+ void *pBufferOut,
+ const uint32_t lengthInBytes);
+
+void
+snow3g_f8_2_buffer_sse_no_aesni(const snow3g_key_schedule_t *pCtx,
+ const void *pIV1,
+ const void *pIV2,
+ const void *pBufferIn1,
+ void *pBufferOut1,
+ const uint32_t lengthInBytes1,
+ const void *pBufferIn2,
+ void *pBufferOut2,
+ const uint32_t lengthInBytes2);
+
+void
+snow3g_f8_4_buffer_sse_no_aesni(const snow3g_key_schedule_t *pCtx,
+ const void *pIV1,
+ const void *pIV2,
+ const void *pIV3,
+ const void *pIV4,
+ const void *pBufferIn1,
+ void *pBufferOut1,
+ const uint32_t lengthInBytes1,
+ const void *pBufferIn2,
+ void *pBufferOut2,
+ const uint32_t lengthInBytes2,
+ const void *pBufferIn3,
+ void *pBufferOut3,
+ const uint32_t lengthInBytes3,
+ const void *pBufferIn4,
+ void *pBufferOut4,
+ const uint32_t lengthInBytes4);
+
+void
+snow3g_f8_8_buffer_sse_no_aesni(const snow3g_key_schedule_t *pCtx,
+ const void *pIV1,
+ const void *pIV2,
+ const void *pIV3,
+ const void *pIV4,
+ const void *pIV5,
+ const void *pIV6,
+ const void *pIV7,
+ const void *pIV8,
+ const void *pBufferIn1,
+ void *pBufferOut1,
+ const uint32_t lengthInBytes1,
+ const void *pBufferIn2,
+ void *pBufferOut2,
+ const uint32_t lengthInBytes2,
+ const void *pBufferIn3,
+ void *pBufferOut3,
+ const uint32_t lengthInBytes3,
+ const void *pBufferIn4,
+ void *pBufferOut4,
+ const uint32_t lengthInBytes4,
+ const void *pBufferIn5,
+ void *pBufferOut5,
+ const uint32_t lengthInBytes5,
+ const void *pBufferIn6,
+ void *pBufferOut6,
+ const uint32_t lengthInBytes6,
+ const void *pBufferIn7,
+ void *pBufferOut7,
+ const uint32_t lengthInBytes7,
+ const void *pBufferIn8,
+ void *pBufferOut8,
+ const uint32_t lengthInBytes8);
+
+void
+snow3g_f8_8_buffer_multikey_sse_no_aesni(const snow3g_key_schedule_t * const
+ pCtx[],
+ const void * const pIV[],
+ const void * const pBufferIn[],
+ void *pBufferOut[],
+ const uint32_t lengthInBytes[]);
+
+void
+snow3g_f8_n_buffer_sse_no_aesni(const snow3g_key_schedule_t *pCtx,
+ const void * const IV[],
+ const void * const pBufferIn[],
+ void *pBufferOut[],
+ const uint32_t bufferLenInBytes[],
+ const uint32_t bufferCount);
+
+void
+snow3g_f8_n_buffer_multikey_sse_no_aesni(const snow3g_key_schedule_t * const
+ pCtx[],
+ const void * const IV[],
+ const void * const pBufferIn[],
+ void *pBufferOut[],
+ const uint32_t bufferLenInBytes[],
+ const uint32_t bufferCount);
+
+void
+snow3g_f9_1_buffer_sse_no_aesni(const snow3g_key_schedule_t *pCtx,
+ const void *pIV,
+ const void *pBufferIn,
+ const uint64_t lengthInBits,
+ void *pDigest);
+
+size_t
+snow3g_key_sched_size_sse_no_aesni(void);
+
+int
+snow3g_init_key_sched_sse_no_aesni(const void *pKey,
+ snow3g_key_schedule_t *pCtx);
+
+/*******************************************************************************
+ * AVX
+ ******************************************************************************/
+void
+snow3g_f8_1_buffer_bit_avx(const snow3g_key_schedule_t *pCtx,
+ const void *pIV,
+ const void *pBufferIn,
+ void *pBufferOut,
+ const uint32_t cipherLengthInBits,
+ const uint32_t offsetInBits);
+
+void
+snow3g_f8_1_buffer_avx(const snow3g_key_schedule_t *pCtx,
+ const void *pIV,
+ const void *pBufferIn,
+ void *pBufferOut,
+ const uint32_t lengthInBytes);
+
+void
+snow3g_f8_2_buffer_avx(const snow3g_key_schedule_t *pCtx,
+ const void *pIV1,
+ const void *pIV2,
+ const void *pBufferIn1,
+ void *pBufferOut1,
+ const uint32_t lengthInBytes1,
+ const void *pBufferIn2,
+ void *pBufferOut2,
+ const uint32_t lengthInBytes2);
+
+void
+snow3g_f8_4_buffer_avx(const snow3g_key_schedule_t *pCtx,
+ const void *pIV1,
+ const void *pIV2,
+ const void *pIV3,
+ const void *pIV4,
+ const void *pBufferIn1,
+ void *pBufferOut1,
+ const uint32_t lengthInBytes1,
+ const void *pBufferIn2,
+ void *pBufferOut2,
+ const uint32_t lengthInBytes2,
+ const void *pBufferIn3,
+ void *pBufferOut3,
+ const uint32_t lengthInBytes3,
+ const void *pBufferIn4,
+ void *pBufferOut4,
+ const uint32_t lengthInBytes4);
+
+void
+snow3g_f8_8_buffer_avx(const snow3g_key_schedule_t *pCtx,
+ const void *pIV1,
+ const void *pIV2,
+ const void *pIV3,
+ const void *pIV4,
+ const void *pIV5,
+ const void *pIV6,
+ const void *pIV7,
+ const void *pIV8,
+ const void *pBufferIn1,
+ void *pBufferOut1,
+ const uint32_t lengthInBytes1,
+ const void *pBufferIn2,
+ void *pBufferOut2,
+ const uint32_t lengthInBytes2,
+ const void *pBufferIn3,
+ void *pBufferOut3,
+ const uint32_t lengthInBytes3,
+ const void *pBufferIn4,
+ void *pBufferOut4,
+ const uint32_t lengthInBytes4,
+ const void *pBufferIn5,
+ void *pBufferOut5,
+ const uint32_t lengthInBytes5,
+ const void *pBufferIn6,
+ void *pBufferOut6,
+ const uint32_t lengthInBytes6,
+ const void *pBufferIn7,
+ void *pBufferOut7,
+ const uint32_t lengthInBytes7,
+ const void *pBufferIn8,
+ void *pBufferOut8,
+ const uint32_t lengthInBytes8);
+
+void
+snow3g_f8_8_buffer_multikey_avx(const snow3g_key_schedule_t * const pCtx[],
+ const void * const pIV[],
+ const void * const pBufferIn[],
+ void *pBufferOut[],
+ const uint32_t lengthInBytes[]);
+
+void
+snow3g_f8_n_buffer_avx(const snow3g_key_schedule_t *pCtx,
+ const void * const IV[],
+ const void * const pBufferIn[],
+ void *pBufferOut[],
+ const uint32_t bufferLenInBytes[],
+ const uint32_t bufferCount);
+
+void
+snow3g_f8_n_buffer_multikey_avx(const snow3g_key_schedule_t * const pCtx[],
+ const void * const IV[],
+ const void * const pBufferIn[],
+ void *pBufferOut[],
+ const uint32_t bufferLenInBytes[],
+ const uint32_t bufferCount);
+
+void
+snow3g_f9_1_buffer_avx(const snow3g_key_schedule_t *pCtx,
+ const void *pIV,
+ const void *pBufferIn,
+ const uint64_t lengthInBits,
+ void *pDigest);
+
+size_t
+snow3g_key_sched_size_avx(void);
+
+int
+snow3g_init_key_sched_avx(const void *pKey, snow3g_key_schedule_t *pCtx);
+
+/*******************************************************************************
+ * AVX2
+ ******************************************************************************/
+
+void
+snow3g_f8_1_buffer_bit_avx2(const snow3g_key_schedule_t *pCtx,
+ const void *pIV,
+ const void *pBufferIn,
+ void *pBufferOut,
+ const uint32_t cipherLengthInBits,
+ const uint32_t offsetInBits);
+
+void
+snow3g_f8_1_buffer_avx2(const snow3g_key_schedule_t *pCtx,
+ const void *pIV,
+ const void *pBufferIn,
+ void *pBufferOut,
+ const uint32_t lengthInBytes);
+
+void
+snow3g_f8_2_buffer_avx2(const snow3g_key_schedule_t *pCtx,
+ const void *pIV1,
+ const void *pIV2,
+ const void *pBufferIn1,
+ void *pBufferOut1,
+ const uint32_t lengthInBytes1,
+ const void *pBufferIn2,
+ void *pBufferOut2,
+ const uint32_t lengthInBytes2);
+
+void
+snow3g_f8_4_buffer_avx2(const snow3g_key_schedule_t *pCtx,
+ const void *pIV1,
+ const void *pIV2,
+ const void *pIV3,
+ const void *pIV4,
+ const void *pBufferIn1,
+ void *pBufferOut1,
+ const uint32_t lengthInBytes1,
+ const void *pBufferIn2,
+ void *pBufferOut2,
+ const uint32_t lengthInBytes2,
+ const void *pBufferIn3,
+ void *pBufferOut3,
+ const uint32_t lengthInBytes3,
+ const void *pBufferIn4,
+ void *pBufferOut4,
+ const uint32_t lengthInBytes4);
+
+void
+snow3g_f8_8_buffer_avx2(const snow3g_key_schedule_t *pCtx,
+ const void *pIV1,
+ const void *pIV2,
+ const void *pIV3,
+ const void *pIV4,
+ const void *pIV5,
+ const void *pIV6,
+ const void *pIV7,
+ const void *pIV8,
+ const void *pBufferIn1,
+ void *pBufferOut1,
+ const uint32_t lengthInBytes1,
+ const void *pBufferIn2,
+ void *pBufferOut2,
+ const uint32_t lengthInBytes2,
+ const void *pBufferIn3,
+ void *pBufferOut3,
+ const uint32_t lengthInBytes3,
+ const void *pBufferIn4,
+ void *pBufferOut4,
+ const uint32_t lengthInBytes4,
+ const void *pBufferIn5,
+ void *pBufferOut5,
+ const uint32_t lengthInBytes5,
+ const void *pBufferIn6,
+ void *pBufferOut6,
+ const uint32_t lengthInBytes6,
+ const void *pBufferIn7,
+ void *pBufferOut7,
+ const uint32_t lengthInBytes7,
+ const void *pBufferIn8,
+ void *pBufferOut8,
+ const uint32_t lengthInBytes8);
+
+void
+snow3g_f8_8_buffer_multikey_avx2(const snow3g_key_schedule_t * const pCtx[],
+ const void * const pIV[],
+ const void * const pBufferIn[],
+ void *pBufferOut[],
+ const uint32_t lengthInBytes[]);
+
+void
+snow3g_f8_n_buffer_avx2(const snow3g_key_schedule_t *pCtx,
+ const void * const IV[],
+ const void * const pBufferIn[],
+ void *pBufferOut[],
+ const uint32_t bufferLenInBytes[],
+ const uint32_t bufferCount);
+
+void
+snow3g_f8_n_buffer_multikey_avx2(const snow3g_key_schedule_t * const pCtx[],
+ const void * const IV[],
+ const void * const pBufferIn[],
+ void *pBufferOut[],
+ const uint32_t bufferLenInBytes[],
+ const uint32_t bufferCount);
+
+void
+snow3g_f9_1_buffer_avx2(const snow3g_key_schedule_t *pCtx,
+ const void *pIV,
+ const void *pBufferIn,
+ const uint64_t lengthInBits,
+ void *pDigest);
+
+size_t
+snow3g_key_sched_size_avx2(void);
+
+int
+snow3g_init_key_sched_avx2(const void *pKey, snow3g_key_schedule_t *pCtx);
+
+#endif /* _SNOW3G_H_ */
diff --git a/src/spdk/intel-ipsec-mb/include/snow3g_common.h b/src/spdk/intel-ipsec-mb/include/snow3g_common.h
new file mode 100644
index 000000000..d7c7e63c1
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/include/snow3g_common.h
@@ -0,0 +1,2840 @@
+/*******************************************************************************
+ Copyright (c) 2009-2019, Intel Corporation
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*******************************************************************************/
+
+/*-----------------------------------------------------------------------
+ *
+ * An implementation of SNOW 3G, the core algorithm for the
+ * 3GPP Confidentiality and Integrity algorithms.
+ *
+ *-----------------------------------------------------------------------*/
+
+#ifndef SNOW3G_COMMON_H
+#define SNOW3G_COMMON_H
+
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+
+#include "intel-ipsec-mb.h"
+#include "include/snow3g.h"
+#include "include/snow3g_internal.h"
+#include "clear_regs_mem.h"
+
+#define CLEAR_MEM clear_mem
+#define CLEAR_VAR clear_var
+
+/* -------------------------------------------------------------------
+ * LFSR array shift by 1 position, 4 or 8 packets at a time
+ * ------------------------------------------------------------------ */
+
+#ifdef AVX2
+/* LFSR array shift */
+static inline void ShiftLFSR_8(snow3gKeyState8_t *pCtx)
+{
+ pCtx->iLFSR_X = (pCtx->iLFSR_X + 1) & 15;
+}
+#endif /* AVX2 */
+
+/* LFSR array shift */
+static inline void ShiftLFSR_4(snow3gKeyState4_t *pCtx)
+{
+ pCtx->iLFSR_X = (pCtx->iLFSR_X + 1) % 16;
+}
+
+/*---------------------------------------------------------
+ * @description
+ * GF(2^64) modular multiplication/reduction
+ *
+ *---------------------------------------------------------*/
+static inline uint64_t multiply_and_reduce64(uint64_t a, uint64_t b)
+{
+ uint64_t msk;
+ uint64_t res = 0;
+ uint64_t i = 64;
+
+ while (i--) {
+ msk = ((int64_t)res >> 63) & 0x1b;
+ res <<= 1;
+ res ^= msk;
+ msk = ((int64_t)b >> 63) & a;
+ b <<= 1;
+ res ^= msk;
+ }
+ return res;
+}
+
+#ifdef AVX2
+/* -------------------------------------------------------------------
+ * ClockLFSR sub-function as defined in snow3g standard
+ * S = table_Alpha_div[LFSR[11] & 0xff]
+ *     ^ table_Alpha_mul[LFSR[0] >> 24]
+ * ------------------------------------------------------------------ */
+static void C0_C11_8(__m256i *S, const __m256i *L0, const __m256i *L11)
+{
+ __m256i mask, Sx, B11, B0, offset;
+
+ offset = _mm256_set1_epi32(3);
+ mask = _mm256_setr_epi32(0xF0F0F000, 0xF0F0F004, 0xF0F0F008, 0xF0F0F00C,
+ 0xF0F0F000, 0xF0F0F004, 0xF0F0F008,
+ 0xF0F0F00C);
+ B11 = _mm256_shuffle_epi8(*L11, mask);
+ *S = _mm256_i32gather_epi32(snow3g_table_A_div, B11, 4);
+
+ mask = _mm256_add_epi32(mask, offset);
+ B0 = _mm256_shuffle_epi8(*L0, mask);
+ Sx = _mm256_i32gather_epi32(snow3g_table_A_mul, B0, 4);
+ *S = _mm256_xor_si256(*S, Sx);
+}
+#endif /* AVX2 */
+
+/* -------------------------------------------------------------------
+ * ClockLFSR sub-function as defined in snow3g standard
+ * S = table_Alpha_div[LFSR[11] & 0xff]
+ *     ^ table_Alpha_mul[LFSR[0] >> 24]
+ * ------------------------------------------------------------------ */
+static inline void C0_C11_4(uint32_t *S, const __m128i *L0, const __m128i *L11)
+{
+ unsigned B11[4], B0[4];
+
+ B11[0] = _mm_extract_epi8(*L11, 0);
+ B11[1] = _mm_extract_epi8(*L11, 4);
+ B11[2] = _mm_extract_epi8(*L11, 8);
+ B11[3] = _mm_extract_epi8(*L11, 12);
+
+ S[0] = snow3g_table_A_div[B11[0]];
+ S[1] = snow3g_table_A_div[B11[1]];
+ S[2] = snow3g_table_A_div[B11[2]];
+ S[3] = snow3g_table_A_div[B11[3]];
+
+ B0[0] = _mm_extract_epi8(*L0, 3);
+ B0[1] = _mm_extract_epi8(*L0, 7);
+ B0[2] = _mm_extract_epi8(*L0, 11);
+ B0[3] = _mm_extract_epi8(*L0, 15);
+
+ S[0] ^= snow3g_table_A_mul[B0[0]];
+ S[1] ^= snow3g_table_A_mul[B0[1]];
+ S[2] ^= snow3g_table_A_mul[B0[2]];
+ S[3] ^= snow3g_table_A_mul[B0[3]];
+}
+
+#ifdef AVX2
+/* -------------------------------------------------------------------
+ * ClockLFSR function as defined in snow3g standard
+ * S = table_Alpha_div[LFSR[11] & 0xff]
+ * ^ table_Alpha_mul[LFSR[0] >> 24]
+ * ^ LFSR[2] ^ LFSR[0] << 8 ^ LFSR[11] >> 8
+ * ------------------------------------------------------------------ */
+static inline void ClockLFSR_8(snow3gKeyState8_t *pCtx)
+{
+ __m256i X2;
+ __m256i S, T, U;
+
+ U = pCtx->LFSR_X[pCtx->iLFSR_X];
+ S = pCtx->LFSR_X[(pCtx->iLFSR_X + 11) % 16];
+
+ C0_C11_8(&X2, &U, &S);
+
+ T = _mm256_slli_epi32(U, 8);
+ S = _mm256_srli_epi32(S, 8);
+ U = _mm256_xor_si256(T, pCtx->LFSR_X[(pCtx->iLFSR_X + 2) % 16]);
+
+ ShiftLFSR_8(pCtx);
+
+ S = _mm256_xor_si256(S, U);
+ S = _mm256_xor_si256(S, X2);
+ pCtx->LFSR_X[(pCtx->iLFSR_X + 15) % 16] = S;
+}
+#endif /* AVX2 */
+
+/* -------------------------------------------------------------------
+ * ClockLFSR function as defined in snow3g standard
+ * S = table_Alpha_div[LFSR[11] & 0xff]
+ * ^ table_Alpha_mul[LFSR[0] >> 24]
+ * ^ LFSR[2] ^ LFSR[0] << 8 ^ LFSR[11] >> 8
+ * ------------------------------------------------------------------ */
+static inline void ClockLFSR_4(snow3gKeyState4_t *pCtx)
+{
+ uint32_t X2[4];
+ __m128i S, T, U;
+
+ U = pCtx->LFSR_X[pCtx->iLFSR_X];
+ S = pCtx->LFSR_X[(pCtx->iLFSR_X + 11) % 16];
+ C0_C11_4(X2, &U, &S);
+
+ T = _mm_slli_epi32(U, 8);
+ S = _mm_srli_epi32(S, 8);
+ U = _mm_xor_si128(T, pCtx->LFSR_X[(pCtx->iLFSR_X + 2) % 16]);
+ ShiftLFSR_4(pCtx);
+
+ /* (SSE4) */
+ T = _mm_insert_epi32(T, X2[0], 0);
+ T = _mm_insert_epi32(T, X2[1], 1);
+ T = _mm_insert_epi32(T, X2[2], 2);
+ T = _mm_insert_epi32(T, X2[3], 3);
+ S = _mm_xor_si128(S, U);
+ S = _mm_xor_si128(S, T);
+ pCtx->LFSR_X[(pCtx->iLFSR_X + 15) % 16] = S;
+}
+
+#ifdef AVX2
+/* -------------------------------------------------------------------
+ * ClockFSM function as defined in snow3g standard
+ * 8 packets at a time
+ * ------------------------------------------------------------------ */
+static inline void ClockFSM_8(snow3gKeyState8_t *pCtx, __m256i *data)
+{
+ __m256i F, R, S2T0, S2T1, S2T2, S2T3, S1T0, S1T1, S1T2, S1T3;
+ __m256i w3, w2, w1, w0, offset, mask;
+
+ F = _mm256_add_epi32(pCtx->LFSR_X[(pCtx->iLFSR_X + 15)%16],
+ pCtx->FSM_X[0]);
+ R = _mm256_xor_si256(pCtx->LFSR_X[(pCtx->iLFSR_X + 5)%16],
+ pCtx->FSM_X[2]);
+ *data = _mm256_xor_si256(F, pCtx->FSM_X[1]);
+ R = _mm256_add_epi32(R, pCtx->FSM_X[1]);
+ offset = _mm256_set1_epi32(0x1);
+
+ F = pCtx->FSM_X[1];
+ w3 = _mm256_setr_epi32(0xF0F0F000, 0xF0F0F004, 0xF0F0F008,
+ 0xF0F0F00C, 0xF0F0F000, 0xF0F0F004,
+ 0xF0F0F008, 0xF0F0F00C);
+ mask = _mm256_shuffle_epi8(F,w3);
+ S2T0 = _mm256_i32gather_epi32(S2_T0,mask,4);
+
+ w2 = _mm256_add_epi32(w3,offset);
+ mask = _mm256_shuffle_epi8(F,w2);
+ S2T1 = _mm256_i32gather_epi32(S2_T1,mask,4);
+
+ w1 = _mm256_add_epi32(w2,offset);
+ mask = _mm256_shuffle_epi8(pCtx->FSM_X[1],w1);
+ S2T2 = _mm256_i32gather_epi32(S2_T2,mask,4);
+
+ w0 = _mm256_add_epi32(w1,offset);
+ mask = _mm256_shuffle_epi8(F,w0);
+ S2T3 = _mm256_i32gather_epi32(S2_T3,mask,4);
+
+
+ F = pCtx->FSM_X[0];
+ w3 = _mm256_setr_epi32(0xF0F0F000, 0xF0F0F004, 0xF0F0F008,
+ 0xF0F0F00C, 0xF0F0F010, 0xF0F0F014,
+ 0xF0F0F018, 0xF0F0F01C);
+ mask = _mm256_shuffle_epi8(F,w3);
+ S1T0 = _mm256_i32gather_epi32(S1_T0,mask,4);
+
+ w2 = _mm256_add_epi32(w3,offset);
+ mask = _mm256_shuffle_epi8(F,w2);
+ S1T1 = _mm256_i32gather_epi32(S1_T1,mask,4);
+
+ w1 = _mm256_add_epi32(w2,offset);
+ mask = _mm256_shuffle_epi8(F,w1);
+ S1T2 = _mm256_i32gather_epi32(S1_T2,mask,4);
+
+ w0 = _mm256_add_epi32(w1,offset);
+ mask = _mm256_shuffle_epi8(F,w0);
+ S1T3 = _mm256_i32gather_epi32(S1_T3,mask,4);
+
+ S2T0 = _mm256_xor_si256(S2T0, S2T1);
+ S2T2 = _mm256_xor_si256(S2T2, S2T3);
+ S2T0 = _mm256_xor_si256(S2T0, S2T2);
+
+ S1T0 = _mm256_xor_si256(S1T0, S1T1);
+ S1T2 = _mm256_xor_si256(S1T2, S1T3);
+ S1T0 = _mm256_xor_si256(S1T0, S1T2);
+
+
+ pCtx->FSM_X[2] = S2T0;
+ pCtx->FSM_X[1] = S1T0;
+ pCtx->FSM_X[0] = R;
+}
+
+#endif /* AVX2 */
+
+/* -------------------------------------------------------------------
+ * ClockFSM function as defined in snow3g standard
+ * 4 packets at a time
+ * ------------------------------------------------------------------ */
+static inline void ClockFSM_4(snow3gKeyState4_t *pCtx, __m128i *data)
+{
+ __m128i F, R;
+#ifdef _WIN32
+#pragma warning(push)
+#pragma warning(disable:4556)
+#endif
+#if defined (NO_AESNI) || defined (SAFE_LOOKUP)
+ uint32_t L = 0;
+#endif
+ uint32_t K = 0;
+
+ F = _mm_add_epi32(pCtx->LFSR_X[(pCtx->iLFSR_X + 15) % 16],
+ pCtx->FSM_X[0]);
+ R = _mm_xor_si128(pCtx->LFSR_X[(pCtx->iLFSR_X + 5) % 16],
+ pCtx->FSM_X[2]);
+ *data = _mm_xor_si128(F, pCtx->FSM_X[1]);
+ R = _mm_add_epi32(R, pCtx->FSM_X[1]);
+#if defined (NO_AESNI) || defined (SAFE_LOOKUP)
+ S1_S2_4(pCtx->FSM_X[2], pCtx->FSM_X[1], pCtx->FSM_X[0], K, L, 0);
+ S1_S2_4(pCtx->FSM_X[2], pCtx->FSM_X[1], pCtx->FSM_X[0], K, L, 1);
+ S1_S2_4(pCtx->FSM_X[2], pCtx->FSM_X[1], pCtx->FSM_X[0], K, L, 2);
+ S1_S2_4(pCtx->FSM_X[2], pCtx->FSM_X[1], pCtx->FSM_X[0], K, L, 3);
+#else
+ S1_S2_4(pCtx->FSM_X[2], pCtx->FSM_X[1], pCtx->FSM_X[0], K, 0);
+ S1_S2_4(pCtx->FSM_X[2], pCtx->FSM_X[1], pCtx->FSM_X[0], K, 1);
+ S1_S2_4(pCtx->FSM_X[2], pCtx->FSM_X[1], pCtx->FSM_X[0], K, 2);
+ S1_S2_4(pCtx->FSM_X[2], pCtx->FSM_X[1], pCtx->FSM_X[0], K, 3);
+#endif /* NO_AESNI */
+ pCtx->FSM_X[0] = R;
+
+#ifdef _WIN32
+#pragma warning(pop)
+#endif
+}
+
+/**
+*******************************************************************************
+* @description
+* This function generates 4 bytes of keystream for 1 buffer at a time
+*
+* @param[in] pCtx Context where the scheduled keys are stored
+* @param[in/out] pKeyStream Pointer to generated keystream
+*
+*******************************************************************************/
+static inline void snow3g_keystream_1_4(snow3gKeyState1_t *pCtx,
+ uint32_t *pKeyStream)
+{
+ uint32_t F;
+
+ ClockFSM_1(pCtx, &F);
+ *pKeyStream = F ^ pCtx->LFSR_S[0];
+ ClockLFSR_1(pCtx);
+}
+
+/**
+*******************************************************************************
+* @description
+* This function generates 8 bytes of keystream for 1 buffer at a time
+*
+* @param[in] pCtx Context where the scheduled keys are stored
+* @param[in/out] pKeyStream Pointer to generated keystream
+*
+*******************************************************************************/
+static inline void snow3g_keystream_1_8(snow3gKeyState1_t *pCtx,
+ uint64_t *pKeyStream)
+{
+ uint64_t F;
+ uint32_t FSM4;
+ uint32_t V0, V1;
+ uint32_t F0, F1;
+ uint32_t R0, R1;
+ uint32_t L0, L1, L11, L12;
+
+       /* Clock FSM + clock LFSR + clock FSM + clock LFSR are merged here
+        * to avoid redundant work across the separate functions
+        * and to reduce immediate instruction dependencies
+ */
+ L0 = pCtx->LFSR_S[0];
+ V0 = pCtx->LFSR_S[2];
+ L1 = pCtx->LFSR_S[1];
+ V1 = pCtx->LFSR_S[3];
+ R1 = pCtx->FSM_R1;
+ L11 = pCtx->LFSR_S[11];
+ L12 = pCtx->LFSR_S[12];
+ V0 ^= snow3g_table_A_mul[L0 >> 24];
+ V1 ^= snow3g_table_A_mul[L1 >> 24];
+ V0 ^= snow3g_table_A_div[L11 & 0xff];
+ V1 ^= snow3g_table_A_div[L12 & 0xff];
+ V0 ^= L0 << 8;
+ V1 ^= L1 << 8;
+ V0 ^= L11 >> 8;
+ V1 ^= L12 >> 8;
+ F0 = pCtx->LFSR_S[15] + R1;
+ F0 ^= L0;
+ F0 ^= pCtx->FSM_R2;
+ R0 = pCtx->FSM_R3 ^ pCtx->LFSR_S[5];
+ R0 += pCtx->FSM_R2;
+ S1_S2_S3_1(pCtx->FSM_R3, pCtx->FSM_R2, R1, FSM4, R0);
+ R1 = pCtx->FSM_R3 ^ pCtx->LFSR_S[6];
+ F1 = V0 + R0;
+ F1 ^= L1;
+ F1 ^= pCtx->FSM_R2;
+ R1 += pCtx->FSM_R2;
+ pCtx->FSM_R3 = Snow3g_S2(pCtx->FSM_R2);
+ pCtx->FSM_R2 = FSM4;
+ pCtx->FSM_R1 = R1;
+
+ /* Shift LFSR twice */
+ ShiftTwiceLFSR_1(pCtx);
+
+ /* keystream mode LFSR update */
+ pCtx->LFSR_S[14] = V0;
+ pCtx->LFSR_S[15] = V1;
+
+ F = F0;
+ F <<= 32;
+ F |= (uint64_t)F1;
+
+ *pKeyStream = F;
+}
+
+#ifdef AVX2
+/**
+*******************************************************************************
+* @description
+* This function generates 8 bytes of keystream for 8 buffers at a time
+*
+* @param[in] pCtx Context where the scheduled keys are stored
+* @param[in/out] pKeyStream Pointer to generated keystream
+*
+*******************************************************************************/
+static inline void snow3g_keystream_8_8(snow3gKeyState8_t *pCtx,
+ __m256i *pKeyStreamLo,
+ __m256i *pKeyStreamHi)
+{
+ __m256i H, L;
+
+ /* first set of 4 bytes */
+ ClockFSM_8(pCtx, &L);
+ L = _mm256_xor_si256(L, pCtx->LFSR_X[pCtx->iLFSR_X]);
+ ClockLFSR_8(pCtx);
+
+ /* second set of 4 bytes */
+ ClockFSM_8(pCtx, &H);
+ H = _mm256_xor_si256(H, pCtx->LFSR_X[pCtx->iLFSR_X]);
+ ClockLFSR_8(pCtx);
+
+ /* merge the 2 sets */
+ *pKeyStreamLo = _mm256_unpacklo_epi32(H, L);
+ *pKeyStreamHi = _mm256_unpackhi_epi32(H, L);
+}
+
+/**
+*******************************************************************************
+* @description
+* This function generates 4 bytes of keystream for 8 buffers at a time
+*
+* @param[in] pCtx Context where the scheduled keys are stored
+* @param[in/out] pKeyStream Pointer to generated keystream
+*
+*******************************************************************************/
+static inline void snow3g_keystream_8_4(snow3gKeyState8_t *pCtx,
+ __m256i *pKeyStream)
+{
+ __m256i F;
+
+ ClockFSM_8(pCtx, &F);
+ *pKeyStream = _mm256_xor_si256(F, pCtx->LFSR_X[pCtx->iLFSR_X]);
+ ClockLFSR_8(pCtx);
+}
+
+/**
+*****************************************************************************
+* @description
+* This function generates 32 bytes of keystream for 8 buffers at a time
+*
+* @param[in] pCtx Context where the scheduled keys are stored
+* @param[in/out] pKeyStream Array of generated keystreams
+*
+******************************************************************************/
+static inline void snow3g_keystream_8_32(snow3gKeyState8_t *pCtx,
+ __m256i *pKeyStream)
+{
+
+ __m256i temp[8];
+
+ /** produces the next 4 bytes for each buffer */
+ int i;
+
+ /** Byte reversal on each KS */
+ __m256i mask1 = {0x0001020304050607ULL, 0x08090a0b0c0d0e0fULL,
+ 0x0001020304050607ULL, 0x08090a0b0c0d0e0fULL};
+ /** Reversal, shifted 4 bytes right */
+ __m256i mask2 = {0x0405060708090a0bULL, 0x0c0d0e0f00010203ULL,
+ 0x0405060708090a0bULL, 0x0c0d0e0f00010203ULL};
+ /** Reversal, shifted 8 bytes right */
+ __m256i mask3 = {0x08090a0b0c0d0e0fULL, 0x0001020304050607ULL,
+ 0x08090a0b0c0d0e0fULL, 0x0001020304050607ULL};
+ /** Reversal, shifted 12 bytes right */
+ __m256i mask4 = {0x0c0d0e0f00010203ULL, 0x0405060708090a0bULL,
+ 0x0c0d0e0f00010203ULL, 0x0405060708090a0bULL};
+
+ snow3g_keystream_8_4(pCtx, &temp[0]);
+ snow3g_keystream_8_4(pCtx, &temp[1]);
+ snow3g_keystream_8_4(pCtx, &temp[2]);
+ snow3g_keystream_8_4(pCtx, &temp[3]);
+ snow3g_keystream_8_4(pCtx, &temp[4]);
+ snow3g_keystream_8_4(pCtx, &temp[5]);
+ snow3g_keystream_8_4(pCtx, &temp[6]);
+ snow3g_keystream_8_4(pCtx, &temp[7]);
+
+ temp[0] = _mm256_shuffle_epi8(temp[0], mask1);
+ temp[1] = _mm256_shuffle_epi8(temp[1], mask2);
+ temp[2] = _mm256_shuffle_epi8(temp[2], mask3);
+ temp[3] = _mm256_shuffle_epi8(temp[3], mask4);
+ temp[4] = _mm256_shuffle_epi8(temp[4], mask1);
+ temp[5] = _mm256_shuffle_epi8(temp[5], mask2);
+ temp[6] = _mm256_shuffle_epi8(temp[6], mask3);
+ temp[7] = _mm256_shuffle_epi8(temp[7], mask4);
+
+ __m256i blended[8];
+ /* blends KS together: 128bit slice consists
+ of 4 32-bit words for one packet */
+ blended[0] = _mm256_blend_epi32(temp[0], temp[1], 0xaa);
+ blended[1] = _mm256_blend_epi32(temp[0], temp[1], 0x55);
+ blended[2] = _mm256_blend_epi32(temp[2], temp[3], 0xaa);
+ blended[3] = _mm256_blend_epi32(temp[2], temp[3], 0x55);
+ blended[4] = _mm256_blend_epi32(temp[4], temp[5], 0xaa);
+ blended[5] = _mm256_blend_epi32(temp[4], temp[5], 0x55);
+ blended[6] = _mm256_blend_epi32(temp[6], temp[7], 0xaa);
+ blended[7] = _mm256_blend_epi32(temp[6], temp[7], 0x55);
+
+ temp[0] = _mm256_blend_epi32(blended[0], blended[2], 0xcc);
+ temp[1] = _mm256_blend_epi32(blended[1], blended[3], 0x99);
+ temp[2] = _mm256_blend_epi32(blended[0], blended[2], 0x33);
+ temp[3] = _mm256_blend_epi32(blended[1], blended[3], 0x66);
+ temp[4] = _mm256_blend_epi32(blended[4], blended[6], 0xcc);
+ temp[5] = _mm256_blend_epi32(blended[5], blended[7], 0x99);
+ temp[6] = _mm256_blend_epi32(blended[4], blended[6], 0x33);
+ temp[7] = _mm256_blend_epi32(blended[5], blended[7], 0x66);
+
+ /** sorts 32 bit words back into order */
+ blended[0] = temp[0];
+ blended[1] = _mm256_shuffle_epi32(temp[1], 0x39);
+ blended[2] = _mm256_shuffle_epi32(temp[2], 0x4e);
+ blended[3] = _mm256_shuffle_epi32(temp[3], 0x93);
+ blended[4] = temp[4];
+ blended[5] = _mm256_shuffle_epi32(temp[5], 0x39);
+ blended[6] = _mm256_shuffle_epi32(temp[6], 0x4e);
+ blended[7] = _mm256_shuffle_epi32(temp[7], 0x93);
+
+ for (i = 0; i < 4; i++) {
+ pKeyStream[i] = _mm256_permute2x128_si256(blended[i],
+ blended[i + 4], 0x20);
+ pKeyStream[i + 4] = _mm256_permute2x128_si256(
+ blended[i], blended[i + 4], 0x31);
+ }
+}
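+
+/*
+ * Added note with a hedged scalar sketch (not part of the original code):
+ * the shuffle/blend/permute sequence above transposes the per-round keystream
+ * words so that pKeyStream[b] holds the 32 keystream bytes of buffer b in
+ * stream (big-endian) order.  Assuming w[r][b] is the word produced for
+ * buffer b by the r-th snow3g_keystream_8_4() call, the intended result is
+ * equivalent to:
+ *
+ *   uint32_t w[8][8], out[8][8];
+ *   for (int b = 0; b < 8; b++)
+ *           for (int r = 0; r < 8; r++)
+ *                   out[b][r] = BSWAP32(w[r][b]);
+ */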
+
+#endif /* AVX2 */
+
+/**
+*******************************************************************************
+* @description
+* This function generates 4 bytes of keystream for 4 buffers at a time
+*
+* @param[in] pCtx Context where the scheduled keys are stored
+* @param[in/out] pKeyStream Pointer to generated keystream
+*
+*******************************************************************************/
+static inline void snow3g_keystream_4_4(snow3gKeyState4_t *pCtx,
+ __m128i *pKeyStream)
+{
+ __m128i F;
+
+ ClockFSM_4(pCtx, &F);
+ *pKeyStream = _mm_xor_si128(F, pCtx->LFSR_X[pCtx->iLFSR_X]);
+ ClockLFSR_4(pCtx);
+}
+
+/**
+*******************************************************************************
+* @description
+* This function generates 8 bytes of keystream for 4 buffers at a time
+*
+* @param[in] pCtx Context where the scheduled keys are stored
+* @param[in/out] pKeyStreamLo Pointer to lower end of generated keystream
+* @param[in/out] pKeyStreamHi Pointer to higher end of generated keystream
+*
+*******************************************************************************/
+static inline void snow3g_keystream_4_8(snow3gKeyState4_t *pCtx,
+ __m128i *pKeyStreamLo,
+ __m128i *pKeyStreamHi)
+{
+ __m128i H, L;
+
+ /* first set of 4 bytes */
+ ClockFSM_4(pCtx, &L);
+ L = _mm_xor_si128(L, pCtx->LFSR_X[pCtx->iLFSR_X]);
+ ClockLFSR_4(pCtx);
+
+ /* second set of 4 bytes */
+ ClockFSM_4(pCtx, &H);
+ H = _mm_xor_si128(H, pCtx->LFSR_X[pCtx->iLFSR_X]);
+ ClockLFSR_4(pCtx);
+
+ /* merge the 2 sets */
+ *pKeyStreamLo = _mm_unpacklo_epi32(H, L);
+ *pKeyStreamHi = _mm_unpackhi_epi32(H, L);
+}
+
+/**
+*******************************************************************************
+* @description
+* This function initializes the key schedule for 4 buffers for snow3g f8/f9.
+*
+* @param [in] pCtx Context where the scheduled keys are stored
+* @param [in] pKeySched Key schedule
+* @param [in] pIV1 IV for buffer 1
+* @param [in] pIV2 IV for buffer 2
+* @param [in] pIV3 IV for buffer 3
+* @param [in] pIV4 IV for buffer 4
+*
+*******************************************************************************/
+static inline void
+snow3gStateInitialize_4(snow3gKeyState4_t *pCtx,
+ const snow3g_key_schedule_t *pKeySched,
+ const void *pIV1, const void *pIV2,
+ const void *pIV3, const void *pIV4)
+{
+ uint32_t K, L;
+ int i;
+ __m128i R, S, T, U;
+ __m128i V0, V1, T0, T1;
+
+ /* Initialize the LFSR table from constants, Keys, and IV */
+
+ /* Load complete 128b IV into register (SSE2)*/
+ uint64_t sm[2] = {0x0405060700010203ULL, 0x0c0d0e0f08090a0bULL};
+ __m128i *swapMask = (__m128i *) sm;
+
+ R = _mm_loadu_si128((const __m128i *)pIV1);
+ S = _mm_loadu_si128((const __m128i *)pIV2);
+ T = _mm_loadu_si128((const __m128i *)pIV3);
+ U = _mm_loadu_si128((const __m128i *)pIV4);
+
+ /* initialize the array block (SSE4) */
+ for (i = 0; i < 4; i++) {
+ K = pKeySched->k[i];
+ L = ~K;
+ V0 = _mm_set1_epi32(K);
+ V1 = _mm_set1_epi32(L);
+ pCtx->LFSR_X[i + 4] = V0;
+ pCtx->LFSR_X[i + 12] = V0;
+ pCtx->LFSR_X[i + 0] = V1;
+ pCtx->LFSR_X[i + 8] = V1;
+ }
+ /* Update the schedule structure with IVs */
+ /* Store the 4 IVs in LFSR by a column/row matrix swap
+ * after endianness correction */
+
+ /* endianness swap (SSSE3) */
+ R = _mm_shuffle_epi8(R, *swapMask);
+ S = _mm_shuffle_epi8(S, *swapMask);
+ T = _mm_shuffle_epi8(T, *swapMask);
+ U = _mm_shuffle_epi8(U, *swapMask);
+
+ /* row/column dword inversion (SSE2) */
+ T0 = _mm_unpacklo_epi32(R, S);
+ R = _mm_unpackhi_epi32(R, S);
+ T1 = _mm_unpacklo_epi32(T, U);
+ T = _mm_unpackhi_epi32(T, U);
+
+ /* row/column qword inversion (SSE2) */
+ U = _mm_unpackhi_epi64(R, T);
+ T = _mm_unpacklo_epi64(R, T);
+ S = _mm_unpackhi_epi64(T0, T1);
+ R = _mm_unpacklo_epi64(T0, T1);
+
+ /*IV ^ LFSR (SSE2) */
+ pCtx->LFSR_X[15] = _mm_xor_si128(pCtx->LFSR_X[15], U);
+ pCtx->LFSR_X[12] = _mm_xor_si128(pCtx->LFSR_X[12], T);
+ pCtx->LFSR_X[10] = _mm_xor_si128(pCtx->LFSR_X[10], S);
+ pCtx->LFSR_X[9] = _mm_xor_si128(pCtx->LFSR_X[9], R);
+ pCtx->iLFSR_X = 0;
+ /* FSM initialization (SSE2) */
+ S = _mm_setzero_si128();
+ for (i = 0; i < 3; i++)
+ pCtx->FSM_X[i] = S;
+
+ /* Initialisation rounds */
+ for (i = 0; i < 32; i++) {
+ ClockFSM_4(pCtx, &S);
+ ClockLFSR_4(pCtx);
+ pCtx->LFSR_X[(pCtx->iLFSR_X + 15) % 16] = _mm_xor_si128(
+ pCtx->LFSR_X[(pCtx->iLFSR_X + 15) % 16], S);
+ }
+}
+
+#ifdef AVX2
+/**
+*******************************************************************************
+* @description
+* This function initializes the key schedule for 8 buffers with
+* individual keys, for snow3g f8/f9.
+*
+* @param [in] pCtx Context where scheduled keys are stored
+* @param [in] KeySched Array of key schedules, one per buffer
+* @param [in] pIV Array of IV pointers, one per buffer
+*
+*******************************************************************************/
+static inline void
+snow3gStateInitialize_8_multiKey(snow3gKeyState8_t *pCtx,
+ const snow3g_key_schedule_t * const KeySched[],
+ const void * const pIV[])
+{
+ DECLARE_ALIGNED(uint32_t k[8], 32);
+ DECLARE_ALIGNED(uint32_t l[8], 32);
+ __m256i *K = (__m256i *)k;
+ __m256i *L = (__m256i *)l;
+
+ int i, j;
+ __m256i mR, mS, mT, mU, T0, T1;
+
+ /* Initialize the LFSR table from constants, Keys, and IV */
+
+ /* Load complete 256b IV into register (AVX) */
+ __m256i swapMask = {0x0405060700010203ULL, 0x0c0d0e0f08090a0bULL,
+ 0x0405060700010203ULL, 0x0c0d0e0f08090a0bULL};
+ mR = _mm256_loadu2_m128i((const __m128i *)pIV[4],
+ (const __m128i *)pIV[0]);
+ mS = _mm256_loadu2_m128i((const __m128i *)pIV[5],
+ (const __m128i *)pIV[1]);
+ mT = _mm256_loadu2_m128i((const __m128i *)pIV[6],
+ (const __m128i *)pIV[2]);
+ mU = _mm256_loadu2_m128i((const __m128i *)pIV[7],
+ (const __m128i *)pIV[3]);
+
+ /* initialize the array block (SSE4) */
+ for (i = 0; i < 4; i++) {
+ for (j = 0; j < 8; j++) {
+ k[j] = KeySched[j]->k[i];
+ l[j] = ~k[j];
+ }
+
+ pCtx->LFSR_X[i + 4] = *K;
+ pCtx->LFSR_X[i + 12] = *K;
+ pCtx->LFSR_X[i + 0] = *L;
+ pCtx->LFSR_X[i + 8] = *L;
+ }
+
+ /* Update the schedule structure with IVs */
+ /* Store the 8 IVs in LFSR by a column/row matrix swap
+ * after endianness correction */
+
+ /* endianness swap (SSSE3) */
+ mR = _mm256_shuffle_epi8(mR, swapMask);
+ mS = _mm256_shuffle_epi8(mS, swapMask);
+ mT = _mm256_shuffle_epi8(mT, swapMask);
+ mU = _mm256_shuffle_epi8(mU, swapMask);
+
+ /* row/column dword inversion (SSE2) */
+ T0 = _mm256_unpacklo_epi32(mR, mS);
+ mR = _mm256_unpackhi_epi32(mR, mS);
+ T1 = _mm256_unpacklo_epi32(mT, mU);
+ mT = _mm256_unpackhi_epi32(mT, mU);
+
+ /* row/column qword inversion (SSE2) */
+ mU = _mm256_unpackhi_epi64(mR, mT);
+ mT = _mm256_unpacklo_epi64(mR, mT);
+ mS = _mm256_unpackhi_epi64(T0, T1);
+ mR = _mm256_unpacklo_epi64(T0, T1);
+
+ /*IV ^ LFSR (SSE2) */
+ pCtx->LFSR_X[15] = _mm256_xor_si256(pCtx->LFSR_X[15], mU);
+ pCtx->LFSR_X[12] = _mm256_xor_si256(pCtx->LFSR_X[12], mT);
+ pCtx->LFSR_X[10] = _mm256_xor_si256(pCtx->LFSR_X[10], mS);
+ pCtx->LFSR_X[9] = _mm256_xor_si256(pCtx->LFSR_X[9], mR);
+ pCtx->iLFSR_X = 0;
+ /* FSM initialization (SSE2) */
+ mS = _mm256_setzero_si256();
+ for (i = 0; i < 3; i++)
+ pCtx->FSM_X[i] = mS;
+
+ /* Initialisation rounds */
+ for (i = 0; i < 32; i++) {
+ ClockFSM_8(pCtx, &mS);
+ ClockLFSR_8(pCtx);
+ pCtx->LFSR_X[(pCtx->iLFSR_X + 15) % 16] = _mm256_xor_si256(
+ pCtx->LFSR_X[(pCtx->iLFSR_X + 15) % 16], mS);
+ }
+}
+
+/**
+*******************************************************************************
+* @description
+* This function initializes the key schedule for 8 buffers for snow3g f8/f9.
+*
+* @param [in] pCtx Context where the scheduled keys are stored
+* @param [in] pKeySched Key schedule
+* @param [in] pIV1 IV for buffer 1
+* @param [in] pIV2 IV for buffer 2
+* @param [in] pIV3 IV for buffer 3
+* @param [in] pIV4 IV for buffer 4
+* @param [in] pIV5 IV for buffer 5
+* @param [in] pIV6 IV for buffer 6
+* @param [in] pIV7 IV for buffer 7
+* @param [in] pIV8 IV for buffer 8
+*
+*******************************************************************************/
+static inline void
+snow3gStateInitialize_8(snow3gKeyState8_t *pCtx,
+ const snow3g_key_schedule_t *pKeySched,
+ const void *pIV1, const void *pIV2,
+ const void *pIV3, const void *pIV4,
+ const void *pIV5, const void *pIV6,
+ const void *pIV7, const void *pIV8)
+{
+ uint32_t K, L;
+ int i;
+ __m256i mR, mS, mT, mU, V0, V1, T0, T1;
+
+ /* Initialize the LFSR table from constants, Keys, and IV */
+
+ /* Load complete 256b IV into register (AVX) */
+ __m256i swapMask = {0x0405060700010203ULL, 0x0c0d0e0f08090a0bULL,
+ 0x0405060700010203ULL, 0x0c0d0e0f08090a0bULL};
+ mR = _mm256_loadu2_m128i((const __m128i *)pIV5, (const __m128i *)pIV1);
+ mS = _mm256_loadu2_m128i((const __m128i *)pIV6, (const __m128i *)pIV2);
+ mT = _mm256_loadu2_m128i((const __m128i *)pIV7, (const __m128i *)pIV3);
+ mU = _mm256_loadu2_m128i((const __m128i *)pIV8, (const __m128i *)pIV4);
+
+ /* initialize the array block (SSE4) */
+ for (i = 0; i < 4; i++) {
+ K = pKeySched->k[i];
+ L = ~K;
+ V0 = _mm256_set1_epi32(K);
+ V1 = _mm256_set1_epi32(L);
+ pCtx->LFSR_X[i + 4] = V0;
+ pCtx->LFSR_X[i + 12] = V0;
+ pCtx->LFSR_X[i + 0] = V1;
+ pCtx->LFSR_X[i + 8] = V1;
+ }
+
+ /* Update the schedule structure with IVs */
+ /* Store the 8 IVs in LFSR by a column/row matrix swap
+ * after endianness correction */
+
+ /* endianness swap (SSSE3) */
+ mR = _mm256_shuffle_epi8(mR, swapMask);
+ mS = _mm256_shuffle_epi8(mS, swapMask);
+ mT = _mm256_shuffle_epi8(mT, swapMask);
+ mU = _mm256_shuffle_epi8(mU, swapMask);
+
+ /* row/column dword inversion (SSE2) */
+ T0 = _mm256_unpacklo_epi32(mR, mS);
+ mR = _mm256_unpackhi_epi32(mR, mS);
+ T1 = _mm256_unpacklo_epi32(mT, mU);
+ mT = _mm256_unpackhi_epi32(mT, mU);
+
+ /* row/column qword inversion (SSE2) */
+ mU = _mm256_unpackhi_epi64(mR, mT);
+ mT = _mm256_unpacklo_epi64(mR, mT);
+ mS = _mm256_unpackhi_epi64(T0, T1);
+ mR = _mm256_unpacklo_epi64(T0, T1);
+
+ /*IV ^ LFSR (SSE2) */
+ pCtx->LFSR_X[15] = _mm256_xor_si256(pCtx->LFSR_X[15], mU);
+ pCtx->LFSR_X[12] = _mm256_xor_si256(pCtx->LFSR_X[12], mT);
+ pCtx->LFSR_X[10] = _mm256_xor_si256(pCtx->LFSR_X[10], mS);
+ pCtx->LFSR_X[9] = _mm256_xor_si256(pCtx->LFSR_X[9], mR);
+ pCtx->iLFSR_X = 0;
+ /* FSM initialization (SSE2) */
+ mS = _mm256_setzero_si256();
+ for (i = 0; i < 3; i++)
+ pCtx->FSM_X[i] = mS;
+
+ /* Initialisation rounds */
+ for (i = 0; i < 32; i++) {
+ ClockFSM_8(pCtx, &mS);
+ ClockLFSR_8(pCtx);
+ pCtx->LFSR_X[(pCtx->iLFSR_X + 15) % 16] = _mm256_xor_si256(
+ pCtx->LFSR_X[(pCtx->iLFSR_X + 15) % 16], mS);
+ }
+}
+#endif /* AVX2 */
+
+static inline void
+preserve_bits(uint64_t *KS,
+ const uint8_t *pcBufferOut, const uint8_t *pcBufferIn,
+ SafeBuf *safeOutBuf, SafeBuf *safeInBuf,
+ const uint8_t bit_len, const uint8_t byte_len)
+{
+ const uint64_t mask = UINT64_MAX << (SNOW3G_BLOCK_SIZE * 8 - bit_len);
+
+ /* Clear the last bits of the keystream and the input
+ * (input only in out-of-place case) */
+ *KS &= mask;
+ if (pcBufferIn != pcBufferOut) {
+ const uint64_t swapMask = BSWAP64(mask);
+
+ safeInBuf->b64 &= swapMask;
+
+ /*
+ * Merge the last bits from the output, to be preserved,
+ * in the keystream, to be XOR'd with the input
+ * (which last bits are 0, maintaining the output bits)
+ */
+ memcpy_keystrm(safeOutBuf->b8, pcBufferOut, byte_len);
+ *KS |= BSWAP64(safeOutBuf->b64 & ~swapMask);
+ }
+}
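+
+/*
+ * Added worked example (not in the original sources): with bit_len = 12 and
+ * byte_len = 2, mask is 0xfff0000000000000, so only the 12 most significant
+ * keystream bits are kept.  In the out-of-place case the trailing bits of the
+ * existing output bytes are merged into *KS, so the later XOR against the
+ * (zeroed) trailing input bits writes those output bits back unchanged.
+ */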
+
+/**
+*******************************************************************************
+* @description
+* This function is the core snow3g bit algorithm
+* for the 3GPP confidentiality algorithm
+*
+* @param[in] pCtx Context where the scheduled keys are stored
+* @param[in] pIn Input buffer
+* @param[out] pOut Output buffer
+* @param[in] lengthInBits length in bits of the data to be encrypted
+* @param[in] offsetInBits offset in input buffer, where data are valid
+*
+*******************************************************************************/
+static inline void f8_snow3g_bit(snow3gKeyState1_t *pCtx,
+ const void *pIn,
+ void *pOut,
+ const uint32_t lengthInBits,
+ const uint32_t offsetInBits)
+{
+ const uint8_t *pBufferIn = pIn;
+ uint8_t *pBufferOut = pOut;
+ uint32_t cipherLengthInBits = lengthInBits;
+ uint64_t shiftrem = 0;
+ uint64_t KS8, KS8bit; /* 8 bytes of keystream */
+ const uint8_t *pcBufferIn = pBufferIn + (offsetInBits / 8);
+ uint8_t *pcBufferOut = pBufferOut + (offsetInBits / 8);
+ /* Offset into the first byte (0 - 7 bits) */
+ uint32_t remainOffset = offsetInBits % 8;
+ uint32_t byteLength = (cipherLengthInBits + 7) / 8;
+ SafeBuf safeInBuf = {0};
+ SafeBuf safeOutBuf = {0};
+
+ /* Now run the block cipher */
+
+ /* Start with potential partial block (due to offset and length) */
+ snow3g_keystream_1_8(pCtx, &KS8);
+ KS8bit = KS8 >> remainOffset;
+ /* Only one block to encrypt */
+ if (cipherLengthInBits < (64 - remainOffset)) {
+ byteLength = (cipherLengthInBits + 7) / 8;
+ memcpy_keystrm(safeInBuf.b8, pcBufferIn, byteLength);
+ /*
+ * If operation is Out-of-place and there is offset
+ * to be applied, "remainOffset" bits from the output buffer
+ * need to be preserved (only applicable to first byte,
+ * since remainOffset is up to 7 bits)
+ */
+ if ((pIn != pOut) && remainOffset) {
+ const uint8_t mask8 = (uint8_t)
+ (1 << (8 - remainOffset)) - 1;
+
+ safeInBuf.b8[0] = (safeInBuf.b8[0] & mask8) |
+ (pcBufferOut[0] & ~mask8);
+ }
+ /* If last byte is a partial byte, the last bits of the output
+ * need to be preserved */
+ const uint8_t bitlen_with_off = remainOffset +
+ cipherLengthInBits;
+
+ if ((bitlen_with_off & 0x7) != 0)
+ preserve_bits(&KS8bit, pcBufferOut, pcBufferIn,
+ &safeOutBuf, &safeInBuf,
+ bitlen_with_off, byteLength);
+
+ xor_keystrm_rev(safeOutBuf.b8, safeInBuf.b8, KS8bit);
+ memcpy_keystrm(pcBufferOut, safeOutBuf.b8, byteLength);
+ return;
+ }
+ /*
+ * If operation is Out-of-place and there is offset
+ * to be applied, "remainOffset" bits from the output buffer
+ * need to be preserved (only applicable to first byte,
+ * since remainOffset is up to 7 bits)
+ */
+ if ((pIn != pOut) && remainOffset) {
+ const uint8_t mask8 = (uint8_t)(1 << (8 - remainOffset)) - 1;
+
+ memcpy_keystrm(safeInBuf.b8, pcBufferIn, 8);
+ safeInBuf.b8[0] = (safeInBuf.b8[0] & mask8) |
+ (pcBufferOut[0] & ~mask8);
+ xor_keystrm_rev(pcBufferOut, safeInBuf.b8, KS8bit);
+ pcBufferIn += SNOW3G_BLOCK_SIZE;
+ } else {
+ /* At least 64 bits to produce (including offset) */
+ pcBufferIn = xor_keystrm_rev(pcBufferOut, pcBufferIn, KS8bit);
+ }
+
+ if (remainOffset != 0)
+ shiftrem = KS8 << (64 - remainOffset);
+ cipherLengthInBits -= SNOW3G_BLOCK_SIZE * 8 - remainOffset;
+ pcBufferOut += SNOW3G_BLOCK_SIZE;
+
+ while (cipherLengthInBits) {
+ /* produce the next block of keystream */
+ snow3g_keystream_1_8(pCtx, &KS8);
+ KS8bit = (KS8 >> remainOffset) | shiftrem;
+ if (remainOffset != 0)
+ shiftrem = KS8 << (64 - remainOffset);
+ if (cipherLengthInBits >= SNOW3G_BLOCK_SIZE * 8) {
+ pcBufferIn = xor_keystrm_rev(pcBufferOut,
+ pcBufferIn, KS8bit);
+ cipherLengthInBits -= SNOW3G_BLOCK_SIZE * 8;
+ pcBufferOut += SNOW3G_BLOCK_SIZE;
+ /* loop variant */
+ } else {
+ /* end of the loop, handle the last bytes */
+ byteLength = (cipherLengthInBits + 7) / 8;
+ memcpy_keystrm(safeInBuf.b8, pcBufferIn,
+ byteLength);
+
+ /* If last byte is a partial byte, the last bits
+ * of the output need to be preserved */
+ if ((cipherLengthInBits & 0x7) != 0)
+ preserve_bits(&KS8bit, pcBufferOut, pcBufferIn,
+ &safeOutBuf, &safeInBuf,
+ cipherLengthInBits, byteLength);
+
+ xor_keystrm_rev(safeOutBuf.b8, safeInBuf.b8, KS8bit);
+ memcpy_keystrm(pcBufferOut, safeOutBuf.b8, byteLength);
+ cipherLengthInBits = 0;
+ }
+ }
+#ifdef SAFE_DATA
+ CLEAR_VAR(&KS8, sizeof(KS8));
+ CLEAR_VAR(&KS8bit, sizeof(KS8bit));
+ CLEAR_MEM(&safeInBuf, sizeof(safeInBuf));
+ CLEAR_MEM(&safeOutBuf, sizeof(safeOutBuf));
+#endif
+}
+
+/**
+*******************************************************************************
+* @description
+* This function is the core snow3g algorithm for
+* the 3GPP confidentiality and integrity algorithm.
+*
+* @param[in] pCtx Context where the scheduled keys are stored
+* @param[in] pIn Input buffer
+* @param[out] pOut Output buffer
+* @param[in] lengthInBytes length in bytes of the data to be encrypted
+*
+*******************************************************************************/
+static inline void f8_snow3g(snow3gKeyState1_t *pCtx,
+ const void *pIn,
+ void *pOut,
+ const uint32_t lengthInBytes)
+{
+ uint32_t qwords = lengthInBytes / SNOW3G_8_BYTES; /* number of qwords */
+ uint32_t words = lengthInBytes & 4; /* remaining word if not 0 */
+ uint32_t bytes = lengthInBytes & 3; /* remaining bytes */
+ uint32_t KS4; /* 4 bytes of keystream */
+ uint64_t KS8; /* 8 bytes of keystream */
+ const uint8_t *pBufferIn = pIn;
+ uint8_t *pBufferOut = pOut;
+
+ /* process 64 bits at a time */
+ while (qwords--) {
+ /* generate keystream 8 bytes at a time */
+ snow3g_keystream_1_8(pCtx, &KS8);
+
+ /* xor keystream 8 bytes at a time */
+ pBufferIn = xor_keystrm_rev(pBufferOut, pBufferIn, KS8);
+ pBufferOut += SNOW3G_8_BYTES;
+ }
+
+ /* check for remaining 0 to 7 bytes */
+ if (0 != words) {
+ if (bytes) {
+ /* 5 to 7 last bytes, process 8 bytes */
+ uint8_t buftemp[8];
+ uint8_t safeBuff[8];
+
+ memset(safeBuff, 0, SNOW3G_8_BYTES);
+ snow3g_keystream_1_8(pCtx, &KS8);
+ memcpy_keystrm(safeBuff, pBufferIn, 4 + bytes);
+ xor_keystrm_rev(buftemp, safeBuff, KS8);
+ memcpy_keystrm(pBufferOut, buftemp, 4 + bytes);
+#ifdef SAFE_DATA
+ CLEAR_MEM(&safeBuff, sizeof(safeBuff));
+ CLEAR_MEM(&buftemp, sizeof(buftemp));
+#endif
+ } else {
+ /* exactly 4 last bytes */
+ snow3g_keystream_1_4(pCtx, &KS4);
+ xor_keystream_reverse_32(pBufferOut, pBufferIn, KS4);
+ }
+ } else if (0 != bytes) {
+ /* 1 to 3 last bytes */
+ uint8_t buftemp[4];
+ uint8_t safeBuff[4];
+
+ memset(safeBuff, 0, SNOW3G_4_BYTES);
+ snow3g_keystream_1_4(pCtx, &KS4);
+ memcpy_keystream_32(safeBuff, pBufferIn, bytes);
+ xor_keystream_reverse_32(buftemp, safeBuff, KS4);
+ memcpy_keystream_32(pBufferOut, buftemp, bytes);
+#ifdef SAFE_DATA
+ CLEAR_MEM(&safeBuff, sizeof(safeBuff));
+ CLEAR_MEM(&buftemp, sizeof(buftemp));
+#endif
+ }
+
+#ifdef SAFE_DATA
+ CLEAR_VAR(&KS4, sizeof(KS4));
+ CLEAR_VAR(&KS8, sizeof(KS8));
+#endif
+}
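+
+/*
+ * Added worked example (illustrative): for lengthInBytes = 13, qwords = 1,
+ * words = 4 and bytes = 1, so one full 8-byte block is handled in the main
+ * loop and the remaining 5 bytes take the "5 to 7 last bytes" branch, which
+ * generates a full 8-byte keystream word but copies only 4 + bytes = 5
+ * output bytes.
+ */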
+
+#ifdef AVX2
+/**
+*******************************************************************************
+* @description
+* This function converts the state from an 8 buffer state structure to a 1
+* buffer state structure.
+*
+* @param[in] pSrcState Pointer to the source state
+* @param[in] pDstState Pointer to the destination state
+* @param[in] NumBuffers Index of the buffer to extract (0 to 7)
+*
+*******************************************************************************/
+static inline void snow3gStateConvert_8(snow3gKeyState8_t *pSrcState,
+ snow3gKeyState1_t *pDstState,
+ uint32_t NumBuffers)
+{
+ uint32_t T = 0, iLFSR_X = pSrcState->iLFSR_X;
+ __m256i *LFSR_X = pSrcState->LFSR_X;
+ int i;
+
+ for (i = 0; i < 16; i++) {
+ switch (NumBuffers) {
+ case 0:
+ T = _mm256_extract_epi32(LFSR_X[(i + iLFSR_X) % 16], 0);
+ break;
+ case 1:
+ T = _mm256_extract_epi32(LFSR_X[(i + iLFSR_X) % 16], 1);
+ break;
+ case 2:
+ T = _mm256_extract_epi32(LFSR_X[(i + iLFSR_X) % 16], 2);
+ break;
+ case 3:
+ T = _mm256_extract_epi32(LFSR_X[(i + iLFSR_X) % 16], 3);
+ break;
+ case 4:
+ T = _mm256_extract_epi32(LFSR_X[(i + iLFSR_X) % 16], 4);
+ break;
+ case 5:
+ T = _mm256_extract_epi32(LFSR_X[(i + iLFSR_X) % 16], 5);
+ break;
+ case 6:
+ T = _mm256_extract_epi32(LFSR_X[(i + iLFSR_X) % 16], 6);
+ break;
+ case 7:
+ T = _mm256_extract_epi32(LFSR_X[(i + iLFSR_X) % 16], 7);
+ break;
+ }
+ pDstState->LFSR_S[i] = T;
+ }
+ i = 0;
+ switch (NumBuffers) {
+ case 0:
+ T = _mm256_extract_epi32(pSrcState->FSM_X[i], 0);
+ break;
+ case 1:
+ T = _mm256_extract_epi32(pSrcState->FSM_X[i], 1);
+ break;
+ case 2:
+ T = _mm256_extract_epi32(pSrcState->FSM_X[i], 2);
+ break;
+ case 3:
+ T = _mm256_extract_epi32(pSrcState->FSM_X[i], 3);
+ break;
+ case 4:
+ T = _mm256_extract_epi32(pSrcState->FSM_X[i], 4);
+ break;
+ case 5:
+ T = _mm256_extract_epi32(pSrcState->FSM_X[i], 5);
+ break;
+ case 6:
+ T = _mm256_extract_epi32(pSrcState->FSM_X[i], 6);
+ break;
+ case 7:
+ T = _mm256_extract_epi32(pSrcState->FSM_X[i], 7);
+ break;
+ }
+ pDstState->FSM_R1 = T;
+
+ i = 1;
+ switch (NumBuffers) {
+ case 0:
+ T = _mm256_extract_epi32(pSrcState->FSM_X[i], 0);
+ break;
+ case 1:
+ T = _mm256_extract_epi32(pSrcState->FSM_X[i], 1);
+ break;
+ case 2:
+ T = _mm256_extract_epi32(pSrcState->FSM_X[i], 2);
+ break;
+ case 3:
+ T = _mm256_extract_epi32(pSrcState->FSM_X[i], 3);
+ break;
+ case 4:
+ T = _mm256_extract_epi32(pSrcState->FSM_X[i], 4);
+ break;
+ case 5:
+ T = _mm256_extract_epi32(pSrcState->FSM_X[i], 5);
+ break;
+ case 6:
+ T = _mm256_extract_epi32(pSrcState->FSM_X[i], 6);
+ break;
+ case 7:
+ T = _mm256_extract_epi32(pSrcState->FSM_X[i], 7);
+ break;
+ }
+ pDstState->FSM_R2 = T;
+
+ i = 2;
+ switch (NumBuffers) {
+ case 0:
+ T = _mm256_extract_epi32(pSrcState->FSM_X[i], 0);
+ break;
+ case 1:
+ T = _mm256_extract_epi32(pSrcState->FSM_X[i], 1);
+ break;
+ case 2:
+ T = _mm256_extract_epi32(pSrcState->FSM_X[i], 2);
+ break;
+ case 3:
+ T = _mm256_extract_epi32(pSrcState->FSM_X[i], 3);
+ break;
+ case 4:
+ T = _mm256_extract_epi32(pSrcState->FSM_X[i], 4);
+ break;
+ case 5:
+ T = _mm256_extract_epi32(pSrcState->FSM_X[i], 5);
+ break;
+ case 6:
+ T = _mm256_extract_epi32(pSrcState->FSM_X[i], 6);
+ break;
+ case 7:
+ T = _mm256_extract_epi32(pSrcState->FSM_X[i], 7);
+ break;
+ }
+ pDstState->FSM_R3 = T;
+}
+#endif /* AVX2 */
+
+/**
+*******************************************************************************
+* @description
+* This function converts the state from a 4 buffer state structure to a 1
+* buffer state structure.
+*
+* @param[in] pSrcState Pointer to the source state
+* @param[in] pDstState Pointer to the destination state
+* @param[in] NumBuffers Index of the buffer to extract (0 to 3)
+*
+*******************************************************************************/
+static inline void snow3gStateConvert_4(snow3gKeyState4_t *pSrcState,
+ snow3gKeyState1_t *pDstState,
+ uint32_t NumBuffers)
+{
+ uint32_t i;
+ uint32_t T = 0, iLFSR_X = pSrcState->iLFSR_X;
+ __m128i *LFSR_X = pSrcState->LFSR_X;
+
+ for (i = 0; i < 16; i++) {
+ switch (NumBuffers) {
+ case 0:
+ T = _mm_extract_epi32(LFSR_X[(i + iLFSR_X) % 16], 0);
+ break;
+ case 1:
+ T = _mm_extract_epi32(LFSR_X[(i + iLFSR_X) % 16], 1);
+ break;
+ case 2:
+ T = _mm_extract_epi32(LFSR_X[(i + iLFSR_X) % 16], 2);
+ break;
+ case 3:
+ T = _mm_extract_epi32(LFSR_X[(i + iLFSR_X) % 16], 3);
+ break;
+ }
+ pDstState->LFSR_S[i] = T;
+ }
+
+ i = 0;
+ switch (NumBuffers) {
+ case 0:
+ T = _mm_extract_epi32(pSrcState->FSM_X[i], 0);
+ break;
+ case 1:
+ T = _mm_extract_epi32(pSrcState->FSM_X[i], 1);
+ break;
+ case 2:
+ T = _mm_extract_epi32(pSrcState->FSM_X[i], 2);
+ break;
+ case 3:
+ T = _mm_extract_epi32(pSrcState->FSM_X[i], 3);
+ break;
+ }
+ pDstState->FSM_R1 = T;
+
+ i = 1;
+ switch (NumBuffers) {
+ case 0:
+ T = _mm_extract_epi32(pSrcState->FSM_X[i], 0);
+ break;
+ case 1:
+ T = _mm_extract_epi32(pSrcState->FSM_X[i], 1);
+ break;
+ case 2:
+ T = _mm_extract_epi32(pSrcState->FSM_X[i], 2);
+ break;
+ case 3:
+ T = _mm_extract_epi32(pSrcState->FSM_X[i], 3);
+ break;
+ }
+ pDstState->FSM_R2 = T;
+
+ i = 2;
+ switch (NumBuffers) {
+ case 0:
+ T = _mm_extract_epi32(pSrcState->FSM_X[i], 0);
+ break;
+ case 1:
+ T = _mm_extract_epi32(pSrcState->FSM_X[i], 1);
+ break;
+ case 2:
+ T = _mm_extract_epi32(pSrcState->FSM_X[i], 2);
+ break;
+ case 3:
+ T = _mm_extract_epi32(pSrcState->FSM_X[i], 3);
+ break;
+ }
+ pDstState->FSM_R3 = T;
+}
+
+/*---------------------------------------------------------
+ * f8()
+ * Initializations and Context size definitions
+ *---------------------------------------------------------*/
+size_t SNOW3G_KEY_SCHED_SIZE(void) { return sizeof(snow3g_key_schedule_t); }
+
+int SNOW3G_INIT_KEY_SCHED(const void *pKey, snow3g_key_schedule_t *pCtx)
+{
+#ifdef SAFE_PARAM
+ if ((pKey == NULL) || (pCtx == NULL))
+ return -1;
+#endif
+
+ const uint32_t *pKey32 = pKey;
+
+ pCtx->k[3] = BSWAP32(pKey32[0]);
+ pCtx->k[2] = BSWAP32(pKey32[1]);
+ pCtx->k[1] = BSWAP32(pKey32[2]);
+ pCtx->k[0] = BSWAP32(pKey32[3]);
+
+ return 0;
+}
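+
+/*
+ * Usage sketch (added for illustration; variable names are hypothetical,
+ * this is not part of the original file):
+ *
+ *   snow3g_key_schedule_t ks;
+ *   const uint8_t key[16] = { ... };   // 128-bit confidentiality key
+ *
+ *   if (SNOW3G_INIT_KEY_SCHED(key, &ks) != 0)
+ *           return; // NULL parameters are rejected when SAFE_PARAM is set
+ */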
+
+/*---------------------------------------------------------
+ * @description
+ * Snow3G F8 1 buffer:
+ * Single buffer enc/dec with IV and precomputed key schedule
+ *---------------------------------------------------------*/
+void SNOW3G_F8_1_BUFFER(const snow3g_key_schedule_t *pHandle,
+ const void *pIV,
+ const void *pBufferIn,
+ void *pBufferOut,
+ const uint32_t lengthInBytes)
+{
+#ifdef SAFE_PARAM
+ if ((pHandle == NULL) || (pIV == NULL) ||
+ (pBufferIn == NULL) || (pBufferOut == NULL) ||
+ (lengthInBytes == 0) || (lengthInBytes > SNOW3G_MAX_BYTELEN))
+ return;
+#endif
+ snow3gKeyState1_t ctx;
+ uint32_t KS4; /* 4 bytes of keystream */
+
+ /* Initialize the schedule from the IV */
+ snow3gStateInitialize_1(&ctx, pHandle, pIV);
+
+ /* Clock FSM and LFSR once, ignore the keystream */
+ snow3g_keystream_1_4(&ctx, &KS4);
+
+ f8_snow3g(&ctx, pBufferIn, pBufferOut, lengthInBytes);
+
+#ifdef SAFE_DATA
+ CLEAR_VAR(&KS4, sizeof(KS4));
+ CLEAR_MEM(&ctx, sizeof(ctx));
+ CLEAR_SCRATCH_GPS();
+ CLEAR_SCRATCH_SIMD_REGS();
+#endif /* SAFE_DATA */
+}
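+
+/*
+ * Usage sketch (added; illustrative only, names are hypothetical): in-place
+ * encryption of a single buffer with a previously initialized key schedule.
+ *
+ *   uint8_t iv[16];     // per-packet IV derived from COUNT/BEARER/DIRECTION
+ *   uint8_t data[128];  // plaintext in, ciphertext out
+ *
+ *   SNOW3G_F8_1_BUFFER(&ks, iv, data, data, sizeof(data));
+ */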
+
+/*---------------------------------------------------------
+ * @description
+ * Snow3G F8 bit 1 buffer:
+ * Single buffer enc/dec with IV and precomputed key schedule
+ *---------------------------------------------------------*/
+void SNOW3G_F8_1_BUFFER_BIT(const snow3g_key_schedule_t *pHandle,
+ const void *pIV,
+ const void *pBufferIn,
+ void *pBufferOut,
+ const uint32_t lengthInBits,
+ const uint32_t offsetInBits)
+{
+#ifdef SAFE_PARAM
+ if ((pHandle == NULL) || (pIV == NULL) ||
+ (pBufferIn == NULL) || (pBufferOut == NULL) ||
+ (lengthInBits == 0))
+ return;
+#endif
+
+ snow3gKeyState1_t ctx;
+ uint32_t KS4; /* 4 bytes of keystream */
+
+ /* Initialize the schedule from the IV */
+ snow3gStateInitialize_1(&ctx, pHandle, pIV);
+
+ /* Clock FSM and LFSR once, ignore the keystream */
+ snow3g_keystream_1_4(&ctx, &KS4);
+
+ f8_snow3g_bit(&ctx, pBufferIn, pBufferOut, lengthInBits, offsetInBits);
+
+#ifdef SAFE_DATA
+ CLEAR_VAR(&KS4, sizeof(KS4));
+ CLEAR_MEM(&ctx, sizeof(ctx));
+ CLEAR_SCRATCH_GPS();
+ CLEAR_SCRATCH_SIMD_REGS();
+#endif /* SAFE_DATA */
+}
+
+/*---------------------------------------------------------
+ * @description
+ * Snow3G F8 2 buffer:
+ * Two buffers enc/dec with the same key schedule.
+ * The 2 IVs are independent and are passed as separate pointers.
+ * Each buffer and data length are separate.
+ *---------------------------------------------------------*/
+void SNOW3G_F8_2_BUFFER(const snow3g_key_schedule_t *pHandle,
+ const void *pIV1,
+ const void *pIV2,
+ const void *pBufIn1,
+ void *pBufOut1,
+ const uint32_t lenInBytes1,
+ const void *pBufIn2,
+ void *pBufOut2,
+ const uint32_t lenInBytes2)
+{
+#ifdef SAFE_PARAM
+ if ((pHandle == NULL) || (pIV1 == NULL) || (pIV2 == NULL) ||
+ (pBufIn1 == NULL) || (pBufOut1 == NULL) ||
+ (pBufIn2 == NULL) || (pBufOut2 == NULL) ||
+ (lenInBytes1 == 0) || (lenInBytes1 > SNOW3G_MAX_BYTELEN) ||
+ (lenInBytes2 == 0) || (lenInBytes2 > SNOW3G_MAX_BYTELEN))
+ return;
+#endif
+
+ snow3gKeyState1_t ctx1, ctx2;
+ uint32_t KS4; /* 4 bytes of keystream */
+
+ /* Initialize the schedule from the IV */
+ snow3gStateInitialize_1(&ctx1, pHandle, pIV1);
+
+ /* Clock FSM and LFSR once, ignore the keystream */
+ snow3g_keystream_1_4(&ctx1, &KS4);
+
+ /* data processing for packet 1 */
+ f8_snow3g(&ctx1, pBufIn1, pBufOut1, lenInBytes1);
+
+ /* Initialize the schedule from the IV */
+ snow3gStateInitialize_1(&ctx2, pHandle, pIV2);
+
+ /* Clock FSM and LFSR once, ignore the keystream */
+ snow3g_keystream_1_4(&ctx2, &KS4);
+
+ /* data processing for packet 2 */
+ f8_snow3g(&ctx2, pBufIn2, pBufOut2, lenInBytes2);
+
+#ifdef SAFE_DATA
+ CLEAR_VAR(&KS4, sizeof(KS4));
+ CLEAR_MEM(&ctx1, sizeof(ctx1));
+ CLEAR_MEM(&ctx2, sizeof(ctx2));
+ CLEAR_SCRATCH_GPS();
+ CLEAR_SCRATCH_SIMD_REGS();
+#endif /* SAFE_DATA */
+
+}
+
+/*---------------------------------------------------------
+ * @description
+ * Snow3G F8 4 buffer:
+ * Four packets enc/dec with the same key schedule.
+ * The 4 IVs are independent and are passed as separate pointers.
+ * Each buffer and data length are separate.
+ *---------------------------------------------------------*/
+void SNOW3G_F8_4_BUFFER(const snow3g_key_schedule_t *pHandle,
+ const void *pIV1,
+ const void *pIV2,
+ const void *pIV3,
+ const void *pIV4,
+ const void *pBufferIn1,
+ void *pBufferOut1,
+ const uint32_t lengthInBytes1,
+ const void *pBufferIn2,
+ void *pBufferOut2,
+ const uint32_t lengthInBytes2,
+ const void *pBufferIn3,
+ void *pBufferOut3,
+ const uint32_t lengthInBytes3,
+ const void *pBufferIn4,
+ void *pBufferOut4,
+ const uint32_t lengthInBytes4)
+{
+#ifdef SAFE_PARAM
+ if ((pHandle == NULL) ||
+ (pIV1 == NULL) || (pIV2 == NULL) ||
+ (pIV3 == NULL) || (pIV4 == NULL) ||
+ (pBufferIn1 == NULL) || (pBufferOut1 == NULL) ||
+ (pBufferIn2 == NULL) || (pBufferOut2 == NULL) ||
+ (pBufferIn3 == NULL) || (pBufferOut3 == NULL) ||
+ (pBufferIn4 == NULL) || (pBufferOut4 == NULL) ||
+ (lengthInBytes1 == 0) || (lengthInBytes1 > SNOW3G_MAX_BYTELEN) ||
+ (lengthInBytes2 == 0) || (lengthInBytes2 > SNOW3G_MAX_BYTELEN) ||
+ (lengthInBytes3 == 0) || (lengthInBytes3 > SNOW3G_MAX_BYTELEN) ||
+ (lengthInBytes4 == 0) || (lengthInBytes4 > SNOW3G_MAX_BYTELEN))
+ return;
+#endif
+
+ snow3gKeyState4_t ctx;
+ __m128i H, L; /* 4 bytes of keystream */
+ uint32_t lenInBytes1 = lengthInBytes1;
+ uint32_t lenInBytes2 = lengthInBytes2;
+ uint32_t lenInBytes3 = lengthInBytes3;
+ uint32_t lenInBytes4 = lengthInBytes4;
+ uint32_t bytes1 =
+ (lenInBytes1 < lenInBytes2 ? lenInBytes1
+ : lenInBytes2); /* number of bytes */
+ uint32_t bytes2 =
+ (lenInBytes3 < lenInBytes4 ? lenInBytes3
+ : lenInBytes4); /* number of bytes */
+ /* min num of bytes */
+ uint32_t bytes = (bytes1 < bytes2) ? bytes1 : bytes2;
+ uint32_t qwords = bytes / SNOW3G_8_BYTES;
+ uint8_t *pBufOut1 = pBufferOut1;
+ uint8_t *pBufOut2 = pBufferOut2;
+ uint8_t *pBufOut3 = pBufferOut3;
+ uint8_t *pBufOut4 = pBufferOut4;
+ const uint8_t *pBufIn1 = pBufferIn1;
+ const uint8_t *pBufIn2 = pBufferIn2;
+ const uint8_t *pBufIn3 = pBufferIn3;
+ const uint8_t *pBufIn4 = pBufferIn4;
+
+ bytes = qwords * SNOW3G_8_BYTES; /* rounded down minimum length */
+
+ /* Initialize the schedule from the IV */
+ snow3gStateInitialize_4(&ctx, pHandle, pIV1, pIV2, pIV3, pIV4);
+
+ /* Clock FSM and LFSR once, ignore the keystream */
+ snow3g_keystream_4_4(&ctx, &L);
+
+ lenInBytes1 -= bytes;
+ lenInBytes2 -= bytes;
+ lenInBytes3 -= bytes;
+ lenInBytes4 -= bytes;
+
+ /* generates 8 bytes at a time on all streams */
+ while (qwords--) {
+ snow3g_keystream_4_8(&ctx, &L, &H);
+ pBufIn1 = xor_keystrm_rev(pBufOut1, pBufIn1,
+ _mm_extract_epi64(L, 0));
+ pBufIn2 = xor_keystrm_rev(pBufOut2, pBufIn2,
+ _mm_extract_epi64(L, 1));
+ pBufIn3 = xor_keystrm_rev(pBufOut3, pBufIn3,
+ _mm_extract_epi64(H, 0));
+ pBufIn4 = xor_keystrm_rev(pBufOut4, pBufIn4,
+ _mm_extract_epi64(H, 1));
+
+ pBufOut1 += SNOW3G_8_BYTES;
+ pBufOut2 += SNOW3G_8_BYTES;
+ pBufOut3 += SNOW3G_8_BYTES;
+ pBufOut4 += SNOW3G_8_BYTES;
+ }
+
+ /* process the remainder of each buffer
+ * - extract the LFSR and FSM structures
+ * - Continue processing 1 buffer at a time
+ */
+ if (lenInBytes1) {
+ snow3gKeyState1_t ctx1;
+
+ snow3gStateConvert_4(&ctx, &ctx1, 0);
+ f8_snow3g(&ctx1, pBufIn1, pBufOut1, lenInBytes1);
+ }
+
+ if (lenInBytes2) {
+ snow3gKeyState1_t ctx2;
+
+ snow3gStateConvert_4(&ctx, &ctx2, 1);
+ f8_snow3g(&ctx2, pBufIn2, pBufOut2, lenInBytes2);
+ }
+
+ if (lenInBytes3) {
+ snow3gKeyState1_t ctx3;
+
+ snow3gStateConvert_4(&ctx, &ctx3, 2);
+ f8_snow3g(&ctx3, pBufIn3, pBufOut3, lenInBytes3);
+ }
+
+ if (lenInBytes4) {
+ snow3gKeyState1_t ctx4;
+
+ snow3gStateConvert_4(&ctx, &ctx4, 3);
+ f8_snow3g(&ctx4, pBufIn4, pBufOut4, lenInBytes4);
+ }
+
+#ifdef SAFE_DATA
+ H = _mm_setzero_si128();
+ L = _mm_setzero_si128();
+ CLEAR_MEM(&ctx, sizeof(ctx));
+ CLEAR_SCRATCH_GPS();
+ CLEAR_SCRATCH_SIMD_REGS();
+#endif /* SAFE_DATA */
+
+}
+
+#ifdef AVX2
+/*---------------------------------------------------------
+ * @description
+ * Snow3G 8 buffer ks 8 multi:
+ * Processes 8 packets 8 bytes at a time.
+ * Uses individual key schedule for each buffer.
+ *---------------------------------------------------------*/
+static inline void
+snow3g_8_buffer_ks_8_multi(uint32_t bytes,
+ const snow3g_key_schedule_t * const pKey[],
+ const void * const IV[],
+ const void * const pBufferIn[],
+ void *pBufferOut[], const uint32_t *lengthInBytes)
+{
+ uint32_t qwords = bytes / SNOW3G_8_BYTES;
+ __m256i H, L; /* 8 bytes of keystream */
+ snow3gKeyState8_t ctx;
+ int i;
+ const uint8_t *tBufferIn[8];
+ uint8_t *tBufferOut[8];
+ uint32_t tLenInBytes[8];
+
+ bytes = qwords * SNOW3G_8_BYTES; /* rounded down minimum length */
+
+ for (i = 0; i < 8; i++) {
+ tBufferIn[i] = pBufferIn[i];
+ tBufferOut[i] = pBufferOut[i];
+ tLenInBytes[i] = lengthInBytes[i];
+ }
+
+ /* Initialize the schedule from the IV */
+ snow3gStateInitialize_8_multiKey(&ctx, pKey, IV);
+
+ /* Clock FSM and LFSR once, ignore the keystream */
+ snow3g_keystream_8_4(&ctx, &L);
+
+ for (i = 0; i < 8; i++)
+ tLenInBytes[i] -= bytes;
+
+ /* generates 8 bytes at a time on all streams */
+ for (i = qwords; i != 0; i--) {
+ int j;
+
+ snow3g_keystream_8_8(&ctx, &L, &H);
+
+ tBufferIn[0] = xor_keystrm_rev(tBufferOut[0], tBufferIn[0],
+ _mm256_extract_epi64(L, 0));
+ tBufferIn[1] = xor_keystrm_rev(tBufferOut[1], tBufferIn[1],
+ _mm256_extract_epi64(L, 1));
+ tBufferIn[2] = xor_keystrm_rev(tBufferOut[2], tBufferIn[2],
+ _mm256_extract_epi64(H, 0));
+ tBufferIn[3] = xor_keystrm_rev(tBufferOut[3], tBufferIn[3],
+ _mm256_extract_epi64(H, 1));
+ tBufferIn[4] = xor_keystrm_rev(tBufferOut[4], tBufferIn[4],
+ _mm256_extract_epi64(L, 2));
+ tBufferIn[5] = xor_keystrm_rev(tBufferOut[5], tBufferIn[5],
+ _mm256_extract_epi64(L, 3));
+ tBufferIn[6] = xor_keystrm_rev(tBufferOut[6], tBufferIn[6],
+ _mm256_extract_epi64(H, 2));
+ tBufferIn[7] = xor_keystrm_rev(tBufferOut[7], tBufferIn[7],
+ _mm256_extract_epi64(H, 3));
+
+ for (j = 0; j < 8; j++)
+ tBufferOut[j] += SNOW3G_8_BYTES;
+ }
+
+ /* process the remainder of each buffer
+ * - extract the LFSR and FSM structures
+ * - Continue processing 1 buffer at a time
+ */
+ if (tLenInBytes[0]) {
+ snow3gKeyState1_t ctx1;
+
+ snow3gStateConvert_8(&ctx, &ctx1, 0);
+ f8_snow3g(&ctx1, tBufferIn[0], tBufferOut[0], tLenInBytes[0]);
+ }
+ if (tLenInBytes[1]) {
+ snow3gKeyState1_t ctx2;
+
+ snow3gStateConvert_8(&ctx, &ctx2, 1);
+ f8_snow3g(&ctx2, tBufferIn[1], tBufferOut[1], tLenInBytes[1]);
+ }
+ if (tLenInBytes[2]) {
+ snow3gKeyState1_t ctx3;
+
+ snow3gStateConvert_8(&ctx, &ctx3, 2);
+ f8_snow3g(&ctx3, tBufferIn[2], tBufferOut[2], tLenInBytes[2]);
+ }
+ if (tLenInBytes[3]) {
+ snow3gKeyState1_t ctx4;
+
+ snow3gStateConvert_8(&ctx, &ctx4, 3);
+ f8_snow3g(&ctx4, tBufferIn[3], tBufferOut[3], tLenInBytes[3]);
+ }
+ if (tLenInBytes[4]) {
+ snow3gKeyState1_t ctx5;
+
+ snow3gStateConvert_8(&ctx, &ctx5, 4);
+ f8_snow3g(&ctx5, tBufferIn[4], tBufferOut[4], tLenInBytes[4]);
+ }
+ if (tLenInBytes[5]) {
+ snow3gKeyState1_t ctx6;
+
+ snow3gStateConvert_8(&ctx, &ctx6, 5);
+ f8_snow3g(&ctx6, tBufferIn[5], tBufferOut[5], tLenInBytes[5]);
+ }
+ if (tLenInBytes[6]) {
+ snow3gKeyState1_t ctx7;
+
+ snow3gStateConvert_8(&ctx, &ctx7, 6);
+ f8_snow3g(&ctx7, tBufferIn[6], tBufferOut[6], tLenInBytes[6]);
+ }
+ if (tLenInBytes[7]) {
+ snow3gKeyState1_t ctx8;
+
+ snow3gStateConvert_8(&ctx, &ctx8, 7);
+ f8_snow3g(&ctx8, tBufferIn[7], tBufferOut[7], tLenInBytes[7]);
+ }
+
+#ifdef SAFE_DATA
+ H = _mm256_setzero_si256();
+ L = _mm256_setzero_si256();
+ CLEAR_MEM(&ctx, sizeof(ctx));
+#endif /* SAFE_DATA */
+}
+
+/*---------------------------------------------------------
+ * @description
+ * Snow3G 8 buffer ks 32 multi:
+ * Processes 8 packets 32 bytes at a time.
+ * Uses individual key schedule for each buffer.
+ *---------------------------------------------------------*/
+static inline void
+snow3g_8_buffer_ks_32_multi(uint32_t bytes,
+ const snow3g_key_schedule_t * const pKey[],
+ const void * const IV[],
+ const void * const pBufferIn[],
+ void *pBufferOut[], const uint32_t *lengthInBytes)
+{
+
+ snow3gKeyState8_t ctx;
+ uint32_t i;
+
+ const uint8_t *tBufferIn[8];
+ uint8_t *tBufferOut[8];
+ uint32_t tLenInBytes[8];
+
+ for (i = 0; i < 8; i++) {
+ tBufferIn[i] = pBufferIn[i];
+ tBufferOut[i] = pBufferOut[i];
+ tLenInBytes[i] = lengthInBytes[i];
+ }
+
+ uint32_t blocks = bytes / 32;
+
+ bytes = blocks * 32; /* rounded down minimum length */
+
+ /* Initialize the schedule from the IV */
+ snow3gStateInitialize_8_multiKey(&ctx, pKey, IV);
+
+ /* Clock FSM and LFSR once, ignore the keystream */
+ __m256i ks[8];
+
+ snow3g_keystream_8_4(&ctx, ks);
+
+ for (i = 0; i < 8; i++)
+ tLenInBytes[i] -= bytes;
+
+ __m256i in[8];
+
+ /* generates 32 bytes at a time on all streams */
+ for (i = 0; i < blocks; i++) {
+ int j;
+
+ in[0] = _mm256_loadu_si256((const __m256i *)tBufferIn[0]);
+ in[1] = _mm256_loadu_si256((const __m256i *)tBufferIn[1]);
+ in[2] = _mm256_loadu_si256((const __m256i *)tBufferIn[2]);
+ in[3] = _mm256_loadu_si256((const __m256i *)tBufferIn[3]);
+ in[4] = _mm256_loadu_si256((const __m256i *)tBufferIn[4]);
+ in[5] = _mm256_loadu_si256((const __m256i *)tBufferIn[5]);
+ in[6] = _mm256_loadu_si256((const __m256i *)tBufferIn[6]);
+ in[7] = _mm256_loadu_si256((const __m256i *)tBufferIn[7]);
+
+ snow3g_keystream_8_32(&ctx, ks);
+
+ _mm256_storeu_si256((__m256i *)tBufferOut[0],
+ _mm256_xor_si256(in[0], ks[0]));
+ _mm256_storeu_si256((__m256i *)tBufferOut[1],
+ _mm256_xor_si256(in[1], ks[1]));
+ _mm256_storeu_si256((__m256i *)tBufferOut[2],
+ _mm256_xor_si256(in[2], ks[2]));
+ _mm256_storeu_si256((__m256i *)tBufferOut[3],
+ _mm256_xor_si256(in[3], ks[3]));
+ _mm256_storeu_si256((__m256i *)tBufferOut[4],
+ _mm256_xor_si256(in[4], ks[4]));
+ _mm256_storeu_si256((__m256i *)tBufferOut[5],
+ _mm256_xor_si256(in[5], ks[5]));
+ _mm256_storeu_si256((__m256i *)tBufferOut[6],
+ _mm256_xor_si256(in[6], ks[6]));
+ _mm256_storeu_si256((__m256i *)tBufferOut[7],
+ _mm256_xor_si256(in[7], ks[7]));
+
+                for (j = 0; j < 8; j++) {
+                        tBufferIn[j] += 32;
+                        tBufferOut[j] += 32;
+                }
+ }
+
+ /* process the remainder of each buffer
+ * - extract the LFSR and FSM structures
+ * - Continue processing 1 buffer at a time
+ */
+ if (tLenInBytes[0]) {
+ snow3gKeyState1_t ctx1;
+
+ snow3gStateConvert_8(&ctx, &ctx1, 0);
+ f8_snow3g(&ctx1, tBufferIn[0], tBufferOut[0], tLenInBytes[0]);
+ }
+ if (tLenInBytes[1]) {
+ snow3gKeyState1_t ctx2;
+
+ snow3gStateConvert_8(&ctx, &ctx2, 1);
+ f8_snow3g(&ctx2, tBufferIn[1], tBufferOut[1], tLenInBytes[1]);
+ }
+ if (tLenInBytes[2]) {
+ snow3gKeyState1_t ctx3;
+
+ snow3gStateConvert_8(&ctx, &ctx3, 2);
+ f8_snow3g(&ctx3, tBufferIn[2], tBufferOut[2], tLenInBytes[2]);
+ }
+ if (tLenInBytes[3]) {
+ snow3gKeyState1_t ctx4;
+
+ snow3gStateConvert_8(&ctx, &ctx4, 3);
+ f8_snow3g(&ctx4, tBufferIn[3], tBufferOut[3], tLenInBytes[3]);
+ }
+ if (tLenInBytes[4]) {
+ snow3gKeyState1_t ctx5;
+
+ snow3gStateConvert_8(&ctx, &ctx5, 4);
+ f8_snow3g(&ctx5, tBufferIn[4], tBufferOut[4], tLenInBytes[4]);
+ }
+ if (tLenInBytes[5]) {
+ snow3gKeyState1_t ctx6;
+
+ snow3gStateConvert_8(&ctx, &ctx6, 5);
+ f8_snow3g(&ctx6, tBufferIn[5], tBufferOut[5], tLenInBytes[5]);
+ }
+ if (tLenInBytes[6]) {
+ snow3gKeyState1_t ctx7;
+
+ snow3gStateConvert_8(&ctx, &ctx7, 6);
+ f8_snow3g(&ctx7, tBufferIn[6], tBufferOut[6], tLenInBytes[6]);
+ }
+ if (tLenInBytes[7]) {
+ snow3gKeyState1_t ctx8;
+
+ snow3gStateConvert_8(&ctx, &ctx8, 7);
+ f8_snow3g(&ctx8, tBufferIn[7], tBufferOut[7], tLenInBytes[7]);
+ }
+
+#ifdef SAFE_DATA
+ CLEAR_MEM(&ctx, sizeof(ctx));
+ CLEAR_MEM(&ks, sizeof(ks));
+ CLEAR_MEM(&in, sizeof(in));
+#endif /* SAFE_DATA */
+}
+
+/*---------------------------------------------------------
+ * @description
+ * Snow3G 8 buffer ks 8:
+ * Processes 8 packets 8 bytes at a time.
+ * Uses same key schedule for each buffer.
+ *---------------------------------------------------------*/
+static inline void
+snow3g_8_buffer_ks_8(uint32_t bytes,
+ const snow3g_key_schedule_t *pHandle,
+ const void *pIV1,
+ const void *pIV2,
+ const void *pIV3,
+ const void *pIV4,
+ const void *pIV5,
+ const void *pIV6,
+ const void *pIV7,
+ const void *pIV8,
+ const void *pBufferIn1, void *pBufferOut1,
+ const uint32_t lengthInBytes1,
+ const void *pBufferIn2, void *pBufferOut2,
+ const uint32_t lengthInBytes2,
+ const void *pBufferIn3, void *pBufferOut3,
+ const uint32_t lengthInBytes3,
+ const void *pBufferIn4, void *pBufferOut4,
+ const uint32_t lengthInBytes4,
+ const void *pBufferIn5, void *pBufferOut5,
+ const uint32_t lengthInBytes5,
+ const void *pBufferIn6, void *pBufferOut6,
+ const uint32_t lengthInBytes6,
+ const void *pBufferIn7, void *pBufferOut7,
+ const uint32_t lengthInBytes7,
+ const void *pBufferIn8, void *pBufferOut8,
+ const uint32_t lengthInBytes8)
+{
+
+ uint32_t qwords = bytes / SNOW3G_8_BYTES;
+ __m256i H, L; /* 8 bytes of keystream */
+ snow3gKeyState8_t ctx;
+ int i;
+ uint32_t lenInBytes1 = lengthInBytes1;
+ uint32_t lenInBytes2 = lengthInBytes2;
+ uint32_t lenInBytes3 = lengthInBytes3;
+ uint32_t lenInBytes4 = lengthInBytes4;
+ uint32_t lenInBytes5 = lengthInBytes5;
+ uint32_t lenInBytes6 = lengthInBytes6;
+ uint32_t lenInBytes7 = lengthInBytes7;
+ uint32_t lenInBytes8 = lengthInBytes8;
+ uint8_t *pBufOut1 = pBufferOut1;
+ uint8_t *pBufOut2 = pBufferOut2;
+ uint8_t *pBufOut3 = pBufferOut3;
+ uint8_t *pBufOut4 = pBufferOut4;
+ uint8_t *pBufOut5 = pBufferOut5;
+ uint8_t *pBufOut6 = pBufferOut6;
+ uint8_t *pBufOut7 = pBufferOut7;
+ uint8_t *pBufOut8 = pBufferOut8;
+ const uint8_t *pBufIn1 = pBufferIn1;
+ const uint8_t *pBufIn2 = pBufferIn2;
+ const uint8_t *pBufIn3 = pBufferIn3;
+ const uint8_t *pBufIn4 = pBufferIn4;
+ const uint8_t *pBufIn5 = pBufferIn5;
+ const uint8_t *pBufIn6 = pBufferIn6;
+ const uint8_t *pBufIn7 = pBufferIn7;
+ const uint8_t *pBufIn8 = pBufferIn8;
+
+ bytes = qwords * SNOW3G_8_BYTES; /* rounded down minimum length */
+
+ /* Initialize the schedule from the IV */
+ snow3gStateInitialize_8(&ctx, pHandle, pIV1, pIV2, pIV3,
+ pIV4, pIV5, pIV6, pIV7, pIV8);
+
+ /* Clock FSM and LFSR once, ignore the keystream */
+ snow3g_keystream_8_4(&ctx, &L);
+
+ lenInBytes1 -= bytes;
+ lenInBytes2 -= bytes;
+ lenInBytes3 -= bytes;
+ lenInBytes4 -= bytes;
+ lenInBytes5 -= bytes;
+ lenInBytes6 -= bytes;
+ lenInBytes7 -= bytes;
+ lenInBytes8 -= bytes;
+
+ /* generates 8 bytes at a time on all streams */
+ for (i = qwords; i != 0; i--) {
+ snow3g_keystream_8_8(&ctx, &L, &H);
+
+ pBufIn1 = xor_keystrm_rev(pBufOut1, pBufIn1,
+ _mm256_extract_epi64(L, 0));
+ pBufIn2 = xor_keystrm_rev(pBufOut2, pBufIn2,
+ _mm256_extract_epi64(L, 1));
+ pBufIn3 = xor_keystrm_rev(pBufOut3, pBufIn3,
+ _mm256_extract_epi64(H, 0));
+ pBufIn4 = xor_keystrm_rev(pBufOut4, pBufIn4,
+ _mm256_extract_epi64(H, 1));
+ pBufIn5 = xor_keystrm_rev(pBufOut5, pBufIn5,
+ _mm256_extract_epi64(L, 2));
+ pBufIn6 = xor_keystrm_rev(pBufOut6, pBufIn6,
+ _mm256_extract_epi64(L, 3));
+ pBufIn7 = xor_keystrm_rev(pBufOut7, pBufIn7,
+ _mm256_extract_epi64(H, 2));
+ pBufIn8 = xor_keystrm_rev(pBufOut8, pBufIn8,
+ _mm256_extract_epi64(H, 3));
+
+ pBufOut1 += SNOW3G_8_BYTES;
+ pBufOut2 += SNOW3G_8_BYTES;
+ pBufOut3 += SNOW3G_8_BYTES;
+ pBufOut4 += SNOW3G_8_BYTES;
+ pBufOut5 += SNOW3G_8_BYTES;
+ pBufOut6 += SNOW3G_8_BYTES;
+ pBufOut7 += SNOW3G_8_BYTES;
+ pBufOut8 += SNOW3G_8_BYTES;
+ }
+
+ /* process the remainder of each buffer
+ * - extract the LFSR and FSM structures
+ * - Continue processing 1 buffer at a time
+ */
+ if (lenInBytes1) {
+ snow3gKeyState1_t ctx1;
+
+ snow3gStateConvert_8(&ctx, &ctx1, 0);
+ f8_snow3g(&ctx1, pBufIn1, pBufOut1, lenInBytes1);
+ }
+
+ if (lenInBytes2) {
+ snow3gKeyState1_t ctx2;
+
+ snow3gStateConvert_8(&ctx, &ctx2, 1);
+ f8_snow3g(&ctx2, pBufIn2, pBufOut2, lenInBytes2);
+ }
+
+ if (lenInBytes3) {
+ snow3gKeyState1_t ctx3;
+
+ snow3gStateConvert_8(&ctx, &ctx3, 2);
+ f8_snow3g(&ctx3, pBufIn3, pBufOut3, lenInBytes3);
+ }
+
+ if (lenInBytes4) {
+ snow3gKeyState1_t ctx4;
+
+ snow3gStateConvert_8(&ctx, &ctx4, 3);
+ f8_snow3g(&ctx4, pBufIn4, pBufOut4, lenInBytes4);
+ }
+
+ if (lenInBytes5) {
+ snow3gKeyState1_t ctx5;
+
+ snow3gStateConvert_8(&ctx, &ctx5, 4);
+ f8_snow3g(&ctx5, pBufIn5, pBufOut5, lenInBytes5);
+ }
+
+ if (lenInBytes6) {
+ snow3gKeyState1_t ctx6;
+
+ snow3gStateConvert_8(&ctx, &ctx6, 5);
+ f8_snow3g(&ctx6, pBufIn6, pBufOut6, lenInBytes6);
+ }
+
+ if (lenInBytes7) {
+ snow3gKeyState1_t ctx7;
+
+ snow3gStateConvert_8(&ctx, &ctx7, 6);
+ f8_snow3g(&ctx7, pBufIn7, pBufOut7, lenInBytes7);
+ }
+
+ if (lenInBytes8) {
+ snow3gKeyState1_t ctx8;
+
+ snow3gStateConvert_8(&ctx, &ctx8, 7);
+ f8_snow3g(&ctx8, pBufIn8, pBufOut8, lenInBytes8);
+ }
+
+#ifdef SAFE_DATA
+ H = _mm256_setzero_si256();
+ L = _mm256_setzero_si256();
+ CLEAR_MEM(&ctx, sizeof(ctx));
+#endif /* SAFE_DATA */
+}
+
+/*---------------------------------------------------------
+ * @description
+ * Snow3G 8 buffer ks 32:
+ * Processes 8 packets 32 bytes at a time.
+ * Uses same key schedule for each buffer.
+ *---------------------------------------------------------*/
+static inline void
+snow3g_8_buffer_ks_32(uint32_t bytes,
+ const snow3g_key_schedule_t *pKey,
+ const void *pIV1, const void *pIV2,
+ const void *pIV3, const void *pIV4,
+ const void *pIV5, const void *pIV6,
+ const void *pIV7, const void *pIV8,
+ const void *pBufferIn1, void *pBufferOut1,
+ const uint32_t lengthInBytes1,
+ const void *pBufferIn2, void *pBufferOut2,
+ const uint32_t lengthInBytes2,
+ const void *pBufferIn3, void *pBufferOut3,
+ const uint32_t lengthInBytes3,
+ const void *pBufferIn4, void *pBufferOut4,
+ const uint32_t lengthInBytes4,
+ const void *pBufferIn5, void *pBufferOut5,
+ const uint32_t lengthInBytes5,
+ const void *pBufferIn6, void *pBufferOut6,
+ const uint32_t lengthInBytes6,
+ const void *pBufferIn7, void *pBufferOut7,
+ const uint32_t lengthInBytes7,
+ const void *pBufferIn8, void *pBufferOut8,
+ const uint32_t lengthInBytes8)
+{
+ snow3gKeyState8_t ctx;
+ uint32_t i;
+ uint32_t lenInBytes1 = lengthInBytes1;
+ uint32_t lenInBytes2 = lengthInBytes2;
+ uint32_t lenInBytes3 = lengthInBytes3;
+ uint32_t lenInBytes4 = lengthInBytes4;
+ uint32_t lenInBytes5 = lengthInBytes5;
+ uint32_t lenInBytes6 = lengthInBytes6;
+ uint32_t lenInBytes7 = lengthInBytes7;
+ uint32_t lenInBytes8 = lengthInBytes8;
+ uint8_t *pBufOut1 = pBufferOut1;
+ uint8_t *pBufOut2 = pBufferOut2;
+ uint8_t *pBufOut3 = pBufferOut3;
+ uint8_t *pBufOut4 = pBufferOut4;
+ uint8_t *pBufOut5 = pBufferOut5;
+ uint8_t *pBufOut6 = pBufferOut6;
+ uint8_t *pBufOut7 = pBufferOut7;
+ uint8_t *pBufOut8 = pBufferOut8;
+ const uint8_t *pBufIn1 = pBufferIn1;
+ const uint8_t *pBufIn2 = pBufferIn2;
+ const uint8_t *pBufIn3 = pBufferIn3;
+ const uint8_t *pBufIn4 = pBufferIn4;
+ const uint8_t *pBufIn5 = pBufferIn5;
+ const uint8_t *pBufIn6 = pBufferIn6;
+ const uint8_t *pBufIn7 = pBufferIn7;
+ const uint8_t *pBufIn8 = pBufferIn8;
+
+ uint32_t blocks = bytes / 32;
+
+ bytes = blocks * 32; /* rounded down minimum length */
+
+ /* Initialize the schedule from the IV */
+ snow3gStateInitialize_8(&ctx, pKey, pIV1, pIV2, pIV3, pIV4, pIV5, pIV6,
+ pIV7, pIV8);
+
+ /* Clock FSM and LFSR once, ignore the keystream */
+ __m256i ks[8];
+
+ snow3g_keystream_8_4(&ctx, ks);
+
+ lenInBytes1 -= bytes;
+ lenInBytes2 -= bytes;
+ lenInBytes3 -= bytes;
+ lenInBytes4 -= bytes;
+ lenInBytes5 -= bytes;
+ lenInBytes6 -= bytes;
+ lenInBytes7 -= bytes;
+ lenInBytes8 -= bytes;
+
+ __m256i in[8];
+
+ /* generates 32 bytes at a time on all streams */
+ for (i = 0; i < blocks; i++) {
+
+ in[0] = _mm256_loadu_si256((const __m256i *)pBufIn1);
+ in[1] = _mm256_loadu_si256((const __m256i *)pBufIn2);
+ in[2] = _mm256_loadu_si256((const __m256i *)pBufIn3);
+ in[3] = _mm256_loadu_si256((const __m256i *)pBufIn4);
+ in[4] = _mm256_loadu_si256((const __m256i *)pBufIn5);
+ in[5] = _mm256_loadu_si256((const __m256i *)pBufIn6);
+ in[6] = _mm256_loadu_si256((const __m256i *)pBufIn7);
+ in[7] = _mm256_loadu_si256((const __m256i *)pBufIn8);
+
+ snow3g_keystream_8_32(&ctx, ks);
+
+ _mm256_storeu_si256((__m256i *)pBufOut1,
+ _mm256_xor_si256(in[0], ks[0]));
+ _mm256_storeu_si256((__m256i *)pBufOut2,
+ _mm256_xor_si256(in[1], ks[1]));
+ _mm256_storeu_si256((__m256i *)pBufOut3,
+ _mm256_xor_si256(in[2], ks[2]));
+ _mm256_storeu_si256((__m256i *)pBufOut4,
+ _mm256_xor_si256(in[3], ks[3]));
+ _mm256_storeu_si256((__m256i *)pBufOut5,
+ _mm256_xor_si256(in[4], ks[4]));
+ _mm256_storeu_si256((__m256i *)pBufOut6,
+ _mm256_xor_si256(in[5], ks[5]));
+ _mm256_storeu_si256((__m256i *)pBufOut7,
+ _mm256_xor_si256(in[6], ks[6]));
+ _mm256_storeu_si256((__m256i *)pBufOut8,
+ _mm256_xor_si256(in[7], ks[7]));
+
+ pBufIn1 += 32;
+ pBufIn2 += 32;
+ pBufIn3 += 32;
+ pBufIn4 += 32;
+ pBufIn5 += 32;
+ pBufIn6 += 32;
+ pBufIn7 += 32;
+ pBufIn8 += 32;
+
+ pBufOut1 += 32;
+ pBufOut2 += 32;
+ pBufOut3 += 32;
+ pBufOut4 += 32;
+ pBufOut5 += 32;
+ pBufOut6 += 32;
+ pBufOut7 += 32;
+ pBufOut8 += 32;
+ }
+
+ /* process the remainder of each buffer
+ * - extract the LFSR and FSM structures
+ * - Continue processing 1 buffer at a time
+ */
+ if (lenInBytes1) {
+ snow3gKeyState1_t ctx1;
+
+ snow3gStateConvert_8(&ctx, &ctx1, 0);
+ f8_snow3g(&ctx1, pBufIn1, pBufOut1, lenInBytes1);
+ }
+
+ if (lenInBytes2) {
+ snow3gKeyState1_t ctx2;
+
+ snow3gStateConvert_8(&ctx, &ctx2, 1);
+ f8_snow3g(&ctx2, pBufIn2, pBufOut2, lenInBytes2);
+ }
+
+ if (lenInBytes3) {
+ snow3gKeyState1_t ctx3;
+
+ snow3gStateConvert_8(&ctx, &ctx3, 2);
+ f8_snow3g(&ctx3, pBufIn3, pBufOut3, lenInBytes3);
+ }
+
+ if (lenInBytes4) {
+ snow3gKeyState1_t ctx4;
+
+ snow3gStateConvert_8(&ctx, &ctx4, 3);
+ f8_snow3g(&ctx4, pBufIn4, pBufOut4, lenInBytes4);
+ }
+
+ if (lenInBytes5) {
+ snow3gKeyState1_t ctx5;
+
+ snow3gStateConvert_8(&ctx, &ctx5, 4);
+ f8_snow3g(&ctx5, pBufIn5, pBufOut5, lenInBytes5);
+ }
+
+ if (lenInBytes6) {
+ snow3gKeyState1_t ctx6;
+
+ snow3gStateConvert_8(&ctx, &ctx6, 5);
+ f8_snow3g(&ctx6, pBufIn6, pBufOut6, lenInBytes6);
+ }
+
+ if (lenInBytes7) {
+ snow3gKeyState1_t ctx7;
+
+ snow3gStateConvert_8(&ctx, &ctx7, 6);
+ f8_snow3g(&ctx7, pBufIn7, pBufOut7, lenInBytes7);
+ }
+
+ if (lenInBytes8) {
+ snow3gKeyState1_t ctx8;
+
+ snow3gStateConvert_8(&ctx, &ctx8, 7);
+ f8_snow3g(&ctx8, pBufIn8, pBufOut8, lenInBytes8);
+ }
+
+#ifdef SAFE_DATA
+ CLEAR_MEM(&ctx, sizeof(ctx));
+ CLEAR_MEM(&ks, sizeof(ks));
+ CLEAR_MEM(&in, sizeof(in));
+#endif /* SAFE_DATA */
+}
+#endif /* AVX2 */
+
+/*---------------------------------------------------------
+ * @description
+ * Snow3G F8 8 buffer, multi-key:
+ * Eight packets enc/dec with eight respective key schedules.
+ * The 8 IVs are independent and are passed as an array of pointers.
+ * Each buffer and data length are separate.
+ *---------------------------------------------------------*/
+void SNOW3G_F8_8_BUFFER_MULTIKEY(const snow3g_key_schedule_t * const pKey[],
+ const void * const IV[],
+ const void * const BufferIn[],
+ void *BufferOut[],
+ const uint32_t lengthInBytes[])
+{
+ int i;
+
+#ifdef SAFE_PARAM
+ if ((pKey == NULL) || (IV == NULL) || (BufferIn == NULL) ||
+ (BufferOut == NULL) || (lengthInBytes == NULL))
+ return;
+
+ for (i = 0; i < 8; i++)
+ if ((pKey[i] == NULL) || (IV[i] == NULL) ||
+ (BufferIn[i] == NULL) || (BufferOut[i] == NULL) ||
+ (lengthInBytes[i] == 0) ||
+ (lengthInBytes[i] > SNOW3G_MAX_BYTELEN))
+ return;
+#endif
+
+#ifndef AVX2
+ /* fall back to processing one buffer at a time when AVX2 is unavailable */
+ for (i = 0; i < 8; i++)
+ SNOW3G_F8_1_BUFFER(pKey[i], IV[i], BufferIn[i], BufferOut[i],
+ lengthInBytes[i]);
+#else
+ uint32_t bytes = lengthInBytes[0];
+
+ /* find min byte length */
+ for (i = 1; i < 8; i++)
+ if (lengthInBytes[i] < bytes)
+ bytes = lengthInBytes[i];
+
+ if (bytes % 32) {
+ snow3g_8_buffer_ks_8_multi(bytes, pKey, IV, BufferIn, BufferOut,
+ lengthInBytes);
+ } else {
+ snow3g_8_buffer_ks_32_multi(bytes, pKey, IV, BufferIn,
+ BufferOut, lengthInBytes);
+ }
+#ifdef SAFE_DATA
+ CLEAR_SCRATCH_GPS();
+ CLEAR_SCRATCH_SIMD_REGS();
+#endif
+#endif /* AVX2 */
+}
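+
+/*
+ * Added note: the minimum length across the 8 buffers selects the SIMD path;
+ * a multiple of 32 bytes takes the 32-bytes-per-iteration keystream path,
+ * anything else uses 8 bytes per iteration, and each buffer's tail is then
+ * finished by the single-buffer code after state conversion.
+ */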
+
+/*---------------------------------------------------------
+ * @description
+ * Snow3G F8 8 buffer:
+ * Eight packets enc/dec with the same key schedule.
+ * The 8 IVs are independent and are passed as separate pointers.
+ * Each buffer and data length are separate.
+ * Uses AVX instructions.
+ *---------------------------------------------------------*/
+void SNOW3G_F8_8_BUFFER(const snow3g_key_schedule_t *pHandle,
+ const void *pIV1,
+ const void *pIV2,
+ const void *pIV3,
+ const void *pIV4,
+ const void *pIV5,
+ const void *pIV6,
+ const void *pIV7,
+ const void *pIV8,
+ const void *pBufIn1,
+ void *pBufOut1,
+ const uint32_t lenInBytes1,
+ const void *pBufIn2,
+ void *pBufOut2,
+ const uint32_t lenInBytes2,
+ const void *pBufIn3,
+ void *pBufOut3,
+ const uint32_t lenInBytes3,
+ const void *pBufIn4,
+ void *pBufOut4,
+ const uint32_t lenInBytes4,
+ const void *pBufIn5,
+ void *pBufOut5,
+ const uint32_t lenInBytes5,
+ const void *pBufIn6,
+ void *pBufOut6,
+ const uint32_t lenInBytes6,
+ const void *pBufIn7,
+ void *pBufOut7,
+ const uint32_t lenInBytes7,
+ const void *pBufIn8,
+ void *pBufOut8,
+ const uint32_t lenInBytes8)
+{
+#ifdef SAFE_PARAM
+ if ((pHandle == NULL) ||
+ (pIV1 == NULL) || (pIV2 == NULL) ||
+ (pIV3 == NULL) || (pIV4 == NULL) ||
+ (pIV5 == NULL) || (pIV6 == NULL) ||
+ (pIV7 == NULL) || (pIV8 == NULL) ||
+ (pBufIn1 == NULL) || (pBufOut1 == NULL) ||
+ (pBufIn2 == NULL) || (pBufOut2 == NULL) ||
+ (pBufIn3 == NULL) || (pBufOut3 == NULL) ||
+ (pBufIn4 == NULL) || (pBufOut4 == NULL) ||
+ (pBufIn5 == NULL) || (pBufOut5 == NULL) ||
+ (pBufIn6 == NULL) || (pBufOut6 == NULL) ||
+ (pBufIn7 == NULL) || (pBufOut7 == NULL) ||
+ (pBufIn8 == NULL) || (pBufOut8 == NULL) ||
+ (lenInBytes1 == 0) || (lenInBytes1 > SNOW3G_MAX_BYTELEN) ||
+ (lenInBytes2 == 0) || (lenInBytes2 > SNOW3G_MAX_BYTELEN) ||
+ (lenInBytes3 == 0) || (lenInBytes3 > SNOW3G_MAX_BYTELEN) ||
+ (lenInBytes4 == 0) || (lenInBytes4 > SNOW3G_MAX_BYTELEN) ||
+ (lenInBytes5 == 0) || (lenInBytes5 > SNOW3G_MAX_BYTELEN) ||
+ (lenInBytes6 == 0) || (lenInBytes6 > SNOW3G_MAX_BYTELEN) ||
+ (lenInBytes7 == 0) || (lenInBytes7 > SNOW3G_MAX_BYTELEN) ||
+ (lenInBytes8 == 0) || (lenInBytes8 > SNOW3G_MAX_BYTELEN))
+ return;
+#endif
+
+#ifdef AVX2
+ uint32_t bytes1 =
+ (lenInBytes1 < lenInBytes2 ? lenInBytes1
+ : lenInBytes2); /* number of bytes */
+ uint32_t bytes2 =
+ (lenInBytes3 < lenInBytes4 ? lenInBytes3
+ : lenInBytes4); /* number of bytes */
+ uint32_t bytes3 =
+ (lenInBytes5 < lenInBytes6 ? lenInBytes5
+ : lenInBytes6); /* number of bytes */
+ uint32_t bytes4 =
+ (lenInBytes7 < lenInBytes8 ? lenInBytes7
+ : lenInBytes8); /* number of bytes */
+ uint32_t bytesq1 =
+ (bytes1 < bytes2) ? bytes1 : bytes2; /* min number of bytes */
+ uint32_t bytesq2 = (bytes3 < bytes4) ? bytes3 : bytes4;
+ uint32_t bytes = (bytesq1 < bytesq2) ? bytesq1 : bytesq2;
+
+ if (bytes % 32) {
+ snow3g_8_buffer_ks_8(
+ bytes, pHandle, pIV1, pIV2, pIV3, pIV4, pIV5, pIV6,
+ pIV7, pIV8, pBufIn1, pBufOut1, lenInBytes1, pBufIn2,
+ pBufOut2, lenInBytes2, pBufIn3, pBufOut3, lenInBytes3,
+ pBufIn4, pBufOut4, lenInBytes4, pBufIn5, pBufOut5,
+ lenInBytes5, pBufIn6, pBufOut6, lenInBytes6, pBufIn7,
+ pBufOut7, lenInBytes7, pBufIn8, pBufOut8, lenInBytes8);
+ } else {
+ snow3g_8_buffer_ks_32(
+ bytes, pHandle, pIV1, pIV2, pIV3, pIV4, pIV5, pIV6,
+ pIV7, pIV8, pBufIn1, pBufOut1, lenInBytes1, pBufIn2,
+ pBufOut2, lenInBytes2, pBufIn3, pBufOut3, lenInBytes3,
+ pBufIn4, pBufOut4, lenInBytes4, pBufIn5, pBufOut5,
+ lenInBytes5, pBufIn6, pBufOut6, lenInBytes6, pBufIn7,
+ pBufOut7, lenInBytes7, pBufIn8, pBufOut8, lenInBytes8);
+ }
+#ifdef SAFE_DATA
+ CLEAR_SCRATCH_GPS();
+ CLEAR_SCRATCH_SIMD_REGS();
+#endif
+#else /* ~AVX2 */
+ SNOW3G_F8_2_BUFFER(pHandle, pIV1, pIV2, pBufIn1, pBufOut1, lenInBytes1,
+ pBufIn2, pBufOut2, lenInBytes2);
+
+ SNOW3G_F8_2_BUFFER(pHandle, pIV3, pIV4, pBufIn3, pBufOut3, lenInBytes3,
+ pBufIn4, pBufOut4, lenInBytes4);
+
+ SNOW3G_F8_2_BUFFER(pHandle, pIV5, pIV6, pBufIn5, pBufOut5, lenInBytes5,
+ pBufIn6, pBufOut6, lenInBytes6);
+
+ SNOW3G_F8_2_BUFFER(pHandle, pIV7, pIV8, pBufIn7, pBufOut7, lenInBytes7,
+ pBufIn8, pBufOut8, lenInBytes8);
+#endif /* AVX2 */
+}
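+
+/*
+ * Illustrative usage sketch (editorial example, not part of the library
+ * sources): encrypting/decrypting eight buffers under one key schedule with
+ * SNOW3G_F8_8_BUFFER(). The key schedule is assumed to have been expanded
+ * beforehand with the library's SNOW3G key-scheduling routine.
+ */
+static inline void
+example_snow3g_f8_8_buffer(const snow3g_key_schedule_t *ks,
+                           const void *iv[8],
+                           const void *in[8], void *out[8],
+                           const uint32_t len[8])
+{
+        SNOW3G_F8_8_BUFFER(ks,
+                           iv[0], iv[1], iv[2], iv[3],
+                           iv[4], iv[5], iv[6], iv[7],
+                           in[0], out[0], len[0],
+                           in[1], out[1], len[1],
+                           in[2], out[2], len[2],
+                           in[3], out[3], len[3],
+                           in[4], out[4], len[4],
+                           in[5], out[5], len[5],
+                           in[6], out[6], len[6],
+                           in[7], out[7], len[7]);
+}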
+
+/******************************************************************************
+ * @description
+ * Snow3G F8 multi packet:
+ * Performs F8 enc/dec on [n] packets. The operation is performed in-place.
+ * The input IVs are passed in Little Endian format.
+ * The KeySchedule is in Little Endian format.
+ ******************************************************************************/
+void SNOW3G_F8_N_BUFFER(const snow3g_key_schedule_t *pCtx,
+ const void * const IV[],
+ const void * const pBufferIn[],
+ void *pBufferOut[],
+ const uint32_t bufLenInBytes[],
+ const uint32_t packetCount)
+{
+#ifdef SAFE_PARAM
+ uint32_t i;
+
+ if ((pCtx == NULL) || (IV == NULL) || (pBufferIn == NULL) ||
+ (pBufferOut == NULL) || (bufLenInBytes == NULL))
+ return;
+
+ for (i = 0; i < packetCount; i++)
+ if ((IV[i] == NULL) || (pBufferIn[i] == NULL) ||
+ (pBufferOut[i] == NULL) || (bufLenInBytes[i] == 0) ||
+ (bufLenInBytes[i] > SNOW3G_MAX_BYTELEN))
+ return;
+#endif
+ if (packetCount > 16) {
+ pBufferOut[0] = NULL;
+ printf("packetCount too high (%d)\n", packetCount);
+ return;
+ }
+
+ uint32_t packet_index, inner_index, pktCnt = packetCount;
+ int sortNeeded = 0, tempLen = 0;
+ uint8_t *srctempbuff;
+ uint8_t *dsttempbuff;
+ uint8_t *ivtempbuff;
+ uint8_t *pSrcBuf[NUM_PACKETS_16] = {NULL};
+ uint8_t *pDstBuf[NUM_PACKETS_16] = {NULL};
+ uint8_t *pIV[NUM_PACKETS_16] = {NULL};
+ uint32_t lensBuf[NUM_PACKETS_16] = {0};
+
+ memcpy((void *)lensBuf, bufLenInBytes, packetCount * sizeof(uint32_t));
+ memcpy((void *)pSrcBuf, pBufferIn, packetCount * sizeof(void *));
+ memcpy((void *)pDstBuf, pBufferOut, packetCount * sizeof(void *));
+ memcpy((void *)pIV, IV, packetCount * sizeof(void *));
+
+ packet_index = packetCount;
+
+ while (packet_index--) {
+
+ /* check if all packets are sorted by decreasing length */
+ if (packet_index > 0 && lensBuf[packet_index - 1] <
+ lensBuf[packet_index]) {
+ /* this packet array is not correctly sorted */
+ sortNeeded = 1;
+ }
+ }
+
+ if (sortNeeded) {
+
+		/* sort packets in decreasing buffer size from [0] to the
+		   [n]th packet, where buffer[0] will contain the longest
+		   buffer and buffer[n] will contain the shortest buffer.
+		   4 arrays are swapped:
+		   - pointers to input buffers
+		   - pointers to output buffers
+		   - pointers to input IVs
+		   - input buffer lengths */
+ packet_index = packetCount;
+ while (packet_index--) {
+
+ inner_index = packet_index;
+ while (inner_index--) {
+
+ if (lensBuf[packet_index] >
+ lensBuf[inner_index]) {
+
+ /* swap buffers to arrange in
+ descending order from [0]. */
+ srctempbuff = pSrcBuf[packet_index];
+ dsttempbuff = pDstBuf[packet_index];
+ ivtempbuff = pIV[packet_index];
+ tempLen = lensBuf[packet_index];
+
+ pSrcBuf[packet_index] =
+ pSrcBuf[inner_index];
+ pDstBuf[packet_index] =
+ pDstBuf[inner_index];
+ pIV[packet_index] = pIV[inner_index];
+ lensBuf[packet_index] =
+ lensBuf[inner_index];
+
+ pSrcBuf[inner_index] = srctempbuff;
+ pDstBuf[inner_index] = dsttempbuff;
+ pIV[inner_index] = ivtempbuff;
+ lensBuf[inner_index] = tempLen;
+ }
+ } /* for inner packet index (inner bubble-sort) */
+ } /* for outer packet index (outer bubble-sort) */
+ } /* if sortNeeded */
+
+ packet_index = 0;
+ /* process 8 buffers at-a-time */
+#ifdef AVX2
+ while (pktCnt >= 8) {
+ pktCnt -= 8;
+ SNOW3G_F8_8_BUFFER(pCtx, pIV[packet_index],
+ pIV[packet_index + 1],
+ pIV[packet_index + 2],
+ pIV[packet_index + 3],
+ pIV[packet_index + 4],
+ pIV[packet_index + 5],
+ pIV[packet_index + 6],
+ pIV[packet_index + 7],
+ pSrcBuf[packet_index],
+ pDstBuf[packet_index],
+ lensBuf[packet_index],
+ pSrcBuf[packet_index + 1],
+ pDstBuf[packet_index + 1],
+ lensBuf[packet_index + 1],
+ pSrcBuf[packet_index + 2],
+ pDstBuf[packet_index + 2],
+ lensBuf[packet_index + 2],
+ pSrcBuf[packet_index + 3],
+ pDstBuf[packet_index + 3],
+ lensBuf[packet_index + 3],
+ pSrcBuf[packet_index + 4],
+ pDstBuf[packet_index + 4],
+ lensBuf[packet_index + 4],
+ pSrcBuf[packet_index + 5],
+ pDstBuf[packet_index + 5],
+ lensBuf[packet_index + 5],
+ pSrcBuf[packet_index + 6],
+ pDstBuf[packet_index + 6],
+ lensBuf[packet_index + 6],
+ pSrcBuf[packet_index + 7],
+ pDstBuf[packet_index + 7],
+ lensBuf[packet_index + 7]);
+ packet_index += 8;
+ }
+#endif
+ /* process 4 buffers at-a-time */
+ while (pktCnt >= 4) {
+ pktCnt -= 4;
+ SNOW3G_F8_4_BUFFER(pCtx, pIV[packet_index + 0],
+ pIV[packet_index + 1],
+ pIV[packet_index + 2],
+ pIV[packet_index + 3],
+ pSrcBuf[packet_index + 0],
+ pDstBuf[packet_index + 0],
+ lensBuf[packet_index + 0],
+ pSrcBuf[packet_index + 1],
+ pDstBuf[packet_index + 1],
+ lensBuf[packet_index + 1],
+ pSrcBuf[packet_index + 2],
+ pDstBuf[packet_index + 2],
+ lensBuf[packet_index + 2],
+ pSrcBuf[packet_index + 3],
+ pDstBuf[packet_index + 3],
+ lensBuf[packet_index + 3]);
+ packet_index += 4;
+ }
+
+ /* process 2 packets at-a-time */
+ while (pktCnt >= 2) {
+ pktCnt -= 2;
+ SNOW3G_F8_2_BUFFER(pCtx, pIV[packet_index + 0],
+ pIV[packet_index + 1],
+ pSrcBuf[packet_index + 0],
+ pDstBuf[packet_index + 0],
+ lensBuf[packet_index + 0],
+ pSrcBuf[packet_index + 1],
+ pDstBuf[packet_index + 1],
+ lensBuf[packet_index + 1]);
+ packet_index += 2;
+ }
+
+ /* remaining packets are processed 1 at a time */
+ while (pktCnt--) {
+ SNOW3G_F8_1_BUFFER(pCtx, pIV[packet_index + 0],
+ pSrcBuf[packet_index + 0],
+ pDstBuf[packet_index + 0],
+ lensBuf[packet_index + 0]);
+ packet_index++;
+ }
+}
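+
+/*
+ * Illustrative usage sketch (editorial example, not part of the library
+ * sources): the N-buffer variant accepts at most 16 packets per call (see the
+ * packetCount check above), so a larger batch can simply be processed in
+ * chunks of 16. Buffers do not need to be pre-sorted by length; the function
+ * sorts them internally.
+ */
+static inline void
+example_snow3g_f8_n_buffer(const snow3g_key_schedule_t *ks,
+                           const void * const iv[],
+                           const void * const in[],
+                           void *out[],
+                           const uint32_t len[],
+                           const uint32_t n)
+{
+        uint32_t done = 0;
+
+        while (done < n) {
+                const uint32_t chunk = (n - done > 16) ? 16 : (n - done);
+
+                SNOW3G_F8_N_BUFFER(ks, &iv[done], &in[done],
+                                   &out[done], &len[done], chunk);
+                done += chunk;
+        }
+}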
+
+void SNOW3G_F8_N_BUFFER_MULTIKEY(const snow3g_key_schedule_t * const pCtx[],
+ const void * const IV[],
+ const void * const pBufferIn[],
+ void *pBufferOut[],
+ const uint32_t bufLenInBytes[],
+ const uint32_t packetCount)
+{
+#ifdef SAFE_PARAM
+ uint32_t i;
+
+ if ((pCtx == NULL) || (IV == NULL) || (pBufferIn == NULL) ||
+ (pBufferOut == NULL) || (bufLenInBytes == NULL))
+ return;
+
+ for (i = 0; i < packetCount; i++)
+ if ((pCtx[i] == NULL) || (IV[i] == NULL) ||
+ (pBufferIn[i] == NULL) || (pBufferOut[i] == NULL) ||
+ (bufLenInBytes[i] == 0) ||
+ (bufLenInBytes[i] > SNOW3G_MAX_BYTELEN))
+ return;
+#endif
+ if (packetCount > 16) {
+ pBufferOut[0] = NULL;
+ printf("packetCount too high (%d)\n", packetCount);
+ return;
+ }
+
+ uint32_t packet_index, inner_index, pktCnt = packetCount;
+ int sortNeeded = 0, tempLen = 0;
+ uint8_t *srctempbuff;
+ uint8_t *dsttempbuff;
+ uint8_t *ivtempbuff;
+ snow3g_key_schedule_t *pCtxBuf[NUM_PACKETS_16] = {NULL};
+ uint8_t *pSrcBuf[NUM_PACKETS_16] = {NULL};
+ uint8_t *pDstBuf[NUM_PACKETS_16] = {NULL};
+ uint8_t *pIV[NUM_PACKETS_16] = {NULL};
+ uint32_t lensBuf[NUM_PACKETS_16] = {0};
+ snow3g_key_schedule_t *tempCtx;
+
+ memcpy((void *)pCtxBuf, pCtx, packetCount * sizeof(void *));
+ memcpy((void *)lensBuf, bufLenInBytes, packetCount * sizeof(uint32_t));
+ memcpy((void *)pSrcBuf, pBufferIn, packetCount * sizeof(void *));
+ memcpy((void *)pDstBuf, pBufferOut, packetCount * sizeof(void *));
+ memcpy((void *)pIV, IV, packetCount * sizeof(void *));
+
+ packet_index = packetCount;
+
+ while (packet_index--) {
+
+ /* check if all packets are sorted by decreasing length */
+ if (packet_index > 0 && lensBuf[packet_index - 1] <
+ lensBuf[packet_index]) {
+ /* this packet array is not correctly sorted */
+ sortNeeded = 1;
+ }
+ }
+
+ if (sortNeeded) {
+		/* sort packets in decreasing buffer size from [0] to [n]th
+		   packet, where buffer[0] will contain the longest buffer and
+		   buffer[n] will contain the shortest buffer.
+		   5 arrays are swapped:
+		   - pointers to input buffers
+		   - pointers to output buffers
+		   - pointers to input IVs
+		   - pointers to key schedules
+		   - input buffer lengths */
+ packet_index = packetCount;
+ while (packet_index--) {
+ inner_index = packet_index;
+ while (inner_index--) {
+ if (lensBuf[packet_index] >
+ lensBuf[inner_index]) {
+ /* swap buffers to arrange in
+ descending order from [0]. */
+ srctempbuff = pSrcBuf[packet_index];
+ dsttempbuff = pDstBuf[packet_index];
+ ivtempbuff = pIV[packet_index];
+ tempLen = lensBuf[packet_index];
+ tempCtx = pCtxBuf[packet_index];
+
+ pSrcBuf[packet_index] =
+ pSrcBuf[inner_index];
+ pDstBuf[packet_index] =
+ pDstBuf[inner_index];
+ pIV[packet_index] = pIV[inner_index];
+ lensBuf[packet_index] =
+ lensBuf[inner_index];
+ pCtxBuf[packet_index] =
+ pCtxBuf[inner_index];
+
+ pSrcBuf[inner_index] = srctempbuff;
+ pDstBuf[inner_index] = dsttempbuff;
+ pIV[inner_index] = ivtempbuff;
+ lensBuf[inner_index] = tempLen;
+ pCtxBuf[inner_index] = tempCtx;
+ }
+ } /* for inner packet index (inner bubble-sort) */
+ } /* for outer packet index (outer bubble-sort) */
+ } /* if sortNeeded */
+
+ packet_index = 0;
+ /* process 8 buffers at-a-time */
+#ifdef AVX2
+ while (pktCnt >= 8) {
+ pktCnt -= 8;
+ SNOW3G_F8_8_BUFFER_MULTIKEY(
+ (const snow3g_key_schedule_t * const *)
+ &pCtxBuf[packet_index],
+ (const void * const *)&pIV[packet_index],
+ (const void * const *)&pSrcBuf[packet_index],
+ (void **)&pDstBuf[packet_index],
+ &lensBuf[packet_index]);
+ packet_index += 8;
+ }
+#endif
+ /* TODO process 4 buffers at-a-time */
+ /* TODO process 2 packets at-a-time */
+ /* remaining packets are processed 1 at a time */
+ while (pktCnt--) {
+ SNOW3G_F8_1_BUFFER(pCtxBuf[packet_index + 0],
+ pIV[packet_index + 0],
+ pSrcBuf[packet_index + 0],
+ pDstBuf[packet_index + 0],
+ lensBuf[packet_index + 0]);
+ packet_index++;
+ }
+}
+
+/*---------------------------------------------------------
+ * @description
+ * Snow3G F9 1 buffer
+ * Single buffer digest with IV and precomputed key schedule
+ *---------------------------------------------------------*/
+void SNOW3G_F9_1_BUFFER(const snow3g_key_schedule_t *pHandle,
+ const void *pIV,
+ const void *pBufferIn,
+ const uint64_t lengthInBits,
+ void *pDigest)
+{
+#ifdef SAFE_PARAM
+ if ((pHandle == NULL) || (pIV == NULL) ||
+ (pBufferIn == NULL) || (pDigest == NULL) ||
+ (lengthInBits == 0) || (lengthInBits > SNOW3G_MAX_BITLEN))
+ return;
+#endif
+ snow3gKeyState1_t ctx;
+ uint32_t z[5];
+ uint64_t lengthInQwords, E, V, P;
+ uint64_t i, rem_bits;
+ const uint64_t *inputBuffer;
+
+ inputBuffer = (const uint64_t *)pBufferIn;
+
+ /* Initialize the snow3g key schedule */
+ snow3gStateInitialize_1(&ctx, pHandle, pIV);
+
+ /*Generate 5 keystream words*/
+ snow3g_f9_keystream_words(&ctx, &z[0]);
+
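+	/*
+	 * z[0..4] are the five keystream words consumed by f9:
+	 * P = z[0]||z[1] and Q = z[2]||z[3] are the GF(2^64) constants used
+	 * below, and z[4] is XORed into the top 32 bits of the accumulator
+	 * before the MAC is extracted.
+	 */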
+ P = ((uint64_t)z[0] << 32) | ((uint64_t)z[1]);
+
+ lengthInQwords = lengthInBits / 64;
+
+ E = 0;
+ /* all blocks except the last one */
+ for (i = 0; i < lengthInQwords; i++) {
+ V = BSWAP64(inputBuffer[i]);
+ E = multiply_and_reduce64(E ^ V, P);
+ }
+
+ /* last bits of last block if any left */
+ rem_bits = lengthInBits % 64;
+ if (rem_bits) {
+ /* last bytes, do not go past end of buffer */
+ memcpy(&V, &inputBuffer[i], (rem_bits + 7) / 8);
+ V = BSWAP64(V);
+ V &= (((uint64_t)-1) << (64 - rem_bits)); /* mask extra bits */
+ E = multiply_and_reduce64(E ^ V, P);
+ }
+
+ /* Multiply by Q */
+ E = multiply_and_reduce64(E ^ lengthInBits,
+ (((uint64_t)z[2] << 32) | ((uint64_t)z[3])));
+
+ /* Final MAC */
+ *(uint32_t *)pDigest =
+ (uint32_t)BSWAP64(E ^ ((uint64_t)z[4] << 32));
+#ifdef SAFE_DATA
+ CLEAR_VAR(&E, sizeof(E));
+ CLEAR_VAR(&V, sizeof(V));
+ CLEAR_VAR(&P, sizeof(P));
+ CLEAR_MEM(&z, sizeof(z));
+ CLEAR_MEM(&ctx, sizeof(ctx));
+ CLEAR_SCRATCH_GPS();
+ CLEAR_SCRATCH_SIMD_REGS();
+#endif /* SAFE_DATA */
+}
+
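+/*
+ * Editorial reference sketch: multiply_and_reduce64() is defined elsewhere in
+ * the library; based on the 3GPP f9 specification and the SNOW3GCONSTANT
+ * (0x1b) reduction constant, it is assumed to compute a carry-less
+ * multiplication in GF(2^64) reduced modulo x^64 + x^4 + x^3 + x + 1.
+ * A bit-serial equivalent for reference:
+ */
+static inline uint64_t
+snow3g_f9_mul64_reference(const uint64_t a, const uint64_t b)
+{
+        uint64_t r = 0;
+        int i;
+
+        for (i = 0; i < 64; i++) {
+                /* r = r * x mod (x^64 + x^4 + x^3 + x + 1) */
+                const uint64_t carry = r >> 63;
+
+                r = (r << 1) ^ (carry ? 0x1bULL : 0);
+                /* accumulate a when the current bit of b (MSB first) is set */
+                if (b & (1ULL << (63 - i)))
+                        r ^= a;
+        }
+        return r;
+}
+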
+#endif /* SNOW3G_COMMON_H */
diff --git a/src/spdk/intel-ipsec-mb/include/snow3g_internal.h b/src/spdk/intel-ipsec-mb/include/snow3g_internal.h
new file mode 100644
index 000000000..287d60be1
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/include/snow3g_internal.h
@@ -0,0 +1,638 @@
+/*******************************************************************************
+ Copyright (c) 2009-2019, Intel Corporation
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*******************************************************************************/
+
+#ifndef _SNOW3G_INTERNAL_H_
+#define _SNOW3G_INTERNAL_H_
+
+#include "intel-ipsec-mb.h"
+#include "wireless_common.h"
+#include "constant_lookup.h"
+
+#define MAX_KEY_LEN (16)
+#define SNOW3G_4_BYTES (4)
+#define SNOW3G_8_BYTES (8)
+#define SNOW3G_8_BITS (8)
+#define SNOW3G_16_BYTES (16)
+#define SNOW3G_16_BITS (16)
+
+#define SNOW3G_BLOCK_SIZE (8)
+
+#define SNOW3G_KEY_LEN_IN_BYTES (16) /* 128b */
+#define SNOW3G_IV_LEN_IN_BYTES (16) /* 128b */
+
+#define SNOW3GCONSTANT (0x1b)
+
+/* Range of input data for SNOW3G is from 1 to 2^32 bits */
+#define SNOW3G_MIN_LEN 1
+#define SNOW3G_MAX_BITLEN (UINT32_MAX)
+#define SNOW3G_MAX_BYTELEN (UINT32_MAX / 8)
+
+#define ComplementaryMask64(x) ((~(x) % 64) + 1)
+#define ComplementaryMask32(x) ((~(x) % 32) + 1)
+
+#ifndef SAFE_LOOKUP
+/*standard lookup */
+#define SNOW3G_LOOKUP_W0(table, idx, size) \
+ table[idx].w0.v
+#define SNOW3G_LOOKUP_W1(table, idx, size) \
+ table[idx].w1.v
+#define SNOW3G_LOOKUP_W2(table, idx, size) \
+ table[idx].w2.v
+#define SNOW3G_LOOKUP_W3(table, idx, size) \
+ table[idx].w3.v
+#else
+/* constant time lookup */
+#if defined (AVX) || defined (AVX2)
+#define SNOW3G_LOOKUP_W0(table, idx, size) \
+ ((uint32_t)(LOOKUP64_AVX(table, idx, size) >> 0))
+#define SNOW3G_LOOKUP_W1(table, idx, size) \
+ ((uint32_t)(LOOKUP64_AVX(table, idx, size) >> 8))
+#define SNOW3G_LOOKUP_W2(table, idx, size) \
+ ((uint32_t)(LOOKUP64_AVX(table, idx, size) >> 16))
+#define SNOW3G_LOOKUP_W3(table, idx, size) \
+ ((uint32_t)(LOOKUP64_AVX(table, idx, size) >> 24))
+#else
+#define SNOW3G_LOOKUP_W0(table, idx, size) \
+ ((uint32_t)(LOOKUP64_SSE(table, idx, size) >> 0))
+#define SNOW3G_LOOKUP_W1(table, idx, size) \
+ ((uint32_t)(LOOKUP64_SSE(table, idx, size) >> 8))
+#define SNOW3G_LOOKUP_W2(table, idx, size) \
+ ((uint32_t)(LOOKUP64_SSE(table, idx, size) >> 16))
+#define SNOW3G_LOOKUP_W3(table, idx, size) \
+ ((uint32_t)(LOOKUP64_SSE(table, idx, size) >> 24))
+#endif /* AVX || AVX2 */
+#endif /* SAFE_LOOKUP */
+
+typedef union SafeBuffer {
+ uint64_t b64;
+ uint32_t b32[2];
+ uint8_t b8[SNOW3G_8_BYTES];
+} SafeBuf;
+
+typedef struct snow3gKeyState1_s {
+ /* 16 LFSR stages */
+ uint32_t LFSR_S[16];
+ /* 3 FSM states */
+ uint32_t FSM_R3;
+ uint32_t FSM_R2;
+ uint32_t FSM_R1;
+} DECLARE_ALIGNED(snow3gKeyState1_t, 16);
+
+typedef struct snow3gKeyState4_s {
+ /* 16 LFSR stages */
+ __m128i LFSR_X[16];
+ /* 3 FSM states */
+ __m128i FSM_X[3];
+ uint32_t iLFSR_X;
+
+} snow3gKeyState4_t;
+
+
+#ifdef _WIN32
+#pragma pack(push,1)
+#define DECLARE_PACKED_UINT32(x) uint32_t x
+#else
+#define DECLARE_PACKED_UINT32(x) uint32_t x __attribute__((__packed__))
+#endif
+
+typedef union snow3gTableEntry_u {
+ uint64_t v;
+ struct {
+ uint8_t shift[3];
+ DECLARE_PACKED_UINT32(v);
+ } w3;
+ struct {
+ uint8_t shift[2];
+ DECLARE_PACKED_UINT32(v);
+ } w2;
+ struct {
+ uint8_t shift[1];
+ DECLARE_PACKED_UINT32(v);
+ } w1;
+ struct {
+ uint8_t shift[4];
+ DECLARE_PACKED_UINT32(v);
+ } w0;
+} snow3gTableEntry_t;
+#ifdef _WIN32
+#pragma pack(pop)
+#endif
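+
+/*
+ * Editorial note: the overlapping w0..w3 views read the packed uint32_t at
+ * byte offsets 4, 1, 2 and 3 of the 64-bit entry, so with the S-box word
+ * presumably replicated across the 8 bytes of each table entry, the four
+ * views return byte-rotations of the same 32-bit value. A small layout
+ * self-check sketch (assumes a little-endian target, as the rest of this
+ * code does):
+ */
+static inline int
+snow3g_table_entry_layout_check(void)
+{
+        snow3gTableEntry_t e;
+
+        e.v = 0x0403020104030201ULL; /* the word 0x04030201 stored twice */
+
+        return (e.w0.v == 0x04030201UL) && (e.w1.v == 0x01040302UL) &&
+                (e.w2.v == 0x02010403UL) && (e.w3.v == 0x03020104UL);
+}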
+
+#define rotl32(x, n) (((x) << (n)) | ((x) >> (32 - (n))))
+
+#define rotr32(x, n) (((x) << (32 - (n))) | ((x) >> (n)))
+
+#define rotl8(x, n) (((x) << (n)) | ((x) >> (8 - (n))))
+
+#define rotr8(x, n) (((x) << (8 - (n))) | ((x) >> (n)))
+
+/*************************************************************************
+ * @description - snow3g internal tables
+ *************************************************************************/
+
+extern const int snow3g_table_A_mul[256];
+extern const int snow3g_table_A_div[256];
+extern snow3gTableEntry_t snow3g_table_S1[256];
+extern snow3gTableEntry_t snow3g_table_S2[256];
+extern const int S1_T0[256];
+extern const int S1_T1[256];
+extern const int S1_T2[256];
+extern const int S1_T3[256];
+extern const int S2_T0[256];
+extern const int S2_T1[256];
+extern const int S2_T2[256];
+extern const int S2_T3[256];
+
+/* -------------------------------------------------------------------
+ * combined S-Box processing for reduced instruction dependencies
+ *
+ * S1_S2_1 : 2 S-Box , 1 packet at a time
+ * S1_S2_S3_1 : 3 S-Box at the same time
+ *
+ * S1_S2_4 : 2 S-Box , 4 packets at a time
+ *
+ * ------------------------------------------------------------------ */
+#ifdef AVX2
+#define _mm256_set_m128i(/* __m128i */ hi, /* __m128i */ lo) \
+ _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 0x1)
+
+#ifndef _mm256_loadu2_m128i
+#define _mm256_loadu2_m128i(hi, lo) \
+ _mm256_inserti128_si256( \
+ _mm256_castsi128_si256(_mm_loadu_si128((const __m128i *)lo)), \
+ _mm_loadu_si128((const __m128i *)hi), 1)
+#endif /* _mm256_loadu2_m128i */
+
+typedef struct snow3gKeyState8_s {
+ /* 16 LFSR stages */
+ __m256i LFSR_X[16];
+ /* 3 FSM states */
+ __m256i FSM_X[3];
+ uint32_t iLFSR_X;
+
+} snow3gKeyState8_t;
+
+/* Sbox Snow3g_S1 and Snow3g_S2 with dependency unrolling
+ * for n in [0..7]
+ *      w[n-1] = k; y[n] = Snow3g_S2(w[n]); k = Snow3g_S1(x[n])
+ */
+#define S1_S2_8(y, w, x, k, l, n) \
+ do { \
+ uint8_t w0, w1, w2, w3; \
+ uint8_t x0, x1, x2, x3; \
+ uint32_t ty = l; \
+ w3 = _mm256_extract_epi8(w, (4 * n + 0)); \
+ w2 = _mm256_extract_epi8(w, (4 * n + 1)); \
+ w1 = _mm256_extract_epi8(w, (4 * n + 2)); \
+ w0 = _mm256_extract_epi8(w, (4 * n + 3)); \
+ l = snow3g_table_S2[w3].w3.v ^ snow3g_table_S2[w2].w2.v ^ \
+ snow3g_table_S2[w1].w1.v ^ snow3g_table_S2[w0].w0.v; \
+ if (n != 0) \
+ w = _mm256_insert_epi32(w, k, (n - 1)); \
+ if (n != 0) \
+ y = _mm256_insert_epi32(y, ty, (n - 1)); \
+ x3 = _mm256_extract_epi8(x, (4 * n + 0)); \
+ x2 = _mm256_extract_epi8(x, (4 * n + 1)); \
+ x1 = _mm256_extract_epi8(x, (4 * n + 2)); \
+ x0 = _mm256_extract_epi8(x, (4 * n + 3)); \
+ k = snow3g_table_S1[x3].w3.v ^ snow3g_table_S1[x2].w2.v ^ \
+ snow3g_table_S1[x1].w1.v ^ snow3g_table_S1[x0].w0.v; \
+ if (n == 7) \
+ w = _mm256_insert_epi32(w, k, n); \
+ if (n == 7) \
+ y = _mm256_insert_epi32(y, l, n); \
+ } while (0)
+#endif /* AVX2 */
+
+
+#if defined (NO_AESNI) || defined (SAFE_LOOKUP)
+/* help compilers to interleave the
+ * operations and table access latencies
+ */
+
+/* Sbox Snow3g_S1 and Snow3g_S2, simple C code
+ * y = Snow3g_S2(w); w = Snow3g_S1(x);
+ */
+#define S1_S2_1(y, w, x) \
+ do { \
+ uint32_t w0, w1, w2, w3; \
+ uint32_t x0, x1, x2, x3; \
+ uint32_t tw, tx; \
+ w3 = w & 0xff; \
+ x3 = x & 0xff; \
+ tw = SNOW3G_LOOKUP_W3(snow3g_table_S2, w3, \
+ sizeof(snow3g_table_S2)); \
+ tx = SNOW3G_LOOKUP_W3(snow3g_table_S1, x3, \
+ sizeof(snow3g_table_S1)); \
+ w0 = w >> 24; \
+ x0 = x >> 24; \
+ tw ^= SNOW3G_LOOKUP_W0(snow3g_table_S2, w0, \
+ sizeof(snow3g_table_S2)); \
+ tx ^= SNOW3G_LOOKUP_W0(snow3g_table_S1, x0, \
+ sizeof(snow3g_table_S1)); \
+ w1 = (w >> 16) & 0xff; \
+ x1 = (x >> 16) & 0xff; \
+ tw ^= SNOW3G_LOOKUP_W1(snow3g_table_S2, w1, \
+ sizeof(snow3g_table_S2)); \
+ tx ^= SNOW3G_LOOKUP_W1(snow3g_table_S1, x1, \
+ sizeof(snow3g_table_S1)); \
+ w2 = (w >> 8) & 0xff; \
+ x2 = (x >> 8) & 0xff; \
+ y = tw ^ SNOW3G_LOOKUP_W2(snow3g_table_S2, w2, \
+ sizeof(snow3g_table_S2)); \
+ w = tx ^ SNOW3G_LOOKUP_W2(snow3g_table_S1, x2, \
+ sizeof(snow3g_table_S1)); \
+ } while (0)
+
+/* Sbox Snow3g_S1 and Snow3g_S2, simple C code
+ * y = Snow3g_S2(w); w = Snow3g_S1(x); u = Snow3g_S1(z);
+ */
+#define S1_S2_S3_1(y, w, x, u, z) \
+ do { \
+ unsigned w0, w1, w2, w3; \
+ unsigned x0, x1, x2, x3; \
+ unsigned z0, z1, z2, z3; \
+ uint32_t tw, tx, tz; \
+ w3 = w & 0xff; \
+ x3 = x & 0xff; \
+ z3 = z & 0xff; \
+ tw = SNOW3G_LOOKUP_W3(snow3g_table_S2, w3, \
+ sizeof(snow3g_table_S2)); \
+ tx = SNOW3G_LOOKUP_W3(snow3g_table_S1, x3, \
+ sizeof(snow3g_table_S1)); \
+ tz = SNOW3G_LOOKUP_W3(snow3g_table_S1, z3, \
+ sizeof(snow3g_table_S1)); \
+ w0 = w >> 24; \
+ x0 = x >> 24; \
+ z0 = z >> 24; \
+ tw ^= SNOW3G_LOOKUP_W0(snow3g_table_S2, w0, \
+ sizeof(snow3g_table_S2)); \
+ tx ^= SNOW3G_LOOKUP_W0(snow3g_table_S1, x0, \
+ sizeof(snow3g_table_S1)); \
+ tz ^= SNOW3G_LOOKUP_W0(snow3g_table_S1, z0, \
+ sizeof(snow3g_table_S1)); \
+ w1 = (w >> 16) & 0xff; \
+ x1 = (x >> 16) & 0xff; \
+ z1 = (z >> 16) & 0xff; \
+ tw ^= SNOW3G_LOOKUP_W1(snow3g_table_S2, w1, \
+ sizeof(snow3g_table_S2)); \
+ tx ^= SNOW3G_LOOKUP_W1(snow3g_table_S1, x1, \
+ sizeof(snow3g_table_S1)); \
+ tz ^= SNOW3G_LOOKUP_W1(snow3g_table_S1, z1, \
+ sizeof(snow3g_table_S1)); \
+ w2 = (w >> 8) & 0xff; \
+ x2 = (x >> 8) & 0xff; \
+ z2 = (z >> 8) & 0xff; \
+ y = tw ^ SNOW3G_LOOKUP_W2(snow3g_table_S2, w2, \
+ sizeof(snow3g_table_S2)); \
+ w = tx ^ SNOW3G_LOOKUP_W2(snow3g_table_S1, x2, \
+ sizeof(snow3g_table_S1)); \
+ u = tz ^ SNOW3G_LOOKUP_W2(snow3g_table_S1, z2, \
+ sizeof(snow3g_table_S1)); \
+ } while (0)
+
+/* Sbox Snow3g_S1 and Snow3g_S2 with dependency unrolling
+ * for n in [0..3]
+ * w[n-1] = k; y[n] = Snow3g_S2(w[n]); k = Snow3g_S1(x[n])
+ *
+ *
+ */
+#define S1_S2_4(y, w, x, k, l, n) \
+ do { \
+ unsigned w0, w1, w2, w3; \
+ unsigned x0, x1, x2, x3; \
+ uint32_t ty = l; \
+ w3 = _mm_extract_epi8(w, (4 * n + 0)); \
+ w2 = _mm_extract_epi8(w, (4 * n + 1)); \
+ w1 = _mm_extract_epi8(w, (4 * n + 2)); \
+ w0 = _mm_extract_epi8(w, (4 * n + 3)); \
+ l = SNOW3G_LOOKUP_W3(snow3g_table_S2, w3, \
+ sizeof(snow3g_table_S2)) ^ \
+ SNOW3G_LOOKUP_W2(snow3g_table_S2, w2, \
+ sizeof(snow3g_table_S2)) ^ \
+ SNOW3G_LOOKUP_W1(snow3g_table_S2, w1, \
+ sizeof(snow3g_table_S2)) ^ \
+ SNOW3G_LOOKUP_W0(snow3g_table_S2, w0, \
+ sizeof(snow3g_table_S2)); \
+ if (n != 0) \
+ w = _mm_insert_epi32(w, k, (n - 1)); \
+ if (n != 0) \
+ y = _mm_insert_epi32(y, ty, (n - 1)); \
+ x3 = _mm_extract_epi8(x, (4 * n + 0)); \
+ x2 = _mm_extract_epi8(x, (4 * n + 1)); \
+ x1 = _mm_extract_epi8(x, (4 * n + 2)); \
+ x0 = _mm_extract_epi8(x, (4 * n + 3)); \
+ k = SNOW3G_LOOKUP_W3(snow3g_table_S1, x3, \
+ sizeof(snow3g_table_S1)) ^ \
+ SNOW3G_LOOKUP_W2(snow3g_table_S1, x2, \
+ sizeof(snow3g_table_S1)) ^ \
+ SNOW3G_LOOKUP_W1(snow3g_table_S1, x1, \
+ sizeof(snow3g_table_S1)) ^ \
+ SNOW3G_LOOKUP_W0(snow3g_table_S1, x0, \
+ sizeof(snow3g_table_S1)); \
+ if (n == 3) \
+ w = _mm_insert_epi32(w, k, n); \
+ if (n == 3) \
+ y = _mm_insert_epi32(y, l, n); \
+ } while (0)
+
+#else /* SSE/AVX */
+
+/* use AES-NI Rijndael for Snow3G Sbox, overlap the latency
+ * of AESENC with Snow3g_S2 sbox calculations
+ */
+
+/* Sbox Snow3g_S1 and Snow3g_S2, simple C code
+ * y = Snow3g_S2(w); w = rijndael Snow3g_S1(x);
+ */
+#define S1_S2_1(y, w, x) \
+ do { \
+ __m128i m10, m11; \
+ m11 = _mm_cvtsi32_si128(x); \
+ m10 = _mm_setzero_si128(); \
+ m11 = _mm_shuffle_epi32(m11, 0x0); \
+ m11 = _mm_aesenc_si128(m11, m10); \
+ y = Snow3g_S2(w); \
+ w = _mm_cvtsi128_si32(m11); \
+ } while (0)
+
+/* Sbox Snow3g_S1 and Snow3g_S2
+ * y = Snow3g_S2(w); w = rijndael Snow3g_S1(x); v = rijndael Snow3g_S1(z);
+ */
+#define S1_S2_S3_1(y, w, x, v, z) \
+ do { \
+ __m128i m10, m11, m12; \
+ m11 = _mm_cvtsi32_si128(x); \
+ m10 = _mm_setzero_si128(); \
+ m11 = _mm_shuffle_epi32(m11, 0x0); \
+ m11 = _mm_aesenc_si128(m11, m10); \
+ m12 = _mm_cvtsi32_si128(z); \
+ m12 = _mm_shuffle_epi32(m12, 0x0); \
+ m12 = _mm_aesenc_si128(m12, m10); \
+ y = Snow3g_S2(w); \
+ w = _mm_cvtsi128_si32(m11); \
+ v = _mm_cvtsi128_si32(m12); \
+ } while (0)
+/* Sbox Snow3g_S1 and Snow3g_S2
+ * for n in [0..3]
+ * extract packet data
+ * y = Snow3g_S2(w); w = rijndael Snow3g_S1(x)
+ * insert the result data
+ */
+#define S1_S2_4(y, w, x, k, n) \
+ do { \
+ uint32_t ty; \
+ unsigned w0, w1, w2, w3; \
+ __m128i m10, m11; \
+ m10 = _mm_setzero_si128(); \
+ m11 = _mm_shuffle_epi32( \
+ x, ((n << 6) | (n << 4) | (n << 2) | (n << 0))); \
+ m11 = _mm_aesenc_si128(m11, m10); \
+ w3 = _mm_extract_epi8(w, (4 * n + 0)); \
+ w2 = _mm_extract_epi8(w, (4 * n + 1)); \
+ w1 = _mm_extract_epi8(w, (4 * n + 2)); \
+ w0 = _mm_extract_epi8(w, (4 * n + 3)); \
+ ty = snow3g_table_S2[w3].w3.v ^ snow3g_table_S2[w1].w1.v ^ \
+ snow3g_table_S2[w2].w2.v ^ snow3g_table_S2[w0].w0.v; \
+ if (n != 0) \
+ w = _mm_insert_epi32(w, k, (n - 1)); \
+ k = _mm_cvtsi128_si32(m11); \
+ if (n == 3) \
+ w = _mm_insert_epi32(w, k, n); \
+ y = _mm_insert_epi32(y, ty, n); \
+ } while (0)
+
+#endif /* NO_AESNI || SAFE_LOOKUP */
+
+/* -------------------------------------------------------------------
+ * Sbox Snow3g_S1 maps a 32bit input to a 32bit output
+ * ------------------------------------------------------------------ */
+static inline uint32_t Snow3g_S1(uint32_t w)
+{
+ uint32_t w0, w1, w2, w3;
+
+ w3 = w & 0xff;
+ w1 = (w >> 16) & 0xff;
+ w2 = (w >> 8) & 0xff;
+ w0 = w >> 24;
+ return snow3g_table_S1[w3].w3.v ^ snow3g_table_S1[w1].w1.v ^
+ snow3g_table_S1[w2].w2.v ^ snow3g_table_S1[w0].w0.v;
+}
+
+/* -------------------------------------------------------------------
+ * Sbox Snow3g_S2 maps a 32bit input to a 32bit output
+ * ------------------------------------------------------------------ */
+static inline uint32_t Snow3g_S2(uint32_t w)
+{
+ uint32_t w0, w1, w2, w3;
+
+ w3 = w & 0xff;
+ w1 = (w >> 16) & 0xff;
+ w2 = (w >> 8) & 0xff;
+ w0 = w >> 24;
+
+ return snow3g_table_S2[w3].w3.v ^ snow3g_table_S2[w1].w1.v ^
+ snow3g_table_S2[w2].w2.v ^ snow3g_table_S2[w0].w0.v;
+}
+
+/* -------------------------------------------------------------------
+ * LFSR array shift by 1 position
+ * ------------------------------------------------------------------ */
+static inline void ShiftLFSR_1(snow3gKeyState1_t *pCtx)
+{
+ uint32_t i;
+
+ for (i = 0; i < 15; i++)
+ pCtx->LFSR_S[i] = pCtx->LFSR_S[i + 1];
+}
+
+/* -------------------------------------------------------------------
+ * LFSR array shift by 2 positions
+ * ------------------------------------------------------------------ */
+static inline void ShiftTwiceLFSR_1(snow3gKeyState1_t *pCtx)
+{
+ int i;
+
+ for (i = 0; i < 14; i++)
+ pCtx->LFSR_S[i] = pCtx->LFSR_S[i + 2];
+}
+
+/* -------------------------------------------------------------------
+ * ClockFSM function as defined in snow3g standard
+ * The FSM takes 2 input words, S5 and S15, from the LFSR
+ * and produces a 32-bit output word F
+ * ------------------------------------------------------------------ */
+static inline void ClockFSM_1(snow3gKeyState1_t *pCtx, uint32_t *data)
+{
+ uint32_t F, R;
+
+ F = pCtx->LFSR_S[15] + pCtx->FSM_R1;
+ R = pCtx->FSM_R3 ^ pCtx->LFSR_S[5];
+ *data = F ^ pCtx->FSM_R2;
+ R += pCtx->FSM_R2;
+ S1_S2_1(pCtx->FSM_R3, pCtx->FSM_R2, pCtx->FSM_R1);
+ pCtx->FSM_R1 = R;
+}
+
+/* -------------------------------------------------------------------
+ * ClockLFSR function as defined in snow3g standard
+ * ------------------------------------------------------------------ */
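+/*
+ * Per the SNOW 3G specification, the keystream-mode feedback is
+ *      s16 = alpha * s0  XOR  s2  XOR  alpha^-1 * s11,
+ * where MULalpha()/DIValpha() are realised below as an 8-bit shift plus a
+ * single 256-entry table lookup each (snow3g_table_A_mul / snow3g_table_A_div).
+ */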
+static inline void ClockLFSR_1(snow3gKeyState1_t *pCtx)
+{
+ uint32_t V = pCtx->LFSR_S[2];
+ uint32_t S0 = pCtx->LFSR_S[0];
+ uint32_t S11 = pCtx->LFSR_S[11];
+
+ V ^= snow3g_table_A_mul[S0 >> 24];
+ V ^= snow3g_table_A_div[S11 & 0xff];
+ V ^= S0 << 8;
+ V ^= S11 >> 8;
+
+ ShiftLFSR_1(pCtx);
+
+ pCtx->LFSR_S[15] = V;
+}
+
+/**
+ *******************************************************************************
+ * @description
+ *      This function initializes the cipher state (LFSR and FSM) for 1 buffer
+ *      for snow3g f8/f9, from the key schedule and IV.
+ *
+ * @param[in] pCtx Context where the scheduled keys are stored
+ * @param [in] pKeySched Key schedule
+ * @param [in] pIV IV
+ *
+ ******************************************************************************/
+static inline void
+snow3gStateInitialize_1(snow3gKeyState1_t *pCtx,
+ const snow3g_key_schedule_t *pKeySched,
+ const void *pIV)
+{
+ uint32_t K, L;
+ int i;
+ uint32_t V0, V1;
+ uint32_t F0, F1;
+ uint32_t L0, L1, L11, L12;
+ uint32_t R0, R1;
+ uint32_t FSM2, FSM3, FSM4;
+ const uint32_t *pIV32 = pIV;
+
+ /* LFSR initialisation */
+ for (i = 0; i < 4; i++) {
+ K = pKeySched->k[i];
+ L = ~K;
+ pCtx->LFSR_S[i + 4] = K;
+ pCtx->LFSR_S[i + 12] = K;
+ pCtx->LFSR_S[i + 0] = L;
+ pCtx->LFSR_S[i + 8] = L;
+ }
+
+ pCtx->LFSR_S[15] ^= BSWAP32(pIV32[3]);
+ pCtx->LFSR_S[12] ^= BSWAP32(pIV32[2]);
+ pCtx->LFSR_S[10] ^= BSWAP32(pIV32[1]);
+ pCtx->LFSR_S[9] ^= BSWAP32(pIV32[0]);
+
+	/* FSM initialization */
+ FSM2 = 0x0;
+ FSM3 = 0x0;
+ FSM4 = 0x0;
+ R1 = 0x0;
+ V1 = pCtx->LFSR_S[15];
+
+ for (i = 0; i < 16; i++) {
+ /* clock FSM + clock LFSR + clockFSM + clock LFSR */
+ L0 = pCtx->LFSR_S[0];
+ L1 = pCtx->LFSR_S[1];
+ V0 = pCtx->LFSR_S[2];
+ F0 = V1 + R1; /** (s15 + R1) **/
+ V1 = pCtx->LFSR_S[3];
+ V0 ^= snow3g_table_A_mul[L0 >> 24]; /* MUL(s0,0 ) */
+ F0 ^= FSM2; /** (s15 + R1) ^ R2 **/
+ V1 ^= snow3g_table_A_mul[L1 >> 24];
+ L11 = pCtx->LFSR_S[11];
+ L12 = pCtx->LFSR_S[12];
+ R0 = FSM3 ^ pCtx->LFSR_S[5]; /*** (R3 ^ s5 ) ***/
+ V0 ^= snow3g_table_A_div[L11 & 0xff]; /* DIV(s11,3 )*/
+ R0 += FSM2; /*** R2 + (R3 ^ s5 ) ***/
+ V1 ^= snow3g_table_A_div[L12 & 0xff];
+ V0 ^= L0 << 8; /* (s0,1 || s0,2 || s0,3 || 0x00) */
+ V1 ^= L1 << 8;
+ V0 ^= L11 >> 8; /* (0x00 || s11,0 || s11,1 || s11,2 ) */
+ V1 ^= L12 >> 8;
+ S1_S2_S3_1(FSM3, FSM2, R1, FSM4, R0);
+ V0 ^= F0; /* ^F */
+ R1 = FSM3 ^ pCtx->LFSR_S[6];
+ F1 = V0 + R0;
+ F1 ^= FSM2;
+ R1 += FSM2;
+ FSM3 = Snow3g_S2(FSM2);
+ FSM2 = FSM4;
+ V1 ^= F1;
+
+ /* shift LFSR twice */
+ ShiftTwiceLFSR_1(pCtx);
+
+ pCtx->LFSR_S[14] = V0;
+ pCtx->LFSR_S[15] = V1;
+ }
+
+ /* set FSM into scheduling structure */
+ pCtx->FSM_R3 = FSM3;
+ pCtx->FSM_R2 = FSM2;
+ pCtx->FSM_R1 = R1;
+}
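+
+/*
+ * Editorial sketch: the interleaved loop above is an unrolled form of the
+ * standard initialisation, which clocks the cipher 32 times in key-setup
+ * mode (the FSM output is folded back into the LFSR and no keystream is
+ * emitted). A roughly equivalent reference, ignoring the latency-hiding
+ * interleave of the optimised version:
+ */
+static inline void
+snow3gStateInitialize_1_reference(snow3gKeyState1_t *pCtx,
+                                  const snow3g_key_schedule_t *pKeySched,
+                                  const void *pIV)
+{
+        const uint32_t *pIV32 = pIV;
+        uint32_t F;
+        int i;
+
+        /* LFSR load: key words and their complements, IV XORed into
+         * stages 9, 10, 12 and 15 */
+        for (i = 0; i < 4; i++) {
+                const uint32_t K = pKeySched->k[i];
+
+                pCtx->LFSR_S[i + 4] = pCtx->LFSR_S[i + 12] = K;
+                pCtx->LFSR_S[i + 0] = pCtx->LFSR_S[i + 8] = ~K;
+        }
+        pCtx->LFSR_S[15] ^= BSWAP32(pIV32[3]);
+        pCtx->LFSR_S[12] ^= BSWAP32(pIV32[2]);
+        pCtx->LFSR_S[10] ^= BSWAP32(pIV32[1]);
+        pCtx->LFSR_S[9] ^= BSWAP32(pIV32[0]);
+
+        pCtx->FSM_R1 = pCtx->FSM_R2 = pCtx->FSM_R3 = 0;
+
+        /* 32 clocks in initialisation mode */
+        for (i = 0; i < 32; i++) {
+                ClockFSM_1(pCtx, &F);
+                ClockLFSR_1(pCtx);
+                pCtx->LFSR_S[15] ^= F;
+        }
+}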
+
+/**
+ *******************************************************************************
+ * @description
+ * This function generates 5 words of keystream used in the initial stages
+ * of snow3g F9.
+ *
+ * @param[in]      pCtx        Context where the scheduled keys are stored
+ * @param[in/out]  pKeyStream  Pointer to the generated keystream
+ *
+ ******************************************************************************/
+static inline void snow3g_f9_keystream_words(snow3gKeyState1_t *pCtx,
+ uint32_t *pKeyStream)
+{
+ uint32_t F, XX;
+ int i;
+
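+        /* first clock after initialisation: the FSM output (XX) is discarded,
+         * as required by the keystream generation procedure */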
+ ClockFSM_1(pCtx, &XX);
+ ClockLFSR_1(pCtx);
+
+ for (i = 0; i < 5; i++) {
+ ClockFSM_1(pCtx, &F);
+ pKeyStream[i] = F ^ pCtx->LFSR_S[0];
+ ClockLFSR_1(pCtx);
+ }
+}
+
+#endif /* _SNOW3G_INTERNAL_H_ */
diff --git a/src/spdk/intel-ipsec-mb/include/transpose_avx2.asm b/src/spdk/intel-ipsec-mb/include/transpose_avx2.asm
new file mode 100644
index 000000000..fed12cf4b
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/include/transpose_avx2.asm
@@ -0,0 +1,218 @@
+;;
+;; Copyright (c) 2012-2019, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%ifndef _TRANSPOSE_AVX2_ASM_
+%define _TRANSPOSE_AVX2_ASM_
+
+%include "include/reg_sizes.asm"
+
+; LOAD ALL 8 LANES FOR 8x8 32-BIT TRANSPOSE
+;
+; r0-r7 [out] ymm registers which will contain the data to be transposed
+; addr0-addr7 [in] pointers to the next 32-byte block of data to be fetched for all 8 lanes
+; ptr_offset [in] offset to be applied on all pointers (addr0-addr7)
+%macro TRANSPOSE8_U32_LOAD8 17
+%define %%r0 %1
+%define %%r1 %2
+%define %%r2 %3
+%define %%r3 %4
+%define %%r4 %5
+%define %%r5 %6
+%define %%r6 %7
+%define %%r7 %8
+%define %%addr0 %9
+%define %%addr1 %10
+%define %%addr2 %11
+%define %%addr3 %12
+%define %%addr4 %13
+%define %%addr5 %14
+%define %%addr6 %15
+%define %%addr7 %16
+%define %%ptr_offset %17
+
+; Expected output data
+;
+; r0 = {e3 e2 e1 e0 a3 a2 a1 a0}
+; r1 = {f3 f2 f1 f0 b3 b2 b1 b0}
+; r2 = {g3 g2 g1 g0 c3 c2 c1 c0}
+; r3 = {h3 h2 h1 h0 d3 d2 d1 d0}
+; r4 = {e7 e6 e5 e4 a7 a6 a5 a4}
+; r5 = {f7 f6 f5 f4 b7 b6 b5 b4}
+; r6 = {g7 g6 g5 g4 c7 c6 c5 c4}
+; r7 = {h7 h6 h5 h4 d7 d6 d5 d4}
+
+ vmovups XWORD(%%r0),[%%addr0+%%ptr_offset]
+ vmovups XWORD(%%r1),[%%addr1+%%ptr_offset]
+ vmovups XWORD(%%r2),[%%addr2+%%ptr_offset]
+ vmovups XWORD(%%r3),[%%addr3+%%ptr_offset]
+ vmovups XWORD(%%r4),[%%addr0+%%ptr_offset+16]
+ vmovups XWORD(%%r5),[%%addr1+%%ptr_offset+16]
+ vmovups XWORD(%%r6),[%%addr2+%%ptr_offset+16]
+ vmovups XWORD(%%r7),[%%addr3+%%ptr_offset+16]
+
+ vinserti128 %%r0, %%r0, [%%addr4+%%ptr_offset], 0x01
+ vinserti128 %%r1, %%r1, [%%addr5+%%ptr_offset], 0x01
+ vinserti128 %%r2, %%r2, [%%addr6+%%ptr_offset], 0x01
+ vinserti128 %%r3, %%r3, [%%addr7+%%ptr_offset], 0x01
+ vinserti128 %%r4, %%r4, [%%addr4+%%ptr_offset+16], 0x01
+ vinserti128 %%r5, %%r5, [%%addr5+%%ptr_offset+16], 0x01
+ vinserti128 %%r6, %%r6, [%%addr6+%%ptr_offset+16], 0x01
+ vinserti128 %%r7, %%r7, [%%addr7+%%ptr_offset+16], 0x01
+
+%endmacro
+
+; 8x8 32-BIT TRANSPOSE
+;
+; Before calling this macro, TRANSPOSE8_U32_LOAD8 must be called.
+;
+; r0-r3 [in/out] ymm registers containing bytes 0-15 of each 32B block (e.g. ymm0 = [e3-e0 a3-a0])
+; r4-r7 [in/out] ymm registers containing bytes 16-31 of each 32B block (e.g. ymm4 = [e4-e7 a4-a7])
+; t0-t1 [clobbered] ymm temporary registers
+%macro TRANSPOSE8_U32 10
+%define %%r0 %1
+%define %%r1 %2
+%define %%r2 %3
+%define %%r3 %4
+%define %%r4 %5
+%define %%r5 %6
+%define %%r6 %7
+%define %%r7 %8
+%define %%t0 %9
+%define %%t1 %10
+; Input looks like: {r0 r1 r2 r3 r4 r5 r6 r7}
+; r0 = {e3 e2 e1 e0 a3 a2 a1 a0}
+; r1 = {f3 f2 f1 f0 b3 b2 b1 b0}
+; r2 = {g3 g2 g1 g0 c3 c2 c1 c0}
+; r3 = {h3 h2 h1 h0 d3 d2 d1 d0}
+; r4 = {e7 e6 e5 e4 a7 a6 a5 a4}
+; r5 = {f7 f6 f5 f4 b7 b6 b5 b4}
+; r6 = {g7 g6 g5 g4 c7 c6 c5 c4}
+; r7 = {h7 h6 h5 h4 d7 d6 d5 d4}
+;
+; Output looks like: {r0 r1 r2 r3 r4 r5 r6 r7}
+; r0 = {h0 g0 f0 e0 d0 c0 b0 a0}
+; r1 = {h1 g1 f1 e1 d1 c1 b1 a1}
+; r2 = {h2 g2 f2 e2 d2 c2 b2 a2}
+; r3 = {h3 g3 f3 e3 d3 c3 b3 a3}
+; r4 = {h4 g4 f4 e4 d4 c4 b4 a4}
+; r5 = {h5 g5 f5 e5 d5 c5 b5 a5}
+; r6 = {h6 g6 f6 e6 d6 c6 b6 a6}
+; r7 = {h7 g7 f7 e7 d7 c7 b7 a7}
+;
+ ; process top half (r0..r3)
+ vshufps %%t0, %%r0, %%r1, 0x44 ; t0 = {f1 f0 e1 e0 b1 b0 a1 a0}
+ vshufps %%r0, %%r0, %%r1, 0xEE ; r0 = {f3 f2 e3 e2 b3 b2 a3 a2}
+ vshufps %%t1, %%r2, %%r3, 0x44 ; t1 = {h1 h0 g1 g0 d1 d0 c1 c0}
+ vshufps %%r2, %%r2, %%r3, 0xEE ; r2 = {h3 h2 g3 g2 d3 d2 c3 c2}
+
+ vshufps %%r1, %%t0, %%t1, 0xDD ; r1 = {h1 g1 f1 e1 d1 c1 b1 a1}
+ vshufps %%r3, %%r0, %%r2, 0xDD ; r3 = {h3 g3 f3 e3 d3 c3 b3 a3}
+ vshufps %%r2, %%r0, %%r2, 0x88 ; r2 = {h2 g2 f2 e2 d2 c2 b2 a2}
+ vshufps %%r0, %%t0, %%t1, 0x88 ; r0 = {h0 g0 f0 e0 d0 c0 b0 a0}
+
+ ;; process bottom half (r4..r7)
+ vshufps %%t0, %%r4, %%r5, 0x44 ; t0 = {f5 f4 e5 e4 b5 b4 a5 a4}
+ vshufps %%r4, %%r4, %%r5, 0xEE ; r4 = {f7 f6 e7 e6 b7 b6 a7 a6}
+ vshufps %%t1, %%r6, %%r7, 0x44 ; t1 = {h5 h4 g5 g4 d5 d4 c5 c4}
+ vshufps %%r6, %%r6, %%r7, 0xEE ; r6 = {h7 h6 g7 g6 d7 d6 c7 c6}
+
+ vshufps %%r5, %%t0, %%t1, 0xDD ; r5 = {h5 g5 f5 e5 d5 c5 b5 a5}
+ vshufps %%r7, %%r4, %%r6, 0xDD ; r7 = {h7 g7 f7 e7 d7 c7 b7 a7}
+ vshufps %%r6, %%r4, %%r6, 0x88 ; r6 = {h6 g6 f6 e6 d6 c6 b6 a6}
+ vshufps %%r4, %%t0, %%t1, 0x88 ; r4 = {h4 g4 f4 e4 d4 c4 b4 a4}
+%endmacro
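+
+; Editorial note on the semantics: taken together with TRANSPOSE8_U32_LOAD8,
+; the effect is an 8x8 transpose of 32-bit words. The loader reads 8
+; consecutive words from each of the 8 lane pointers (lanes a..h), and after
+; this macro output register j holds word j from every lane
+; (e.g. r0 = {h0 g0 f0 e0 d0 c0 b0 a0}).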
+
+; LOAD ALL 4 LANES FOR 4x4 64-BIT TRANSPOSE
+;
+; r0-r3 [out] ymm registers which will contain the data to be transposed
+; addr0-addr3 [in] pointers to the next 32-byte block of data to be fetched for the 4 lanes
+; ptr_offset [in] offset to be applied on all pointers (addr0-addr3)
+%macro TRANSPOSE4_U64_LOAD4 9
+%define %%r0 %1
+%define %%r1 %2
+%define %%r2 %3
+%define %%r3 %4
+%define %%addr0 %5
+%define %%addr1 %6
+%define %%addr2 %7
+%define %%addr3 %8
+%define %%ptr_offset %9
+
+; Expected output data
+;
+; r0 = {c1 c0 a1 a0}
+; r1 = {d1 d0 b1 b0}
+; r2 = {c3 c2 a3 a2}
+; r3 = {d3 d2 b3 b2}
+
+ vmovupd XWORD(%%r0),[%%addr0+%%ptr_offset]
+ vmovupd XWORD(%%r1),[%%addr1+%%ptr_offset]
+ vmovupd XWORD(%%r2),[%%addr0+%%ptr_offset+16]
+ vmovupd XWORD(%%r3),[%%addr1+%%ptr_offset+16]
+
+ vinserti128 %%r0, %%r0, [%%addr2+%%ptr_offset], 0x01
+ vinserti128 %%r1, %%r1, [%%addr3+%%ptr_offset], 0x01
+ vinserti128 %%r2, %%r2, [%%addr2+%%ptr_offset+16], 0x1
+ vinserti128 %%r3, %%r3, [%%addr3+%%ptr_offset+16], 0x01
+
+%endmacro
+
+; 4x4 64-BIT TRANSPOSE
+;
+; Before calling this macro, TRANSPOSE4_U64_LOAD4 must be called.
+;
+; This macro takes 4 registers as input (r0-r3)
+; and transposes their content (64-bit elements)
+; outputting the data in registers (o0,r1,o2,r3),
+; using two additional registers
+%macro TRANSPOSE4_U64 6
+%define %%r0 %1 ; [in] ymm register for row 0 input (c0-c1 a1-a0)
+%define %%r1 %2 ; [in/out] ymm register for row 1 input (d0-d1 b1-b0) and output
+%define %%r2 %3 ; [in] ymm register for row 2 input (c3-c2 a3-a2)
+%define %%r3 %4 ; [in/out] ymm register for row 3 input (d3-d2 b3-b2) and output
+%define %%o0 %5 ; [out] ymm register for row 0 output
+%define %%o2 %6 ; [out] ymm register for row 2 output
+; Input looks like: {r0 r1 r2 r3}
+; r0 = {c1 c0 a1 a0}
+; r1 = {d1 d0 b1 b0}
+; r2 = {c3 c2 a3 a2}
+; r3 = {d3 d2 b3 b2}
+;
+; output looks like: {o0 r1 o2 r3}
+; o0 = {d0 c0 b0 a0}
+; r1 = {d1 c1 b1 a1}
+; o2 = {d2 c2 b2 a2}
+; r3 = {d3 c3 b3 a3}
+ ; vshufps does not cross the mid-way boundary and hence is cheaper
+ vshufps %%o0, %%r0, %%r1, 0x44 ; o0 = {d0 c0 b0 a0}
+	vshufps	%%r1, %%r0, %%r1, 0xEE	; r1 = {d1 c1 b1 a1}
+
+	vshufps	%%o2, %%r2, %%r3, 0x44	; o2 = {d2 c2 b2 a2}
+ vshufps %%r3, %%r2, %%r3, 0xEE ; r3 = {d3 c3 b3 a3}
+%endmacro
+
+%endif ;; _TRANSPOSE_AVX2_ASM_
diff --git a/src/spdk/intel-ipsec-mb/include/transpose_avx512.asm b/src/spdk/intel-ipsec-mb/include/transpose_avx512.asm
new file mode 100644
index 000000000..6937ceb00
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/include/transpose_avx512.asm
@@ -0,0 +1,497 @@
+;;
+;; Copyright (c) 2012-2019, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%ifndef _TRANSPOSE_AVX512_ASM_
+%define _TRANSPOSE_AVX512_ASM_
+
+%include "include/reg_sizes.asm"
+
+section .data
+default rel
+align 64
+PSHUFFLE_TRANSPOSE_MASK1: dq 0x0000000000000000
+ dq 0x0000000000000001
+ dq 0x0000000000000008
+ dq 0x0000000000000009
+ dq 0x0000000000000004
+ dq 0x0000000000000005
+ dq 0x000000000000000C
+ dq 0x000000000000000D
+
+align 64
+PSHUFFLE_TRANSPOSE_MASK2: dq 0x0000000000000002
+ dq 0x0000000000000003
+ dq 0x000000000000000A
+ dq 0x000000000000000B
+ dq 0x0000000000000006
+ dq 0x0000000000000007
+ dq 0x000000000000000E
+ dq 0x000000000000000F
+
+
+; LOAD FIRST 8 LANES FOR 16x16 32-BIT TRANSPOSE
+;
+; r0-r15 [out] zmm registers which will contain the data to be transposed
+; addr0-addr7 [in] pointers to the next 64-byte block of data to be fetched for the first 8 lanes
+; ptr_offset [in] offset to be applied on all pointers (addr0-addr7)
+%macro TRANSPOSE16_U32_LOAD_FIRST8 25
+%define %%r0 %1
+%define %%r1 %2
+%define %%r2 %3
+%define %%r3 %4
+%define %%r4 %5
+%define %%r5 %6
+%define %%r6 %7
+%define %%r7 %8
+%define %%r8 %9
+%define %%r9 %10
+%define %%r10 %11
+%define %%r11 %12
+%define %%r12 %13
+%define %%r13 %14
+%define %%r14 %15
+%define %%r15 %16
+%define %%addr0 %17
+%define %%addr1 %18
+%define %%addr2 %19
+%define %%addr3 %20
+%define %%addr4 %21
+%define %%addr5 %22
+%define %%addr6 %23
+%define %%addr7 %24
+%define %%ptr_offset %25
+
+; Expected output data
+;
+; r0 = {X X X X X X X X a7 a6 a5 a4 a3 a2 a1 a0}
+; r1 = {X X X X X X X X b7 b6 b5 b4 b3 b2 b1 b0}
+; r2 = {X X X X X X X X c7 c6 c5 c4 c3 c2 c1 c0}
+; r3 = {X X X X X X X X d7 d6 d5 d4 d3 d2 d1 d0}
+; r4 = {X X X X X X X X e7 e6 e5 e4 e3 e2 e1 e0}
+; r5 = {X X X X X X X X f7 f6 f5 f4 f3 f2 f1 f0}
+; r6 = {X X X X X X X X g7 g6 g5 g4 g3 g2 g1 g0}
+; r7 = {X X X X X X X X h7 h6 h5 h4 h3 h2 h1 h0}
+; r8 = {X X X X X X X X a15 a14 a13 a12 a11 a10 a9 a8}
+; r9 = {X X X X X X X X b15 b14 b13 b12 b11 b10 b9 b8}
+; r10 = {X X X X X X X X c15 c14 c13 c12 c11 c10 c9 c8}
+; r11 = {X X X X X X X X d15 d14 d13 d12 d11 d10 d9 d8}
+; r12 = {X X X X X X X X e15 e14 e13 e12 e11 e10 e9 e8}
+; r13 = {X X X X X X X X f15 f14 f13 f12 f11 f10 f9 f8}
+; r14 = {X X X X X X X X g15 g14 g13 g12 g11 g10 g9 g8}
+; r15 = {X X X X X X X X h15 h14 h13 h12 h11 h10 h9 h8}
+ vmovups YWORD(%%r0),[%%addr0+%%ptr_offset]
+ vmovups YWORD(%%r1),[%%addr1+%%ptr_offset]
+ vmovups YWORD(%%r2),[%%addr2+%%ptr_offset]
+ vmovups YWORD(%%r3),[%%addr3+%%ptr_offset]
+ vmovups YWORD(%%r4),[%%addr4+%%ptr_offset]
+ vmovups YWORD(%%r5),[%%addr5+%%ptr_offset]
+ vmovups YWORD(%%r6),[%%addr6+%%ptr_offset]
+ vmovups YWORD(%%r7),[%%addr7+%%ptr_offset]
+ vmovups YWORD(%%r8),[%%addr0+%%ptr_offset+32]
+ vmovups YWORD(%%r9),[%%addr1+%%ptr_offset+32]
+ vmovups YWORD(%%r10),[%%addr2+%%ptr_offset+32]
+ vmovups YWORD(%%r11),[%%addr3+%%ptr_offset+32]
+ vmovups YWORD(%%r12),[%%addr4+%%ptr_offset+32]
+ vmovups YWORD(%%r13),[%%addr5+%%ptr_offset+32]
+ vmovups YWORD(%%r14),[%%addr6+%%ptr_offset+32]
+ vmovups YWORD(%%r15),[%%addr7+%%ptr_offset+32]
+
+%endmacro
+
+; LOAD LAST 8 LANES FOR 16x16 32-BIT TRANSPOSE
+;
+; r0-r15 [in/out] zmm registers which will contain the data to be transposed
+; addr0-addr7 [in] pointers to the next 64-byte block of data to be fetched for the last 8 lanes
+; ptr_offset [in] offset to be applied on all pointers (addr0-addr7)
+%macro TRANSPOSE16_U32_LOAD_LAST8 25
+%define %%r0 %1
+%define %%r1 %2
+%define %%r2 %3
+%define %%r3 %4
+%define %%r4 %5
+%define %%r5 %6
+%define %%r6 %7
+%define %%r7 %8
+%define %%r8 %9
+%define %%r9 %10
+%define %%r10 %11
+%define %%r11 %12
+%define %%r12 %13
+%define %%r13 %14
+%define %%r14 %15
+%define %%r15 %16
+%define %%addr0 %17
+%define %%addr1 %18
+%define %%addr2 %19
+%define %%addr3 %20
+%define %%addr4 %21
+%define %%addr5 %22
+%define %%addr6 %23
+%define %%addr7 %24
+%define %%ptr_offset %25
+
+; Expected output data
+;
+; r0 = {i7 i6 i5 i4 i3 i2 i1 i0 a7 a6 a5 a4 a3 a2 a1 a0}
+; r1 = {j7 j6 j5 j4 j3 j2 j1 j0 b7 b6 b5 b4 b3 b2 b1 b0}
+; r2 = {k7 k6 k5 k4 k3 k2 k1 k0 c7 c6 c5 c4 c3 c2 c1 c0}
+; r3 = {l7 l6 l5 l4 l3 l2 l1 l0 d7 d6 d5 d4 d3 d2 d1 d0}
+; r4 = {m7 m6 m5 m4 m3 m2 m1 m0 e7 e6 e5 e4 e3 e2 e1 e0}
+; r5 = {n7 n6 n5 n4 n3 n2 n1 n0 f7 f6 f5 f4 f3 f2 f1 f0}
+; r6 = {o7 o6 o5 o4 o3 o2 o1 o0 g7 g6 g5 g4 g3 g2 g1 g0}
+; r7 = {p7 p6 p5 p4 p3 p2 p1 p0 h7 h6 h5 h4 h3 h2 h1 h0}
+; r8 = {i15 i14 i13 i12 i11 i10 i9 i8 a15 a14 a13 a12 a11 a10 a9 a8}
+; r9 = {j15 j14 j13 j12 j11 j10 j9 j8 b15 b14 b13 b12 b11 b10 b9 b8}
+; r10 = {k15 k14 k13 k12 k11 k10 k9 k8 c15 c14 c13 c12 c11 c10 c9 c8}
+; r11 = {l15 l14 l13 l12 l11 l10 l9 l8 d15 d14 d13 d12 d11 d10 d9 d8}
+; r12 = {m15 m14 m13 m12 m11 m10 m9 m8 e15 e14 e13 e12 e11 e10 e9 e8}
+; r13 = {n15 n14 n13 n12 n11 n10 n9 n8 f15 f14 f13 f12 f11 f10 f9 f8}
+; r14 = {o15 o14 o13 o12 o11 o10 o9 o8 g15 g14 g13 g12 g11 g10 g9 g8}
+; r15 = {p15 p14 p13 p12 p11 p10 p9 p8 h15 h14 h13 h12 h11 h10 h9 h8}
+
+ vinserti64x4 %%r0, %%r0, [%%addr0+%%ptr_offset], 0x01
+ vinserti64x4 %%r1, %%r1, [%%addr1+%%ptr_offset], 0x01
+ vinserti64x4 %%r2, %%r2, [%%addr2+%%ptr_offset], 0x01
+ vinserti64x4 %%r3, %%r3, [%%addr3+%%ptr_offset], 0x01
+ vinserti64x4 %%r4, %%r4, [%%addr4+%%ptr_offset], 0x01
+ vinserti64x4 %%r5, %%r5, [%%addr5+%%ptr_offset], 0x01
+ vinserti64x4 %%r6, %%r6, [%%addr6+%%ptr_offset], 0x01
+ vinserti64x4 %%r7, %%r7, [%%addr7+%%ptr_offset], 0x01
+ vinserti64x4 %%r8, %%r8, [%%addr0+%%ptr_offset+32], 0x01
+ vinserti64x4 %%r9, %%r9, [%%addr1+%%ptr_offset+32], 0x01
+ vinserti64x4 %%r10, %%r10, [%%addr2+%%ptr_offset+32], 0x01
+ vinserti64x4 %%r11, %%r11, [%%addr3+%%ptr_offset+32], 0x01
+ vinserti64x4 %%r12, %%r12, [%%addr4+%%ptr_offset+32], 0x01
+ vinserti64x4 %%r13, %%r13, [%%addr5+%%ptr_offset+32], 0x01
+ vinserti64x4 %%r14, %%r14, [%%addr6+%%ptr_offset+32], 0x01
+ vinserti64x4 %%r15, %%r15, [%%addr7+%%ptr_offset+32], 0x01
+
+%endmacro
+
+; 16x16 32-BIT TRANSPOSE
+;
+; Before calling this macro, TRANSPOSE16_U32_LOAD_FIRST8 and TRANSPOSE16_U32_LOAD_LAST8
+; must be called.
+;
+; r0-r7 [in/out] zmm registers containing bytes 0-31 of each 64B block (e.g. zmm0 = [i7-i0 a7-a0])
+; r8-r15 [in/out] zmm registers containing bytes 32-63 of each 64B block (e.g. zmm8 = [i15-i8 a15-a8])
+; t0-t1 [clobbered] zmm temporary registers
+; m0-m1 [clobbered] zmm registers for shuffle mask storing
+%macro TRANSPOSE16_U32 20
+%define %%r0 %1
+%define %%r1 %2
+%define %%r2 %3
+%define %%r3 %4
+%define %%r4 %5
+%define %%r5 %6
+%define %%r6 %7
+%define %%r7 %8
+%define %%r8 %9
+%define %%r9 %10
+%define %%r10 %11
+%define %%r11 %12
+%define %%r12 %13
+%define %%r13 %14
+%define %%r14 %15
+%define %%r15 %16
+%define %%t0 %17
+%define %%t1 %18
+%define %%m0 %19
+%define %%m1 %20
+
+; Input data
+;
+; r0 = {i7 i6 i5 i4 i3 i2 i1 i0 a7 a6 a5 a4 a3 a2 a1 a0}
+; r1 = {j7 j6 j5 j4 j3 j2 j1 j0 b7 b6 b5 b4 b3 b2 b1 b0}
+; r2 = {k7 k6 k5 k4 k3 k2 k1 k0 c7 c6 c5 c4 c3 c2 c1 c0}
+; r3 = {l7 l6 l5 l4 l3 l2 l1 l0 d7 d6 d5 d4 d3 d2 d1 d0}
+; r4 = {m7 m6 m5 m4 m3 m2 m1 m0 e7 e6 e5 e4 e3 e2 e1 e0}
+; r5 = {n7 n6 n5 n4 n3 n2 n1 n0 f7 f6 f5 f4 f3 f2 f1 f0}
+; r6 = {o7 o6 o5 o4 o3 o2 o1 o0 g7 g6 g5 g4 g3 g2 g1 g0}
+; r7 = {p7 p6 p5 p4 p3 p2 p1 p0 h7 h6 h5 h4 h3 h2 h1 h0}
+; r8 = {i15 i14 i13 i12 i11 i10 i9 i8 a15 a14 a13 a12 a11 a10 a9 a8}
+; r9 = {j15 j14 j13 j12 j11 j10 j9 j8 b15 b14 b13 b12 b11 b10 b9 b8}
+; r10 = {k15 k14 k13 k12 k11 k10 k9 k8 c15 c14 c13 c12 c11 c10 c9 c8}
+; r11 = {l15 l14 l13 l12 l11 l10 l9 l8 d15 d14 d13 d12 d11 d10 d9 d8}
+; r12 = {m15 m14 m13 m12 m11 m10 m9 m8 e15 e14 e13 e12 e11 e10 e9 e8}
+; r13 = {n15 n14 n13 n12 n11 n10 n9 n8 f15 f14 f13 f12 f11 f10 f9 f8}
+; r14 = {o15 o14 o13 o12 o11 o10 o9 o8 g15 g14 g13 g12 g11 g10 g9 g8}
+; r15 = {p15 p14 p13 p12 p11 p10 p9 p8 h15 h14 h13 h12 h11 h10 h9 h8}
+
+; Expected output data
+;
+; r0 = {p0 o0 n0 m0 l0 k0 j0 i0 h0 g0 f0 e0 d0 c0 b0 a0}
+; r1 = {p1 o1 n1 m1 l1 k1 j1 i1 h1 g1 f1 e1 d1 c1 b1 a1}
+; r2 = {p2 o2 n2 m2 l2 k2 j2 i2 h2 g2 f2 e2 d2 c2 b2 a2}
+; r3 = {p3 o3 n3 m3 l3 k3 j3 i3 h3 g3 f3 e3 d3 c3 b3 a3}
+; r4 = {p4 o4 n4 m4 l4 k4 j4 i4 h4 g4 f4 e4 d4 c4 b4 a4}
+; r5 = {p5 o5 n5 m5 l5 k5 j5 i5 h5 g5 f5 e5 d5 c5 b5 a5}
+; r6 = {p6 o6 n6 m6 l6 k6 j6 i6 h6 g6 f6 e6 d6 c6 b6 a6}
+; r7 = {p7 o7 n7 m7 l7 k7 j7 i7 h7 g7 f7 e7 d7 c7 b7 a7}
+; r8 = {p8 o8 n8 m8 l8 k8 j8 i8 h8 g8 f8 e8 d8 c8 b8 a8}
+; r9 = {p9 o9 n9 m9 l9 k9 j9 i9 h9 g9 f9 e9 d9 c9 b9 a9}
+; r10 = {p10 o10 n10 m10 l10 k10 j10 i10 h10 g10 f10 e10 d10 c10 b10 a10}
+; r11 = {p11 o11 n11 m11 l11 k11 j11 i11 h11 g11 f11 e11 d11 c11 b11 a11}
+; r12 = {p12 o12 n12 m12 l12 k12 j12 i12 h12 g12 f12 e12 d12 c12 b12 a12}
+; r13 = {p13 o13 n13 m13 l13 k13 j13 i13 h13 g13 f13 e13 d13 c13 b13 a13}
+; r14 = {p14 o14 n14 m14 l14 k14 j14 i14 h14 g14 f14 e14 d14 c14 b14 a14}
+; r15 = {p15 o15 n15 m15 l15 k15 j15 i15 h15 g15 f15 e15 d15 c15 b15 a15}
+
+
+ ; process first 4 rows (r0..r3)
+ vshufps %%t0, %%r0, %%r1, 0x44 ; t0 = {j5 j4 i5 i4 j1 j0 i1 i0 b5 b4 a5 a4 b1 b0 a1 a0}
+ vshufps %%r0, %%r0, %%r1, 0xEE ; r0 = {j7 j6 i7 i6 j3 j2 i3 i2 b7 b6 a7 a6 b3 b2 a3 a2}
+ vshufps %%t1, %%r2, %%r3, 0x44 ; t1 = {l5 l4 k5 k4 l1 l0 k1 k0 d5 d4 c5 c4 d1 d0 c1 c0}
+ vshufps %%r2, %%r2, %%r3, 0xEE ; r2 = {l7 l6 k7 k6 l3 l2 k3 k2 d7 d6 c7 c6 d3 d2 c3 c2}
+
+ vshufps %%r3, %%t0, %%t1, 0xDD ; r3 = {l5 k5 j5 i5 l1 k1 j1 i1 d5 c5 b5 a5 d1 c1 b1 a1}
+ vshufps %%r1, %%r0, %%r2, 0x88 ; r1 = {l6 k6 j6 i6 l2 k2 j2 i2 d6 c6 b6 a6 d2 c2 b2 a2}
+ vshufps %%r0, %%r0, %%r2, 0xDD ; r0 = {l7 k7 j7 i7 l3 k3 j3 i3 d7 c7 b7 a7 d3 c3 b3 a3}
+ vshufps %%t0, %%t0, %%t1, 0x88 ; t0 = {l4 k4 j4 i4 l0 k0 j0 i0 d4 c4 b4 a4 d0 c0 b0 a0}
+
+ ; Load permute masks
+ vmovdqa64 %%m0, [PSHUFFLE_TRANSPOSE_MASK1]
+ vmovdqa64 %%m1, [PSHUFFLE_TRANSPOSE_MASK2]
+
+ ; process second 4 rows (r4..r7)
+ vshufps %%r2, %%r4, %%r5, 0x44 ; r2 = {n5 n4 m5 m4 n1 n0 m1 m0 f5 f4 e5 e4 f1 f0 e1 e0}
+ vshufps %%r4, %%r4, %%r5, 0xEE ; r4 = {n7 n6 m7 m6 n3 n2 m3 m2 f7 f6 e7 e6 f3 f2 e3 e2}
+ vshufps %%t1, %%r6, %%r7, 0x44 ; t1 = {p5 p4 o5 o4 p1 p0 o1 o0 h5 h4 g5 g4 h1 h0 g1 g0}
+ vshufps %%r6, %%r6, %%r7, 0xEE ; r6 = {p7 p6 o7 o6 p3 p2 o3 o2 h7 h6 g7 g6 h3 h2 g3 g2}
+
+ vshufps %%r7, %%r2, %%t1, 0xDD ; r7 = {p5 o5 n5 m5 p1 o1 n1 m1 h5 g5 f5 e5 h1 g1 f1 e1}
+ vshufps %%r5, %%r4, %%r6, 0x88 ; r5 = {p6 o6 n6 m6 p2 o2 n2 m2 h6 g6 f6 e6 h2 g2 f2 e2}
+ vshufps %%r4, %%r4, %%r6, 0xDD ; r4 = {p7 o7 n7 m7 p3 o3 n3 m3 h7 g7 f7 e7 h3 g3 f3 e3}
+ vshufps %%r2, %%r2, %%t1, 0x88 ; r2 = {p4 o4 n4 m4 p0 o0 n0 m0 h4 g4 f4 e4 h0 g0 f0 e0}
+
+ ; process third 4 rows (r8..r11)
+ vshufps %%r6, %%r8, %%r9, 0x44 ; r6 = {j13 j12 i13 i12 j9 j8 i9 i8 b13 b12 a13 a12 b9 b8 a9 a8 }
+ vshufps %%r8, %%r8, %%r9, 0xEE ; r8 = {j15 j14 i15 i14 j11 j10 i11 i10 b15 b14 a15 a14 b11 b10 a11 a10}
+ vshufps %%t1, %%r10, %%r11, 0x44 ; t1 = {l13 l12 k13 k12 l9 l8 k9 k8 d13 d12 c13 c12 d9 d8 c9 c8 }
+ vshufps %%r10, %%r10, %%r11, 0xEE ; r10 = {l15 l14 k15 k14 l11 l10 k11 k10 d15 d14 c15 c14 d11 d10 c11 c10}
+
+ vshufps %%r11, %%r6, %%t1, 0xDD ; r11 = {l13 k13 j13 i13 l9 k9 j9 i9 d13 c13 b13 a13 d9 c9 b9 a9 }
+ vshufps %%r9, %%r8, %%r10, 0x88 ; r9 = {l14 k14 j14 i14 l10 k10 j10 i10 d14 c14 b14 a14 d10 c10 b10 a10}
+ vshufps %%r8, %%r8, %%r10, 0xDD ; r8 = {l15 k15 j15 i15 l11 k11 j11 i11 d15 c15 b15 a15 d11 c11 b11 a11}
+ vshufps %%r6, %%r6, %%t1, 0x88 ; r6 = {l12 k12 j12 i12 l8 k8 j8 i8 d12 c12 b12 a12 d8 c8 b8 a8 }
+
+ ; process fourth 4 rows (r12..r15)
+ vshufps %%r10, %%r12, %%r13, 0x44 ; r10 = {n13 n12 m13 m12 n9 n8 m9 m8 f13 f12 e13 e12 f9 f8 e9 e8 }
+ vshufps %%r12, %%r12, %%r13, 0xEE ; r12 = {n15 n14 m15 m14 n11 n10 m11 m10 f15 f14 e15 e14 f11 f10 e11 e10}
+ vshufps %%t1, %%r14, %%r15, 0x44 ; t1 = {p13 p12 o13 o12 p9 p8 o9 o8 h13 h12 g13 g12 h9 h8 g9 g8 }
+ vshufps %%r14, %%r14, %%r15, 0xEE ; r14 = {p15 p14 o15 o14 p11 p10 o11 o10 h15 h14 g15 g14 h11 h10 g11 g10}
+
+ vshufps %%r15, %%r10, %%t1, 0xDD ; r15 = {p13 o13 n13 m13 p9 o9 n9 m9 h13 g13 f13 e13 h9 g9 f9 e9 }
+ vshufps %%r13, %%r12, %%r14, 0x88 ; r13 = {p14 o14 n14 m14 p10 o10 n10 m10 h14 g14 f14 e14 h10 g10 f10 e10}
+ vshufps %%r12, %%r12, %%r14, 0xDD ; r12 = {p15 o15 n15 m15 p11 o11 n11 m11 h15 g15 f15 e15 h11 g11 f11 e11}
+ vshufps %%r10, %%r10, %%t1, 0x88 ; r10 = {p12 o12 n12 m12 p8 o8 n8 m8 h12 g12 f12 e12 h8 g8 f8 e8 }
+
+ ; perform final shuffles on bottom half, producing r8-r15
+ vmovdqu32 %%t1, %%m0
+ vpermi2q %%t1, %%r9, %%r13 ; t1 = {p10 o10 n10 m10 l10 k10 j10 i10 h10 g10 f10 e10 d10 c10 b10 a10}
+ vmovdqu32 %%r14, %%m1
+ vpermi2q %%r14, %%r9, %%r13 ; r14 = {p14 o14 n14 m14 l14 k14 j14 i14 h14 g14 f14 e14 d14 c14 b14 a14}
+
+ vmovdqu32 %%r9, %%m0
+ vpermi2q %%r9, %%r11, %%r15 ; r9 = {p9 o9 n9 m9 l9 k9 j9 i9 h9 g9 f9 e9 d9 c9 b9 a9}
+ vmovdqu32 %%r13, %%m1
+ vpermi2q %%r13, %%r11, %%r15 ; r13 = {p13 o13 n13 m13 l13 k13 j13 i13 h13 g13 f13 e13 d13 c13 b13 a13}
+
+ vmovdqu32 %%r11, %%m0
+ vpermi2q %%r11, %%r8, %%r12 ; r11 = {p11 o11 n11 m11 l11 k11 j11 i11 h11 g11 f11 e11 d11 c11 b11 a11}
+ vmovdqu32 %%r15, %%m1
+ vpermi2q %%r15, %%r8, %%r12 ; r15 = {p15 o15 n15 m15 l15 k15 j15 i15 h15 g15 f15 e15 d15 c15 b15 a15}
+
+ vmovdqu32 %%r8, %%m0
+ vpermi2q %%r8, %%r6, %%r10 ; r8 = {p8 o8 n8 m8 l8 k8 j8 i8 h8 g8 f8 e8 d8 c8 b8 a8}
+ vmovdqu32 %%r12, %%m1
+ vpermi2q %%r12, %%r6, %%r10 ; r12 = {p12 o12 n12 m12 l12 k12 j12 i12 h12 g12 f12 e12 d12 c12 b12 a12}
+
+ vmovdqu32 %%r10, %%t1 ; r10 = {p10 o10 n10 m10 l10 k10 j10 i10 h10 g10 f10 e10 d10 c10 b10 a10}
+
+ ; perform final shuffles on top half, producing r0-r7
+ vmovdqu32 %%t1, %%m0
+ vpermi2q %%t1, %%r1, %%r5 ; t1 = {p2 o2 n2 m2 l2 k2 j2 i2 h2 g2 f2 e2 d2 c2 b2 a2}
+ vmovdqu32 %%r6, %%m1
+ vpermi2q %%r6, %%r1, %%r5 ; r6 = {p6 o6 n6 m6 l6 k6 j6 i6 h6 g6 f6 e6 d6 c6 b6 a6}
+
+ vmovdqu32 %%r1, %%m0
+ vpermi2q %%r1, %%r3, %%r7 ; r1 = {p1 o1 n1 m1 l1 k1 j1 i1 h1 g1 f1 e1 d1 c1 b1 a1}
+ vmovdqu32 %%r5, %%m1
+ vpermi2q %%r5, %%r3, %%r7 ; r5 = {p5 o5 n5 m5 l5 k5 j5 i5 h5 g5 f5 e5 d5 c5 b5 a5}
+
+ vmovdqu32 %%r3, %%m0
+ vpermi2q %%r3, %%r0, %%r4 ; r3 = {p3 o3 n3 m3 l3 k3 j3 i3 h3 g3 f3 e3 d3 c3 b3 a3}
+ vmovdqu32 %%r7, %%m1
+ vpermi2q %%r7, %%r0, %%r4 ; r7 = {p7 o7 n7 m7 l7 k7 j7 i7 h7 g7 f7 e7 d7 c7 b7 a7}
+
+ vmovdqu32 %%r0, %%m0
+ vpermi2q %%r0, %%t0, %%r2 ; r0 = {p0 o0 n0 m0 l0 k0 j0 i0 h0 g0 f0 e0 d0 c0 b0 a0}
+ vmovdqu32 %%r4, %%m1
+ vpermi2q %%r4, %%t0, %%r2 ; r4 = {p4 o4 n4 m4 l4 k4 j4 i4 h4 g4 f4 e4 d4 c4 b4 a4}
+
+ vmovdqu32 %%r2, %%t1 ; r2 = {p2 o2 n2 m2 l2 k2 j2 i2 h2 g2 f2 e2 d2 c2 b2 a2}
+
+%endmacro
+
+; LOAD ALL 8 LANES FOR 8x8 64-BIT TRANSPOSE
+;
+; r0-r7 [out] zmm registers which will contain the data to be transposed
+; addr0-addr7 [in] pointers to the next 64-byte block of data to be fetched for all 8 lanes
+; ptr_offset [in] offset to be applied on all pointers (addr0-addr7)
+%macro TRANSPOSE8_U64_LOAD8 17
+%define %%r0 %1
+%define %%r1 %2
+%define %%r2 %3
+%define %%r3 %4
+%define %%r4 %5
+%define %%r5 %6
+%define %%r6 %7
+%define %%r7 %8
+%define %%addr0 %9
+%define %%addr1 %10
+%define %%addr2 %11
+%define %%addr3 %12
+%define %%addr4 %13
+%define %%addr5 %14
+%define %%addr6 %15
+%define %%addr7 %16
+%define %%ptr_offset %17
+
+; Expected output data
+;
+; r0 = {e3 e2 e1 e0 a3 a2 a1 a0}
+; r1 = {f3 f2 f1 f0 b3 b2 b1 b0}
+; r2 = {g3 g2 g1 g0 c3 c2 c1 c0}
+; r3 = {h3 h2 h1 h0 d3 d2 d1 d0}
+; r4 = {e7 e6 e5 e4 a7 a6 a5 a4}
+; r5 = {f7 f6 f5 f4 b7 b6 b5 b4}
+; r6 = {g7 g6 g5 g4 c7 c6 c5 c4}
+; r7 = {h7 h6 h5 h4 d7 d6 d5 d4}
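+;
+; (letters a..h denote the 8 lanes addr0..addr7; the digit is the index of the
+;  64-bit word within the 64-byte block)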
+
+ vmovups YWORD(%%r0),[%%addr0+%%ptr_offset]
+ vmovups YWORD(%%r1),[%%addr1+%%ptr_offset]
+ vmovups YWORD(%%r2),[%%addr2+%%ptr_offset]
+ vmovups YWORD(%%r3),[%%addr3+%%ptr_offset]
+ vmovups YWORD(%%r4),[%%addr0+%%ptr_offset+32]
+ vmovups YWORD(%%r5),[%%addr1+%%ptr_offset+32]
+ vmovups YWORD(%%r6),[%%addr2+%%ptr_offset+32]
+ vmovups YWORD(%%r7),[%%addr3+%%ptr_offset+32]
+
+ vinserti64x4 %%r0, %%r0, [%%addr4+%%ptr_offset], 0x01
+ vinserti64x4 %%r1, %%r1, [%%addr5+%%ptr_offset], 0x01
+ vinserti64x4 %%r2, %%r2, [%%addr6+%%ptr_offset], 0x01
+ vinserti64x4 %%r3, %%r3, [%%addr7+%%ptr_offset], 0x01
+ vinserti64x4 %%r4, %%r4, [%%addr4+%%ptr_offset+32], 0x01
+ vinserti64x4 %%r5, %%r5, [%%addr5+%%ptr_offset+32], 0x01
+ vinserti64x4 %%r6, %%r6, [%%addr6+%%ptr_offset+32], 0x01
+ vinserti64x4 %%r7, %%r7, [%%addr7+%%ptr_offset+32], 0x01
+
+%endmacro
+
+; 8x8 64-BIT TRANSPOSE
+;
+; Before calling this macro, TRANSPOSE8_U64_LOAD8 must be called.
+;
+; r0-r3 [in/out] zmm registers containing bytes 0-31 of each 64B block (e.g. zmm0 = [e3-e0 a3-a0])
+; r4-r7 [in/out] zmm registers containing bytes 32-63 of each 64B block (e.g. zmm4 = [e4-e7 a4-a7])
+; t0-t1 [clobbered] zmm temporary registers
+; PERM_INDEX1-2 [clobbered] zmm registers for shuffle mask storing
+%macro TRANSPOSE8_U64 12
+%define %%r0 %1
+%define %%r1 %2
+%define %%r2 %3
+%define %%r3 %4
+%define %%r4 %5
+%define %%r5 %6
+%define %%r6 %7
+%define %%r7 %8
+%define %%t0 %9
+%define %%t1 %10
+%define %%PERM_INDEX1 %11
+%define %%PERM_INDEX2 %12
+
+; each x(i) is 64 bits; 8 * 64 = 512 bits ==> a full digest length of 64-bit quantities
+
+; Input data
+;
+; r0 = {e3 e2 e1 e0 a3 a2 a1 a0}
+; r1 = {f3 f2 f1 f0 b3 b2 b1 b0}
+; r2 = {g3 g2 g1 g0 c3 c2 c1 c0}
+; r3 = {h3 h2 h1 h0 d3 d2 d1 d0}
+; r4 = {e7 e6 e5 e4 a7 a6 a5 a4}
+; r5 = {f7 f6 f5 f4 b7 b6 b5 b4}
+; r6 = {g7 g6 g5 g4 c7 c6 c5 c4}
+; r7 = {h7 h6 h5 h4 d7 d6 d5 d4}
+;
+; Expected output data
+;
+; r0 = {h0 g0 f0 e0 d0 c0 b0 a0}
+; r1 = {h1 g1 f1 e1 d1 c1 b1 a1}
+; r2 = {h2 g2 f2 e2 d2 c2 b2 a2}
+; r3 = {h3 g3 f3 e3 d3 c3 b3 a3}
+; r4 = {h4 g4 f4 e4 d4 c4 b4 a4}
+; r5 = {h5 g5 f5 e5 d5 c5 b5 a5}
+; r6 = {h6 g6 f6 e6 d6 c6 b6 a6}
+; r7 = {h7 g7 f7 e7 d7 c7 b7 a7}
+
+        ;; load the permute masks (these index registers are not clobbered below)
+ vmovdqa32 %%PERM_INDEX1, [PSHUFFLE_TRANSPOSE_MASK1] ; temp
+ vmovdqa32 %%PERM_INDEX2, [PSHUFFLE_TRANSPOSE_MASK2] ; temp
+
+ ; process top half (r0..r3)
+ vshufpd %%t0, %%r0, %%r1, 0x00 ; t0 = {f2 e2 f0 e0 b2 a2 b0 a0}
+        vshufpd %%r1, %%r0, %%r1, 0xFF  ; r1 = {f3 e3 f1 e1 b3 a3 b1 a1}
+ vshufpd %%t1, %%r2, %%r3, 0x00 ; t1 = {h2 g2 h0 g0 d2 c2 d0 c0}
+ vshufpd %%r2, %%r2, %%r3, 0xFF ; r2 = {h3 g3 h1 g1 d3 c3 d1 c1}
+
+ vmovdqa32 %%r3, %%r1
+ vpermt2q %%r1, %%PERM_INDEX1,%%r2 ; r1 = {h1 g1 f1 e1 d1 c1 b1 a1}
+ vpermt2q %%r3, %%PERM_INDEX2,%%r2 ; r3 = {h3 g3 f3 e3 d3 c3 b3 a3}
+
+ vmovdqa32 %%r0, %%t0
+ vmovdqa32 %%r2, %%t0
+ vpermt2q %%r0, %%PERM_INDEX1,%%t1 ; r0 = {h0 g0 f0 e0 d0 c0 b0 a0}
+ vpermt2q %%r2, %%PERM_INDEX2,%%t1 ; r2 = {h2 g2 f2 e2 d2 c2 b2 a2}
+
+        ; process bottom half (r4..r7)
+ vshufpd %%t0, %%r4, %%r5, 0x00 ; t0 = {f6 e6 f4 e4 b6 a6 b4 a4}
+        vshufpd %%r5, %%r4, %%r5, 0xFF  ; r5 = {f7 e7 f5 e5 b7 a7 b5 a5}
+ vshufpd %%t1, %%r6, %%r7, 0x00 ; t1 = {h6 g6 h4 g4 d6 c6 d4 c4}
+        vshufpd %%r6, %%r6, %%r7, 0xFF  ; r6 = {h7 g7 h5 g5 d7 c7 d5 c5}
+
+ vmovdqa32 %%r7, %%r5
+ vpermt2q %%r5, %%PERM_INDEX1,%%r6 ; r5 = {h5 g5 f5 e5 d5 c5 b5 a5}
+ vpermt2q %%r7, %%PERM_INDEX2,%%r6 ; r7 = {h7 g7 f7 e7 d7 c7 b7 a7}
+
+ vmovdqa32 %%r4, %%t0
+ vmovdqa32 %%r6, %%t0
+ vpermt2q %%r4, %%PERM_INDEX1,%%t1 ; r4 = {h4 g4 f4 e4 d4 c4 b4 a4}
+ vpermt2q %%r6, %%PERM_INDEX2,%%t1 ; r6 = {h6 g6 f6 e6 d6 c6 b6 a6}
+%endmacro
+
+%endif ;; _TRANSPOSE_AVX512_ASM_
diff --git a/src/spdk/intel-ipsec-mb/include/wireless_common.asm b/src/spdk/intel-ipsec-mb/include/wireless_common.asm
new file mode 100644
index 000000000..811c2c256
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/include/wireless_common.asm
@@ -0,0 +1,128 @@
+;;
+;; Copyright (c) 2019, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%include "include/os.asm"
+
+section .data
+default rel
+align 16
+swap_mask:
+db 0x03, 0x02, 0x01, 0x00, 0x07, 0x06, 0x05, 0x04
+db 0x0b, 0x0a, 0x09, 0x08, 0x0f, 0x0e, 0x0d, 0x0c
+
+section .text
+
+; Function which XOR's 64 bytes of the input buffer with 64 bytes of the
+; KeyStream, placing the result in the output buffer.
+; KeyStream bytes are swapped on a 32-bit boundary by this macro before the XOR
+%macro xor_keystream 1
+%define %%SIMDTYPE %1 ; "SSE" or "AVX"
+
+%ifidn %%SIMDTYPE, AVX
+ %define %%MOVDQU vmovdqu
+ %define %%MOVDQA vmovdqa
+ %define %%PXOR vpxor
+ %define %%PSHUFB vpshufb
+%else
+ %define %%MOVDQU movdqu
+ %define %%MOVDQA movdqa
+ %define %%PXOR pxor
+ %define %%PSHUFB pshufb
+%endif
+%ifdef LINUX
+ %define %%pIn rdi
+ %define %%pOut rsi
+ %define %%pKS rdx
+%else
+ %define %%pIn rcx
+ %define %%pOut rdx
+ %define %%pKS r8
+
+ mov rax, rsp
+ sub rsp, 48
+ and rsp, ~15
+ %%MOVDQA [rsp], xmm6
+ %%MOVDQA [rsp + 16], xmm7
+ %%MOVDQA [rsp + 32], xmm8
+%endif
+ %define XKEY0 xmm0
+ %define XKEY1 xmm1
+ %define XKEY2 xmm2
+ %define XKEY3 xmm3
+ %define XIN0 xmm4
+ %define XIN1 xmm5
+ %define XIN2 xmm6
+ %define XIN3 xmm7
+ %define XSHUF xmm8
+
+ %%MOVDQA XSHUF, [rel swap_mask]
+ %%MOVDQA XKEY0, [%%pKS]
+ %%MOVDQA XKEY1, [%%pKS + 16]
+ %%MOVDQA XKEY2, [%%pKS + 32]
+ %%MOVDQA XKEY3, [%%pKS + 48]
+
+ %%PSHUFB XKEY0, XSHUF
+ %%PSHUFB XKEY1, XSHUF
+ %%PSHUFB XKEY2, XSHUF
+ %%PSHUFB XKEY3, XSHUF
+
+ %%MOVDQU XIN0, [%%pIn]
+ %%MOVDQU XIN1, [%%pIn + 16]
+ %%MOVDQU XIN2, [%%pIn + 32]
+ %%MOVDQU XIN3, [%%pIn + 48]
+
+ %%PXOR XKEY0, XIN0
+ %%PXOR XKEY1, XIN1
+ %%PXOR XKEY2, XIN2
+ %%PXOR XKEY3, XIN3
+
+ %%MOVDQU [%%pOut], XKEY0
+ %%MOVDQU [%%pOut + 16], XKEY1
+ %%MOVDQU [%%pOut + 32], XKEY2
+ %%MOVDQU [%%pOut + 48], XKEY3
+
+%ifndef LINUX
+ %%MOVDQA xmm6, [rsp]
+ %%MOVDQA xmm7, [rsp + 16]
+ %%MOVDQA xmm8, [rsp + 32]
+ mov rsp,rax
+%endif
+%endmacro
+
+MKGLOBAL(asm_XorKeyStream64B_avx,function,internal)
+asm_XorKeyStream64B_avx:
+ xor_keystream AVX
+ ret
+
+MKGLOBAL(asm_XorKeyStream64B_sse,function,internal)
+asm_XorKeyStream64B_sse:
+ xor_keystream SSE
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/include/wireless_common.h b/src/spdk/intel-ipsec-mb/include/wireless_common.h
new file mode 100644
index 000000000..a0ba60019
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/include/wireless_common.h
@@ -0,0 +1,216 @@
+/*******************************************************************************
+ Copyright (c) 2009-2019, Intel Corporation
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*******************************************************************************/
+
+#ifndef _WIRELESS_COMMON_H_
+#define _WIRELESS_COMMON_H_
+
+#include <string.h>
+#ifdef LINUX
+#include <x86intrin.h>
+#else
+#include <intrin.h>
+#endif
+
+#define NUM_PACKETS_1 1
+#define NUM_PACKETS_2 2
+#define NUM_PACKETS_3 3
+#define NUM_PACKETS_4 4
+#define NUM_PACKETS_8 8
+#define NUM_PACKETS_16 16
+
+#ifdef LINUX
+#define BSWAP32 __builtin_bswap32
+#define BSWAP64 __builtin_bswap64
+#else
+#define BSWAP32 _byteswap_ulong
+#define BSWAP64 _byteswap_uint64
+#endif
+
+typedef union _m128_u {
+ uint8_t byte[16];
+ uint16_t word[8];
+ uint32_t dword[4];
+ uint64_t qword[2];
+ __m128i m;
+} m128_t;
+
+typedef union _m64_u {
+ uint8_t byte[8];
+ uint16_t word[4];
+ uint32_t dword[2];
+ uint64_t m;
+} m64_t;
+
+static inline uint32_t bswap4(const uint32_t val)
+{
+ return ((val >> 24) | /**< A*/
+ ((val & 0xff0000) >> 8) | /**< B*/
+ ((val & 0xff00) << 8) | /**< C*/
+ (val << 24)); /**< D*/
+}
+
+/*************************************************************************
+* @description - this function copies the requested number of bytes (up to 4)
+* from the source to the destination buffer
+*
+* @param pSrc [IN] - pointer to an input Byte array (at least len bytes
+*                    available)
+* @param pDst [OUT] - pointer to the output buffer (at least len bytes available)
+* @param len [IN] - length in bytes to copy (0 to 4)
+*
+*************************************************************************/
+static inline void memcpy_keystream_32(uint8_t *pDst,
+ const uint8_t *pSrc,
+ const uint32_t len)
+{
+ switch (len) {
+ case 4:
+ *(uint32_t *)pDst = *(const uint32_t *)pSrc;
+ break;
+ case 3:
+ pDst[2] = pSrc[2];
+ /* fall-through */
+ case 2:
+ pDst[1] = pSrc[1];
+ /* fall-through */
+ case 1:
+ pDst[0] = pSrc[0];
+ /* fall-through */
+ }
+}
+
+/*************************************************************************
+* @description - this function XORs 4 bytes of the source buffer with 4 bytes
+* of keystream, placing the result in the destination buffer
+*
+* @param pSrc [IN] - pointer to an input Byte array (at least 4 bytes available)
+* @param pDst [OUT] - pointer to the output buffer (at least 4 bytes available)
+* @param KS [IN] - 4 bytes of keystream, reversed into network byte order
+*                  before the XOR
+*
+*************************************************************************/
+static inline void xor_keystream_reverse_32(uint8_t *pDst,
+ const uint8_t *pSrc,
+ const uint32_t KS)
+{
+ *(uint32_t *)pDst = (*(const uint32_t *)pSrc) ^ BSWAP32(KS);
+}
+
+/******************************************************************************
+ * @description - this function XORs 8 bytes of input with the byte-swapped
+ *                8-byte keystream, writes the result to the output buffer
+ *                and returns the advanced source pointer
+ * @param pSrc [IN] - pointer to an input Byte array (at least 8 bytes
+ *                    available)
+ * @param pDst [OUT] - pointer to the output buffer (at least 8 bytes available)
+ * @param keyStream [IN] - the Keystream value (8 bytes)
+ ******************************************************************************/
+static inline const uint8_t *
+xor_keystrm_rev(uint8_t *pDst, const uint8_t *pSrc, uint64_t keyStream)
+{
+ /* default: XOR ONLY, read the input buffer, update the output buffer */
+ const uint64_t *pSrc64 = (const uint64_t *)pSrc;
+ uint64_t *pDst64 = (uint64_t *)pDst;
+ *pDst64 = *pSrc64 ^ BSWAP64(keyStream);
+ return (const uint8_t *)(pSrc64 + 1);
+}
+
+/******************************************************************************
+ * @description - this function copies the requested number of bytes (up to 8)
+ *                from the source to the destination buffer
+ * @param pSrc [IN] - pointer to an input Byte array (at least len bytes
+ *                    available)
+ * @param pDst [OUT] - pointer to the output buffer (at least len bytes
+ *                     available)
+ * @param len [IN] - length in bytes to copy
+ ******************************************************************************/
+static inline void
+memcpy_keystrm(uint8_t *pDst, const uint8_t *pSrc, const uint32_t len)
+{
+ switch (len) {
+ case 8:
+ *(uint64_t *)pDst = *(const uint64_t *)pSrc;
+ break;
+ case 7:
+ pDst[6] = pSrc[6];
+ /* fall-through */
+ case 6:
+ pDst[5] = pSrc[5];
+ /* fall-through */
+ case 5:
+ pDst[4] = pSrc[4];
+ /* fall-through */
+ case 4:
+ *(uint32_t *)pDst = *(const uint32_t *)pSrc;
+ break;
+ case 3:
+ pDst[2] = pSrc[2];
+ /* fall-through */
+ case 2:
+ pDst[1] = pSrc[1];
+ /* fall-through */
+ case 1:
+ pDst[0] = pSrc[0];
+ /* fall-through */
+ }
+}
+
+/**
+ ******************************************************************************
+ *
+ * @description
+ * Definition of the external SSE function that XOR's 64 bytes of input
+ * with 64 bytes of keystream, swapping keystream bytes every 4 bytes.
+ *
+ * @param[in] pIn Pointer to the input buffer
+ * @param[out] pOut Pointer to the output buffer
+ * @param[in] pKey Pointer to the new 64 byte keystream
+ *
+ * @pre
+ * None
+ *
+ *****************************************************************************/
+IMB_DLL_LOCAL void asm_XorKeyStream64B_sse(const void *pIn, void *pOut,
+ const void *pKey);
+
+/**
+ ******************************************************************************
+ *
+ * @description
+ * Definition of the external AVX function that XOR's 64 bytes of input
+ * with 64 bytes of keystream, swapping keystream bytes every 4 bytes.
+ *
+ * @param[in] pIn Pointer to the input buffer
+ * @param[out] pOut Pointer to the output buffer
+ * @param[in] pKey Pointer to the new 64 byte keystream
+ *
+ * @pre
+ * None
+ *
+ *****************************************************************************/
+IMB_DLL_LOCAL void asm_XorKeyStream64B_avx(const void *pIn, void *pOut,
+ const void *pKey);
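+
+/*
+ * A minimal usage sketch of the two functions above (buffer names are
+ * illustrative only):
+ *
+ *     uint8_t in[64], out[64], ks[64];
+ *
+ *     // fill in[] with data and ks[] with 64 bytes of raw keystream, then:
+ *     asm_XorKeyStream64B_avx(in, out, ks);
+ *     // out[] = in[] XOR ks[], with ks bytes swapped on 32-bit boundaries
+ */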
+
+#endif /* _WIRELESS_COMMON_H_ */
diff --git a/src/spdk/intel-ipsec-mb/include/zuc_common.asm b/src/spdk/intel-ipsec-mb/include/zuc_common.asm
new file mode 100644
index 000000000..4b9cdd3ec
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/include/zuc_common.asm
@@ -0,0 +1,740 @@
+;;
+;; Copyright (c) 2009-2019, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%include "include/os.asm"
+%include "include/reg_sizes.asm"
+
+extern lookup_8bit_sse
+
+
+section .data
+default rel
+align 64
+S0:
+db 0x3e,0x72,0x5b,0x47,0xca,0xe0,0x00,0x33,0x04,0xd1,0x54,0x98,0x09,0xb9,0x6d,0xcb
+db 0x7b,0x1b,0xf9,0x32,0xaf,0x9d,0x6a,0xa5,0xb8,0x2d,0xfc,0x1d,0x08,0x53,0x03,0x90
+db 0x4d,0x4e,0x84,0x99,0xe4,0xce,0xd9,0x91,0xdd,0xb6,0x85,0x48,0x8b,0x29,0x6e,0xac
+db 0xcd,0xc1,0xf8,0x1e,0x73,0x43,0x69,0xc6,0xb5,0xbd,0xfd,0x39,0x63,0x20,0xd4,0x38
+db 0x76,0x7d,0xb2,0xa7,0xcf,0xed,0x57,0xc5,0xf3,0x2c,0xbb,0x14,0x21,0x06,0x55,0x9b
+db 0xe3,0xef,0x5e,0x31,0x4f,0x7f,0x5a,0xa4,0x0d,0x82,0x51,0x49,0x5f,0xba,0x58,0x1c
+db 0x4a,0x16,0xd5,0x17,0xa8,0x92,0x24,0x1f,0x8c,0xff,0xd8,0xae,0x2e,0x01,0xd3,0xad
+db 0x3b,0x4b,0xda,0x46,0xeb,0xc9,0xde,0x9a,0x8f,0x87,0xd7,0x3a,0x80,0x6f,0x2f,0xc8
+db 0xb1,0xb4,0x37,0xf7,0x0a,0x22,0x13,0x28,0x7c,0xcc,0x3c,0x89,0xc7,0xc3,0x96,0x56
+db 0x07,0xbf,0x7e,0xf0,0x0b,0x2b,0x97,0x52,0x35,0x41,0x79,0x61,0xa6,0x4c,0x10,0xfe
+db 0xbc,0x26,0x95,0x88,0x8a,0xb0,0xa3,0xfb,0xc0,0x18,0x94,0xf2,0xe1,0xe5,0xe9,0x5d
+db 0xd0,0xdc,0x11,0x66,0x64,0x5c,0xec,0x59,0x42,0x75,0x12,0xf5,0x74,0x9c,0xaa,0x23
+db 0x0e,0x86,0xab,0xbe,0x2a,0x02,0xe7,0x67,0xe6,0x44,0xa2,0x6c,0xc2,0x93,0x9f,0xf1
+db 0xf6,0xfa,0x36,0xd2,0x50,0x68,0x9e,0x62,0x71,0x15,0x3d,0xd6,0x40,0xc4,0xe2,0x0f
+db 0x8e,0x83,0x77,0x6b,0x25,0x05,0x3f,0x0c,0x30,0xea,0x70,0xb7,0xa1,0xe8,0xa9,0x65
+db 0x8d,0x27,0x1a,0xdb,0x81,0xb3,0xa0,0xf4,0x45,0x7a,0x19,0xdf,0xee,0x78,0x34,0x60
+
+S1:
+db 0x55,0xc2,0x63,0x71,0x3b,0xc8,0x47,0x86,0x9f,0x3c,0xda,0x5b,0x29,0xaa,0xfd,0x77
+db 0x8c,0xc5,0x94,0x0c,0xa6,0x1a,0x13,0x00,0xe3,0xa8,0x16,0x72,0x40,0xf9,0xf8,0x42
+db 0x44,0x26,0x68,0x96,0x81,0xd9,0x45,0x3e,0x10,0x76,0xc6,0xa7,0x8b,0x39,0x43,0xe1
+db 0x3a,0xb5,0x56,0x2a,0xc0,0x6d,0xb3,0x05,0x22,0x66,0xbf,0xdc,0x0b,0xfa,0x62,0x48
+db 0xdd,0x20,0x11,0x06,0x36,0xc9,0xc1,0xcf,0xf6,0x27,0x52,0xbb,0x69,0xf5,0xd4,0x87
+db 0x7f,0x84,0x4c,0xd2,0x9c,0x57,0xa4,0xbc,0x4f,0x9a,0xdf,0xfe,0xd6,0x8d,0x7a,0xeb
+db 0x2b,0x53,0xd8,0x5c,0xa1,0x14,0x17,0xfb,0x23,0xd5,0x7d,0x30,0x67,0x73,0x08,0x09
+db 0xee,0xb7,0x70,0x3f,0x61,0xb2,0x19,0x8e,0x4e,0xe5,0x4b,0x93,0x8f,0x5d,0xdb,0xa9
+db 0xad,0xf1,0xae,0x2e,0xcb,0x0d,0xfc,0xf4,0x2d,0x46,0x6e,0x1d,0x97,0xe8,0xd1,0xe9
+db 0x4d,0x37,0xa5,0x75,0x5e,0x83,0x9e,0xab,0x82,0x9d,0xb9,0x1c,0xe0,0xcd,0x49,0x89
+db 0x01,0xb6,0xbd,0x58,0x24,0xa2,0x5f,0x38,0x78,0x99,0x15,0x90,0x50,0xb8,0x95,0xe4
+db 0xd0,0x91,0xc7,0xce,0xed,0x0f,0xb4,0x6f,0xa0,0xcc,0xf0,0x02,0x4a,0x79,0xc3,0xde
+db 0xa3,0xef,0xea,0x51,0xe6,0x6b,0x18,0xec,0x1b,0x2c,0x80,0xf7,0x74,0xe7,0xff,0x21
+db 0x5a,0x6a,0x54,0x1e,0x41,0x31,0x92,0x35,0xc4,0x33,0x07,0x0a,0xba,0x7e,0x0e,0x34
+db 0x88,0xb1,0x98,0x7c,0xf3,0x3d,0x60,0x6c,0x7b,0xca,0xd3,0x1f,0x32,0x65,0x04,0x28
+db 0x64,0xbe,0x85,0x9b,0x2f,0x59,0x8a,0xd7,0xb0,0x25,0xac,0xaf,0x12,0x03,0xe2,0xf2
+
+EK_d:
+dw 0x44D7, 0x26BC, 0x626B, 0x135E, 0x5789, 0x35E2, 0x7135, 0x09AF,
+dw 0x4D78, 0x2F13, 0x6BC4, 0x1AF1, 0x5E26, 0x3C4D, 0x789A, 0x47AC
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
+
+section .text
+
+%define OFFSET_FR1 (16*4)
+%define OFFSET_FR2 (17*4)
+%define OFFSET_BRC_X0 (18*4)
+%define OFFSET_BRC_X1 (19*4)
+%define OFFSET_BRC_X2 (20*4)
+%define OFFSET_BRC_X3 (21*4)
+
+;
+; BITS_REORG()
+;
+; params
+; %1 - round number
+; uses
+; eax, ebx, ecx, edx
+; return
+; updates r12d, r13d, r14d, r15d
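+;      where (bit reorganization as per the ZUC spec):
+;          BRC_X0 = LFSR_S15[30:15] || LFSR_S14[15:0]
+;          BRC_X1 = LFSR_S11[15:0]  || LFSR_S9[30:15]
+;          BRC_X2 = LFSR_S7[15:0]   || LFSR_S5[30:15]
+;          BRC_X3 = LFSR_S2[15:0]   || LFSR_S0[30:15]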
+;
+%macro BITS_REORG 1
+ ;
+ ; r12d = LFSR_S15
+ ; eax = LFSR_S14
+ ; r13d = LFSR_S11
+ ; ebx = LFSR_S9
+ ; r14d = LFSR_S7
+ ; ecx = LFSR_S5
+ ; r15d = LFSR_S2
+ ; edx = LFSR_S0
+
+ mov r12d, [rsi + ((15 + %1) % 16)*4]
+ mov eax, [rsi + ((14 + %1) % 16)*4]
+ mov r13d, [rsi + ((11 + %1) % 16)*4]
+ mov ebx, [rsi + (( 9 + %1) % 16)*4]
+ mov r14d, [rsi + (( 7 + %1) % 16)*4]
+ mov ecx, [rsi + (( 5 + %1) % 16)*4]
+ mov r15d, [rsi + (( 2 + %1) % 16)*4]
+ mov edx, [rsi + (( 0 + %1) % 16)*4]
+
+ shr r12d, 15
+ shl eax, 16
+ shl ebx, 1
+ shl ecx, 1
+ shl edx, 1
+ shld r12d, eax, 16 ; BRC_X0
+ shld r13d, ebx, 16 ; BRC_X1
+ shld r14d, ecx, 16 ; BRC_X2
+ shld r15d, edx, 16 ; BRC_X3
+%endmacro
+
+%macro lookup_single_sbox 3
+%define %%table %1 ; [in] Pointer to table to look up
+%define %%idx %2 ; [in] Index to look up
+%define %%value %3 ; [out] Returned value from lookup function (rcx, rdx, r8, r9)
+
+%ifdef SAFE_LOOKUP
+ ;; Save all registers used in lookup_8bit (xmm0-5, r9,r10)
+ ;; and registers for param passing and return (4 regs, OS dependent)
+ ;; (6*16 + 6*8 = 144 bytes)
+ sub rsp, 144
+
+ movdqu [rsp], xmm0
+ movdqu [rsp + 16], xmm1
+ movdqu [rsp + 32], xmm2
+ movdqu [rsp + 48], xmm3
+ movdqu [rsp + 64], xmm4
+ movdqu [rsp + 80], xmm5
+ mov [rsp + 96], r9
+ mov [rsp + 104], r10
+
+%ifdef LINUX
+ mov [rsp + 112], rdi
+ mov [rsp + 120], rsi
+ mov [rsp + 128], rdx
+
+ mov rdi, %%table
+ mov rsi, %%idx
+ mov rdx, 256
+%else
+ mov [rsp + 112], rcx
+ mov [rsp + 120], rdx
+ mov [rsp + 128], r8
+ mov rcx, %%table
+ mov rdx, %%idx
+ mov r8, 256
+%endif
+ mov [rsp + 136], rax
+
+ call lookup_8bit_sse
+
+ ;; Restore all registers
+ movdqu xmm0, [rsp]
+ movdqu xmm1, [rsp + 16]
+ movdqu xmm2, [rsp + 32]
+ movdqu xmm3, [rsp + 48]
+ movdqu xmm4, [rsp + 64]
+ movdqu xmm5, [rsp + 80]
+ mov r9, [rsp + 96]
+ mov r10, [rsp + 104]
+
+%ifdef LINUX
+ mov rdi, [rsp + 112]
+ mov rsi, [rsp + 120]
+ mov rdx, [rsp + 128]
+%else
+ mov rcx, [rsp + 112]
+ mov rdx, [rsp + 120]
+ mov r8, [rsp + 128]
+%endif
+
+ ;; Move returned value from lookup function, before restoring rax
+ mov DWORD(%%value), eax
+ mov rax, [rsp + 136]
+
+ add rsp, 144
+
+%else ;; SAFE_LOOKUP
+
+ movzx DWORD(%%value), BYTE [%%table + %%idx]
+
+%endif ;; SAFE_LOOKUP
+%endmacro
+
+;
+; NONLIN_FUN()
+;
+; params
+; %1 == 1, then calculate W
+; uses
+;           rdi, rsi, rax, rbx, rcx, rdx
+;           r8, r9
+; return
+; eax = W value
+; r10d = F_R1
+; r11d = F_R2
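+;      where (nonlinear function F as per the ZUC spec):
+;          W  = (BRC_X0 ^ F_R1) + F_R2
+;          W1 = F_R1 + BRC_X1,  W2 = F_R2 ^ BRC_X2
+;          new F_R1 = S(L1(W1L || W2H)),  new F_R2 = S(L2(W2L || W1H))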
+;
+%macro NONLIN_FUN 1
+
+%if (%1 == 1)
+ mov eax, r12d
+ xor eax, r10d
+ add eax, r11d ; W = (BRC_X0 ^ F_R1) + F_R2
+%endif
+ lea rdi, [rel S0]
+ lea rsi, [rel S1]
+
+ add r10d, r13d ; W1= F_R1 + BRC_X1
+ xor r11d, r14d ; W2= F_R2 ^ BRC_X2
+
+ mov rdx, r10
+ shld edx, r11d, 16 ; P = (W1 << 16) | (W2 >> 16)
+ shld r11d, r10d, 16 ; Q = (W2 << 16) | (W1 >> 16)
+
+ mov ebx, edx
+ mov ecx, edx
+ mov r8d, edx
+ mov r9d, edx
+
+ rol ebx, 2
+ rol ecx, 10
+ rol r8d, 18
+ rol r9d, 24
+ xor edx, ebx
+ xor edx, ecx
+ xor edx, r8d
+ xor edx, r9d ; U = L1(P) = EDX, hi(RDX)=0
+ ;
+ xor r10, r10
+ shld ebx, edx, 24
+ shld r8d, edx, 16
+ shld r9d, edx, 8
+ and rdx, 0xFF
+ lookup_single_sbox rsi, rdx, rdx
+ and rbx, 0xFF
+ lookup_single_sbox rdi, rbx, rbx
+ and r8, 0xFF
+ lookup_single_sbox rsi, r8, r8
+ and r9, 0xFF
+ lookup_single_sbox rdi, r9, r9
+ shrd r10d, edx, 8
+ shrd r10d, ebx, 8
+ shrd r10d, r8d, 8
+ shrd r10d, r9d, 8
+ ;
+ mov ebx, r11d
+ mov ecx, r11d
+ mov r8d, r11d
+ mov r9d, r11d
+ rol ebx, 8
+ rol ecx, 14
+ rol r8d, 22
+ rol r9d, 30
+ xor r11d, ebx
+ xor r11d, ecx
+ xor r11d, r8d
+        xor     r11d, r9d       ; V = L2(Q) = r11d, hi(R11)=0
+ ;
+ shld ebx, r11d, 24
+ shld r8d, r11d, 16
+ shld r9d, r11d, 8
+ and r11, 0xFF
+
+ lookup_single_sbox rsi, r11, r11
+ and rbx, 0xFF
+ lookup_single_sbox rdi, rbx, rbx
+ and r8, 0xFF
+ lookup_single_sbox rsi, r8, r8
+ and r9, 0xFF
+ lookup_single_sbox rdi, r9, r9
+
+ shrd r11d, r11d, 8
+
+ shrd r11d, ebx, 8
+ shrd r11d, r8d, 8
+ shrd r11d, r9d, 8
+%endmacro
+
+
+;
+; LFSR_UPDT()
+;
+; params
+; %1 - round number
+; uses
+; rax as input (ZERO or W)
+; return
+;
+%macro LFSR_UPDT 1
+ ;
+ ; ebx = LFSR_S0
+ ; ecx = LFSR_S4
+ ; edx = LFSR_S10
+ ; r8d = LFSR_S13
+ ; r9d = LFSR_S15
+ ;lea rsi, [LFSR_STA] ; moved to calling function
+
+ mov ebx, [rsi + (( 0 + %1) % 16)*4]
+ mov ecx, [rsi + (( 4 + %1) % 16)*4]
+ mov edx, [rsi + ((10 + %1) % 16)*4]
+ mov r8d, [rsi + ((13 + %1) % 16)*4]
+ mov r9d, [rsi + ((15 + %1) % 16)*4]
+
+ ; Calculate 64-bit LFSR feedback
+ add rax, rbx
+ shl rbx, 8
+ shl rcx, 20
+ shl rdx, 21
+ shl r8, 17
+ shl r9, 15
+ add rax, rbx
+ add rax, rcx
+ add rax, rdx
+ add rax, r8
+ add rax, r9
+
+        ; Reduce it to a 31-bit value (mod 2^31 - 1):
+        ; v = (sum & 0x7FFFFFFF) + (sum >> 31); subtract 2^31 - 1 if v >= 2^31 - 1
+ mov rbx, rax
+ and rax, 0x7FFFFFFF
+ shr rbx, 31
+ add rax, rbx
+
+ mov rbx, rax
+ sub rbx, 0x7FFFFFFF
+ cmovns rax, rbx
+
+
+ ; LFSR_S16 = (LFSR_S15++) = eax
+ mov [rsi + (( 0 + %1) % 16)*4], eax
+%endmacro
+
+
+;
+; make_u31()
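+; Rt = Ke(8 bits) || Ek(15 bits) || Iv(8 bits), i.e. (Ke << 23) | (Ek << 8) | Iv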
+;
+%macro make_u31 4
+
+%define %%Rt %1
+%define %%Ke %2
+%define %%Ek %3
+%define %%Iv %4
+ xor %%Rt, %%Rt
+ shrd %%Rt, %%Iv, 8
+ shrd %%Rt, %%Ek, 15
+ shrd %%Rt, %%Ke, 9
+%endmacro
+
+
+;
+; key_expand()
+;
+%macro key_expand 1
+ movzx r8d, byte [pKe + (%1 + 0)]
+ movzx r9d, word [rbx + ((%1 + 0)*2)]
+ movzx r10d, byte [pIv + (%1 + 0)]
+ make_u31 r11d, r8d, r9d, r10d
+ mov [rax + ((%1 + 0)*4)], r11d
+
+ movzx r12d, byte [pKe + (%1 + 1)]
+ movzx r13d, word [rbx + ((%1 + 1)*2)]
+ movzx r14d, byte [pIv + (%1 + 1)]
+ make_u31 r15d, r12d, r13d, r14d
+ mov [rax + ((%1 + 1)*4)], r15d
+%endmacro
+
+
+
+;----------------------------------------------------------------------------------------
+;;
+;;extern void asm_ZucInitialization(uint8_t* pKey, uint8_t* pIV, uint32_t * pState)
+;;
+;; WIN64
+;; RCX - pKey
+;; RDX - pIV
+;; R8 - pState
+;; LIN64
+;; RDI - pKey
+;; RSI - pIV
+;; RDX - pState
+;;
+align 16
+MKGLOBAL(asm_ZucInitialization,function,internal)
+asm_ZucInitialization:
+
+%ifdef LINUX
+ %define pKe rdi
+ %define pIv rsi
+ %define pState rdx
+%else
+ %define pKe rcx
+ %define pIv rdx
+ %define pState r8
+%endif
+
+ ; save the base pointer
+ push rbp
+
+        ;load stack pointer to rbp and reserve stack memory
+ mov rbp, rsp
+ sub rsp, 196
+
+ ; Save non-volatile registers
+ mov [rbp - 8], rbx
+ mov [rbp - 32], r12
+ mov [rbp - 40], r13
+ mov [rbp - 48], r14
+ mov [rbp - 56], r15
+%ifndef LINUX
+ mov [rbp - 64], rdi
+ mov [rbp - 72], rsi
+%endif
+
+ lea rbx, [rel EK_d] ; load pointer to D
+ lea rax, [pState] ; load pointer to pState
+ mov [rbp - 88], pState ; save pointer to pState
+
+ ; Expand key
+ key_expand 0
+ key_expand 2
+ key_expand 4
+ key_expand 6
+ key_expand 8
+ key_expand 10
+ key_expand 12
+ key_expand 14
+
+ ; Set R1 and R2 to zero
+ xor r10, r10
+ xor r11, r11
+
+ ; Shift LFSR 32-times, update state variables
+%assign N 0
+%rep 32
+ mov rdx, [rbp - 88] ; load pointer to pState
+ lea rsi, [rdx]
+
+ BITS_REORG N
+
+ NONLIN_FUN 1
+ shr eax, 1
+
+ mov rdx, [rbp - 88] ; re-load pointer to pState
+ lea rsi, [rdx]
+
+ LFSR_UPDT N
+
+%assign N N+1
+%endrep
+
+        ; And once more - the first round of the keystream generation phase (33 rounds in total)
+ mov rdx, [rbp - 88] ; load pointer to pState
+ lea rsi, [rdx]
+
+
+ BITS_REORG 0
+ NONLIN_FUN 0
+ xor rax, rax
+
+ mov rdx, [rbp - 88] ; load pointer to pState
+ lea rsi, [rdx]
+
+ LFSR_UPDT 0
+
+ mov rdx, [rbp - 88] ; load pointer to pState
+ lea rsi, [rdx]
+
+ ; Save ZUC's state variables
+ mov [rsi + (16*4)],r10d ;F_R1
+ mov [rsi + (17*4)],r11d ;F_R2
+ mov [rsi + (18*4)],r12d ;BRC_X0
+ mov [rsi + (19*4)],r13d ;BRC_X1
+ mov [rsi + (20*4)],r14d ;BRC_X2
+ mov [rsi + (21*4)],r15d ;BRC_X3
+
+
+ ; Restore non-volatile registers
+ mov rbx, [rbp - 8]
+ mov r12, [rbp - 32]
+ mov r13, [rbp - 40]
+ mov r14, [rbp - 48]
+ mov r15, [rbp - 56]
+%ifndef LINUX
+ mov rdi, [rbp - 64]
+ mov rsi, [rbp - 72]
+%endif
+
+ ; restore base pointer
+ mov rsp, rbp
+ pop rbp
+
+ ret
+
+
+;;
+;; void asm_ZucGenKeystream8B(void *pKeystream, ZucState_t *pState);
+;;
+;; WIN64
+;; RCX - KS (key stream pointer)
+;; RDX - STATE (state pointer)
+;; LIN64
+;; RDI - KS (key stream pointer)
+;; RSI - STATE (state pointer)
+;;
+align 16
+MKGLOBAL(asm_ZucGenKeystream8B,function,internal)
+asm_ZucGenKeystream8B:
+
+%ifdef LINUX
+ %define pKS rdi
+ %define pState rsi
+%else
+ %define pKS rcx
+ %define pState rdx
+%endif
+ ; save the base pointer
+ push rbp
+
+        ;load stack pointer to rbp and reserve stack memory
+ mov rbp, rsp
+ sub rsp, 196
+
+ ; Save non-volatile registers
+ mov [rbp - 8], rbx
+ mov [rbp - 32], r12
+ mov [rbp - 40], r13
+ mov [rbp - 48], r14
+ mov [rbp - 56], r15
+%ifndef LINUX
+ mov [rbp - 64], rdi
+ mov [rbp - 72], rsi
+%endif
+
+
+ ; Load input keystream pointer parameter in RAX
+ mov rax, pKS
+
+ ; Restore ZUC's state variables
+ xor r10, r10
+ xor r11, r11
+ mov r10d, [pState + OFFSET_FR1]
+ mov r11d, [pState + OFFSET_FR2]
+ mov r12d, [pState + OFFSET_BRC_X0]
+ mov r13d, [pState + OFFSET_BRC_X1]
+ mov r14d, [pState + OFFSET_BRC_X2]
+ mov r15d, [pState + OFFSET_BRC_X3]
+
+ ; Store keystream pointer
+ mov [rbp - 80], rax
+
+ ; Store ZUC State Pointer
+ mov [rbp - 88], pState
+
+ ; Generate 8B of keystream in 2 rounds
+%assign N 1
+%rep 2
+
+ mov rdx, [rbp - 88] ; load *pState
+ lea rsi, [rdx]
+
+ BITS_REORG N
+ NONLIN_FUN 1
+
+ ;Store the keystream
+ mov rbx, [rbp - 80] ; load *pkeystream
+ xor eax, r15d
+ mov [rbx], eax
+ add rbx, 4 ; increment the pointer
+ mov [rbp - 80], rbx ; save pkeystream
+
+ xor rax, rax
+
+ mov rdx, [rbp - 88] ; load *pState
+ lea rsi, [rdx]
+
+ LFSR_UPDT N
+
+%assign N N+1
+%endrep
+
+ mov rsi, [rbp - 88] ; load pState
+
+
+ ; Save ZUC's state variables
+ mov [rsi + OFFSET_FR1], r10d
+ mov [rsi + OFFSET_FR2], r11d
+ mov [rsi + OFFSET_BRC_X0], r12d
+ mov [rsi + OFFSET_BRC_X1], r13d
+ mov [rsi + OFFSET_BRC_X2], r14d
+ mov [rsi + OFFSET_BRC_X3], r15d
+
+ ; Restore non-volatile registers
+ mov rbx, [rbp - 8]
+ mov r12, [rbp - 32]
+ mov r13, [rbp - 40]
+ mov r14, [rbp - 48]
+ mov r15, [rbp - 56]
+%ifndef LINUX
+ mov rdi, [rbp - 64]
+ mov rsi, [rbp - 72]
+%endif
+
+ mov rsp, rbp
+ pop rbp
+
+ ret
+
+
+;;
+;; void asm_ZucGenKeystream64B(uint32_t * pKeystream, uint32_t * pState);
+;;
+;; WIN64
+;; RCX - KS (key stream pointer)
+;; RDX - STATE (state pointer)
+;; LIN64
+;; RDI - KS (key stream pointer)
+;; RSI - STATE (state pointer)
+;;
+align 16
+MKGLOBAL(asm_ZucGenKeystream64B,function,internal)
+asm_ZucGenKeystream64B:
+
+%ifdef LINUX
+ %define pKS rdi
+ %define pState rsi
+%else
+ %define pKS rcx
+ %define pState rdx
+%endif
+ ; save the base pointer
+ push rbp
+
+        ;load stack pointer to rbp and reserve stack memory
+ mov rbp, rsp
+ sub rsp, 196
+
+ ; Save non-volatile registers
+ mov [rbp - 8], rbx
+ mov [rbp - 32], r12
+ mov [rbp - 40], r13
+ mov [rbp - 48], r14
+ mov [rbp - 56], r15
+%ifndef LINUX
+ mov [rbp - 64], rdi
+ mov [rbp - 72], rsi
+%endif
+
+
+ ; Load input keystream pointer parameter in RAX
+ mov rax, pKS
+
+ ; Restore ZUC's state variables
+ xor r10, r10
+ xor r11, r11
+ mov r10d, [pState + OFFSET_FR1]
+ mov r11d, [pState + OFFSET_FR2]
+ mov r12d, [pState + OFFSET_BRC_X0]
+ mov r13d, [pState + OFFSET_BRC_X1]
+ mov r14d, [pState + OFFSET_BRC_X2]
+ mov r15d, [pState + OFFSET_BRC_X3]
+
+ ; Store keystream pointer
+ mov [rbp - 80], rax
+
+ ; Store ZUC State Pointer
+ mov [rbp - 88], pState
+
+ ; Generate 64B of keystream in 16 rounds
+%assign N 1
+%rep 16
+
+ mov rdx, [rbp - 88] ; load *pState
+ lea rsi, [rdx]
+
+ BITS_REORG N
+ NONLIN_FUN 1
+
+ ;Store the keystream
+ mov rbx, [rbp - 80] ; load *pkeystream
+ xor eax, r15d
+ mov [rbx], eax
+ add rbx, 4 ; increment the pointer
+ mov [rbp - 80], rbx ; save pkeystream
+
+ xor rax, rax
+
+ mov rdx, [rbp - 88] ; load *pState
+ lea rsi, [rdx]
+
+ LFSR_UPDT N
+
+%assign N N+1
+%endrep
+
+ mov rsi, [rbp - 88] ; load pState
+
+
+ ; Save ZUC's state variables
+ mov [rsi + OFFSET_FR1], r10d
+ mov [rsi + OFFSET_FR2], r11d
+ mov [rsi + OFFSET_BRC_X0], r12d
+ mov [rsi + OFFSET_BRC_X1], r13d
+ mov [rsi + OFFSET_BRC_X2], r14d
+ mov [rsi + OFFSET_BRC_X3], r15d
+
+ ; Restore non-volatile registers
+ mov rbx, [rbp - 8]
+ mov r12, [rbp - 32]
+ mov r13, [rbp - 40]
+ mov r14, [rbp - 48]
+ mov r15, [rbp - 56]
+%ifndef LINUX
+ mov rdi, [rbp - 64]
+ mov rsi, [rbp - 72]
+%endif
+
+ mov rsp, rbp
+ pop rbp
+
+ ret
+
+
diff --git a/src/spdk/intel-ipsec-mb/include/zuc_internal.h b/src/spdk/intel-ipsec-mb/include/zuc_internal.h
new file mode 100755
index 000000000..525a1604c
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/include/zuc_internal.h
@@ -0,0 +1,432 @@
+/*******************************************************************************
+ Copyright (c) 2009-2019, Intel Corporation
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*******************************************************************************/
+
+/**
+ ******************************************************************************
+ * @file zuc_internal.h
+ *
+ * @description
+ *      This header file defines the internal APIs and data types for the
+ * 3GPP algorithm ZUC.
+ *
+ *****************************************************************************/
+
+#ifndef ZUC_INTERNAL_H_
+#define ZUC_INTERNAL_H_
+
+#include <stdio.h>
+#include <stdint.h>
+
+#include "intel-ipsec-mb.h"
+#include "immintrin.h"
+#include "include/wireless_common.h"
+
+/* 64 bytes of Keystream will be generated */
+#define ZUC_KEYSTR_LEN (64)
+#define NUM_LFSR_STATES (16)
+#define ZUC_WORD (32)
+
+/* Range of input data for ZUC is from 1 to 65504 bits */
+#define ZUC_MIN_LEN 1
+#define ZUC_MAX_LEN 65504
+
+#ifdef DEBUG
+#ifdef _WIN32
+#define DEBUG_PRINT(_fmt, ...) \
+ fprintf(stderr, "%s()::%d " _fmt , __FUNCTION__, __LINE__, __VA_ARGS__)
+#else
+#define DEBUG_PRINT(_fmt, ...) \
+ fprintf(stderr, "%s()::%d " _fmt , __func__, __LINE__, __VA_ARGS__)
+#endif
+#else
+#define DEBUG_PRINT(_fmt, ...)
+#endif
+
+/**
+ ******************************************************************************
+ * @description
+ *      Macro will loop through the 64-byte keystream and XOR it with the
+ *      input buffer, placing the result in the output buffer.
+ *      KeyStream bytes are swapped on a 32-bit boundary as part of this
+ *      operation
+ *
+ *****************************************************************************/
+#define ZUC_XOR_KEYSTREAM(pIn64, pOut64, pKeyStream64) \
+{ \
+ int i =0; \
+ union SwapBytes_t { \
+ uint64_t l64; \
+ uint32_t w32[2]; \
+ }swapBytes; \
+ /* loop through the key stream and xor 64 bits at a time */ \
+ for(i =0; i < ZUC_KEYSTR_LEN/8; i++) { \
+ swapBytes.l64 = *pKeyStream64++; \
+ swapBytes.w32[0] = bswap4(swapBytes.w32[0]); \
+ swapBytes.w32[1] = bswap4(swapBytes.w32[1]); \
+ *pOut64++ = *pIn64++ ^ swapBytes.l64; \
+ } \
+}
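+
+/*
+ * A minimal usage sketch of the macro above (pointer names are illustrative
+ * only); note that the macro advances all three pointers by 64 bytes:
+ *
+ *     const uint64_t *pIn64 = (const uint64_t *) pBufferIn;
+ *     uint64_t *pOut64 = (uint64_t *) pBufferOut;
+ *     uint64_t *pKeyStream64 = (uint64_t *) keyStream;
+ *
+ *     ZUC_XOR_KEYSTREAM(pIn64, pOut64, pKeyStream64);
+ */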
+
+/**
+ *****************************************************************************
+ * @description
+ * Packed structure to store the ZUC state for a single packet. *
+ *      Packed structure to store the ZUC state for a single packet.
+typedef struct zuc_state_s {
+ uint32_t lfsrState[16];
+ /**< State registers of the LFSR */
+ uint32_t fR1;
+ /**< register of F */
+ uint32_t fR2;
+ /**< register of F */
+ uint32_t bX0;
+ /**< Output X0 of the bit reorganization */
+ uint32_t bX1;
+ /**< Output X1 of the bit reorganization */
+ uint32_t bX2;
+ /**< Output X2 of the bit reorganization */
+ uint32_t bX3;
+ /**< Output X3 of the bit reorganization */
+} ZucState_t;
+
+/**
+ *****************************************************************************
+ * @description
+ *      Packed structure to store the ZUC state for four packets.
+ *****************************************************************************/
+typedef struct zuc_state_4_s {
+ uint32_t lfsrState[16][4];
+ /**< State registers of the LFSR */
+ uint32_t fR1[4];
+ /**< register of F */
+ uint32_t fR2[4];
+ /**< register of F */
+ uint32_t bX0[4];
+ /**< Output X0 of the bit reorganization for 4 packets */
+ uint32_t bX1[4];
+ /**< Output X1 of the bit reorganization for 4 packets */
+ uint32_t bX2[4];
+ /**< Output X2 of the bit reorganization for 4 packets */
+ uint32_t bX3[4];
+ /**< Output X3 of the bit reorganization for 4 packets */
+} ZucState4_t;
+
+/**
+ *****************************************************************************
+ * @description
+ * Structure to store pointers to the 4 keys to be used as input to
+ * @ref asm_ZucInitialization_4 and @ref asm_ZucGenKeystream64B_4
+ *****************************************************************************/
+typedef struct zuc_key_4_s {
+ const uint8_t *pKey1;
+ /**< Pointer to 128-bit key for packet 1 */
+ const uint8_t *pKey2;
+ /**< Pointer to 128-bit key for packet 2 */
+ const uint8_t *pKey3;
+ /**< Pointer to 128-bit key for packet 3 */
+ const uint8_t *pKey4;
+ /**< Pointer to 128-bit key for packet 4 */
+} ZucKey4_t;
+
+/**
+ *****************************************************************************
+ * @description
+ * Structure to store pointers to the 4 IV's to be used as input to
+ * @ref asm_ZucInitialization_4 and @ref asm_ZucGenKeystream64B_4
+ *****************************************************************************/
+typedef struct zuc_iv_4_s {
+ const uint8_t *pIv1;
+ /**< Pointer to 128-bit initialization vector for packet 1 */
+ const uint8_t *pIv2;
+ /**< Pointer to 128-bit initialization vector for packet 2 */
+ const uint8_t *pIv3;
+ /**< Pointer to 128-bit initialization vector for packet 3 */
+ const uint8_t *pIv4;
+ /**< Pointer to 128-bit initialization vector for packet 4 */
+} ZucIv4_t;
+
+/**
+ ******************************************************************************
+ *
+ * @description
+ * Definition of the external function that implements the initialization
+ * stage of the ZUC algorithm. The function will initialize the state
+ * for a single packet operation.
+ *
+ * @param[in] pKey Pointer to the 128-bit initial key that
+ * will be used when initializing the ZUC
+ * state.
+ * @param[in] pIv Pointer to the 128-bit initial vector that
+ * will be used when initializing the ZUC
+ * state.
+ * @param[in,out] pState Pointer to a ZUC state structure of type
+ * @ref ZucState_t that will be populated
+ * with the initialized ZUC state.
+ *
+ * @pre
+ * None
+ *
+ *****************************************************************************/
+IMB_DLL_LOCAL void asm_ZucInitialization(const void *pKey,
+ const void *pIv,
+ ZucState_t *pState);
+
+/**
+ ******************************************************************************
+ * @description
+ * Definition of the external function that implements the initialization
+ * stage of the ZUC algorithm for 4 packets. The function will initialize
+ * the state for 4 individual packets.
+ *
+ * @param[in] pKey Pointer to an array of 128-bit initial keys
+ * that will be used when initializing the ZUC
+ * state.
+ * @param[in] pIv Pointer to an array of 128-bit initial
+ * vectors that will be used when initializing
+ * the ZUC state.
+ * @param[in,out] pState Pointer to a ZUC state structure of type
+ * @ref ZucState4_t that will be populated
+ * with the initialized ZUC state.
+ *
+ * @pre
+ * None
+ *
+ *****************************************************************************/
+IMB_DLL_LOCAL void asm_ZucInitialization_4_sse(ZucKey4_t *pKeys,
+ ZucIv4_t *pIvs,
+ ZucState4_t *pState);
+
+IMB_DLL_LOCAL void asm_ZucInitialization_4_avx(ZucKey4_t *pKeys,
+ ZucIv4_t *pIvs,
+ ZucState4_t *pState);
+
+/**
+ ******************************************************************************
+ *
+ * @description
+ * Definition of the external function that implements the working
+ * stage of the ZUC algorithm. The function will generate 64 bytes of
+ * keystream.
+ *
+ * @param[in,out] pKeystream  Pointer to the output buffer that will
+ *                            contain the generated keystream.
+ *
+ * @param[in] pState Pointer to a ZUC state structure of type
+ * @ref ZucState_t
+ *
+ * @pre
+ * A successful call to @ref asm_ZucInitialization to initialize the ZUC
+ * state.
+ *
+ *****************************************************************************/
+IMB_DLL_LOCAL void asm_ZucGenKeystream64B(uint32_t *pKeystream,
+ ZucState_t *pState);
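+
+/*
+ * A minimal single-buffer flow sketch (pKey/pIv stand for pointers to the
+ * 128-bit key and IV and are illustrative only):
+ *
+ *     ZucState_t state;
+ *     uint32_t ks[ZUC_KEYSTR_LEN / sizeof(uint32_t)];
+ *
+ *     asm_ZucInitialization(pKey, pIv, &state);
+ *     asm_ZucGenKeystream64B(ks, &state);
+ *     // ks[] now holds 64 bytes of keystream to XOR against the data,
+ *     // e.g. via ZUC_XOR_KEYSTREAM()
+ */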
+
+/**
+ ******************************************************************************
+ *
+ * @description
+ * Definition of the external function that implements the working
+ * stage of the ZUC algorithm. The function will generate 8 bytes of
+ * keystream.
+ *
+ * @param[in,out] pKeystream  Pointer to the output buffer that will
+ *                            contain the generated keystream.
+ *
+ * @param[in] pState Pointer to a ZUC state structure of type
+ * @ref ZucState_t
+ *
+ * @pre
+ * A successful call to @ref asm_ZucInitialization to initialize the ZUC
+ * state.
+ *
+ *****************************************************************************/
+IMB_DLL_LOCAL void asm_ZucGenKeystream8B(void *pKeystream,
+ ZucState_t *pState);
+
+/**
+ ******************************************************************************
+ *
+ * @description
+ * Definition of the external function that implements the working
+ * stage of the ZUC algorithm. The function will generate 64 bytes of
+ * keystream for four packets in parallel.
+ *
+ * @param[in] pState Pointer to a ZUC state structure of type
+ * @ref ZucState4_t
+ *
+ * @param[in,out] pKeyStr1    Pointer to the output buffer that will
+ *                            contain the generated keystream for packet
+ *                            one.
+ * @param[in,out] pKeyStr2    Pointer to the output buffer that will
+ *                            contain the generated keystream for packet
+ *                            two.
+ * @param[in,out] pKeyStr3    Pointer to the output buffer that will
+ *                            contain the generated keystream for packet
+ *                            three.
+ * @param[in,out] pKeyStr4    Pointer to the output buffer that will
+ *                            contain the generated keystream for packet
+ *                            four.
+ *
+ * @pre
+ * A successful call to @ref asm_ZucInitialization_4 to initialize the ZUC
+ * state.
+ *
+ *****************************************************************************/
+IMB_DLL_LOCAL void asm_ZucGenKeystream64B_4_sse(ZucState4_t *pState,
+ uint32_t *pKeyStr1,
+ uint32_t *pKeyStr2,
+ uint32_t *pKeyStr3,
+ uint32_t *pKeyStr4);
+
+IMB_DLL_LOCAL void asm_ZucGenKeystream64B_4_avx(ZucState4_t *pState,
+ uint32_t *pKeyStr1,
+ uint32_t *pKeyStr2,
+ uint32_t *pKeyStr3,
+ uint32_t *pKeyStr4);
+
+/**
+ ******************************************************************************
+ * @description
+ * Definition of the external function to update the authentication tag
+ *      based on keystream and data (SSE variant)
+ *
+ * @param[in] T Authentication tag
+ *
+ * @param[in] ks Pointer to key stream
+ *
+ * @param[in] data Pointer to the data
+ *
+ * @pre
+ * None
+ *
+ *****************************************************************************/
+IMB_DLL_LOCAL uint32_t asm_Eia3Round64BSSE(uint32_t T, const void *ks,
+ const void *data);
+
+/**
+ ******************************************************************************
+ * @description
+ * Definition of the external function to return the authentication
+ * update value to be XOR'ed with current authentication tag (SSE variant)
+ *
+ * @param[in] ks Pointer to key stream
+ *
+ * @param[in] data Pointer to the data
+ *
+ * @param[in] n_words Number of data bits to be processed
+ *
+ * @pre
+ * None
+ *
+ *****************************************************************************/
+IMB_DLL_LOCAL uint32_t asm_Eia3RemainderSSE(const void *ks, const void *data,
+ const uint64_t n_words);
+
+/**
+ ******************************************************************************
+ * @description
+ * Definition of the external function to update the authentication tag
+ * based on keystream and data (AVX variant)
+ *
+ * @param[in] T Authentication tag
+ *
+ * @param[in] ks Pointer to key stream
+ *
+ * @param[in] data Pointer to the data
+ *
+ * @pre
+ * None
+ *
+ *****************************************************************************/
+IMB_DLL_LOCAL uint32_t asm_Eia3Round64BAVX(uint32_t T, const void *ks,
+ const void *data);
+
+/**
+ ******************************************************************************
+ * @description
+ * Definition of the external function to return the authentication
+ * update value to be XOR'ed with current authentication tag (AVX variant)
+ *
+ * @param[in] ks Pointer to key stream
+ *
+ * @param[in] data Pointer to the data
+ *
+ * @param[in] n_words Number of data bits to be processed
+ *
+ * @pre
+ * None
+ *
+ *****************************************************************************/
+IMB_DLL_LOCAL uint32_t asm_Eia3RemainderAVX(const void *ks, const void *data,
+ const uint64_t n_words);
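+
+/*
+ * A tag-computation sketch using the two AVX functions above (variable names
+ * and loop structure are illustrative only):
+ *
+ *     uint32_t T = 0;
+ *
+ *     // for every full 64 bytes of data and matching keystream:
+ *     T = asm_Eia3Round64BAVX(T, ks, data);
+ *     // for the remaining bits:
+ *     T ^= asm_Eia3RemainderAVX(ks_rem, data_rem, n_remaining);
+ */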
+
+
+/* the s-boxes */
+extern const uint8_t S0[256];
+extern const uint8_t S1[256];
+
+void zuc_eea3_1_buffer_sse(const void *pKey, const void *pIv,
+ const void *pBufferIn, void *pBufferOut,
+ const uint32_t lengthInBytes);
+
+void zuc_eea3_4_buffer_sse(const void * const pKey[4],
+ const void * const pIv[4],
+ const void * const pBufferIn[4],
+ void *pBufferOut[4],
+ const uint32_t lengthInBytes[4]);
+
+void zuc_eea3_n_buffer_sse(const void * const pKey[], const void * const pIv[],
+ const void * const pBufferIn[], void *pBufferOut[],
+ const uint32_t lengthInBytes[],
+ const uint32_t numBuffers);
+
+void zuc_eia3_1_buffer_sse(const void *pKey, const void *pIv,
+ const void *pBufferIn, const uint32_t lengthInBits,
+ uint32_t *pMacI);
+
+void zuc_eea3_1_buffer_avx(const void *pKey, const void *pIv,
+ const void *pBufferIn, void *pBufferOut,
+ const uint32_t lengthInBytes);
+
+void zuc_eea3_4_buffer_avx(const void * const pKey[4],
+ const void * const pIv[4],
+ const void * const pBufferIn[4],
+ void *pBufferOut[4],
+ const uint32_t lengthInBytes[4]);
+
+void zuc_eea3_n_buffer_avx(const void * const pKey[], const void * const pIv[],
+ const void * const pBufferIn[], void *pBufferOut[],
+ const uint32_t lengthInBytes[],
+ const uint32_t numBuffers);
+
+void zuc_eia3_1_buffer_avx(const void *pKey, const void *pIv,
+ const void *pBufferIn, const uint32_t lengthInBits,
+ uint32_t *pMacI);
+
+
+#endif /* ZUC_INTERNAL_H_ */
+
diff --git a/src/spdk/intel-ipsec-mb/intel-ipsec-mb.h b/src/spdk/intel-ipsec-mb/intel-ipsec-mb.h
new file mode 100644
index 000000000..8b626a16b
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/intel-ipsec-mb.h
@@ -0,0 +1,2409 @@
+/*******************************************************************************
+ Copyright (c) 2012-2019, Intel Corporation
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*******************************************************************************/
+
+#ifndef IMB_IPSEC_MB_H
+#define IMB_IPSEC_MB_H
+
+#include <stdlib.h>
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* 128-bit data type that is not in stdint.h */
+typedef struct {
+ uint64_t low;
+ uint64_t high;
+} uint128_t;
+
+/*
+ * Macros for aligning data structures and function inlines
+ */
+#if defined __linux__ || defined __FreeBSD__
+/* Linux/FreeBSD */
+#define DECLARE_ALIGNED(decl, alignval) \
+ decl __attribute__((aligned(alignval)))
+#define __forceinline \
+ static inline __attribute__((always_inline))
+
+#if __GNUC__ >= 4
+#define IMB_DLL_EXPORT __attribute__((visibility("default")))
+#define IMB_DLL_LOCAL __attribute__((visibility("hidden")))
+#else /* GNU C older than 4.0 */
+#define IMB_DLL_EXPORT
+#define IMB_DLL_LOCAL
+#endif /* different C compiler */
+
+#else
+/* Windows */
+#define DECLARE_ALIGNED(decl, alignval) \
+ __declspec(align(alignval)) decl
+#define __forceinline \
+ static __forceinline
+
+/* Windows DLL export is done via DEF file */
+#define IMB_DLL_EXPORT
+#define IMB_DLL_LOCAL
+#endif
+
+/* Library version */
+#define IMB_VERSION_STR "0.53.0"
+#define IMB_VERSION_NUM 0x3500
+
+/* Macro to translate version number */
+#define IMB_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))
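+
+/*
+ * For example (illustrative only), an application can gate code on the library
+ * version at compile time:
+ *
+ *     #if IMB_VERSION_NUM >= IMB_VERSION(0, 53, 0)
+ *             ...
+ *     #endif
+ */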
+
+/*
+ * Custom ASSERT and DIM macros
+ */
+#ifdef DEBUG
+#include <assert.h>
+#define IMB_ASSERT(x) assert(x)
+#else
+#define IMB_ASSERT(x)
+#endif
+
+#ifndef IMB_DIM
+#define IMB_DIM(x) (sizeof(x) / sizeof(x[0]))
+#endif
+
+/*
+ * Algorithm constants
+ */
+
+#define DES_KEY_SCHED_SIZE (16 * 8) /* 16 rounds x 8 bytes */
+#define DES_BLOCK_SIZE 8
+
+#define AES_BLOCK_SIZE 16
+
+#define NUM_MD5_DIGEST_WORDS 4
+#define NUM_SHA_DIGEST_WORDS 5
+#define NUM_SHA_256_DIGEST_WORDS 8
+#define NUM_SHA_224_DIGEST_WORDS 7
+#define NUM_SHA_512_DIGEST_WORDS 8
+#define NUM_SHA_384_DIGEST_WORDS 6
+
+#define SHA_DIGEST_WORD_SIZE 4
+#define SHA224_DIGEST_WORD_SIZE 4
+#define SHA256_DIGEST_WORD_SIZE 4
+#define SHA384_DIGEST_WORD_SIZE 8
+#define SHA512_DIGEST_WORD_SIZE 8
+
+#define SHA1_DIGEST_SIZE_IN_BYTES \
+ (NUM_SHA_DIGEST_WORDS * SHA_DIGEST_WORD_SIZE)
+#define SHA224_DIGEST_SIZE_IN_BYTES \
+ (NUM_SHA_224_DIGEST_WORDS * SHA224_DIGEST_WORD_SIZE)
+#define SHA256_DIGEST_SIZE_IN_BYTES \
+ (NUM_SHA_256_DIGEST_WORDS * SHA256_DIGEST_WORD_SIZE)
+#define SHA384_DIGEST_SIZE_IN_BYTES \
+ (NUM_SHA_384_DIGEST_WORDS * SHA384_DIGEST_WORD_SIZE)
+#define SHA512_DIGEST_SIZE_IN_BYTES \
+ (NUM_SHA_512_DIGEST_WORDS * SHA512_DIGEST_WORD_SIZE)
+
+#define SHA1_BLOCK_SIZE 64 /* 512 bits is 64 byte blocks */
+#define SHA_256_BLOCK_SIZE 64 /* 512 bits is 64 byte blocks */
+#define SHA_384_BLOCK_SIZE 128
+#define SHA_512_BLOCK_SIZE 128
+
+#define KASUMI_KEY_SIZE 16
+#define KASUMI_IV_SIZE 8
+#define KASUMI_BLOCK_SIZE 8
+#define KASUMI_DIGEST_SIZE 4
+
+/* Number of lanes AVX512, AVX2, AVX and SSE */
+#define AVX512_NUM_SHA1_LANES 16
+#define AVX512_NUM_SHA256_LANES 16
+#define AVX512_NUM_SHA512_LANES 8
+#define AVX512_NUM_MD5_LANES 32
+#define AVX512_NUM_DES_LANES 16
+
+#define AVX2_NUM_SHA1_LANES 8
+#define AVX2_NUM_SHA256_LANES 8
+#define AVX2_NUM_SHA512_LANES 4
+#define AVX2_NUM_MD5_LANES 16
+
+#define AVX_NUM_SHA1_LANES 4
+#define AVX_NUM_SHA256_LANES 4
+#define AVX_NUM_SHA512_LANES 2
+#define AVX_NUM_MD5_LANES 8
+
+#define SSE_NUM_SHA1_LANES AVX_NUM_SHA1_LANES
+#define SSE_NUM_SHA256_LANES AVX_NUM_SHA256_LANES
+#define SSE_NUM_SHA512_LANES AVX_NUM_SHA512_LANES
+#define SSE_NUM_MD5_LANES AVX_NUM_MD5_LANES
+
+/*
+ * Each row is sized to hold enough lanes for AVX512; AVX2, AVX1 and SSE use
+ * a subset of each row. Thus one row is not adjacent in memory to its
+ * neighboring rows in the case of SSE and AVX1.
+ */
+#define MD5_DIGEST_SZ (NUM_MD5_DIGEST_WORDS * AVX512_NUM_MD5_LANES)
+#define SHA1_DIGEST_SZ (NUM_SHA_DIGEST_WORDS * AVX512_NUM_SHA1_LANES)
+#define SHA256_DIGEST_SZ (NUM_SHA_256_DIGEST_WORDS * AVX512_NUM_SHA256_LANES)
+#define SHA512_DIGEST_SZ (NUM_SHA_512_DIGEST_WORDS * AVX512_NUM_SHA512_LANES)
+
+/*
+ * Job structure definitions
+ */
+
+typedef enum {
+ STS_BEING_PROCESSED = 0,
+ STS_COMPLETED_AES = 1,
+ STS_COMPLETED_HMAC = 2,
+ STS_COMPLETED = 3, /* COMPLETED_AES | COMPLETED_HMAC */
+ STS_INVALID_ARGS = 4,
+ STS_INTERNAL_ERROR,
+ STS_ERROR
+} JOB_STS;
+
+typedef enum {
+ CBC = 1,
+ CNTR,
+ NULL_CIPHER,
+ DOCSIS_SEC_BPI,
+#ifndef NO_GCM
+ GCM,
+#endif /* !NO_GCM */
+ CUSTOM_CIPHER,
+ DES,
+ DOCSIS_DES,
+ CCM,
+ DES3,
+ PON_AES_CNTR,
+ ECB,
+ CNTR_BITLEN, /* 128-EEA2/NIA2 (3GPP) */
+} JOB_CIPHER_MODE;
+
+typedef enum {
+ ENCRYPT = 1,
+ DECRYPT
+} JOB_CIPHER_DIRECTION;
+
+typedef enum {
+ SHA1 = 1, /* HMAC-SHA1 */
+ SHA_224, /* HMAC-SHA224 */
+ SHA_256, /* HMAC-SHA256 */
+ SHA_384, /* HMAC-SHA384 */
+ SHA_512, /* HMAC-SHA512 */
+ AES_XCBC,
+ MD5, /* HMAC-MD5 */
+ NULL_HASH,
+#ifndef NO_GCM
+ AES_GMAC,
+#endif /* !NO_GCM */
+ CUSTOM_HASH,
+ AES_CCM, /* AES128-CCM */
+ AES_CMAC, /* AES128-CMAC */
+ PLAIN_SHA1, /* SHA1 */
+ PLAIN_SHA_224, /* SHA224 */
+ PLAIN_SHA_256, /* SHA256 */
+ PLAIN_SHA_384, /* SHA384 */
+ PLAIN_SHA_512, /* SHA512 */
+ AES_CMAC_BITLEN, /* 128-EIA2 (3GPP) */
+ PON_CRC_BIP
+} JOB_HASH_ALG;
+
+typedef enum {
+ CIPHER_HASH = 1,
+ HASH_CIPHER
+} JOB_CHAIN_ORDER;
+
+typedef enum {
+ AES_128_BYTES = 16,
+ AES_192_BYTES = 24,
+ AES_256_BYTES = 32
+} AES_KEY_SIZE_BYTES;
+
+typedef struct JOB_AES_HMAC {
+ /*
+ * For AES, aes_enc_key_expanded and aes_dec_key_expanded are
+ * expected to point to expanded keys structure.
+ * - AES-CTR, AES-ECB and AES-CCM, only aes_enc_key_expanded is used
+ * - DOCSIS (AES-CBC + AES-CFB), both pointers are used
+ * aes_enc_key_expanded has to be set always for the partial block
+ *
+ * For DES, aes_enc_key_expanded and aes_dec_key_expanded are
+ * expected to point to DES key schedule.
+ * - same key schedule used for enc and dec operations
+ *
+ * For 3DES, aes_enc_key_expanded and aes_dec_key_expanded are
+ * expected to point to an array of 3 pointers for
+ * the corresponding 3 key schedules.
+ * - same key schedule used for enc and dec operations
+ */
+ const void *aes_enc_key_expanded; /* 16-byte aligned pointer. */
+ const void *aes_dec_key_expanded;
+ uint64_t aes_key_len_in_bytes; /* 16, 24 and 32 byte (128, 192 and
+ * 256-bit) keys supported */
+ const uint8_t *src; /* Input. May be cipher text or plaintext.
+ * In-place ciphering allowed. */
+        uint8_t *dst; /* Output. May be cipher text or plaintext.
+ * In-place ciphering allowed, i.e. dst = src. */
+ uint64_t cipher_start_src_offset_in_bytes;
+ /* Max len = 65472 bytes.
+ * IPSec case, the maximum cipher
+ * length would be:
+ * 65535 -
+ * 20 (outer IP header) -
+ * 24 (ESP header + IV) -
+ * 12 (supported ICV length) */
+ union {
+ uint64_t msg_len_to_cipher_in_bytes;
+ uint64_t msg_len_to_cipher_in_bits;
+ };
+ uint64_t hash_start_src_offset_in_bytes;
+ /* Max len = 65496 bytes.
+ * (Max cipher len +
+ * 24 bytes ESP header) */
+ union {
+ uint64_t msg_len_to_hash_in_bytes;
+ uint64_t msg_len_to_hash_in_bits;
+ };
+ const uint8_t *iv; /* AES IV. */
+ uint64_t iv_len_in_bytes; /* AES IV length in bytes. */
+ uint8_t *auth_tag_output; /* HMAC Tag output. This may point to
+ * a location in the src buffer
+ * (for in place)*/
+ uint64_t auth_tag_output_len_in_bytes; /* Authentication (i.e. HMAC) tag
+ * output length in bytes
+ * (may be a truncated value) */
+
+ /* Start algorithm-specific fields */
+ union {
+ struct _HMAC_specific_fields {
+ /* Hashed result of HMAC key xor'd with ipad (0x36). */
+ const uint8_t *_hashed_auth_key_xor_ipad;
+ /* Hashed result of HMAC key xor'd with opad (0x5c). */
+ const uint8_t *_hashed_auth_key_xor_opad;
+ } HMAC;
+ struct _AES_XCBC_specific_fields {
+ /* 16-byte aligned pointers */
+ const uint32_t *_k1_expanded;
+ const uint8_t *_k2;
+ const uint8_t *_k3;
+ } XCBC;
+ struct _AES_CCM_specific_fields {
+ /* Additional Authentication Data (AAD) */
+ const void *aad;
+ uint64_t aad_len_in_bytes; /* Length of AAD */
+ } CCM;
+ struct _AES_CMAC_specific_fields {
+ const void *_key_expanded; /* 16-byte aligned */
+ const void *_skey1;
+ const void *_skey2;
+ } CMAC;
+#ifndef NO_GCM
+ struct _AES_GCM_specific_fields {
+ /* Additional Authentication Data (AAD) */
+ const void *aad;
+ uint64_t aad_len_in_bytes; /* Length of AAD */
+ } GCM;
+#endif /* !NO_GCM */
+ } u;
+
+ JOB_STS status;
+ JOB_CIPHER_MODE cipher_mode; /* CBC, CNTR, DES, GCM etc. */
+ JOB_CIPHER_DIRECTION cipher_direction; /* Encrypt/decrypt */
+ JOB_HASH_ALG hash_alg; /* SHA-1 or others... */
+ JOB_CHAIN_ORDER chain_order; /* CIPHER_HASH or HASH_CIPHER.
+ * For AES-CCM, when encrypting,
+ * HASH_CIPHER must be selected,
+ * and when decrypting,
+ * CIPHER_HASH must be selected. */
+
+ void *user_data;
+ void *user_data2;
+
+ /*
+ * stateless custom cipher and hash
+ * Return:
+ * success: 0
+ * fail: other
+ */
+ int (*cipher_func)(struct JOB_AES_HMAC *);
+ int (*hash_func)(struct JOB_AES_HMAC *);
+} JOB_AES_HMAC;
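+
+/*
+ * Example (illustrative sketch, not part of the API): filling a job for
+ * AES-128-CBC encryption chained with HMAC-SHA1, operating in place
+ * (dst == src). Names such as 'enc_keys', 'dec_keys', 'ipad_hash',
+ * 'opad_hash', 'iv', 'buf', 'len' and 'tag' are application placeholders;
+ * key expansion and the ipad/opad digests are assumed to have been prepared
+ * already (e.g. via IMB_AES_KEYEXP_128() and the sha1_one_block handler).
+ *
+ *     JOB_AES_HMAC *job = IMB_GET_NEXT_JOB(mgr);
+ *
+ *     job->cipher_mode = CBC;
+ *     job->cipher_direction = ENCRYPT;
+ *     job->hash_alg = SHA1;
+ *     job->chain_order = CIPHER_HASH;
+ *     job->aes_enc_key_expanded = enc_keys;
+ *     job->aes_dec_key_expanded = dec_keys;
+ *     job->aes_key_len_in_bytes = AES_128_BYTES;
+ *     job->iv = iv;
+ *     job->iv_len_in_bytes = 16;
+ *     job->src = buf;
+ *     job->dst = buf;
+ *     job->cipher_start_src_offset_in_bytes = 0;
+ *     job->msg_len_to_cipher_in_bytes = len;
+ *     job->hash_start_src_offset_in_bytes = 0;
+ *     job->msg_len_to_hash_in_bytes = len;
+ *     job->u.HMAC._hashed_auth_key_xor_ipad = ipad_hash;
+ *     job->u.HMAC._hashed_auth_key_xor_opad = opad_hash;
+ *     job->auth_tag_output = tag;
+ *     job->auth_tag_output_len_in_bytes = 12;
+ *     job = IMB_SUBMIT_JOB(mgr);
+ */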
+
+/*
+ * Argument structures for various algorithms
+ */
+typedef struct {
+ const uint8_t *in[16];
+ uint8_t *out[16];
+ const uint32_t *keys[16];
+ DECLARE_ALIGNED(uint128_t IV[16], 64);
+ DECLARE_ALIGNED(uint128_t key_tab[15][16], 64);
+} AES_ARGS;
+
+typedef struct {
+ DECLARE_ALIGNED(uint32_t digest[SHA1_DIGEST_SZ], 32);
+ uint8_t *data_ptr[AVX512_NUM_SHA1_LANES];
+} SHA1_ARGS;
+
+typedef struct {
+ DECLARE_ALIGNED(uint32_t digest[SHA256_DIGEST_SZ], 32);
+ uint8_t *data_ptr[AVX512_NUM_SHA256_LANES];
+} SHA256_ARGS;
+
+typedef struct {
+ DECLARE_ALIGNED(uint64_t digest[SHA512_DIGEST_SZ], 32);
+ uint8_t *data_ptr[AVX512_NUM_SHA512_LANES];
+} SHA512_ARGS;
+
+typedef struct {
+ DECLARE_ALIGNED(uint32_t digest[MD5_DIGEST_SZ], 32);
+ uint8_t *data_ptr[AVX512_NUM_MD5_LANES];
+} MD5_ARGS;
+
+typedef struct {
+ const uint8_t *in[8];
+ const uint32_t *keys[8];
+ DECLARE_ALIGNED(uint128_t ICV[8], 32);
+} AES_XCBC_ARGS_x8;
+
+typedef struct {
+ const uint8_t *in[AVX512_NUM_DES_LANES];
+ uint8_t *out[AVX512_NUM_DES_LANES];
+ const uint8_t *keys[AVX512_NUM_DES_LANES];
+ uint32_t IV[AVX512_NUM_DES_LANES * 2]; /* uint32_t is more handy here */
+ uint32_t partial_len[AVX512_NUM_DES_LANES];
+ uint32_t block_len[AVX512_NUM_DES_LANES];
+ const uint8_t *last_in[AVX512_NUM_DES_LANES];
+ uint8_t *last_out[AVX512_NUM_DES_LANES];
+} DES_ARGS_x16;
+
+/* AES out-of-order scheduler fields */
+typedef struct {
+ AES_ARGS args;
+ DECLARE_ALIGNED(uint16_t lens[16], 16);
+ /* each nibble is index (0...15) of an unused lane,
+ * the last nibble is set to F as a flag
+ */
+ uint64_t unused_lanes;
+ JOB_AES_HMAC *job_in_lane[16];
+ uint64_t num_lanes_inuse;
+} MB_MGR_AES_OOO;
+
+/* AES XCBC out-of-order scheduler fields */
+typedef struct {
+ DECLARE_ALIGNED(uint8_t final_block[2 * 16], 32);
+ JOB_AES_HMAC *job_in_lane;
+ uint64_t final_done;
+} XCBC_LANE_DATA;
+
+typedef struct {
+ AES_XCBC_ARGS_x8 args;
+ DECLARE_ALIGNED(uint16_t lens[8], 16);
+ /* each byte is index (0...3) of unused lanes
+ * byte 4 is set to FF as a flag
+ */
+ uint64_t unused_lanes;
+ XCBC_LANE_DATA ldata[8];
+} MB_MGR_AES_XCBC_OOO;
+
+/* AES-CCM out-of-order scheduler structure */
+typedef struct {
+ AES_ARGS args; /* need to re-use AES arguments */
+ DECLARE_ALIGNED(uint16_t lens[8], 16);
+ DECLARE_ALIGNED(uint16_t init_done[8], 16);
+ /* each byte is index (0...3) of unused lanes
+ * byte 4 is set to FF as a flag
+ */
+ uint64_t unused_lanes;
+ JOB_AES_HMAC *job_in_lane[8];
+ DECLARE_ALIGNED(uint8_t init_blocks[8 * (4 * 16)], 32);
+} MB_MGR_CCM_OOO;
+
+
+/* AES-CMAC out-of-order scheduler structure */
+typedef struct {
+ AES_ARGS args; /* need to re-use AES arguments */
+ DECLARE_ALIGNED(uint16_t lens[8], 16);
+ DECLARE_ALIGNED(uint16_t init_done[8], 16);
+ /* each byte is index (0...3) of unused lanes
+ * byte 4 is set to FF as a flag
+ */
+ uint64_t unused_lanes;
+ JOB_AES_HMAC *job_in_lane[8];
+ DECLARE_ALIGNED(uint8_t scratch[8 * 16], 32);
+} MB_MGR_CMAC_OOO;
+
+
+/* DES out-of-order scheduler fields */
+typedef struct {
+ DES_ARGS_x16 args;
+ DECLARE_ALIGNED(uint16_t lens[16], 16);
+ /* each nibble is index (0...7) of unused lanes
+ * nibble 8 is set to F as a flag
+ */
+ uint64_t unused_lanes;
+ JOB_AES_HMAC *job_in_lane[16];
+ uint64_t num_lanes_inuse;
+} MB_MGR_DES_OOO;
+
+
+/* HMAC-SHA1 and HMAC-SHA256/224 */
+typedef struct {
+ /* YMM aligned access to extra_block */
+ DECLARE_ALIGNED(uint8_t extra_block[2 * SHA1_BLOCK_SIZE+8], 32);
+ JOB_AES_HMAC *job_in_lane;
+ uint8_t outer_block[64];
+ uint32_t outer_done;
+ uint32_t extra_blocks; /* num extra blocks (1 or 2) */
+ uint32_t size_offset; /* offset in extra_block to start of
+ * size field */
+ uint32_t start_offset; /* offset to start of data */
+} HMAC_SHA1_LANE_DATA;
+
+/* HMAC-SHA512/384 */
+typedef struct {
+ DECLARE_ALIGNED(uint8_t extra_block[2 * SHA_512_BLOCK_SIZE + 16], 32);
+ uint8_t outer_block[SHA_512_BLOCK_SIZE];
+ JOB_AES_HMAC *job_in_lane;
+ uint32_t outer_done;
+ uint32_t extra_blocks; /* num extra blocks (1 or 2) */
+ uint32_t size_offset; /* offset in extra_block to start of
+ * size field */
+ uint32_t start_offset; /* offset to start of data */
+} HMAC_SHA512_LANE_DATA;
+
+/*
+ * unused_lanes contains a list of unused lanes stored as bytes or as
+ * nibbles depending on the arch. The end of list is either FF or F.
+ */
+typedef struct {
+ SHA1_ARGS args;
+ DECLARE_ALIGNED(uint16_t lens[16], 32);
+ uint64_t unused_lanes;
+ HMAC_SHA1_LANE_DATA ldata[AVX512_NUM_SHA1_LANES];
+ uint32_t num_lanes_inuse;
+} MB_MGR_HMAC_SHA_1_OOO;
+
+typedef struct {
+ SHA256_ARGS args;
+ DECLARE_ALIGNED(uint16_t lens[16], 16);
+ uint64_t unused_lanes;
+ HMAC_SHA1_LANE_DATA ldata[AVX512_NUM_SHA256_LANES];
+ uint32_t num_lanes_inuse;
+} MB_MGR_HMAC_SHA_256_OOO;
+
+typedef struct {
+ SHA512_ARGS args;
+ DECLARE_ALIGNED(uint16_t lens[8], 16);
+ uint64_t unused_lanes;
+ HMAC_SHA512_LANE_DATA ldata[AVX512_NUM_SHA512_LANES];
+} MB_MGR_HMAC_SHA_512_OOO;
+
+/* MD5-HMAC out-of-order scheduler fields */
+typedef struct {
+ MD5_ARGS args;
+ DECLARE_ALIGNED(uint16_t lens[AVX512_NUM_MD5_LANES], 16);
+ /*
+ * In the avx2 case, all 16 nibbles of unused lanes are used.
+ * In that case num_lanes_inuse is used to detect the end of the list.
+ */
+ uint64_t unused_lanes;
+ HMAC_SHA1_LANE_DATA ldata[AVX512_NUM_MD5_LANES];
+ uint32_t num_lanes_inuse;
+} MB_MGR_HMAC_MD5_OOO;
+
+
+/* KASUMI */
+
+/* 64 precomputed words for key schedule */
+#define KASUMI_KEY_SCHEDULE_SIZE 64
+
+/**
+ * Structure to maintain internal key scheduling
+ */
+typedef struct kasumi_key_sched_s {
+ /* Kasumi internal scheduling */
+ uint16_t sk16[KASUMI_KEY_SCHEDULE_SIZE]; /* key schedule */
+ uint16_t msk16[KASUMI_KEY_SCHEDULE_SIZE]; /* modified key schedule */
+} kasumi_key_sched_t;
+
+/* GCM data structures */
+#define GCM_BLOCK_LEN 16
+
+/**
+ * @brief holds GCM operation context
+ */
+struct gcm_context_data {
+ /* init, update and finalize context data */
+ uint8_t aad_hash[GCM_BLOCK_LEN];
+ uint64_t aad_length;
+ uint64_t in_length;
+ uint8_t partial_block_enc_key[GCM_BLOCK_LEN];
+ uint8_t orig_IV[GCM_BLOCK_LEN];
+ uint8_t current_counter[GCM_BLOCK_LEN];
+ uint64_t partial_block_length;
+};
+
+/* Authenticated Tag Length in bytes.
+ * Valid values are 16 (most likely), 12 or 8. */
+#define MAX_TAG_LEN (16)
+
+/*
+ * IV data is limited to 16 bytes as follows:
+ * 12 bytes are provided by the application -
+ * pre-counter block j0: 4 byte salt (from the Security Association)
+ * concatenated with an 8 byte Initialization Vector (from the IPSec ESP
+ * Payload).
+ * The 4 byte value 0x00000001 is appended automatically by the library -
+ * there is no need to add these 4 bytes on the application side anymore.
+ */
+#define GCM_IV_DATA_LEN (12)
+
+#define LONGEST_TESTED_AAD_LENGTH (2 * 1024)
+
+/* Key lengths of 128 and 256 supported */
+#define GCM_128_KEY_LEN (16)
+#define GCM_192_KEY_LEN (24)
+#define GCM_256_KEY_LEN (32)
+
+/* #define GCM_BLOCK_LEN 16 */
+#define GCM_ENC_KEY_LEN 16
+#define GCM_KEY_SETS (15) /*exp key + 14 exp round keys*/
+
+/**
+ * @brief holds intermediate key data needed to improve performance
+ *
+ * gcm_key_data holds internal key information used by gcm128, gcm192 and gcm256.
+ */
+#ifdef __WIN32
+__declspec(align(64))
+#endif /* __WIN32 */
+struct gcm_key_data {
+ uint8_t expanded_keys[GCM_ENC_KEY_LEN * GCM_KEY_SETS];
+ union {
+ /* Storage for precomputed hash keys */
+ struct {
+ /*
+ * This is needed for schoolbook multiply purposes.
+ * (HashKey<<1 mod poly), (HashKey^2<<1 mod poly), ...,
+ * (Hashkey^48<<1 mod poly)
+ */
+ uint8_t shifted_hkey[GCM_ENC_KEY_LEN * 8];
+ /*
+ * This is needed for Karatsuba multiply purposes.
+ * Storage for XOR of High 64 bits and low 64 bits
+ * of HashKey mod poly.
+ *
+ * (HashKey<<1 mod poly), (HashKey^2<<1 mod poly), ...,
+ * (Hashkey^128<<1 mod poly)
+ */
+ uint8_t shifted_hkey_k[GCM_ENC_KEY_LEN * 8];
+ } sse_avx;
+ struct {
+ /*
+ * This is needed for schoolbook multiply purposes.
+ * (HashKey<<1 mod poly), (HashKey^2<<1 mod poly), ...,
+ * (Hashkey^48<<1 mod poly)
+ */
+ uint8_t shifted_hkey[GCM_ENC_KEY_LEN * 8];
+ } avx2_avx512;
+ struct {
+#ifdef GCM_BIG_DATA
+ /*
+ * (HashKey<<1 mod poly), (HashKey^2<<1 mod poly), ...,
+ * (Hashkey^128<<1 mod poly)
+ */
+ uint8_t shifted_hkey[GCM_ENC_KEY_LEN * 128];
+#else
+ /*
+ * (HashKey<<1 mod poly), (HashKey^2<<1 mod poly), ...,
+ * (Hashkey^48<<1 mod poly)
+ */
+ uint8_t shifted_hkey[GCM_ENC_KEY_LEN * 48];
+#endif
+ } vaes_avx512;
+ } ghash_keys;
+}
+#ifdef LINUX
+__attribute__((aligned(64)));
+#else
+;
+#endif
+
+/* ========================================================================== */
+/* API data type definitions */
+struct MB_MGR;
+
+typedef void (*init_mb_mgr_t)(struct MB_MGR *);
+typedef JOB_AES_HMAC *(*get_next_job_t)(struct MB_MGR *);
+typedef JOB_AES_HMAC *(*submit_job_t)(struct MB_MGR *);
+typedef JOB_AES_HMAC *(*get_completed_job_t)(struct MB_MGR *);
+typedef JOB_AES_HMAC *(*flush_job_t)(struct MB_MGR *);
+typedef uint32_t (*queue_size_t)(struct MB_MGR *);
+typedef void (*keyexp_t)(const void *, void *, void *);
+typedef void (*cmac_subkey_gen_t)(const void *, void *, void *);
+typedef void (*hash_one_block_t)(const void *, void *);
+typedef void (*hash_fn_t)(const void *, const uint64_t, void *);
+typedef void (*xcbc_keyexp_t)(const void *, void *, void *, void *);
+typedef int (*des_keysched_t)(uint64_t *, const void *);
+typedef void (*aes128_cfb_t)(void *, const void *, const void *, const void *,
+ uint64_t);
+typedef void (*aes_gcm_enc_dec_t)(const struct gcm_key_data *,
+ struct gcm_context_data *,
+ uint8_t *, uint8_t const *, uint64_t,
+ const uint8_t *, uint8_t const *, uint64_t,
+ uint8_t *, uint64_t);
+typedef void (*aes_gcm_init_t)(const struct gcm_key_data *,
+ struct gcm_context_data *,
+ const uint8_t *, uint8_t const *, uint64_t);
+typedef void (*aes_gcm_enc_dec_update_t)(const struct gcm_key_data *,
+ struct gcm_context_data *,
+ uint8_t *, const uint8_t *, uint64_t);
+typedef void (*aes_gcm_enc_dec_finalize_t)(const struct gcm_key_data *,
+ struct gcm_context_data *,
+ uint8_t *, uint64_t);
+typedef void (*aes_gcm_precomp_t)(struct gcm_key_data *);
+typedef void (*aes_gcm_pre_t)(const void *, struct gcm_key_data *);
+
+typedef void (*zuc_eea3_1_buffer_t)(const void *, const void *, const void *,
+ void *, const uint32_t);
+
+typedef void (*zuc_eea3_4_buffer_t)(const void * const *, const void * const *,
+ const void * const *, void **,
+ const uint32_t *);
+
+typedef void (*zuc_eea3_n_buffer_t)(const void * const *, const void * const *,
+ const void * const *, void **,
+ const uint32_t *, const uint32_t);
+
+typedef void (*zuc_eia3_1_buffer_t)(const void *, const void *, const void *,
+ const uint32_t, uint32_t *);
+
+typedef void (*kasumi_f8_1_buffer_t)(const kasumi_key_sched_t *,
+ const uint64_t, const void *, void *,
+ const uint32_t);
+typedef void (*kasumi_f8_1_buffer_bit_t)(const kasumi_key_sched_t *,
+ const uint64_t, const void *,
+ void *,
+ const uint32_t, const uint32_t);
+typedef void (*kasumi_f8_2_buffer_t)(const kasumi_key_sched_t *,
+ const uint64_t, const uint64_t,
+ const void *, void *,
+ const uint32_t,
+ const void *, void *,
+ const uint32_t);
+typedef void (*kasumi_f8_3_buffer_t)(const kasumi_key_sched_t *,
+ const uint64_t, const uint64_t,
+ const uint64_t,
+ const void *, void *,
+ const void *, void *,
+ const void *, void *,
+ const uint32_t);
+typedef void (*kasumi_f8_4_buffer_t)(const kasumi_key_sched_t *,
+ const uint64_t, const uint64_t,
+ const uint64_t, const uint64_t,
+ const void *, void *,
+ const void *, void *,
+ const void *, void *,
+ const void *, void *,
+ const uint32_t);
+typedef void (*kasumi_f8_n_buffer_t)(const kasumi_key_sched_t *,
+ const uint64_t *, const void * const *,
+ void **, const uint32_t *,
+ const uint32_t);
+typedef void (*kasumi_f9_1_buffer_user_t)(const kasumi_key_sched_t *,
+ const uint64_t, const void *,
+ const uint32_t, void *,
+ const uint32_t);
+typedef void (*kasumi_f9_1_buffer_t)(const kasumi_key_sched_t *,
+ const void *,
+ const uint32_t, void *);
+typedef int (*kasumi_init_f8_key_sched_t)(const void *,
+ kasumi_key_sched_t *);
+typedef int (*kasumi_init_f9_key_sched_t)(const void *,
+ kasumi_key_sched_t *);
+typedef size_t (*kasumi_key_sched_size_t)(void);
+
+
+/**
+ * Snow3G key scheduling structure
+ */
+typedef struct snow3g_key_schedule_s {
+ /* KEY */
+ uint32_t k[4];
+} snow3g_key_schedule_t;
+
+typedef void (*snow3g_f8_1_buffer_t)(const snow3g_key_schedule_t *,
+ const void *, const void *,
+ void *, const uint32_t);
+
+typedef void (*snow3g_f8_1_buffer_bit_t)(const snow3g_key_schedule_t *,
+ const void *, const void *, void *,
+ const uint32_t, const uint32_t);
+
+typedef void (*snow3g_f8_2_buffer_t)(const snow3g_key_schedule_t *,
+ const void *, const void *,
+ const void *, void *, const uint32_t,
+ const void *, void *,const uint32_t);
+
+typedef void (*snow3g_f8_4_buffer_t)(const snow3g_key_schedule_t *,
+ const void *, const void *,const void *,
+ const void *, const void *, void *,
+ const uint32_t, const void *, void *,
+ const uint32_t, const void *, void *,
+ const uint32_t, const void *, void *,
+ const uint32_t);
+
+typedef void (*snow3g_f8_8_buffer_t)(const snow3g_key_schedule_t *,
+ const void *, const void *,const void *,
+ const void *, const void *, const void *,
+ const void *, const void *, const void *,
+ void *, const uint32_t, const void *,
+ void *, const uint32_t, const void *,
+ void *, const uint32_t, const void *,
+ void *, const uint32_t, const void *,
+ void *, const uint32_t, const void *,
+ void *, const uint32_t, const void *,
+ void *, const uint32_t, const void *,
+ void *, const uint32_t);
+
+typedef void
+(*snow3g_f8_8_buffer_multikey_t)(const snow3g_key_schedule_t * const [],
+ const void * const [], const void * const [],
+ void *[], const uint32_t[]);
+
+typedef void (*snow3g_f8_n_buffer_t)(const snow3g_key_schedule_t *,
+ const void * const [],
+ const void * const [],
+ void *[], const uint32_t[],
+ const uint32_t);
+
+typedef void
+(*snow3g_f8_n_buffer_multikey_t)(const snow3g_key_schedule_t * const [],
+ const void * const [],
+ const void * const [],
+ void *[], const uint32_t[],
+ const uint32_t);
+
+typedef void (*snow3g_f9_1_buffer_t)(const snow3g_key_schedule_t *,
+ const void *, const void *,
+ const uint64_t, void *);
+
+typedef int (*snow3g_init_key_sched_t)(const void *,
+ snow3g_key_schedule_t *);
+
+typedef size_t (*snow3g_key_sched_size_t)(void);
+
+/* ========================================================================== */
+/* Multi-buffer manager flags passed to alloc_mb_mgr() */
+
+#define IMB_FLAG_SHANI_OFF (1ULL << 0) /* disable use of SHANI extension */
+#define IMB_FLAG_AESNI_OFF (1ULL << 1) /* disable use of AESNI extension */
+
+/* ========================================================================== */
+/* Multi-buffer manager detected features
+ * - if bit is set then hardware supports given extension
+ * - valid after call to init_mb_mgr() or alloc_mb_mgr()
+ * - some HW supported features can be disabled via IMB_FLAG_xxx (see above)
+ */
+
+#define IMB_FEATURE_SHANI (1ULL << 0)
+#define IMB_FEATURE_AESNI (1ULL << 1)
+#define IMB_FEATURE_PCLMULQDQ (1ULL << 2)
+#define IMB_FEATURE_CMOV (1ULL << 3)
+#define IMB_FEATURE_SSE4_2 (1ULL << 4)
+#define IMB_FEATURE_AVX (1ULL << 5)
+#define IMB_FEATURE_AVX2 (1ULL << 6)
+#define IMB_FEATURE_AVX512F (1ULL << 7)
+#define IMB_FEATURE_AVX512DQ (1ULL << 8)
+#define IMB_FEATURE_AVX512CD (1ULL << 9)
+#define IMB_FEATURE_AVX512BW (1ULL << 10)
+#define IMB_FEATURE_AVX512VL (1ULL << 11)
+#define IMB_FEATURE_AVX512_SKX (IMB_FEATURE_AVX512F | IMB_FEATURE_AVX512DQ | \
+ IMB_FEATURE_AVX512CD | IMB_FEATURE_AVX512BW | \
+ IMB_FEATURE_AVX512VL)
+#define IMB_FEATURE_VAES (1ULL << 12)
+#define IMB_FEATURE_VPCLMULQDQ (1ULL << 13)
+#define IMB_FEATURE_SAFE_DATA (1ULL << 14)
+#define IMB_FEATURE_SAFE_PARAM (1ULL << 15)
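+
+/*
+ * Example (sketch): checking detected features after manager allocation and
+ * initialisation. The AVX2 initialiser is just one of the available arch
+ * init functions; 'mgr' handling is reduced to a NULL check.
+ *
+ *     MB_MGR *mgr = alloc_mb_mgr(0);
+ *
+ *     if (mgr == NULL)
+ *             return -1;
+ *     init_mb_mgr_avx2(mgr);
+ *     const int has_shani = (mgr->features & IMB_FEATURE_SHANI) != 0;
+ */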
+
+/* ========================================================================== */
+/* TOP LEVEL (MB_MGR) Data structure fields */
+
+#define MAX_JOBS 128
+
+typedef struct MB_MGR {
+ /*
+ * flags - passed to alloc_mb_mgr()
+ * features - reflects features of multi-buffer instance
+ */
+ uint64_t flags;
+ uint64_t features;
+
+ /*
+ * Reserved for the future
+ */
+ uint64_t reserved[6];
+
+ /*
+ * ARCH handlers / API
+ * Careful as changes here can break ABI compatibility
+ */
+ get_next_job_t get_next_job;
+ submit_job_t submit_job;
+ submit_job_t submit_job_nocheck;
+ get_completed_job_t get_completed_job;
+ flush_job_t flush_job;
+ queue_size_t queue_size;
+ keyexp_t keyexp_128;
+ keyexp_t keyexp_192;
+ keyexp_t keyexp_256;
+ cmac_subkey_gen_t cmac_subkey_gen_128;
+ xcbc_keyexp_t xcbc_keyexp;
+ des_keysched_t des_key_sched;
+ hash_one_block_t sha1_one_block;
+ hash_one_block_t sha224_one_block;
+ hash_one_block_t sha256_one_block;
+ hash_one_block_t sha384_one_block;
+ hash_one_block_t sha512_one_block;
+ hash_one_block_t md5_one_block;
+ hash_fn_t sha1;
+ hash_fn_t sha224;
+ hash_fn_t sha256;
+ hash_fn_t sha384;
+ hash_fn_t sha512;
+ aes128_cfb_t aes128_cfb_one;
+
+ aes_gcm_enc_dec_t gcm128_enc;
+ aes_gcm_enc_dec_t gcm192_enc;
+ aes_gcm_enc_dec_t gcm256_enc;
+ aes_gcm_enc_dec_t gcm128_dec;
+ aes_gcm_enc_dec_t gcm192_dec;
+ aes_gcm_enc_dec_t gcm256_dec;
+ aes_gcm_init_t gcm128_init;
+ aes_gcm_init_t gcm192_init;
+ aes_gcm_init_t gcm256_init;
+ aes_gcm_enc_dec_update_t gcm128_enc_update;
+ aes_gcm_enc_dec_update_t gcm192_enc_update;
+ aes_gcm_enc_dec_update_t gcm256_enc_update;
+ aes_gcm_enc_dec_update_t gcm128_dec_update;
+ aes_gcm_enc_dec_update_t gcm192_dec_update;
+ aes_gcm_enc_dec_update_t gcm256_dec_update;
+ aes_gcm_enc_dec_finalize_t gcm128_enc_finalize;
+ aes_gcm_enc_dec_finalize_t gcm192_enc_finalize;
+ aes_gcm_enc_dec_finalize_t gcm256_enc_finalize;
+ aes_gcm_enc_dec_finalize_t gcm128_dec_finalize;
+ aes_gcm_enc_dec_finalize_t gcm192_dec_finalize;
+ aes_gcm_enc_dec_finalize_t gcm256_dec_finalize;
+ aes_gcm_precomp_t gcm128_precomp;
+ aes_gcm_precomp_t gcm192_precomp;
+ aes_gcm_precomp_t gcm256_precomp;
+ aes_gcm_pre_t gcm128_pre;
+ aes_gcm_pre_t gcm192_pre;
+ aes_gcm_pre_t gcm256_pre;
+
+ zuc_eea3_1_buffer_t eea3_1_buffer;
+ zuc_eea3_4_buffer_t eea3_4_buffer;
+ zuc_eea3_n_buffer_t eea3_n_buffer;
+ zuc_eia3_1_buffer_t eia3_1_buffer;
+
+ kasumi_f8_1_buffer_t f8_1_buffer;
+ kasumi_f8_1_buffer_bit_t f8_1_buffer_bit;
+ kasumi_f8_2_buffer_t f8_2_buffer;
+ kasumi_f8_3_buffer_t f8_3_buffer;
+ kasumi_f8_4_buffer_t f8_4_buffer;
+ kasumi_f8_n_buffer_t f8_n_buffer;
+ kasumi_f9_1_buffer_t f9_1_buffer;
+ kasumi_f9_1_buffer_user_t f9_1_buffer_user;
+ kasumi_init_f8_key_sched_t kasumi_init_f8_key_sched;
+ kasumi_init_f9_key_sched_t kasumi_init_f9_key_sched;
+ kasumi_key_sched_size_t kasumi_key_sched_size;
+
+ snow3g_f8_1_buffer_bit_t snow3g_f8_1_buffer_bit;
+ snow3g_f8_1_buffer_t snow3g_f8_1_buffer;
+ snow3g_f8_2_buffer_t snow3g_f8_2_buffer;
+ snow3g_f8_4_buffer_t snow3g_f8_4_buffer;
+ snow3g_f8_8_buffer_t snow3g_f8_8_buffer;
+ snow3g_f8_n_buffer_t snow3g_f8_n_buffer;
+ snow3g_f8_8_buffer_multikey_t snow3g_f8_8_buffer_multikey;
+ snow3g_f8_n_buffer_multikey_t snow3g_f8_n_buffer_multikey;
+ snow3g_f9_1_buffer_t snow3g_f9_1_buffer;
+ snow3g_init_key_sched_t snow3g_init_key_sched;
+ snow3g_key_sched_size_t snow3g_key_sched_size;
+
+ /* in-order scheduler fields */
+ int earliest_job; /* byte offset, -1 if none */
+ int next_job; /* byte offset */
+ JOB_AES_HMAC jobs[MAX_JOBS];
+
+ /* out of order managers */
+ DECLARE_ALIGNED(MB_MGR_AES_OOO aes128_ooo, 64);
+ DECLARE_ALIGNED(MB_MGR_AES_OOO aes192_ooo, 64);
+ DECLARE_ALIGNED(MB_MGR_AES_OOO aes256_ooo, 64);
+ DECLARE_ALIGNED(MB_MGR_AES_OOO docsis_sec_ooo, 64);
+ DECLARE_ALIGNED(MB_MGR_DES_OOO des_enc_ooo, 64);
+ DECLARE_ALIGNED(MB_MGR_DES_OOO des_dec_ooo, 64);
+ DECLARE_ALIGNED(MB_MGR_DES_OOO des3_enc_ooo, 64);
+ DECLARE_ALIGNED(MB_MGR_DES_OOO des3_dec_ooo, 64);
+ DECLARE_ALIGNED(MB_MGR_DES_OOO docsis_des_enc_ooo, 64);
+ DECLARE_ALIGNED(MB_MGR_DES_OOO docsis_des_dec_ooo, 64);
+
+ DECLARE_ALIGNED(MB_MGR_HMAC_SHA_1_OOO hmac_sha_1_ooo, 64);
+ DECLARE_ALIGNED(MB_MGR_HMAC_SHA_256_OOO hmac_sha_224_ooo, 64);
+ DECLARE_ALIGNED(MB_MGR_HMAC_SHA_256_OOO hmac_sha_256_ooo, 64);
+ DECLARE_ALIGNED(MB_MGR_HMAC_SHA_512_OOO hmac_sha_384_ooo, 64);
+ DECLARE_ALIGNED(MB_MGR_HMAC_SHA_512_OOO hmac_sha_512_ooo, 64);
+ DECLARE_ALIGNED(MB_MGR_HMAC_MD5_OOO hmac_md5_ooo, 64);
+ DECLARE_ALIGNED(MB_MGR_AES_XCBC_OOO aes_xcbc_ooo, 64);
+ DECLARE_ALIGNED(MB_MGR_CCM_OOO aes_ccm_ooo, 64);
+ DECLARE_ALIGNED(MB_MGR_CMAC_OOO aes_cmac_ooo, 64);
+} MB_MGR;
+
+/* ========================================================================== */
+/* API definitions */
+
+/**
+ * @brief Get library version in string format
+ *
+ * @return library version string
+ */
+IMB_DLL_EXPORT const char *imb_get_version_str(void);
+
+/**
+ * @brief Get library version in numerical format
+ *
+ * Use IMB_VERSION() macro to compare this
+ * numerical version against known library version.
+ *
+ * @return library version number
+ */
+IMB_DLL_EXPORT unsigned imb_get_version(void);
+
+/*
+ * get_next_job returns a job object. This must be filled in and returned
+ * via submit_job before get_next_job is called again.
+ * After submit_job is called, one should call get_completed_job() at least
+ * once (and preferably until it returns NULL).
+ * get_completed_job and flush_job return a job object. This job object ceases
+ * to be usable at the next call to get_next_job.
+ */
+IMB_DLL_EXPORT MB_MGR *alloc_mb_mgr(uint64_t flags);
+IMB_DLL_EXPORT void free_mb_mgr(MB_MGR *state);
+
+IMB_DLL_EXPORT void init_mb_mgr_avx(MB_MGR *state);
+IMB_DLL_EXPORT JOB_AES_HMAC *submit_job_avx(MB_MGR *state);
+IMB_DLL_EXPORT JOB_AES_HMAC *submit_job_nocheck_avx(MB_MGR *state);
+IMB_DLL_EXPORT JOB_AES_HMAC *flush_job_avx(MB_MGR *state);
+IMB_DLL_EXPORT uint32_t queue_size_avx(MB_MGR *state);
+IMB_DLL_EXPORT JOB_AES_HMAC *get_completed_job_avx(MB_MGR *state);
+IMB_DLL_EXPORT JOB_AES_HMAC *get_next_job_avx(MB_MGR *state);
+
+IMB_DLL_EXPORT void init_mb_mgr_avx2(MB_MGR *state);
+IMB_DLL_EXPORT JOB_AES_HMAC *submit_job_avx2(MB_MGR *state);
+IMB_DLL_EXPORT JOB_AES_HMAC *submit_job_nocheck_avx2(MB_MGR *state);
+IMB_DLL_EXPORT JOB_AES_HMAC *flush_job_avx2(MB_MGR *state);
+IMB_DLL_EXPORT uint32_t queue_size_avx2(MB_MGR *state);
+IMB_DLL_EXPORT JOB_AES_HMAC *get_completed_job_avx2(MB_MGR *state);
+IMB_DLL_EXPORT JOB_AES_HMAC *get_next_job_avx2(MB_MGR *state);
+
+IMB_DLL_EXPORT void init_mb_mgr_avx512(MB_MGR *state);
+IMB_DLL_EXPORT JOB_AES_HMAC *submit_job_avx512(MB_MGR *state);
+IMB_DLL_EXPORT JOB_AES_HMAC *submit_job_nocheck_avx512(MB_MGR *state);
+IMB_DLL_EXPORT JOB_AES_HMAC *flush_job_avx512(MB_MGR *state);
+IMB_DLL_EXPORT uint32_t queue_size_avx512(MB_MGR *state);
+IMB_DLL_EXPORT JOB_AES_HMAC *get_completed_job_avx512(MB_MGR *state);
+IMB_DLL_EXPORT JOB_AES_HMAC *get_next_job_avx512(MB_MGR *state);
+
+IMB_DLL_EXPORT void init_mb_mgr_sse(MB_MGR *state);
+IMB_DLL_EXPORT JOB_AES_HMAC *submit_job_sse(MB_MGR *state);
+IMB_DLL_EXPORT JOB_AES_HMAC *submit_job_nocheck_sse(MB_MGR *state);
+IMB_DLL_EXPORT JOB_AES_HMAC *flush_job_sse(MB_MGR *state);
+IMB_DLL_EXPORT uint32_t queue_size_sse(MB_MGR *state);
+IMB_DLL_EXPORT JOB_AES_HMAC *get_completed_job_sse(MB_MGR *state);
+IMB_DLL_EXPORT JOB_AES_HMAC *get_next_job_sse(MB_MGR *state);
+
+/*
+ * Wrapper macros to call the arch-specific API's set up
+ * at the init phase of the multi-buffer manager.
+ *
+ * For example, after calling init_mb_mgr_sse(&mgr)
+ * the 'mgr' structure will be set up so that:
+ * mgr.get_next_job will point to get_next_job_sse(),
+ * mgr.submit_job will point to submit_job_sse(),
+ * mgr.submit_job_nocheck will point to submit_job_nocheck_sse(),
+ * mgr.get_completed_job will point to get_completed_job_sse(),
+ * mgr.flush_job will point to flush_job_sse(),
+ * mgr.queue_size will point to queue_size_sse()
+ * mgr.keyexp_128 will point to aes_keyexp_128_sse()
+ * mgr.keyexp_192 will point to aes_keyexp_192_sse()
+ * mgr.keyexp_256 will point to aes_keyexp_256_sse()
+ * etc.
+ *
+ * Direct use of the arch API's may result in better performance.
+ * Using the indirect interface below may produce slightly worse performance,
+ * but it can simplify application implementation.
+ * LibTestApp provides an example of using the indirect interface.
+ */
+#define IMB_GET_NEXT_JOB(_mgr) ((_mgr)->get_next_job((_mgr)))
+#define IMB_SUBMIT_JOB(_mgr) ((_mgr)->submit_job((_mgr)))
+#define IMB_SUBMIT_JOB_NOCHECK(_mgr) ((_mgr)->submit_job_nocheck((_mgr)))
+#define IMB_GET_COMPLETED_JOB(_mgr) ((_mgr)->get_completed_job((_mgr)))
+#define IMB_FLUSH_JOB(_mgr) ((_mgr)->flush_job((_mgr)))
+#define IMB_QUEUE_SIZE(_mgr) ((_mgr)->queue_size((_mgr)))
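+
+/*
+ * Example (sketch) of a processing loop built on the indirect interface.
+ * 'more_work()' and 'fill_job()' are application placeholders (field setup
+ * is sketched after the JOB_AES_HMAC definition above); error handling is
+ * reduced to a status check.
+ *
+ *     JOB_AES_HMAC *job;
+ *
+ *     while (more_work()) {
+ *             job = IMB_GET_NEXT_JOB(mgr);
+ *             fill_job(job);
+ *             job = IMB_SUBMIT_JOB(mgr);
+ *             while (job != NULL) {
+ *                     if (job->status != STS_COMPLETED)
+ *                             return -1;
+ *                     job = IMB_GET_COMPLETED_JOB(mgr);
+ *             }
+ *     }
+ *     while ((job = IMB_FLUSH_JOB(mgr)) != NULL) {
+ *             if (job->status != STS_COMPLETED)
+ *                     return -1;
+ *     }
+ */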
+
+/* Key expansion and generation API's */
+#define IMB_AES_KEYEXP_128(_mgr, _raw, _enc, _dec) \
+ ((_mgr)->keyexp_128((_raw), (_enc), (_dec)))
+#define IMB_AES_KEYEXP_192(_mgr, _raw, _enc, _dec) \
+ ((_mgr)->keyexp_192((_raw), (_enc), (_dec)))
+#define IMB_AES_KEYEXP_256(_mgr, _raw, _enc, _dec) \
+ ((_mgr)->keyexp_256((_raw), (_enc), (_dec)))
+
+#define IMB_AES_CMAC_SUBKEY_GEN_128(_mgr, _key_exp, _k1, _k2) \
+ ((_mgr)->cmac_subkey_gen_128((_key_exp), (_k1), (_k2)))
+
+#define IMB_AES_XCBC_KEYEXP(_mgr, _key, _k1_exp, _k2, _k3) \
+ ((_mgr)->xcbc_keyexp((_key), (_k1_exp), (_k2), (_k3)))
+
+#define IMB_DES_KEYSCHED(_mgr, _ks, _key) \
+ ((_mgr)->des_key_sched((_ks), (_key)))
+
+/* Hash API's */
+#define IMB_SHA1_ONE_BLOCK(_mgr, _data, _digest) \
+ ((_mgr)->sha1_one_block((_data), (_digest)))
+#define IMB_SHA1(_mgr, _data, _length, _digest) \
+ ((_mgr)->sha1((_data), (_length), (_digest)))
+#define IMB_SHA224_ONE_BLOCK(_mgr, _data, _digest) \
+ ((_mgr)->sha224_one_block((_data), (_digest)))
+#define IMB_SHA224(_mgr, _data, _length, _digest) \
+ ((_mgr)->sha224((_data), (_length), (_digest)))
+#define IMB_SHA256_ONE_BLOCK(_mgr, _data, _digest) \
+ ((_mgr)->sha256_one_block((_data), (_digest)))
+#define IMB_SHA256(_mgr, _data, _length, _digest) \
+ ((_mgr)->sha256((_data), (_length), (_digest)))
+#define IMB_SHA384_ONE_BLOCK(_mgr, _data, _digest) \
+ ((_mgr)->sha384_one_block((_data), (_digest)))
+#define IMB_SHA384(_mgr, _data, _length, _digest) \
+ ((_mgr)->sha384((_data), (_length), (_digest)))
+#define IMB_SHA512_ONE_BLOCK(_mgr, _data, _digest) \
+ ((_mgr)->sha512_one_block((_data), (_digest)))
+#define IMB_SHA512(_mgr, _data, _length, _digest) \
+ ((_mgr)->sha512((_data), (_length), (_digest)))
+#define IMB_MD5_ONE_BLOCK(_mgr, _data, _digest) \
+ ((_mgr)->md5_one_block((_data), (_digest)))
+
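+/*
+ * Example (sketch): one-shot SHA-256 of an application buffer through the
+ * indirect interface. 'msg' and 'msg_len' are placeholders; the digest
+ * buffer must hold 32 bytes for SHA-256.
+ *
+ *     uint8_t digest[32];
+ *
+ *     IMB_SHA256(mgr, msg, msg_len, digest);
+ */
+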
+/* AES-CFB API */
+#define IMB_AES128_CFB_ONE(_mgr, _out, _in, _iv, _enc, _len) \
+ ((_mgr)->aes128_cfb_one((_out), (_in), (_iv), (_enc), (_len)))
+
+/* AES-GCM API's */
+#define IMB_AES128_GCM_ENC(_mgr, _key, _ctx, _out, _in, _len, _iv, _aad, _aadl,\
+ _tag, _tagl) \
+ ((_mgr)->gcm128_enc((_key), (_ctx), (_out), (_in), (_len), (_iv), \
+ (_aad), (_aadl), (_tag), (_tagl)))
+#define IMB_AES192_GCM_ENC(_mgr, _key, _ctx, _out, _in, _len, _iv, _aad, _aadl,\
+ _tag, _tagl) \
+ ((_mgr)->gcm192_enc((_key), (_ctx), (_out), (_in), (_len), (_iv), \
+ (_aad), (_aadl), (_tag), (_tagl)))
+#define IMB_AES256_GCM_ENC(_mgr, _key, _ctx, _out, _in, _len, _iv, _aad, _aadl,\
+ _tag, _tagl) \
+ ((_mgr)->gcm256_enc((_key), (_ctx), (_out), (_in), (_len), (_iv), \
+ (_aad), (_aadl), (_tag), (_tagl)))
+
+#define IMB_AES128_GCM_DEC(_mgr, _key, _ctx, _out, _in, _len, _iv, _aad, _aadl,\
+ _tag, _tagl) \
+ ((_mgr)->gcm128_dec((_key), (_ctx), (_out), (_in), (_len), (_iv), \
+ (_aad), (_aadl), (_tag), (_tagl)))
+#define IMB_AES192_GCM_DEC(_mgr, _key, _ctx, _out, _in, _len, _iv, _aad, _aadl,\
+ _tag, _tagl) \
+ ((_mgr)->gcm192_dec((_key), (_ctx), (_out), (_in), (_len), (_iv), \
+ (_aad), (_aadl), (_tag), (_tagl)))
+#define IMB_AES256_GCM_DEC(_mgr, _key, _ctx, _out, _in, _len, _iv, _aad, _aadl,\
+ _tag, _tagl) \
+ ((_mgr)->gcm256_dec((_key), (_ctx), (_out), (_in), (_len), (_iv), \
+ (_aad), (_aadl), (_tag), (_tagl)))
+
+#define IMB_AES128_GCM_INIT(_mgr, _key, _ctx, _iv, _aad, _aadl) \
+ ((_mgr)->gcm128_init((_key), (_ctx), (_iv), (_aad), (_aadl)))
+#define IMB_AES192_GCM_INIT(_mgr, _key, _ctx, _iv, _aad, _aadl) \
+ ((_mgr)->gcm192_init((_key), (_ctx), (_iv), (_aad), (_aadl)))
+#define IMB_AES256_GCM_INIT(_mgr, _key, _ctx, _iv, _aad, _aadl) \
+ ((_mgr)->gcm256_init((_key), (_ctx), (_iv), (_aad), (_aadl)))
+
+#define IMB_AES128_GCM_ENC_UPDATE(_mgr, _key, _ctx, _out, _in, _len) \
+ ((_mgr)->gcm128_enc_update((_key), (_ctx), (_out), (_in), (_len)))
+#define IMB_AES192_GCM_ENC_UPDATE(_mgr, _key, _ctx, _out, _in, _len) \
+ ((_mgr)->gcm192_enc_update((_key), (_ctx), (_out), (_in), (_len)))
+#define IMB_AES256_GCM_ENC_UPDATE(_mgr, _key, _ctx, _out, _in, _len) \
+ ((_mgr)->gcm256_enc_update((_key), (_ctx), (_out), (_in), (_len)))
+
+#define IMB_AES128_GCM_DEC_UPDATE(_mgr, _key, _ctx, _out, _in, _len) \
+ ((_mgr)->gcm128_dec_update((_key), (_ctx), (_out), (_in), (_len)))
+#define IMB_AES192_GCM_DEC_UPDATE(_mgr, _key, _ctx, _out, _in, _len) \
+ ((_mgr)->gcm192_dec_update((_key), (_ctx), (_out), (_in), (_len)))
+#define IMB_AES256_GCM_DEC_UPDATE(_mgr, _key, _ctx, _out, _in, _len) \
+ ((_mgr)->gcm256_dec_update((_key), (_ctx), (_out), (_in), (_len)))
+
+#define IMB_AES128_GCM_ENC_FINALIZE(_mgr, _key, _ctx, _tag, _tagl) \
+ ((_mgr)->gcm128_enc_finalize((_key), (_ctx), (_tag), (_tagl)))
+#define IMB_AES192_GCM_ENC_FINALIZE(_mgr, _key, _ctx, _tag, _tagl) \
+ ((_mgr)->gcm192_enc_finalize((_key), (_ctx), (_tag), (_tagl)))
+#define IMB_AES256_GCM_ENC_FINALIZE(_mgr, _key, _ctx, _tag, _tagl) \
+ ((_mgr)->gcm256_enc_finalize((_key), (_ctx), (_tag), (_tagl)))
+
+#define IMB_AES128_GCM_DEC_FINALIZE(_mgr, _key, _ctx, _tag, _tagl) \
+ ((_mgr)->gcm128_dec_finalize((_key), (_ctx), (_tag), (_tagl)))
+#define IMB_AES192_GCM_DEC_FINALIZE(_mgr, _key, _ctx, _tag, _tagl) \
+ ((_mgr)->gcm192_dec_finalize((_key), (_ctx), (_tag), (_tagl)))
+#define IMB_AES256_GCM_DEC_FINALIZE(_mgr, _key, _ctx, _tag, _tagl) \
+ ((_mgr)->gcm256_dec_finalize((_key), (_ctx), (_tag), (_tagl)))
+
+#define IMB_AES128_GCM_PRECOMP(_mgr, _key) \
+ ((_mgr)->gcm128_precomp((_key)))
+#define IMB_AES192_GCM_PRECOMP(_mgr, _key) \
+ ((_mgr)->gcm192_precomp((_key)))
+#define IMB_AES256_GCM_PRECOMP(_mgr, _key) \
+ ((_mgr)->gcm256_precomp((_key)))
+
+#define IMB_AES128_GCM_PRE(_mgr, _key_in, _key_exp) \
+ ((_mgr)->gcm128_pre((_key_in), (_key_exp)))
+#define IMB_AES192_GCM_PRE(_mgr, _key_in, _key_exp) \
+ ((_mgr)->gcm192_pre((_key_in), (_key_exp)))
+#define IMB_AES256_GCM_PRE(_mgr, _key_in, _key_exp) \
+ ((_mgr)->gcm256_pre((_key_in), (_key_exp)))
+
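+/*
+ * Example (sketch): AES-128-GCM encryption with the init/update/finalize
+ * API. 'key', 'iv', 'aad', 'aad_len', 'plain', 'cipher' and 'len' are
+ * application placeholders; 'iv' is GCM_IV_DATA_LEN (12) bytes long. The
+ * same operation can be done in one call with IMB_AES128_GCM_ENC().
+ *
+ *     struct gcm_key_data key_data;
+ *     struct gcm_context_data ctx;
+ *     uint8_t tag[MAX_TAG_LEN];
+ *
+ *     IMB_AES128_GCM_PRE(mgr, key, &key_data);
+ *     IMB_AES128_GCM_INIT(mgr, &key_data, &ctx, iv, aad, aad_len);
+ *     IMB_AES128_GCM_ENC_UPDATE(mgr, &key_data, &ctx, cipher, plain, len);
+ *     IMB_AES128_GCM_ENC_FINALIZE(mgr, &key_data, &ctx, tag, sizeof(tag));
+ */
+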
+/* ZUC EEA3/EIA3 functions */
+
+/**
+ * @brief ZUC EEA3 Confidentiality functions
+ *
+ * @param mgr Pointer to multi-buffer structure
+ * @param key Pointer to key
+ * @param iv Pointer to 16-byte IV
+ * @param in Pointer to Plaintext/Ciphertext input.
+ * @param out Pointer to Ciphertext/Plaintext output.
+ * @param len Length of input data in bytes.
+ */
+#define IMB_ZUC_EEA3_1_BUFFER(_mgr, _key, _iv, _in, _out, _len) \
+ ((_mgr)->eea3_1_buffer((_key), (_iv), (_in), (_out), (_len)))
+#define IMB_ZUC_EEA3_4_BUFFER(_mgr, _key, _iv, _in, _out, _len) \
+ ((_mgr)->eea3_4_buffer((_key), (_iv), (_in), (_out), (_len)))
+#define IMB_ZUC_EEA3_N_BUFFER(_mgr, _key, _iv, _in, _out, _len, _num) \
+ ((_mgr)->eea3_n_buffer((_key), (_iv), (_in), (_out), (_len), (_num)))
+
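+/*
+ * Example (sketch): ZUC-EEA3 on a single buffer. 'key' is assumed to be a
+ * 16-byte confidentiality key and 'iv' the 16-byte IV constructed by the
+ * application; 'in', 'out' and 'len_in_bytes' are placeholders.
+ *
+ *     IMB_ZUC_EEA3_1_BUFFER(mgr, key, iv, in, out, len_in_bytes);
+ */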
+
+/**
+ * @brief ZUC EIA3 Integrity function
+ *
+ * @param mgr Pointer to multi-buffer structure
+ * @param key Pointer to key
+ * @param iv Pointer to 16-byte IV
+ * @param in Pointer to Plaintext/Ciphertext input.
+ * @param len Length of input data in bits.
+ * @param tag Pointer to Authenticated Tag output (4 bytes)
+ */
+#define IMB_ZUC_EIA3_1_BUFFER(_mgr, _key, _iv, _in, _len, _tag) \
+ ((_mgr)->eia3_1_buffer((_key), (_iv), (_in), (_len), (_tag)))
+
+
+/* KASUMI F8/F9 functions */
+
+/**
+ * @brief Kasumi byte-level f8 operation on a single buffer
+ *
+ * This function performs kasumi f8 operation on a single buffer. The key has
+ * already been scheduled with kasumi_init_f8_key_sched().
+ * No extra bits are modified.
+ *
+ * @param [in] ctx Context where the scheduled keys are stored
+ * @param [in] iv Initialization vector
+ * @param [in] in Input buffer
+ * @param [out] out Output buffer
+ * @param [in] len Length in BYTES
+ *
+ ******************************************************************************/
+#define IMB_KASUMI_F8_1_BUFFER(_mgr, _ctx, _iv, _in, _out, _len) \
+ ((_mgr)->f8_1_buffer((_ctx), (_iv), (_in), (_out), (_len)))
+
+/**
+ * @brief Kasumi bit-level f8 operation on a single buffer
+ *
+ * This function performs kasumi f8 operation on a single buffer. The key has
+ * already been scheduled with kasumi_init_f8_key_sched().
+ * No extra bits are modified.
+ *
+ * @param [in] ctx Context where the scheduled keys are stored
+ * @param [in] iv Initialization vector
+ * @param [in] in Input buffer
+ * @param [out] out Output buffer
+ * @param [in] len Length in BITS
+ * @param [in] offset Offset in BITS from begin of input buffer
+ *
+ ******************************************************************************/
+#define IMB_KASUMI_F8_1_BUFFER_BIT(_mgr, _ctx, _iv, _in, _out, _len, _offset) \
+ ((_mgr)->f8_1_buffer_bit((_ctx), (_iv), (_in), (_out), (_len), \
+ (_offset)))
+
+/**
+ * @brief Kasumi byte-level f8 operation in parallel on two buffers
+ *
+ * This function performs kasumi f8 operation on two buffers.
+ * They will be processed with the same key, which has already been scheduled
+ * with kasumi_init_f8_key_sched().
+ *
+ * @param [in] ctx Context where the scheduled keys are stored
+ * @param [in] iv1 Initialization vector for buffer in1
+ * @param [in] iv2 Initialization vector for buffer in2
+ * @param [in] in1 Input buffer 1
+ * @param [out] out1 Output buffer 1
+ * @param [in] len1 Length in BYTES of input buffer 1
+ * @param [in] in2 Input buffer 2
+ * @param [out] out2 Output buffer 2
+ * @param [in] len2 Length in BYTES of input buffer 2
+ *
+ ******************************************************************************/
+#define IMB_KASUMI_F8_2_BUFFER(_mgr, _ctx, _iv1, _iv2, _in1, _out1, _len1, \
+ _in2, _out2, _len2) \
+ ((_mgr)->f8_2_buffer((_ctx), (_iv1), (_iv2), (_in1), (_out1), (_len1), \
+ (_in2), (_out2), (_len2)))
+/**
+ * @brief kasumi byte-level f8 operation in parallel on three buffers
+ *
+ * This function performs kasumi f8 operation on three buffers.
+ * They must all have the same length and they will be processed with the same
+ * key, which has already been scheduled with kasumi_init_f8_key_sched().
+ *
+ * @param [in] ctx Context where the scheduled keys are stored
+ * @param [in] iv1 Initialization vector for buffer in1
+ * @param [in] iv2 Initialization vector for buffer in2
+ * @param [in] iv3 Initialization vector for buffer in3
+ * @param [in] in1 Input buffer 1
+ * @param [out] out1 Output buffer 1
+ * @param [in] in2 Input buffer 2
+ * @param [out] out2 Output buffer 2
+ * @param [in] in3 Input buffer 3
+ * @param [out] out3 Output buffer 3
+ * @param [in] len Common length in bytes for all buffers
+ *
+ ******************************************************************************/
+#define IMB_KASUMI_F8_3_BUFFER(_mgr, _ctx, _iv1, _iv2, _iv3, _in1, _out1, \
+ _in2, _out2, _in3, _out3, _len) \
+ ((_mgr)->f8_3_buffer((_ctx), (_iv1), (_iv2), (_iv3), (_in1), (_out1), \
+ (_in2), (_out2), (_in3), (_out3), (_len)))
+/**
+ * @brief kasumi byte-level f8 operation in parallel on four buffers
+ *
+ * This function performs kasumi f8 operation on four buffers.
+ * They must all have the same length and they will be processed with the same
+ * key, which has already been scheduled with kasumi_init_f8_key_sched().
+ *
+ * @param [in] ctx Context where the scheduled keys are stored
+ * @param [in] iv1 Initialization vector for buffer in1
+ * @param [in] iv2 Initialization vector for buffer in2
+ * @param [in] iv3 Initialization vector for buffer in3
+ * @param [in] iv4 Initialization vector for buffer in4
+ * @param [in] in1 Input buffer 1
+ * @param [out] out1 Output buffer 1
+ * @param [in] in2 Input buffer 2
+ * @param [out] out2 Output buffer 2
+ * @param [in] in3 Input buffer 3
+ * @param [out] out3 Output buffer 3
+ * @param [in] in4 Input buffer 4
+ * @param [out] out4 Output buffer 4
+ * @param [in] len Common length in bytes for all buffers
+ *
+ ******************************************************************************/
+#define IMB_KASUMI_F8_4_BUFFER(_mgr, _ctx, _iv1, _iv2, _iv3, _iv4, \
+ _in1, _out1, _in2, _out2, _in3, _out3, \
+ _in4, _out4, _len) \
+ ((_mgr)->f8_4_buffer((_ctx), (_iv1), (_iv2), (_iv3), (_iv4), \
+ (_in1), (_out1), (_in2), (_out2), \
+ (_in3), (_out3), (_in4), (_out4), (_len)))
+/**
+ * @brief Kasumi f8 operation on N buffers
+ *
+ * All input buffers can have different lengths and they will be processed
+ * with the same key, which has already been scheduled
+ * with kasumi_init_f8_key_sched().
+ *
+ * @param [in] ctx Context where the scheduled keys are stored
+ * @param [in] iv Array of IV values
+ * @param [in] in Array of input buffers
+ * @param [out] out Array of output buffers
+ * @param [in] len Array of corresponding input buffer lengths in BITS
+ * @param [in] count Number of input buffers
+ */
+#define IMB_KASUMI_F8_N_BUFFER(_mgr, _ctx, _iv, _in, _out, _len, _count) \
+ ((_mgr)->f8_n_buffer((_ctx), (_iv), (_in), (_out), (_len), \
+ (_count)))
+/**
+ * @brief Kasumi bit-level f9 operation on a single buffer.
+ *
+ * The first QWORD of in represents the COUNT and FRESH, the last QWORD
+ * represents the DIRECTION and PADDING. (See 3GPP TS 35.201 v10.0 section 4)
+ *
+ * The key has already been scheduled with kasumi_init_f9_key_sched().
+ *
+ * @param [in] ctx Context where the scheduled keys are stored
+ * @param [in] in Input buffer
+ * @param [in] len Length in BYTES of the data to be hashed
+ * @param [out] tag Computed digest
+ *
+ */
+#define IMB_KASUMI_F9_1_BUFFER(_mgr, _ctx, _in, _len, _tag) \
+ ((_mgr)->f9_1_buffer((_ctx), (_in), (_len), (_tag)))
+
+/**
+ * @brief Kasumi bit-level f9 operation on a single buffer.
+ *
+ * The key has already been scheduled with kasumi_init_f9_key_sched().
+ *
+ * @param [in] ctx Context where the scheduled keys are stored
+ * @param [in] iv Initialization vector
+ * @param [in] in Input buffer
+ * @param [in] len Length in BITS of the data to be hashed
+ * @param [out] tag Computed digest
+ * @param [in] dir Direction bit
+ *
+ */
+#define IMB_KASUMI_F9_1_BUFFER_USER(_mgr, _ctx, _iv, _in, _len, _tag, _dir) \
+ ((_mgr)->f9_1_buffer_user((_ctx), (_iv), (_in), (_len), \
+ (_tag), (_dir)))
+
+/**
+ * KASUMI F8 key schedule init function.
+ *
+ * @param[in] mgr Pointer to multi-buffer structure
+ * @param[in] key Confidentiality key (expected in LE format)
+ * @param[out] ctx Key schedule context to be initialised
+ * @return 0 on success, -1 on failure
+ *
+ ******************************************************************************/
+#define IMB_KASUMI_INIT_F8_KEY_SCHED(_mgr, _key, _ctx) \
+ ((_mgr)->kasumi_init_f8_key_sched((_key), (_ctx)))
+
+/**
+ * KASUMI F9 key schedule init function.
+ *
+ * @param[in] mgr Pointer to multi-buffer structure
+ * @param[in] key Integrity key (expected in LE format)
+ * @param[out] ctx Key schedule context to be initialised
+ * @return 0 on success, -1 on failure
+ *
+ ******************************************************************************/
+#define IMB_KASUMI_INIT_F9_KEY_SCHED(_mgr, _key, _ctx) \
+ ((_mgr)->kasumi_init_f9_key_sched((_key), (_ctx)))
+
+/**
+ *******************************************************************************
+ * This function returns the size of the kasumi_key_sched_t, used
+ * to store the key schedule.
+ *
+ * @param[in] mgr Pointer to multi-buffer structure
+ * @return Size of the kasumi_key_sched_t type
+ *
+ ******************************************************************************/
+#define IMB_KASUMI_KEY_SCHED_SIZE(_mgr)((_mgr)->kasumi_key_sched_size())
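+
+/*
+ * Example (sketch): KASUMI F8 on a single buffer. 'key' is assumed to be a
+ * 16-byte confidentiality key; 'iv' (a 64-bit value), 'in', 'out' and
+ * 'len_in_bytes' are application placeholders.
+ *
+ *     kasumi_key_sched_t ks;
+ *
+ *     if (IMB_KASUMI_INIT_F8_KEY_SCHED(mgr, key, &ks) != 0)
+ *             return -1;
+ *     IMB_KASUMI_F8_1_BUFFER(mgr, &ks, iv, in, out, len_in_bytes);
+ */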
+
+
+/* SNOW3G F8/F9 functions */
+
+/**
+ * This function performs snow3g f8 operation on a single buffer. The key has
+ * already been scheduled with snow3g_init_key_sched().
+ *
+ * @param[in] mgr Pointer to multi-buffer structure
+ * @param[in] ctx Context where the scheduled keys are stored
+ * @param[in] iv iv[3] = count
+ * iv[2] = (bearer << 27) | ((dir & 0x1) << 26)
+ * iv[1] = pIV[3]
+ * iv[0] = pIV[2]
+ * @param[in] in Input buffer
+ * @param[out] out Output buffer
+ * @param[in] len Length in bits of input buffer
+ * @param[in] offset Offset in input/output buffer (in bits)
+ */
+#define IMB_SNOW3G_F8_1_BUFFER_BIT(_mgr, _ctx, _iv, _in, _out, _len, _offset) \
+ ((_mgr)->snow3g_f8_1_buffer_bit((_ctx), (_iv), (_in), \
+ (_out), (_len), (_offset)))
+
+/**
+ * This function performs snow3g f8 operation on a single buffer. The key has
+ * already been scheduled with snow3g_init_key_sched().
+ *
+ * @param[in] mgr Pointer to multi-buffer structure
+ * @param[in] ctx Context where the scheduled keys are stored
+ * @param[in] iv iv[3] = count
+ * iv[2] = (bearer << 27) | ((dir & 0x1) << 26)
+ * iv[1] = pIV[3]
+ * iv[0] = pIV[2]
+ * @param[in] in Input buffer
+ * @param[out] out Output buffer
+ * @param[in] len Length in bits of input buffer
+ */
+#define IMB_SNOW3G_F8_1_BUFFER(_mgr, _ctx, _iv, _in, _out, _len) \
+ ((_mgr)->snow3g_f8_1_buffer((_ctx), (_iv), (_in), (_out), (_len)))
+
+/**
+ * This function performs snow3g f8 operation on two buffers. They will
+ * be processed with the same key, which has already been scheduled with
+ * snow3g_init_key_sched().
+ *
+ * @param[in] mgr Pointer to multi-buffer structure
+ * @param[in] ctx Context where the scheduled keys are stored
+ * @param[in] iv1 IV to use for buffer pBufferIn1
+ * @param[in] iv2 IV to use for buffer pBufferIn2
+ * @param[in] in1 Input buffer 1
+ * @param[out] out1 Output buffer 1
+ * @param[in] len1 Length in bytes of input buffer 1
+ * @param[in] in2 Input buffer 2
+ * @param[out] out2 Output buffer 2
+ * @param[in] len2 Length in bytes of input buffer 2
+ */
+#define IMB_SNOW3G_F8_2_BUFFER(_mgr, _ctx, _iv1, _iv2, \
+ _in1, _out1, _len1, \
+ _in2, _out2, _len2) \
+ ((_mgr)->snow3g_f8_2_buffer((_ctx), (_iv1), (_iv2), \
+ (_in1), (_out1), (_len1), \
+ (_in2), (_out2), (_len2)))
+
+/**
+ *******************************************************************************
+ * This function performs snow3g f8 operation on four buffers. They will
+ * be processed with the same key, which has already been scheduled with
+ * snow3g_init_key_sched().
+ *
+ * @param[in] mgr Pointer to multi-buffer structure
+ * @param[in] ctx Context where the scheduled keys are stored
+ * @param[in] iv1 IV to use for buffer pBufferIn1
+ * @param[in] iv2 IV to use for buffer pBufferIn2
+ * @param[in] iv3 IV to use for buffer pBufferIn3
+ * @param[in] iv4 IV to use for buffer pBufferIn4
+ * @param[in] in1 Input buffer 1
+ * @param[out] out1 Output buffer 1
+ * @param[in] len1 Length in bytes of input buffer 1
+ * @param[in] in2 Input buffer 2
+ * @param[out] out2 Output buffer 2
+ * @param[in] len2 Length in bytes of input buffer 2
+ * @param[in] in3 Input buffer 3
+ * @param[out] out3 Output buffer 3
+ * @param[in] len3 Length in bytes of input buffer 3
+ * @param[in] in4 Input buffer 4
+ * @param[out] out4 Output buffer 4
+ * @param[in] len4 Length in bytes of input buffer 4
+ */
+#define IMB_SNOW3G_F8_4_BUFFER(_mgr, _ctx, _iv1, _iv2, _iv3, _iv4, \
+ _in1, _out1, _len1, \
+ _in2, _out2, _len2, \
+ _in3, _out3, _len3, \
+ _in4, _out4, _len4) \
+ ((_mgr)->snow3g_f8_4_buffer((_ctx), (_iv1), (_iv2), (_iv3), (_iv4), \
+ (_in1), (_out1), (_len1), \
+ (_in2), (_out2), (_len2), \
+ (_in3), (_out3), (_len3), \
+ (_in4), (_out4), (_len4)))
+
+/**
+ *******************************************************************************
+ * This function performs snow3g f8 operation on eight buffers. They will
+ * be processed with the same key, which has already been scheduled with
+ * snow3g_init_key_sched().
+ *
+ * @param[in] mgr Pointer to multi-buffer structure
+ * @param[in] ctx Context where the scheduled keys are stored
+ * @param[in] iv1 IV to use for buffer pBufferIn1
+ * @param[in] iv2 IV to use for buffer pBufferIn2
+ * @param[in] iv3 IV to use for buffer pBufferIn3
+ * @param[in] iv4 IV to use for buffer pBufferIn4
+ * @param[in] iv5 IV to use for buffer pBufferIn5
+ * @param[in] iv6 IV to use for buffer pBufferIn6
+ * @param[in] iv7 IV to use for buffer pBufferIn7
+ * @param[in] iv8 IV to use for buffer pBufferIn8
+ * @param[in] in1 Input buffer 1
+ * @param[out] out1 Output buffer 1
+ * @param[in] len1 Length in bytes of input buffer 1
+ * @param[in] in2 Input buffer 2
+ * @param[out] out2 Output buffer 2
+ * @param[in] len2 Length in bytes of input buffer 2
+ * @param[in] in3 Input buffer 3
+ * @param[out] out3 Output buffer 3
+ * @param[in] len3 Length in bytes of input buffer 3
+ * @param[in] in4 Input buffer 4
+ * @param[out] out4 Output buffer 4
+ * @param[in] len4 Length in bytes of input buffer 4
+ * @param[in] in5 Input buffer 5
+ * @param[out] out5 Output buffer 5
+ * @param[in] len5 Length in bytes of input buffer 5
+ * @param[in] in6 Input buffer 6
+ * @param[out] out6 Output buffer 6
+ * @param[in] len6 Length in bytes of input buffer 6
+ * @param[in] in7 Input buffer 7
+ * @param[out] out7 Output buffer 7
+ * @param[in] len7 Length in bytes of input buffer 7
+ * @param[in] in8 Input buffer 8
+ * @param[out] out8 Output buffer 8
+ * @param[in] len8 Length in bytes of input buffer 8
+ */
+#define IMB_SNOW3G_F8_8_BUFFER(_mgr, _ctx, _iv1, _iv2, _iv3, _iv4, \
+ _iv5, _iv6, _iv7, _iv8, \
+ _in1, _out1, _len1, \
+ _in2, _out2, _len2, \
+ _in3, _out3, _len3, \
+ _in4, _out4, _len4, \
+ _in5, _out5, _len5, \
+ _in6, _out6, _len6, \
+ _in7, _out7, _len7, \
+ _in8, _out8, _len8) \
+ ((_mgr)->snow3g_f8_8_buffer((_ctx), (_iv1), (_iv2), (_iv3), (_iv4), \
+ (_iv5), (_iv6), (_iv7), (_iv8), \
+ (_in1), (_out1), (_len1), \
+ (_in2), (_out2), (_len2), \
+ (_in3), (_out3), (_len3), \
+ (_in4), (_out4), (_len4), \
+ (_in5), (_out5), (_len5), \
+ (_in6), (_out6), (_len6), \
+ (_in7), (_out7), (_len7), \
+ (_in8), (_out8), (_len8)))
+/**
+ * This function performs snow3g f8 operation on eight buffers. They will
+ * be processed with individual keys, which have already been scheduled
+ * with snow3g_init_key_sched().
+ *
+ * @param[in] mgr Pointer to multi-buffer structure
+ * @param[in] ctx Array of 8 Contexts, where the scheduled keys are stored
+ * @param[in] iv Array of 8 IV values
+ * @param[in] in Array of 8 input buffers
+ * @param[out] out Array of 8 output buffers
+ * @param[in] lens Array of 8 corresponding input buffer lengths
+ */
+#define IMB_SNOW3G_F8_8_BUFFER_MULTIKEY(_mgr, _ctx, _iv, _in, _out, _len) \
+ ((_mgr)->snow3g_f8_8_buffer_multikey((_ctx), (_iv), (_in), (_out),\
+ (_len)))
+
+/**
+ * This function performs snow3g f8 operation in parallel on N buffers. All
+ * input buffers can have different lengths and they will be processed with the
+ * same key, which has already been scheduled with snow3g_init_key_sched().
+ *
+ * @param[in] mgr Pointer to multi-buffer structure
+ * @param[in] ctx Context where the scheduled keys are stored
+ * @param[in] iv Array of IV values
+ * @param[in] in Array of input buffers
+ * @param[out] out Array of output buffers - out[0] set to NULL on failure
+ * @param[in] len Array of corresponding input buffer lengths
+ * @param[in] count Number of input buffers
+ *
+ ******************************************************************************/
+#define IMB_SNOW3G_F8_N_BUFFER(_mgr, _ctx, _iv, _in, _out, _len, _count) \
+ ((_mgr)->snow3g_f8_n_buffer((_ctx), (_iv), (_in), \
+ (_out), (_len), (_count)))
+
+/**
+ * This function performs snow3g f8 operation in parallel on N buffers. All
+ * input buffers can have different lengths. Confidentiality keys can vary;
+ * each has already been scheduled with snow3g_init_key_sched_multi().
+ *
+ * @param[in] mgr Pointer to multi-buffer structure
+ * @param[in] ctx Array of Contexts, where the scheduled keys are stored
+ * @param[in] iv Array of IV values
+ * @param[in] in Array of input buffers
+ * @param[out] out Array of output buffers
+ * - out[0] set to NULL on failure
+ * @param[in] len Array of corresponding input buffer lengths
+ * @param[in] count Number of input buffers
+ */
+#define IMB_SNOW3G_F8_N_BUFFER_MULTIKEY(_mgr, _ctx, _iv, _in, \
+ _out, _len, _count) \
+ ((_mgr)->snow3g_f8_n_buffer_multikey((_ctx), (_iv), (_in), \
+ (_out), (_len), (_count)))
+
+/**
+ * This function performs a snow3g f9 operation on a single block of data. The
+ * key has already been scheduled with snow3g_init_key_sched().
+ *
+ * @param[in] mgr Pointer to multi-buffer structure
+ * @param[in] ctx Context where the scheduled keys are stored
+ * @param[in] iv iv[3] = _BSWAP32(fresh^(dir<<15))
+ * iv[2] = _BSWAP32(count^(dir<<31))
+ * iv[1] = _BSWAP32(fresh)
+ * iv[0] = _BSWAP32(count)
+ *
+ * @param[in] in Input buffer
+ * @param[in] len Length in bits of the data to be hashed
+ * @param[out] digest Computed digest
+ */
+#define IMB_SNOW3G_F9_1_BUFFER(_mgr, _ctx, _iv, _in, _len, _digest) \
+ ((_mgr)->snow3g_f9_1_buffer((_ctx), (_iv), (_in), (_len), (_digest)))
+
+/**
+ * Snow3g key schedule init function.
+ *
+ * @param[in] mgr Pointer to multi-buffer structure
+ * @param[in] key Confidentiality/Integrity key (expected in LE format)
+ * @param[out] ctx Key schedule context to be initialised
+ * @return 0 on success
+ * @return -1 on error
+ *
+ ******************************************************************************/
+#define IMB_SNOW3G_INIT_KEY_SCHED(_mgr, _key, _ctx) \
+ ((_mgr)->snow3g_init_key_sched((_key), (_ctx)))
+
+/**
+ *******************************************************************************
+ * This function returns the size of the snow3g_key_schedule_t, used
+ * to store the key schedule.
+ *
+ * @param[in] mgr Pointer to multi-buffer structure
+ * @return size of snow3g_key_schedule_t type
+ *
+ ******************************************************************************/
+#define IMB_SNOW3G_KEY_SCHED_SIZE(_mgr)((_mgr)->snow3g_key_sched_size())
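+
+/*
+ * Example (sketch): SNOW3G F8 on a single buffer. 'key' is assumed to be a
+ * 16-byte confidentiality key; 'iv' (laid out as described above), 'in',
+ * 'out' and 'len' are application placeholders.
+ *
+ *     snow3g_key_schedule_t ks;
+ *
+ *     if (IMB_SNOW3G_INIT_KEY_SCHED(mgr, key, &ks) != 0)
+ *             return -1;
+ *     IMB_SNOW3G_F8_1_BUFFER(mgr, &ks, iv, in, out, len);
+ */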
+
+
+/* Auxiliary functions */
+
+/**
+ * @brief DES key schedule set up
+ *
+ * \a ks buffer needs to accommodate \a DES_KEY_SCHED_SIZE (128) bytes of data.
+ *
+ * @param ks destination buffer to accommodate the DES key schedule
+ * @param key a pointer to an 8 byte DES key
+ *
+ * @return Operation status
+ * @retval 0 success
+ * @retval !0 error
+ */
+IMB_DLL_EXPORT int
+des_key_schedule(uint64_t *ks, const void *key);
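+
+/*
+ * Example (sketch): DES_KEY_SCHED_SIZE (128) bytes equals 16 uint64_t
+ * entries; 'des_key' is an application-provided 8 byte key.
+ *
+ *     uint64_t ks[16];
+ *
+ *     if (des_key_schedule(ks, des_key) != 0)
+ *             return -1;
+ */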
+
+/* SSE */
+IMB_DLL_EXPORT void sha1_sse(const void *data, const uint64_t length,
+ void *digest);
+IMB_DLL_EXPORT void sha1_one_block_sse(const void *data, void *digest);
+IMB_DLL_EXPORT void sha224_sse(const void *data, const uint64_t length,
+ void *digest);
+IMB_DLL_EXPORT void sha224_one_block_sse(const void *data, void *digest);
+IMB_DLL_EXPORT void sha256_sse(const void *data, const uint64_t length,
+ void *digest);
+IMB_DLL_EXPORT void sha256_one_block_sse(const void *data, void *digest);
+IMB_DLL_EXPORT void sha384_sse(const void *data, const uint64_t length,
+ void *digest);
+IMB_DLL_EXPORT void sha384_one_block_sse(const void *data, void *digest);
+IMB_DLL_EXPORT void sha512_sse(const void *data, const uint64_t length,
+ void *digest);
+IMB_DLL_EXPORT void sha512_one_block_sse(const void *data, void *digest);
+IMB_DLL_EXPORT void md5_one_block_sse(const void *data, void *digest);
+IMB_DLL_EXPORT void aes_keyexp_128_sse(const void *key, void *enc_exp_keys,
+ void *dec_exp_keys);
+IMB_DLL_EXPORT void aes_keyexp_192_sse(const void *key, void *enc_exp_keys,
+ void *dec_exp_keys);
+IMB_DLL_EXPORT void aes_keyexp_256_sse(const void *key, void *enc_exp_keys,
+ void *dec_exp_keys);
+IMB_DLL_EXPORT void aes_xcbc_expand_key_sse(const void *key, void *k1_exp,
+ void *k2, void *k3);
+IMB_DLL_EXPORT void aes_keyexp_128_enc_sse(const void *key,
+ void *enc_exp_keys);
+IMB_DLL_EXPORT void aes_keyexp_192_enc_sse(const void *key,
+ void *enc_exp_keys);
+IMB_DLL_EXPORT void aes_keyexp_256_enc_sse(const void *key,
+ void *enc_exp_keys);
+IMB_DLL_EXPORT void aes_cmac_subkey_gen_sse(const void *key_exp, void *key1,
+ void *key2);
+IMB_DLL_EXPORT void aes_cfb_128_one_sse(void *out, const void *in,
+ const void *iv, const void *keys,
+ uint64_t len);
+
+/* AVX */
+IMB_DLL_EXPORT void sha1_avx(const void *data, const uint64_t length,
+ void *digest);
+IMB_DLL_EXPORT void sha1_one_block_avx(const void *data, void *digest);
+IMB_DLL_EXPORT void sha224_avx(const void *data, const uint64_t length,
+ void *digest);
+IMB_DLL_EXPORT void sha224_one_block_avx(const void *data, void *digest);
+IMB_DLL_EXPORT void sha256_avx(const void *data, const uint64_t length,
+ void *digest);
+IMB_DLL_EXPORT void sha256_one_block_avx(const void *data, void *digest);
+IMB_DLL_EXPORT void sha384_avx(const void *data, const uint64_t length,
+ void *digest);
+IMB_DLL_EXPORT void sha384_one_block_avx(const void *data, void *digest);
+IMB_DLL_EXPORT void sha512_avx(const void *data, const uint64_t length,
+ void *digest);
+IMB_DLL_EXPORT void sha512_one_block_avx(const void *data, void *digest);
+IMB_DLL_EXPORT void md5_one_block_avx(const void *data, void *digest);
+IMB_DLL_EXPORT void aes_keyexp_128_avx(const void *key, void *enc_exp_keys,
+ void *dec_exp_keys);
+IMB_DLL_EXPORT void aes_keyexp_192_avx(const void *key, void *enc_exp_keys,
+ void *dec_exp_keys);
+IMB_DLL_EXPORT void aes_keyexp_256_avx(const void *key, void *enc_exp_keys,
+ void *dec_exp_keys);
+IMB_DLL_EXPORT void aes_xcbc_expand_key_avx(const void *key, void *k1_exp,
+ void *k2, void *k3);
+IMB_DLL_EXPORT void aes_keyexp_128_enc_avx(const void *key,
+ void *enc_exp_keys);
+IMB_DLL_EXPORT void aes_keyexp_192_enc_avx(const void *key,
+ void *enc_exp_keys);
+IMB_DLL_EXPORT void aes_keyexp_256_enc_avx(const void *key,
+ void *enc_exp_keys);
+IMB_DLL_EXPORT void aes_cmac_subkey_gen_avx(const void *key_exp, void *key1,
+ void *key2);
+IMB_DLL_EXPORT void aes_cfb_128_one_avx(void *out, const void *in,
+ const void *iv, const void *keys,
+ uint64_t len);
+
+/* AVX2 */
+IMB_DLL_EXPORT void sha1_avx2(const void *data, const uint64_t length,
+ void *digest);
+IMB_DLL_EXPORT void sha1_one_block_avx2(const void *data, void *digest);
+IMB_DLL_EXPORT void sha224_avx2(const void *data, const uint64_t length,
+ void *digest);
+IMB_DLL_EXPORT void sha224_one_block_avx2(const void *data, void *digest);
+IMB_DLL_EXPORT void sha256_avx2(const void *data, const uint64_t length,
+ void *digest);
+IMB_DLL_EXPORT void sha256_one_block_avx2(const void *data, void *digest);
+IMB_DLL_EXPORT void sha384_avx2(const void *data, const uint64_t length,
+ void *digest);
+IMB_DLL_EXPORT void sha384_one_block_avx2(const void *data, void *digest);
+IMB_DLL_EXPORT void sha512_avx2(const void *data, const uint64_t length,
+ void *digest);
+IMB_DLL_EXPORT void sha512_one_block_avx2(const void *data, void *digest);
+IMB_DLL_EXPORT void md5_one_block_avx2(const void *data, void *digest);
+IMB_DLL_EXPORT void aes_keyexp_128_avx2(const void *key, void *enc_exp_keys,
+ void *dec_exp_keys);
+IMB_DLL_EXPORT void aes_keyexp_192_avx2(const void *key, void *enc_exp_keys,
+ void *dec_exp_keys);
+IMB_DLL_EXPORT void aes_keyexp_256_avx2(const void *key, void *enc_exp_keys,
+ void *dec_exp_keys);
+IMB_DLL_EXPORT void aes_xcbc_expand_key_avx2(const void *key, void *k1_exp,
+ void *k2, void *k3);
+IMB_DLL_EXPORT void aes_keyexp_128_enc_avx2(const void *key,
+ void *enc_exp_keys);
+IMB_DLL_EXPORT void aes_keyexp_192_enc_avx2(const void *key,
+ void *enc_exp_keys);
+IMB_DLL_EXPORT void aes_keyexp_256_enc_avx2(const void *key,
+ void *enc_exp_keys);
+IMB_DLL_EXPORT void aes_cmac_subkey_gen_avx2(const void *key_exp, void *key1,
+ void *key2);
+IMB_DLL_EXPORT void aes_cfb_128_one_avx2(void *out, const void *in,
+ const void *iv, const void *keys,
+ uint64_t len);
+
+/* AVX512 */
+IMB_DLL_EXPORT void sha1_avx512(const void *data, const uint64_t length,
+ void *digest);
+IMB_DLL_EXPORT void sha1_one_block_avx512(const void *data, void *digest);
+IMB_DLL_EXPORT void sha224_avx512(const void *data, const uint64_t length,
+ void *digest);
+IMB_DLL_EXPORT void sha224_one_block_avx512(const void *data, void *digest);
+IMB_DLL_EXPORT void sha256_avx512(const void *data, const uint64_t length,
+ void *digest);
+IMB_DLL_EXPORT void sha256_one_block_avx512(const void *data, void *digest);
+IMB_DLL_EXPORT void sha384_avx512(const void *data, const uint64_t length,
+ void *digest);
+IMB_DLL_EXPORT void sha384_one_block_avx512(const void *data, void *digest);
+IMB_DLL_EXPORT void sha512_avx512(const void *data, const uint64_t length,
+ void *digest);
+IMB_DLL_EXPORT void sha512_one_block_avx512(const void *data, void *digest);
+IMB_DLL_EXPORT void md5_one_block_avx512(const void *data, void *digest);
+IMB_DLL_EXPORT void aes_keyexp_128_avx512(const void *key, void *enc_exp_keys,
+ void *dec_exp_keys);
+IMB_DLL_EXPORT void aes_keyexp_192_avx512(const void *key, void *enc_exp_keys,
+ void *dec_exp_keys);
+IMB_DLL_EXPORT void aes_keyexp_256_avx512(const void *key, void *enc_exp_keys,
+ void *dec_exp_keys);
+IMB_DLL_EXPORT void aes_xcbc_expand_key_avx512(const void *key, void *k1_exp,
+ void *k2, void *k3);
+IMB_DLL_EXPORT void aes_keyexp_128_enc_avx512(const void *key,
+ void *enc_exp_keys);
+IMB_DLL_EXPORT void aes_keyexp_192_enc_avx512(const void *key,
+ void *enc_exp_keys);
+IMB_DLL_EXPORT void aes_keyexp_256_enc_avx512(const void *key,
+ void *enc_exp_keys);
+IMB_DLL_EXPORT void aes_cmac_subkey_gen_avx512(const void *key_exp, void *key1,
+ void *key2);
+IMB_DLL_EXPORT void aes_cfb_128_one_avx512(void *out, const void *in,
+ const void *iv, const void *keys,
+ uint64_t len);
+
+/*
+ * Direct GCM API.
+ * Note that GCM is also available through the job API.
+ */
+#ifndef NO_GCM
+/**
+ * @brief GCM-AES Encryption
+ *
+ * @param key_data GCM expanded key data
+ * @param context_data GCM operation context data
+ * @param out Ciphertext output. In-place encryption is allowed.
+ * @param in Plaintext input.
+ * @param len Length of data in Bytes for encryption.
+ * @param iv pointer to a 12-byte IV. Internally, the library
+ * concatenates the value 0x00000001 to it.
+ * @param aad Additional Authentication Data (AAD).
+ * @param aad_len Length of AAD.
+ * @param auth_tag Authenticated Tag output.
+ * @param auth_tag_len Authenticated Tag Length in bytes (must be
+ * a multiple of 4 bytes). Valid values are
+ * 16 (most likely), 12 or 8.
+ */
+IMB_DLL_EXPORT void
+aes_gcm_enc_128_sse(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *out, uint8_t const *in, uint64_t len,
+ const uint8_t *iv, uint8_t const *aad, uint64_t aad_len,
+ uint8_t *auth_tag, uint64_t auth_tag_len);
+IMB_DLL_EXPORT void
+aes_gcm_enc_128_avx_gen2(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *out, uint8_t const *in, uint64_t len,
+ const uint8_t *iv,
+ uint8_t const *aad, uint64_t aad_len,
+ uint8_t *auth_tag, uint64_t auth_tag_len);
+IMB_DLL_EXPORT void
+aes_gcm_enc_128_avx_gen4(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *out, uint8_t const *in, uint64_t len,
+ const uint8_t *iv,
+ uint8_t const *aad, uint64_t aad_len,
+ uint8_t *auth_tag, uint64_t auth_tag_len);
+
+IMB_DLL_EXPORT void
+aes_gcm_enc_192_sse(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *out, uint8_t const *in, uint64_t len,
+ const uint8_t *iv, uint8_t const *aad, uint64_t aad_len,
+ uint8_t *auth_tag, uint64_t auth_tag_len);
+IMB_DLL_EXPORT void
+aes_gcm_enc_192_avx_gen2(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *out, uint8_t const *in, uint64_t len,
+ const uint8_t *iv,
+ uint8_t const *aad, uint64_t aad_len,
+ uint8_t *auth_tag, uint64_t auth_tag_len);
+IMB_DLL_EXPORT void
+aes_gcm_enc_192_avx_gen4(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *out, uint8_t const *in, uint64_t len,
+ const uint8_t *iv,
+ uint8_t const *aad, uint64_t aad_len,
+ uint8_t *auth_tag, uint64_t auth_tag_len);
+
+IMB_DLL_EXPORT void
+aes_gcm_enc_256_sse(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *out, uint8_t const *in, uint64_t len,
+ const uint8_t *iv,
+ uint8_t const *aad, uint64_t aad_len,
+ uint8_t *auth_tag, uint64_t auth_tag_len);
+IMB_DLL_EXPORT void
+aes_gcm_enc_256_avx_gen2(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *out, uint8_t const *in, uint64_t len,
+ const uint8_t *iv,
+ uint8_t const *aad, uint64_t aad_len,
+ uint8_t *auth_tag, uint64_t auth_tag_len);
+IMB_DLL_EXPORT void
+aes_gcm_enc_256_avx_gen4(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *out, uint8_t const *in, uint64_t len,
+ const uint8_t *iv,
+ uint8_t const *aad, uint64_t aad_len,
+ uint8_t *auth_tag, uint64_t auth_tag_len);
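+
+/*
+ * Illustrative usage sketch (not part of the API): one-shot AES-GCM-128
+ * encryption with the SSE variant. The buffer names (app_key, plain,
+ * cipher, iv, aad) and their lengths are placeholders chosen for this
+ * example only.
+ *
+ *     struct gcm_key_data key_data;
+ *     struct gcm_context_data ctx;
+ *     uint8_t tag[16];
+ *
+ *     aes_gcm_pre_128_sse(app_key, &key_data);
+ *     aes_gcm_enc_128_sse(&key_data, &ctx, cipher, plain, plain_len,
+ *                         iv, aad, aad_len, tag, sizeof(tag));
+ */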
+
+/**
+ * @brief GCM-AES Decryption
+ *
+ * @param key_data GCM expanded keys data
+ * @param context_data GCM operation context data
+ * @param out Plaintext output. In-place decryption is allowed.
+ * @param in Ciphertext input.
+ * @param len Length of data in Bytes for decryption.
+ * @param iv pointer to a 12-byte IV. Internally, the library
+ * concatenates the value 0x00000001 to it.
+ * @param aad Additional Authentication Data (AAD).
+ * @param aad_len Length of AAD.
+ * @param auth_tag Authenticated Tag output.
+ * @param auth_tag_len Authenticated Tag Length in bytes (must be
+ * a multiple of 4 bytes). Valid values are
+ * 16 (most likely), 12 or 8.
+ */
+IMB_DLL_EXPORT void
+aes_gcm_dec_128_sse(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *out, uint8_t const *in, uint64_t len,
+ const uint8_t *iv, uint8_t const *aad, uint64_t aad_len,
+ uint8_t *auth_tag, uint64_t auth_tag_len);
+IMB_DLL_EXPORT void
+aes_gcm_dec_128_avx_gen2(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *out, uint8_t const *in, uint64_t len,
+ const uint8_t *iv,
+ uint8_t const *aad, uint64_t aad_len,
+ uint8_t *auth_tag, uint64_t auth_tag_len);
+IMB_DLL_EXPORT void
+aes_gcm_dec_128_avx_gen4(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *out, uint8_t const *in, uint64_t len,
+ const uint8_t *iv,
+ uint8_t const *aad, uint64_t aad_len,
+ uint8_t *auth_tag, uint64_t auth_tag_len);
+
+IMB_DLL_EXPORT void
+aes_gcm_dec_192_sse(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *out, uint8_t const *in, uint64_t len,
+ const uint8_t *iv, uint8_t const *aad, uint64_t aad_len,
+ uint8_t *auth_tag, uint64_t auth_tag_len);
+IMB_DLL_EXPORT void
+aes_gcm_dec_192_avx_gen2(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *out, uint8_t const *in, uint64_t len,
+ const uint8_t *iv,
+ uint8_t const *aad, uint64_t aad_len,
+ uint8_t *auth_tag, uint64_t auth_tag_len);
+IMB_DLL_EXPORT void
+aes_gcm_dec_192_avx_gen4(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *out, uint8_t const *in, uint64_t len,
+ const uint8_t *iv,
+ uint8_t const *aad, uint64_t aad_len,
+ uint8_t *auth_tag, uint64_t auth_tag_len);
+
+IMB_DLL_EXPORT void
+aes_gcm_dec_256_sse(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *out, uint8_t const *in, uint64_t len,
+ const uint8_t *iv, uint8_t const *aad, uint64_t aad_len,
+ uint8_t *auth_tag, uint64_t auth_tag_len);
+IMB_DLL_EXPORT void
+aes_gcm_dec_256_avx_gen2(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *out, uint8_t const *in, uint64_t len,
+ const uint8_t *iv,
+ uint8_t const *aad, uint64_t aad_len,
+ uint8_t *auth_tag, uint64_t auth_tag_len);
+IMB_DLL_EXPORT void
+aes_gcm_dec_256_avx_gen4(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *out, uint8_t const *in, uint64_t len,
+ const uint8_t *iv,
+ uint8_t const *aad, uint64_t aad_len,
+ uint8_t *auth_tag, uint64_t auth_tag_len);
+
+/**
+ * @brief Start an AES-GCM encryption message
+ *
+ * @param key_data GCM expanded key data
+ * @param context_data GCM operation context data
+ * @param iv pointer to a 12-byte IV. Internally, the library
+ * concatenates the value 0x00000001 to it.
+ * @param aad Additional Authentication Data (AAD).
+ * @param aad_len Length of AAD.
+ *
+ */
+IMB_DLL_EXPORT void
+aes_gcm_init_128_sse(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ const uint8_t *iv, uint8_t const *aad, uint64_t aad_len);
+IMB_DLL_EXPORT void
+aes_gcm_init_128_avx_gen2(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ const uint8_t *iv,
+ uint8_t const *aad, uint64_t aad_len);
+IMB_DLL_EXPORT void
+aes_gcm_init_128_avx_gen4(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ const uint8_t *iv,
+ uint8_t const *aad, uint64_t aad_len);
+
+IMB_DLL_EXPORT void
+aes_gcm_init_192_sse(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ const uint8_t *iv, uint8_t const *aad, uint64_t aad_len);
+IMB_DLL_EXPORT void
+aes_gcm_init_192_avx_gen2(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ const uint8_t *iv,
+ uint8_t const *aad, uint64_t aad_len);
+IMB_DLL_EXPORT void
+aes_gcm_init_192_avx_gen4(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ const uint8_t *iv,
+ uint8_t const *aad, uint64_t aad_len);
+
+IMB_DLL_EXPORT void
+aes_gcm_init_256_sse(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ const uint8_t *iv, uint8_t const *aad, uint64_t aad_len);
+IMB_DLL_EXPORT void
+aes_gcm_init_256_avx_gen2(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ const uint8_t *iv,
+ uint8_t const *aad, uint64_t aad_len);
+IMB_DLL_EXPORT void
+aes_gcm_init_256_avx_gen4(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ const uint8_t *iv,
+ uint8_t const *aad, uint64_t aad_len);
+
+/**
+ * @brief Encrypt a block of an AES-GCM encryption message
+ *
+ * @param key_data GCM expanded key data
+ * @param context_data GCM operation context data
+ * @param out Ciphertext output. In-place encryption is allowed.
+ * @param in Plaintext input.
+ * @param len Length of data in Bytes for encryption.
+ */
+IMB_DLL_EXPORT void
+aes_gcm_enc_128_update_sse(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *out, const uint8_t *in, uint64_t len);
+IMB_DLL_EXPORT void
+aes_gcm_enc_128_update_avx_gen2(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *out, const uint8_t *in, uint64_t len);
+IMB_DLL_EXPORT void
+aes_gcm_enc_128_update_avx_gen4(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *out, const uint8_t *in, uint64_t len);
+
+IMB_DLL_EXPORT void
+aes_gcm_enc_192_update_sse(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *out, const uint8_t *in, uint64_t len);
+IMB_DLL_EXPORT void
+aes_gcm_enc_192_update_avx_gen2(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *out, const uint8_t *in, uint64_t len);
+IMB_DLL_EXPORT void
+aes_gcm_enc_192_update_avx_gen4(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *out, const uint8_t *in, uint64_t len);
+
+IMB_DLL_EXPORT void
+aes_gcm_enc_256_update_sse(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *out, const uint8_t *in, uint64_t len);
+IMB_DLL_EXPORT void
+aes_gcm_enc_256_update_avx_gen2(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *out, const uint8_t *in, uint64_t len);
+IMB_DLL_EXPORT void
+aes_gcm_enc_256_update_avx_gen4(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *out, const uint8_t *in, uint64_t len);
+
+/**
+ * @brief Decrypt a block of an AES-GCM encryption message
+ *
+ * @param key_data GCM expanded key data
+ * @param context_data GCM operation context data
+ * @param out Plaintext output. In-place decryption is allowed.
+ * @param in Ciphertext input.
+ * @param len Length of data in Bytes for decryption.
+ */
+IMB_DLL_EXPORT void
+aes_gcm_dec_128_update_sse(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *out, const uint8_t *in, uint64_t len);
+IMB_DLL_EXPORT void
+aes_gcm_dec_128_update_avx_gen2(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *out, const uint8_t *in, uint64_t len);
+IMB_DLL_EXPORT void
+aes_gcm_dec_128_update_avx_gen4(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *out, const uint8_t *in, uint64_t len);
+
+IMB_DLL_EXPORT void
+aes_gcm_dec_192_update_sse(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *out, const uint8_t *in, uint64_t len);
+IMB_DLL_EXPORT void
+aes_gcm_dec_192_update_avx_gen2(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *out, const uint8_t *in, uint64_t len);
+IMB_DLL_EXPORT void
+aes_gcm_dec_192_update_avx_gen4(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *out, const uint8_t *in, uint64_t len);
+
+IMB_DLL_EXPORT void
+aes_gcm_dec_256_update_sse(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *out, const uint8_t *in, uint64_t len);
+IMB_DLL_EXPORT void
+aes_gcm_dec_256_update_avx_gen2(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *out, const uint8_t *in, uint64_t len);
+IMB_DLL_EXPORT void
+aes_gcm_dec_256_update_avx_gen4(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *out, const uint8_t *in, uint64_t len);
+
+/**
+ * @brief End encryption of an AES-GCM encryption message
+ *
+ * @param key_data GCM expanded key data
+ * @param context_data GCM operation context data
+ * @param auth_tag Authenticated Tag output.
+ * @param auth_tag_len Authenticated Tag Length in bytes (must be
+ * a multiple of 4 bytes). Valid values are
+ * 16 (most likely), 12 or 8.
+ */
+IMB_DLL_EXPORT void
+aes_gcm_enc_128_finalize_sse(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *auth_tag, uint64_t auth_tag_len);
+IMB_DLL_EXPORT void
+aes_gcm_enc_128_finalize_avx_gen2(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *auth_tag, uint64_t auth_tag_len);
+IMB_DLL_EXPORT void
+aes_gcm_enc_128_finalize_avx_gen4(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *auth_tag, uint64_t auth_tag_len);
+
+IMB_DLL_EXPORT void
+aes_gcm_enc_192_finalize_sse(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *auth_tag, uint64_t auth_tag_len);
+IMB_DLL_EXPORT void
+aes_gcm_enc_192_finalize_avx_gen2(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *auth_tag, uint64_t auth_tag_len);
+IMB_DLL_EXPORT void
+aes_gcm_enc_192_finalize_avx_gen4(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *auth_tag, uint64_t auth_tag_len);
+
+IMB_DLL_EXPORT void
+aes_gcm_enc_256_finalize_sse(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *auth_tag, uint64_t auth_tag_len);
+IMB_DLL_EXPORT void
+aes_gcm_enc_256_finalize_avx_gen2(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *auth_tag, uint64_t auth_tag_len);
+IMB_DLL_EXPORT void
+aes_gcm_enc_256_finalize_avx_gen4(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *auth_tag, uint64_t auth_tag_len);
+
+/**
+ * @brief End decryption of an AES-GCM encryption message
+ *
+ * @param key_data GCM expanded key data
+ * @param context_data GCM operation context data
+ * @param auth_tag Authenticated Tag output.
+ * @param auth_tag_len Authenticated Tag Length in bytes (must be
+ * a multiple of 4 bytes). Valid values are
+ * 16 (most likely), 12 or 8.
+ */
+IMB_DLL_EXPORT void
+aes_gcm_dec_128_finalize_sse(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *auth_tag, uint64_t auth_tag_len);
+IMB_DLL_EXPORT void
+aes_gcm_dec_128_finalize_avx_gen2(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *auth_tag, uint64_t auth_tag_len);
+IMB_DLL_EXPORT void
+aes_gcm_dec_128_finalize_avx_gen4(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *auth_tag, uint64_t auth_tag_len);
+
+IMB_DLL_EXPORT void
+aes_gcm_dec_192_finalize_sse(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *auth_tag, uint64_t auth_tag_len);
+IMB_DLL_EXPORT void
+aes_gcm_dec_192_finalize_avx_gen2(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *auth_tag, uint64_t auth_tag_len);
+IMB_DLL_EXPORT void
+aes_gcm_dec_192_finalize_avx_gen4(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *auth_tag, uint64_t auth_tag_len);
+
+IMB_DLL_EXPORT void
+aes_gcm_dec_256_finalize_sse(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *auth_tag, uint64_t auth_tag_len);
+IMB_DLL_EXPORT void
+aes_gcm_dec_256_finalize_avx_gen2(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *auth_tag, uint64_t auth_tag_len);
+IMB_DLL_EXPORT void
+aes_gcm_dec_256_finalize_avx_gen4(const struct gcm_key_data *key_data,
+ struct gcm_context_data *context_data,
+ uint8_t *auth_tag, uint64_t auth_tag_len);
+
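+/*
+ * Illustrative usage sketch (not part of the API): streamed AES-GCM-128
+ * encryption with the SSE variant, using the init/update/finalize calls
+ * declared above. chunk1/chunk2 and the remaining buffer names are
+ * placeholders; the message may be passed in as many update calls as needed.
+ *
+ *     aes_gcm_init_128_sse(&key_data, &ctx, iv, aad, aad_len);
+ *     aes_gcm_enc_128_update_sse(&key_data, &ctx, out, chunk1, chunk1_len);
+ *     aes_gcm_enc_128_update_sse(&key_data, &ctx, out + chunk1_len,
+ *                                chunk2, chunk2_len);
+ *     aes_gcm_enc_128_finalize_sse(&key_data, &ctx, tag, sizeof(tag));
+ */
+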
+/**
+ * @brief Precomputation of HashKey constants
+ *
+ * Precomputation of HashKey<<1 mod poly constants (shifted_hkey_X and
+ * shifted_hkey_X_k).
+ *
+ * @param key_data GCM expanded key data
+ */
+IMB_DLL_EXPORT void aes_gcm_precomp_128_sse(struct gcm_key_data *key_data);
+IMB_DLL_EXPORT void aes_gcm_precomp_128_avx_gen2(struct gcm_key_data *key_data);
+IMB_DLL_EXPORT void aes_gcm_precomp_128_avx_gen4(struct gcm_key_data *key_data);
+
+IMB_DLL_EXPORT void aes_gcm_precomp_192_sse(struct gcm_key_data *key_data);
+IMB_DLL_EXPORT void aes_gcm_precomp_192_avx_gen2(struct gcm_key_data *key_data);
+IMB_DLL_EXPORT void aes_gcm_precomp_192_avx_gen4(struct gcm_key_data *key_data);
+
+IMB_DLL_EXPORT void aes_gcm_precomp_256_sse(struct gcm_key_data *key_data);
+IMB_DLL_EXPORT void aes_gcm_precomp_256_avx_gen2(struct gcm_key_data *key_data);
+IMB_DLL_EXPORT void aes_gcm_precomp_256_avx_gen4(struct gcm_key_data *key_data);
+
+/**
+ * @brief Pre-processes GCM key data
+ *
+ * Prefills the GCM key data with the round key values and
+ * the initial hash sub-key used for tag encoding.
+ *
+ * @param key pointer to key data
+ * @param key_data GCM expanded key data
+ *
+ */
+IMB_DLL_EXPORT void aes_gcm_pre_128_sse(const void *key,
+ struct gcm_key_data *key_data);
+IMB_DLL_EXPORT void aes_gcm_pre_128_avx_gen2(const void *key,
+ struct gcm_key_data *key_data);
+IMB_DLL_EXPORT void aes_gcm_pre_128_avx_gen4(const void *key,
+ struct gcm_key_data *key_data);
+IMB_DLL_EXPORT void aes_gcm_pre_192_sse(const void *key,
+ struct gcm_key_data *key_data);
+IMB_DLL_EXPORT void aes_gcm_pre_192_avx_gen2(const void *key,
+ struct gcm_key_data *key_data);
+IMB_DLL_EXPORT void aes_gcm_pre_192_avx_gen4(const void *key,
+ struct gcm_key_data *key_data);
+IMB_DLL_EXPORT void aes_gcm_pre_256_sse(const void *key,
+ struct gcm_key_data *key_data);
+IMB_DLL_EXPORT void aes_gcm_pre_256_avx_gen2(const void *key,
+ struct gcm_key_data *key_data);
+IMB_DLL_EXPORT void aes_gcm_pre_256_avx_gen4(const void *key,
+ struct gcm_key_data *key_data);
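+
+/*
+ * Illustrative sketch (not part of the API): expanding the GCM key data
+ * once and reusing it for many messages. app_key stands for an application
+ * supplied 16-byte AES-128 key.
+ *
+ *     struct gcm_key_data key_data;
+ *
+ *     aes_gcm_pre_128_sse(app_key, &key_data);
+ *     ... key_data can now be passed to the enc/dec/init calls above ...
+ */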
+#endif /* !NO_GCM */
+
+/**
+ * @brief Generation of ZUC Initialization Vectors (for EEA3 and EIA3)
+ *
+ * @param [in] count COUNT (4 bytes in Little Endian)
+ * @param [in] bearer BEARER (5 bits)
+ * @param [in] dir DIRECTION (1 bit)
+ * @param [out] iv_ptr Pointer to generated IV (16 bytes)
+ *
+ * @return
+ * - 0 if success
+ * - non-zero if one or more parameters are wrong
+ */
+IMB_DLL_EXPORT int zuc_eea3_iv_gen(const uint32_t count,
+ const uint8_t bearer,
+ const uint8_t dir,
+ void *iv_ptr);
+IMB_DLL_EXPORT int zuc_eia3_iv_gen(const uint32_t count,
+ const uint8_t bearer,
+ const uint8_t dir,
+ void *iv_ptr);
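+
+/*
+ * Illustrative sketch (not part of the API): generating a ZUC-EEA3 IV from
+ * COUNT, BEARER and DIRECTION. The variables count, bearer and dir as well
+ * as handle_bad_parameters() are placeholders for application code.
+ *
+ *     uint8_t iv[16];
+ *
+ *     if (zuc_eea3_iv_gen(count, bearer, dir, iv) != 0)
+ *             handle_bad_parameters();
+ */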
+
+/**
+ * @brief Generation of KASUMI F8 Initialization Vector
+ *
+ * @param [in] count COUNT (4 bytes in Little Endian)
+ * @param [in] bearer BEARER (5 bits)
+ * @param [in] dir DIRECTION (1 bit)
+ * @param [out] iv_ptr Pointer to generated IV (16 bytes)
+ *
+ * @return
+ * - 0 if success
+ * - non-zero if one or more parameters are wrong
+ */
+IMB_DLL_EXPORT int kasumi_f8_iv_gen(const uint32_t count,
+ const uint8_t bearer,
+ const uint8_t dir,
+ void *iv_ptr);
+/**
+ * @brief Generation of KASUMI F9 Initialization Vector
+ *
+ * @param [in] count COUNT (4 bytes in Little Endian)
+ * @param [in] fresh FRESH (4 bytes in Little Endian)
+ * @param [out] iv_ptr Pointer to generated IV (16 bytes)
+ *
+ * @return
+ * - 0 if success
+ * - non-zero if one or more parameters are wrong
+ */
+IMB_DLL_EXPORT int kasumi_f9_iv_gen(const uint32_t count,
+ const uint32_t fresh,
+ void *iv_ptr);
+
+/**
+ * @brief Generation of SNOW3G F8 Initialization Vector
+ *
+ * Parameters are passed in Little Endian format and
+ * used to generate the IV in Big Endian format
+ *
+ * @param [in] count COUNT (4 bytes in Little Endian)
+ * @param [in] bearer BEARER (5 bits)
+ * @param [in] dir DIRECTION (1 bit)
+ * @param [out] iv_ptr Pointer to generated IV (16 bytes) in Big Endian format
+ *
+ * @return
+ * - 0 if success
+ * - non-zero if one or more parameters are wrong
+ */
+IMB_DLL_EXPORT int snow3g_f8_iv_gen(const uint32_t count,
+ const uint8_t bearer,
+ const uint8_t dir,
+ void *iv_ptr);
+/**
+ * @brief Generation of SNOW3G F9 Initialization Vector
+ *
+ * Parameters are passed in Little Endian format and
+ * used to generate the IV in Big Endian format
+ *
+ * @param [in] count COUNT (4 bytes in Little Endian)
+ * @param [in] fresh FRESH (4 bytes in Little Endian)
+ * @param [in] dir DIRECTION (1 bit)
+ * @param [out] iv_ptr Pointer to generated IV (16 bytes) in Big Endian format
+ *
+ * @return
+ * - 0 if success
+ * - non-zero if one or more parameters are wrong
+ */
+IMB_DLL_EXPORT int snow3g_f9_iv_gen(const uint32_t count,
+ const uint32_t fresh,
+ const uint8_t dir,
+ void *iv_ptr);
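+
+/*
+ * Illustrative sketch (not part of the API): generating a SNOW3G F8 IV into
+ * a caller supplied 16-byte buffer (written in Big Endian format). count,
+ * bearer, dir and handle_bad_parameters() are placeholders for application
+ * code.
+ *
+ *     uint8_t f8_iv[16];
+ *
+ *     if (snow3g_f8_iv_gen(count, bearer, dir, f8_iv) != 0)
+ *             handle_bad_parameters();
+ */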
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* IMB_IPSEC_MB_H */
diff --git a/src/spdk/intel-ipsec-mb/intel-ipsec-mb.spec b/src/spdk/intel-ipsec-mb/intel-ipsec-mb.spec
new file mode 100644
index 000000000..b45614dcf
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/intel-ipsec-mb.spec
@@ -0,0 +1,110 @@
+# Copyright (c) 2017-2018, Intel Corporation
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+# * Neither the name of Intel Corporation nor the names of its contributors
+# may be used to endorse or promote products derived from this software
+# without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+%global githubname intel-ipsec-mb
+%global githubver 0.51
+%global githubfull %{githubname}-%{githubver}
+
+# disable producing debuginfo for this package
+%global debug_package %{nil}
+
+Summary: IPSEC cryptography library optimized for Intel Architecture
+Name: %{githubname}
+Release: 1%{?dist}
+Version: %{githubver}
+License: BSD
+Group: Development/Tools
+ExclusiveArch: x86_64
+Source0: https://github.com/intel/%{githubname}/archive/v%{githubver}.tar.gz
+URL: https://github.com/intel/%{githubname}
+BuildRequires: gcc, make, nasm
+
+%description
+IPSEC cryptography library optimized for Intel Architecture
+
+%package -n intel-ipsec-mb-devel
+Summary: IPSEC cryptography library optimized for Intel Architecture
+License: BSD
+Requires: intel-ipsec-mb == %{version}
+Group: Development/Tools
+ExclusiveArch: x86_64
+
+%description -n intel-ipsec-mb-devel
+IPSEC cryptography library optimized for Intel Architecture
+
+For additional information please refer to:
+https://github.com/intel/%{githubname}
+
+%prep
+%autosetup -n %{githubfull}
+
+%post -p /sbin/ldconfig
+
+%postun -p /sbin/ldconfig
+
+%build
+make %{?_smp_mflags}
+
+%install
+install -d %{buildroot}/%{_licensedir}/%{name}-%{version}
+install -m 0644 %{_builddir}/%{githubfull}/LICENSE %{buildroot}/%{_licensedir}/%{name}-%{version}
+
+# Install the library
+# - include directory not created in the 'install' target - workaround
+install -d %{buildroot}/%{_includedir}
+make install -C %{_builddir}/%{githubfull} PREFIX=%{_buildroot} HDR_DIR=%{buildroot}/%{_includedir} LIB_INSTALL_DIR=%{buildroot}/%{_libdir} MAN_DIR=%{buildroot}/%{_mandir}/man7 NOLDCONFIG=y
+# - workaround for no strip option in the 'install target'
+rm -f %{buildroot}/%{_libdir}/libIPSec_MB.so*
+install -s -m 0755 %{_builddir}/%{githubfull}/libIPSec_MB.so.%{version} %{buildroot}/%{_libdir}
+cd %{buildroot}/%{_libdir}
+ln -s libIPSec_MB.so.%{version} libIPSec_MB.so.0
+ln -s libIPSec_MB.so.%{version} libIPSec_MB.so
+
+%files
+
+%{!?_licensedir:%global license %%doc}
+%license %{_licensedir}/%{name}-%{version}/LICENSE
+%doc README ReleaseNotes.txt
+
+%{_libdir}/libIPSec_MB.so.%{version}
+%{_libdir}/libIPSec_MB.so.0
+%{_libdir}/libIPSec_MB.so
+
+%{_mandir}/man7/libipsec-mb.7.gz
+
+%files -n intel-ipsec-mb-devel
+%{_includedir}/intel-ipsec-mb.h
+%{_mandir}/man7/libipsec-mb-dev.7.gz
+
+%changelog
+* Thu Sep 13 2018 Marcel Cornu <marcel.d.cornu@intel.com> 0.51-1
+- Update for release package v0.51
+
+* Mon Apr 16 2018 Tomasz Kantecki <tomasz.kantecki@intel.com> 0.49-1
+- update for release package v0.49
+- 01org replaced with intel in URL's
+- use of new makefile 'install' target with some workarounds
+* Fri Aug 11 2017 Tomasz Kantecki <tomasz.kantecki@intel.com> 0.46-1
+- initial version of the package
diff --git a/src/spdk/intel-ipsec-mb/job_aes_hmac.asm b/src/spdk/intel-ipsec-mb/job_aes_hmac.asm
new file mode 100644
index 000000000..f1445ccc2
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/job_aes_hmac.asm
@@ -0,0 +1,144 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%include "include/datastruct.asm"
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;; Define constants
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+%define STS_BEING_PROCESSED 0
+%define STS_COMPLETED_AES 1
+%define STS_COMPLETED_HMAC 2
+%define STS_COMPLETED 3
+%define STS_INVALID_ARGS 4
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;; Define JOB_AES_HMAC structure
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+START_FIELDS ; HMAC Specific Fields
+;;; name size align
+FIELD __auth_key_xor_ipad, 8, 8 ; pointer to ipad
+FIELD __auth_key_xor_opad, 8, 8 ; pointer to opad
+END_FIELDS
+
+%assign _HMAC_spec_fields_size _FIELD_OFFSET
+%assign _HMAC_spec_fields_align _STRUCT_ALIGN
+
+START_FIELDS ; AES XCBC Specific Fields
+;;; name size align
+FIELD __k1_expanded, 8, 8 ; ptr to exp k1 keys
+FIELD __k2, 8, 8 ; ptr to k2
+FIELD __k3, 8, 8 ; ptr to k3
+END_FIELDS
+
+%assign _AES_XCBC_spec_fields_size _FIELD_OFFSET
+%assign _AES_XCBC_spec_fields_align _STRUCT_ALIGN
+
+START_FIELDS ; CBCMAC Specific Fields
+;;; name size align
+FIELD __aad, 8, 8 ; pointer to AAD
+FIELD __aad_len, 8, 8 ; 64-bit AAD length
+END_FIELDS
+
+%assign _CBCMAC_spec_fields_size _FIELD_OFFSET
+%assign _CBCMAC_spec_fields_align _STRUCT_ALIGN
+
+START_FIELDS ; AES CMAC Specific Fields
+;;; name size align
+FIELD __key_expanded, 8, 8 ; ptr to exp keys
+FIELD __skey1, 8, 8 ; ptr to subkey 1
+FIELD __skey2, 8, 8 ; ptr to subkey 2
+END_FIELDS
+
+%assign _AES_CMAC_spec_fields_size _FIELD_OFFSET
+%assign _AES_CMAC_spec_fields_align _STRUCT_ALIGN
+
+START_FIELDS ; GCM Specific Fields
+;;; name size align
+FIELD __gcm_aad, 8, 8 ; pointer to AAD
+FIELD __gcm_aad_len, 8, 8 ; 64-bit AAD length
+END_FIELDS
+
+%assign _GCM_spec_fields_size _FIELD_OFFSET
+%assign _GCM_spec_fields_align _STRUCT_ALIGN
+
+
+START_FIELDS ; JOB_AES_HMAC
+;;; name size align
+FIELD _aes_enc_key_expanded, 8, 8 ; pointer to exp enc keys
+FIELD _aes_dec_key_expanded, 8, 8 ; pointer to exp dec keys
+FIELD _aes_key_len_in_bytes, 8, 8
+FIELD _src, 8, 8 ; pointer to src buffer
+FIELD _dst, 8, 8 ; pointer to dst buffer
+FIELD _cipher_start_src_offset_in_bytes, \
+ 8, 8
+FIELD _msg_len_to_cipher, 8, 8
+FIELD _hash_start_src_offset_in_bytes,8, 8
+FIELD _msg_len_to_hash, 8, 8
+FIELD _iv, 8, 8 ; pointer to IV
+FIELD _iv_len_in_bytes, 8, 8
+FIELD _auth_tag_output, 8, 8 ; pointer to hash output
+FIELD _auth_tag_output_len_in_bytes, 8, 8
+
+UNION _u, _HMAC_spec_fields_size, _HMAC_spec_fields_align, \
+ _AES_XCBC_spec_fields_size, _AES_XCBC_spec_fields_align, \
+ _CBCMAC_spec_fields_size, _CBCMAC_spec_fields_align, \
+ _AES_CMAC_spec_fields_size, _AES_CMAC_spec_fields_align, \
+ _GCM_spec_fields_size, _GCM_spec_fields_align
+
+FIELD _status, 4, 4 ; JOB_STS
+FIELD _cipher_mode, 4, 4 ; JOB_CIPHER_MODE
+FIELD _cipher_direction, 4, 4 ; JOB_CIPHER_DIRECTION
+FIELD _hash_alg, 4, 4 ; JOB_HASH_ALG
+FIELD _chain_order, 4, 4 ; JOB_CHAIN_ORDER
+FIELD _user_data, 8, 8
+FIELD _user_data2, 8, 8
+END_FIELDS
+
+%assign _msg_len_to_cipher_in_bytes _msg_len_to_cipher
+%assign _msg_len_to_cipher_in_bits _msg_len_to_cipher
+%assign _msg_len_to_hash_in_bytes _msg_len_to_hash
+%assign _msg_len_to_hash_in_bits _msg_len_to_hash
+
+%assign _JOB_AES_HMAC_size _FIELD_OFFSET
+%assign _JOB_AES_HMAC_align _STRUCT_ALIGN
+
+%assign _auth_key_xor_ipad _u + __auth_key_xor_ipad
+%assign _auth_key_xor_opad _u + __auth_key_xor_opad
+%assign _k1_expanded _u + __k1_expanded
+%assign _k2 _u + __k2
+%assign _k3 _u + __k3
+%assign _cbcmac_aad _u + __aad
+%assign _cbcmac_aad_len _u + __aad_len
+%assign _key_expanded _u + __key_expanded
+%assign _skey1 _u + __skey1
+%assign _skey2 _u + __skey2
+%assign _gcm_aad _u + __gcm_aad
+%assign _gcm_aad_len _u + __gcm_aad_len
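+
+;; Illustrative sketch (comment only, not assembled): the offsets defined
+;; above are typically used from the architecture specific job manager code
+;; roughly as follows, assuming a JOB_AES_HMAC pointer in rax:
+;;
+;;      mov     rbx, [rax + _src]               ; load source buffer pointer
+;;      mov     rdx, [rax + _msg_len_to_cipher_in_bytes]
+;;      or      dword [rax + _status], STS_COMPLETED_AES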
+
diff --git a/src/spdk/intel-ipsec-mb/kasumi_iv.c b/src/spdk/intel-ipsec-mb/kasumi_iv.c
new file mode 100644
index 000000000..0d0149205
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/kasumi_iv.c
@@ -0,0 +1,79 @@
+/*******************************************************************************
+ Copyright (c) 2019, Intel Corporation
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*******************************************************************************/
+
+#include <string.h>
+
+#include "intel-ipsec-mb.h"
+#include "include/wireless_common.h"
+
+int
+kasumi_f8_iv_gen(const uint32_t count, const uint8_t bearer,
+ const uint8_t dir, void *iv_ptr)
+{
+ uint8_t *iv = (uint8_t *) iv_ptr;
+ uint32_t *iv32 = (uint32_t *) iv_ptr;
+
+ if (iv_ptr == NULL)
+ return -1;
+
+ /* Bearer must contain 5 bits only */
+ if (bearer >= (1<<5))
+ return -1;
+
+ /* Direction must contain 1 bit only */
+ if (dir > 1)
+ return -1;
+
+ /* IV[0-3] = COUNT */
+ iv32[0] = bswap4(count);
+
+ /* IV[4] = BEARER || DIRECTION || 0s */
+ iv[4] = (bearer << 3) + (dir << 2);
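+ /* e.g. BEARER = 0x03 and DIRECTION = 1 give iv[4] = 0x1C */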
+
+ /* IV[5-7] = 0s */
+ memset(&iv[5], 0, 3);
+
+ return 0;
+}
+
+int
+kasumi_f9_iv_gen(const uint32_t count, const uint32_t fresh,
+ void *iv_ptr)
+{
+ uint32_t *iv32 = (uint32_t *) iv_ptr;
+
+ if (iv_ptr == NULL)
+ return -1;
+
+ /* IV[0-3] = COUNT */
+ iv32[0] = bswap4(count);
+
+ /* IV[4-7] = FRESH */
+ iv32[1] = bswap4(fresh);
+
+ return 0;
+}
diff --git a/src/spdk/intel-ipsec-mb/libIPSec_MB.def b/src/spdk/intel-ipsec-mb/libIPSec_MB.def
new file mode 100644
index 000000000..e1958f4b6
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/libIPSec_MB.def
@@ -0,0 +1,398 @@
+; Copyright (c) 2017-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright
+; notice, this list of conditions and the following disclaimer in the
+; documentation and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+LIBRARY libIPSec_MB.dll
+EXPORTS
+ aes_gcm_dec_128_avx_gen2 @1
+ aes_gcm_dec_128_avx_gen4 @2
+ aes_gcm_dec_128_finalize_avx_gen2 @3
+ aes_gcm_dec_128_finalize_avx_gen4 @4
+ aes_gcm_dec_128_finalize_sse @5
+ aes_gcm_dec_128_sse @6
+ aes_gcm_dec_128_update_avx_gen2 @7
+ aes_gcm_dec_128_update_avx_gen4 @8
+ aes_gcm_dec_128_update_sse @9
+ aes_gcm_dec_192_avx_gen2 @10
+ aes_gcm_dec_192_avx_gen4 @11
+ aes_gcm_dec_192_finalize_avx_gen2 @12
+ aes_gcm_dec_192_finalize_avx_gen4 @13
+ aes_gcm_dec_192_finalize_sse @14
+ aes_gcm_dec_192_sse @15
+ aes_gcm_dec_192_update_avx_gen2 @16
+ aes_gcm_dec_192_update_avx_gen4 @17
+ aes_gcm_dec_192_update_sse @18
+ aes_gcm_dec_256_avx_gen2 @19
+ aes_gcm_dec_256_avx_gen4 @20
+ aes_gcm_dec_256_finalize_avx_gen2 @21
+ aes_gcm_dec_256_finalize_avx_gen4 @22
+ aes_gcm_dec_256_finalize_sse @23
+ aes_gcm_dec_256_sse @24
+ aes_gcm_dec_256_update_avx_gen2 @25
+ aes_gcm_dec_256_update_avx_gen4 @26
+ aes_gcm_dec_256_update_sse @27
+ aes_gcm_enc_128_avx_gen2 @28
+ aes_gcm_enc_128_avx_gen4 @29
+ aes_gcm_enc_128_finalize_avx_gen2 @30
+ aes_gcm_enc_128_finalize_avx_gen4 @31
+ aes_gcm_enc_128_finalize_sse @32
+ aes_gcm_enc_128_sse @33
+ aes_gcm_enc_128_update_avx_gen2 @34
+ aes_gcm_enc_128_update_avx_gen4 @35
+ aes_gcm_enc_128_update_sse @36
+ aes_gcm_enc_192_avx_gen2 @37
+ aes_gcm_enc_192_avx_gen4 @38
+ aes_gcm_enc_192_finalize_avx_gen2 @39
+ aes_gcm_enc_192_finalize_avx_gen4 @40
+ aes_gcm_enc_192_finalize_sse @41
+ aes_gcm_enc_192_sse @42
+ aes_gcm_enc_192_update_avx_gen2 @43
+ aes_gcm_enc_192_update_avx_gen4 @44
+ aes_gcm_enc_192_update_sse @45
+ aes_gcm_enc_256_avx_gen2 @46
+ aes_gcm_enc_256_avx_gen4 @47
+ aes_gcm_enc_256_finalize_avx_gen2 @48
+ aes_gcm_enc_256_finalize_avx_gen4 @49
+ aes_gcm_enc_256_finalize_sse @50
+ aes_gcm_enc_256_sse @51
+ aes_gcm_enc_256_update_avx_gen2 @52
+ aes_gcm_enc_256_update_avx_gen4 @53
+ aes_gcm_enc_256_update_sse @54
+ aes_gcm_init_128_avx_gen2 @55
+ aes_gcm_init_128_avx_gen4 @56
+ aes_gcm_init_128_sse @57
+ aes_gcm_init_192_avx_gen2 @58
+ aes_gcm_init_192_avx_gen4 @59
+ aes_gcm_init_192_sse @60
+ aes_gcm_init_256_avx_gen2 @61
+ aes_gcm_init_256_avx_gen4 @62
+ aes_gcm_init_256_sse @63
+ aes_gcm_precomp_128_avx_gen2 @64
+ aes_gcm_precomp_128_avx_gen4 @65
+ aes_gcm_precomp_128_sse @66
+ aes_gcm_precomp_192_avx_gen2 @67
+ aes_gcm_precomp_192_avx_gen4 @68
+ aes_gcm_precomp_192_sse @69
+ aes_gcm_precomp_256_avx_gen2 @70
+ aes_gcm_precomp_256_avx_gen4 @71
+ aes_gcm_precomp_256_sse @72
+ aes_keyexp_128_avx @73
+ aes_keyexp_128_enc_avx @74
+ aes_keyexp_128_enc_sse @75
+ aes_keyexp_128_sse @76
+ aes_keyexp_192_avx @77
+ aes_keyexp_192_enc_avx @78
+ aes_keyexp_192_enc_sse @79
+ aes_keyexp_192_sse @80
+ aes_keyexp_256_avx @81
+ aes_keyexp_256_enc_avx @82
+ aes_keyexp_256_enc_sse @83
+ aes_keyexp_256_sse @84
+ aes_xcbc_expand_key_avx @85
+ aes_xcbc_expand_key_sse @86
+ des_key_schedule @87
+ flush_job_avx @88
+ flush_job_avx2 @89
+ flush_job_avx512 @90
+ flush_job_sse @91
+ init_mb_mgr_avx @92
+ init_mb_mgr_avx2 @93
+ init_mb_mgr_avx512 @94
+ init_mb_mgr_sse @95
+ md5_one_block_sse @96
+ queue_size_avx @97
+ queue_size_avx2 @98
+ queue_size_avx512 @99
+ queue_size_sse @100
+ sha1_one_block_avx @101
+ sha1_one_block_sse @102
+ sha224_one_block_avx @103
+ sha224_one_block_sse @104
+ sha256_one_block_avx @105
+ sha256_one_block_sse @106
+ sha384_one_block_avx @107
+ sha384_one_block_sse @108
+ sha512_one_block_avx @109
+ sha512_one_block_sse @110
+; sse_sha_ext_usage @111 ## deprecated
+ submit_job_avx @112
+ submit_job_avx2 @113
+ submit_job_avx512 @114
+ submit_job_nocheck_avx @115
+ submit_job_nocheck_avx2 @116
+ submit_job_nocheck_avx512 @117
+ submit_job_nocheck_sse @118
+ submit_job_sse @119
+ aes_cmac_subkey_gen_sse @120
+ aes_cmac_subkey_gen_avx @121
+ alloc_mb_mgr @122
+ free_mb_mgr @123
+ aes_cfb_128_one_sse @124
+ aes_cfb_128_one_avx @125
+ sha1_sse @126
+ sha1_avx @127
+ sha1_avx2 @128
+ sha1_avx512 @129
+ sha1_one_block_avx2 @130
+ sha1_one_block_avx512 @131
+ md5_one_block_avx @132
+ md5_one_block_avx2 @133
+ md5_one_block_avx512 @134
+ sha224_one_block_avx2 @135
+ sha224_one_block_avx512 @136
+ sha256_one_block_avx2 @137
+ sha256_one_block_avx512 @138
+ sha384_one_block_avx2 @139
+ sha384_one_block_avx512 @140
+ sha512_one_block_avx2 @141
+ sha512_one_block_avx512 @142
+ get_next_job_sse @143
+ get_next_job_avx @144
+ get_next_job_avx2 @145
+ get_next_job_avx512 @146
+ get_completed_job_sse @147
+ get_completed_job_avx @148
+ get_completed_job_avx2 @149
+ get_completed_job_avx512 @150
+ aes_keyexp_128_enc_avx2 @151
+ aes_keyexp_128_enc_avx512 @152
+ aes_keyexp_192_enc_avx2 @153
+ aes_keyexp_192_enc_avx512 @154
+ aes_keyexp_256_enc_avx2 @155
+ aes_keyexp_256_enc_avx512 @156
+ aes_xcbc_expand_key_avx2 @157
+ aes_xcbc_expand_key_avx512 @158
+ aes_cmac_subkey_gen_avx2 @159
+ aes_cmac_subkey_gen_avx512 @160
+ aes_cfb_128_one_avx2 @161
+ aes_cfb_128_one_avx512 @162
+ aes_keyexp_128_avx2 @163
+ aes_keyexp_128_avx512 @164
+ aes_keyexp_192_avx2 @165
+ aes_keyexp_192_avx512 @166
+ aes_keyexp_256_avx2 @167
+ aes_keyexp_256_avx512 @168
+ imb_get_version_str @169
+ imb_get_version @170
+ init_mb_mgr_sse_no_aesni @171
+ submit_job_sse_no_aesni @172
+ submit_job_nocheck_sse_no_aesni @173
+ flush_job_sse_no_aesni @174
+ queue_size_sse_no_aesni @175
+ get_completed_job_sse_no_aesni @176
+ get_next_job_sse_no_aesni @177
+ aes_cfb_128_one_sse_no_aesni @178
+ aes_gcm_dec_128_finalize_sse_no_aesni @179
+ aes_gcm_dec_128_sse_no_aesni @180
+ aes_gcm_dec_128_update_sse_no_aesni @181
+ aes_gcm_dec_192_finalize_sse_no_aesni @182
+ aes_gcm_dec_192_sse_no_aesni @183
+ aes_gcm_dec_192_update_sse_no_aesni @184
+ aes_gcm_dec_256_finalize_sse_no_aesni @185
+ aes_gcm_dec_256_sse_no_aesni @186
+ aes_gcm_dec_256_update_sse_no_aesni @187
+ aes_gcm_enc_128_finalize_sse_no_aesni @188
+ aes_gcm_enc_128_sse_no_aesni @189
+ aes_gcm_enc_128_update_sse_no_aesni @190
+ aes_gcm_enc_192_finalize_sse_no_aesni @191
+ aes_gcm_enc_192_sse_no_aesni @192
+ aes_gcm_enc_192_update_sse_no_aesni @193
+ aes_gcm_enc_256_finalize_sse_no_aesni @194
+ aes_gcm_enc_256_sse_no_aesni @195
+ aes_gcm_enc_256_update_sse_no_aesni @196
+ aes_gcm_init_128_sse_no_aesni @197
+ aes_gcm_init_192_sse_no_aesni @198
+ aes_gcm_init_256_sse_no_aesni @199
+ aes_gcm_precomp_128_sse_no_aesni @200
+ aes_gcm_precomp_192_sse_no_aesni @201
+ aes_gcm_precomp_256_sse_no_aesni @202
+ aes_keyexp_128_enc_sse_no_aesni @203
+ aes_keyexp_128_sse_no_aesni @204
+ aes_keyexp_192_enc_sse_no_aesni @205
+ aes_keyexp_192_sse_no_aesni @206
+ aes_keyexp_256_enc_sse_no_aesni @207
+ aes_keyexp_256_sse_no_aesni @208
+ aes_xcbc_expand_key_sse_no_aesni @209
+ aes_cmac_subkey_gen_sse_no_aesni @210
+ aes_gcm_pre_128_sse_no_aesni @211
+ aes_gcm_pre_192_sse_no_aesni @212
+ aes_gcm_pre_256_sse_no_aesni @213
+ aes_gcm_pre_128_sse @214
+ aes_gcm_pre_192_sse @215
+ aes_gcm_pre_256_sse @216
+ aes_gcm_pre_128_avx_gen2 @217
+ aes_gcm_pre_192_avx_gen2 @218
+ aes_gcm_pre_256_avx_gen2 @219
+ aes_gcm_pre_128_avx_gen4 @220
+ aes_gcm_pre_192_avx_gen4 @221
+ aes_gcm_pre_256_avx_gen4 @222
+ aes_gcm_pre_128_avx512 @223
+ aes_gcm_pre_192_avx512 @224
+ aes_gcm_pre_256_avx512 @225
+ aes_gcm_pre_128_vaes_avx512 @226
+ aes_gcm_pre_192_vaes_avx512 @227
+ aes_gcm_pre_256_vaes_avx512 @228
+ aes_gcm_dec_128_finalize_avx512 @229
+ aes_gcm_dec_128_avx512 @230
+ aes_gcm_dec_128_update_avx512 @231
+ aes_gcm_dec_192_finalize_avx512 @232
+ aes_gcm_dec_192_avx512 @233
+ aes_gcm_dec_192_update_avx512 @234
+ aes_gcm_dec_256_finalize_avx512 @235
+ aes_gcm_dec_256_avx512 @236
+ aes_gcm_dec_256_update_avx512 @237
+ aes_gcm_enc_128_finalize_avx512 @238
+ aes_gcm_enc_128_avx512 @239
+ aes_gcm_enc_128_update_avx512 @240
+ aes_gcm_enc_192_finalize_avx512 @241
+ aes_gcm_enc_192_avx512 @242
+ aes_gcm_enc_192_update_avx512 @243
+ aes_gcm_enc_256_finalize_avx512 @244
+ aes_gcm_enc_256_avx512 @245
+ aes_gcm_enc_256_update_avx512 @246
+ aes_gcm_init_128_avx512 @247
+ aes_gcm_init_192_avx512 @248
+ aes_gcm_init_256_avx512 @249
+ aes_gcm_precomp_128_avx512 @250
+ aes_gcm_precomp_192_avx512 @251
+ aes_gcm_precomp_256_avx512 @252
+ aes_gcm_dec_128_finalize_vaes_avx512 @253
+ aes_gcm_dec_128_vaes_avx512 @254
+ aes_gcm_dec_128_update_vaes_avx512 @255
+ aes_gcm_dec_192_finalize_vaes_avx512 @256
+ aes_gcm_dec_192_vaes_avx512 @257
+ aes_gcm_dec_192_update_vaes_avx512 @258
+ aes_gcm_dec_256_finalize_vaes_avx512 @259
+ aes_gcm_dec_256_vaes_avx512 @260
+ aes_gcm_dec_256_update_vaes_avx512 @261
+ aes_gcm_enc_128_finalize_vaes_avx512 @262
+ aes_gcm_enc_128_vaes_avx512 @263
+ aes_gcm_enc_128_update_vaes_avx512 @264
+ aes_gcm_enc_192_finalize_vaes_avx512 @265
+ aes_gcm_enc_192_vaes_avx512 @266
+ aes_gcm_enc_192_update_vaes_avx512 @267
+ aes_gcm_enc_256_finalize_vaes_avx512 @268
+ aes_gcm_enc_256_vaes_avx512 @269
+ aes_gcm_enc_256_update_vaes_avx512 @270
+ aes_gcm_init_128_vaes_avx512 @271
+ aes_gcm_init_192_vaes_avx512 @272
+ aes_gcm_init_256_vaes_avx512 @273
+ aes_gcm_precomp_128_vaes_avx512 @274
+ aes_gcm_precomp_192_vaes_avx512 @275
+ aes_gcm_precomp_256_vaes_avx512 @276
+ sha224_sse @277
+ sha224_avx @278
+ sha224_avx2 @279
+ sha224_avx512 @280
+ sha256_sse @281
+ sha256_avx @282
+ sha256_avx2 @283
+ sha256_avx512 @284
+ sha384_sse @285
+ sha384_avx @286
+ sha384_avx2 @287
+ sha384_avx512 @288
+ sha512_sse @289
+ sha512_avx @290
+ sha512_avx2 @291
+ sha512_avx512 @292
+ zuc_eea3_iv_gen @293
+ zuc_eia3_iv_gen @294
+ zuc_eea3_1_buffer_avx @295
+ zuc_eea3_1_buffer_sse @296
+ zuc_eea3_4_buffer_avx @297
+ zuc_eea3_4_buffer_sse @298
+ zuc_eea3_n_buffer_avx @299
+ zuc_eea3_n_buffer_sse @300
+ zuc_eia3_1_buffer_avx @301
+ zuc_eia3_1_buffer_sse @302
+ snow3g_f8_iv_gen @303
+ snow3g_f9_iv_gen @304
+ snow3g_f8_1_buffer_avx @305
+ snow3g_f8_1_buffer_avx2 @306
+ snow3g_f8_1_buffer_bit_avx @307
+ snow3g_f8_1_buffer_bit_avx2 @308
+ snow3g_f8_1_buffer_bit_sse @309
+ snow3g_f8_1_buffer_bit_sse_no_aesni @310
+ snow3g_f8_1_buffer_sse @311
+ snow3g_f8_1_buffer_sse_no_aesni @312
+ snow3g_f8_2_buffer_avx @313
+ snow3g_f8_2_buffer_avx2 @314
+ snow3g_f8_2_buffer_sse @315
+ snow3g_f8_2_buffer_sse_no_aesni @316
+ snow3g_f8_4_buffer_avx @317
+ snow3g_f8_4_buffer_avx2 @318
+ snow3g_f8_4_buffer_sse @319
+ snow3g_f8_4_buffer_sse_no_aesni @320
+ snow3g_f8_8_buffer_avx @321
+ snow3g_f8_8_buffer_avx2 @322
+ snow3g_f8_8_buffer_multikey_avx @323
+ snow3g_f8_8_buffer_multikey_avx2 @324
+ snow3g_f8_8_buffer_multikey_sse @325
+ snow3g_f8_8_buffer_multikey_sse_no_aesni @326
+ snow3g_f8_8_buffer_sse @327
+ snow3g_f8_8_buffer_sse_no_aesni @328
+ snow3g_f8_n_buffer_avx @329
+ snow3g_f8_n_buffer_avx2 @330
+ snow3g_f8_n_buffer_multikey_avx @331
+ snow3g_f8_n_buffer_multikey_avx2 @332
+ snow3g_f8_n_buffer_multikey_sse @333
+ snow3g_f8_n_buffer_multikey_sse_no_aesni @334
+ snow3g_f8_n_buffer_sse @335
+ snow3g_f8_n_buffer_sse_no_aesni @336
+ snow3g_f9_1_buffer_avx @337
+ snow3g_f9_1_buffer_avx2 @338
+ snow3g_f9_1_buffer_sse @339
+ snow3g_f9_1_buffer_sse_no_aesni @340
+ snow3g_init_key_sched_avx @341
+ snow3g_init_key_sched_avx2 @342
+ snow3g_init_key_sched_sse @343
+ snow3g_init_key_sched_sse_no_aesni @344
+ snow3g_key_sched_size_avx @345
+ snow3g_key_sched_size_avx2 @346
+ snow3g_key_sched_size_sse @347
+ snow3g_key_sched_size_sse_no_aesni @348
+ kasumi_f8_iv_gen @349
+ kasumi_f9_iv_gen @350
+ kasumi_f8_1_buffer_avx @351
+ kasumi_f8_1_buffer_bit_avx @352
+ kasumi_f8_1_buffer_bit_sse @353
+ kasumi_f8_1_buffer_sse @354
+ kasumi_f8_2_buffer_avx @355
+ kasumi_f8_2_buffer_sse @356
+ kasumi_f8_3_buffer_avx @357
+ kasumi_f8_3_buffer_sse @358
+ kasumi_f8_4_buffer_avx @359
+ kasumi_f8_4_buffer_sse @360
+ kasumi_f8_n_buffer_avx @361
+ kasumi_f8_n_buffer_sse @362
+ kasumi_f9_1_buffer_avx @363
+ kasumi_f9_1_buffer_sse @364
+ kasumi_f9_1_buffer_user_avx @365
+ kasumi_f9_1_buffer_user_sse @366
+ kasumi_init_f8_key_sched_avx @367
+ kasumi_init_f8_key_sched_sse @368
+ kasumi_init_f9_key_sched_avx @369
+ kasumi_init_f9_key_sched_sse @370
+ kasumi_key_sched_size_avx @371
+ kasumi_key_sched_size_sse @372
diff --git a/src/spdk/intel-ipsec-mb/libipsec-mb-dev.7 b/src/spdk/intel-ipsec-mb/libipsec-mb-dev.7
new file mode 100644
index 000000000..7986866cc
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/libipsec-mb-dev.7
@@ -0,0 +1 @@
+.so man7/libipsec-mb.7
diff --git a/src/spdk/intel-ipsec-mb/libipsec-mb.7 b/src/spdk/intel-ipsec-mb/libipsec-mb.7
new file mode 100644
index 000000000..7cbadabec
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/libipsec-mb.7
@@ -0,0 +1,144 @@
+.\" Hey, EMACS: -*- nroff -*-
+.\" Copyright (c) 2018, Intel Corporation, written by Tomasz Kantecki
+.\" <tomasz.kantecki@intel.com>
+.\"
+.\" %%%LICENSE_START(VERBATIM)
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions are met:
+.\"
+.\" * Redistributions of source code must retain the above copyright notice,
+.\" this list of conditions and the following disclaimer.
+.\" * Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\" * Neither the name of Intel Corporation nor the names of its contributors
+.\" may be used to endorse or promote products derived from this software
+.\" without specific prior written permission.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+.\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+.\" DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+.\" SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+.\" CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+.\" OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+.\" OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+.\" %%%LICENSE_END
+.\"
+.\" First parameter, NAME, should be all caps
+.\" Second parameter, SECTION, should be 1-8, maybe w/ subsection
+.\" other parameters are allowed: see man(7), man(1)
+.TH libipsec-mb 7 2018-03-01 "Linux" "Linux Programmer's Manual"
+.\" Please adjust this date whenever revising the manpage.
+.\"
+.\" Some roff macros, for reference:
+.\" .nh disable hyphenation
+.\" .hy enable hyphenation
+.\" .ad l left justify
+.\" .ad b justify to both left and right margins
+.\" .nf disable filling
+.\" .fi enable filling
+.\" .br insert line break
+.\" .sp <n> insert n+1 empty lines
+.\" for manpage-specific macros, see man(7)
+.SH NAME
+libipsec-mb \- overview of Intel(R) Multi-Buffer Crypto for IPSec library
+.br
+.SH DESCRIPTION
+The Intel Multi-Buffer Crypto for IPsec Library provides highly optimized
+software implementations of the core cryptographic processing for IPsec,
+delivering industry-leading performance on a range of Intel(R) processors.
+.br
+The library offers an API crafted for IPsec applications, where a network packet
+is the subject of encryption and integrity operations.
+.br
+For best processor utilization it uses multi-buffer technology for algorithms
+that do not allow multi-block processing.
+.br
+See more in the Intel White Paper:
+"Fast Multi-buffer IPsec Implementations on Intel Architecture Processors".
+Jim Guilford, Sean Gulley, et al.
+.br
+The easiest way to find it is to search the Internet for the title.
+
+
+.br
+More information can be found at
+.UR https://github.com/intel/intel-ipsec-mb
+.I intel-ipsec-mb
+.UE .
+
+.SS ENCRYPTION ALGORITHMS
+AES128-GCM, AES192-GCM, AES256-GCM, AES128-CBC, AES192-CBC, AES256-CBC,
+AES128-CTR, AES192-CTR, AES256-CTR, AES128-CCM, NULL, AES128-DOCSIS, DES-DOCSIS, 3DES and DES.
+
+.SS INTEGRITY ALGORITHMS
+AES-XCBC-96, HMAC-MD5-96, HMAC-SHA1-96, HMAC-SHA2-224_112, HMAC-SHA2-256_128,
+HMAC-SHA2-384_192, HMAC-SHA2-512_256, AES128-GMAC, AES192-GMAC, AES256-GMAC, NULL,
+AES128-CCM, AES128-CMAC-96.
+
+.SS RECOMMENDATIONS
+The DES algorithm should be avoided and AES encryption should be used instead.
+.br
+3DES is a legacy algorithm and AES encryption is recommended instead.
+.br
+HMAC-MD5-96 is a legacy algorithm and HMAC-SHA1 is recommended instead.
+.br
+DES, 3DES and HMAC-MD5-96 are implemented in the library to support legacy applications.
+
+.SS KEY STORAGE
+The library does not offer any solution for secure key storage or storage of sensitive information.
+It is up to the application to arrange safe storage of sensitive information.
+
+.SS API
+The library offers four identical sets of APIs, one for each architecture: SSE, AVX, AVX2 and AVX512.
+.br
+Each set comprises the init_mb_mgr, get_next_job, submit_job, get_completed_job and flush_job operations.
+.br
+.nf
+init_mb_mgr() \- initializes the instance of the multi-buffer manager
+get_next_job() \- obtains pointer to next available job entry
+submit_job() \- submits job for processing
+get_completed_job() \- returns already completed jobs
+flush_job() \- flushes the multi-buffer manager and completes any outstanding operations.
+.fi
+
+The basic usage of the API is presented by this pseudo code:
+.br
+.nf
+
+ init_mb_mgr(&mb_mgr);
+ ...
+ while (work_to_be_done) {
+ job = get_next_job(&mb_mgr);
+ /* TODO: Fill in job fields */
+ job = submit_job(&mb_mgr);
+ while (job != NULL) {
+ /* TODO: Complete processing on job */
+ job = get_completed_job(&mb_mgr);
+ }
+ }
+
+ job = flush_job(&mb_mgr);
+ while (job != NULL) {
+ /* TODO: Complete processing on job */
+ job = flush_job(&mb_mgr);
+ }
+
+.fi
+The library also provides further interface functions, e.g. single-block hash functions for HMAC IPAD and OPAD calculations.
+.SH BUGS
+Report bugs at
+.UR https://github.com/intel/intel-ipsec-mb/issues
+.I intel-ipsec-mb/issues
+.UE .
+
+.SH AUTHORS
+Jim Guilford <james.guilford@intel.com>, Tomasz Kantecki <tomasz.kantecki@intel.com>
+
+.P
+This is free software; see the source for copying conditions. There is NO
+warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
diff --git a/src/spdk/intel-ipsec-mb/mb_mgr_code.h b/src/spdk/intel-ipsec-mb/mb_mgr_code.h
new file mode 100644
index 000000000..1b8857826
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/mb_mgr_code.h
@@ -0,0 +1,1770 @@
+/*******************************************************************************
+ Copyright (c) 2012-2018, Intel Corporation
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*******************************************************************************/
+
+/*
+ * This contains the bulk of the mb_mgr code, with #define's to build
+ * an SSE, AVX, AVX2 or AVX512 version (see mb_mgr_sse.c, mb_mgr_avx.c, etc.)
+ *
+ * get_next_job() returns a job object. This must be filled in and returned
+ * via submit_job() before get_next_job() is called again.
+ *
+ * submit_job() and flush_job() return a job object. This job object ceases
+ * to be usable at the next call to get_next_job().
+ */
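+
+/*
+ * Illustrative sketch of the contract above (fill_in() and use() are
+ * placeholders for application code, not library symbols):
+ *
+ *     job = get_next_job(mgr);    // job A is now owned by the caller
+ *     fill_in(job);               // set cipher/hash fields of A
+ *     job = submit_job(mgr);      // may return an older, completed job
+ *     if (job != NULL)
+ *             use(job);           // safe to touch the returned job here ...
+ *     job = get_next_job(mgr);    // ... but the job returned by submit_job()
+ *                                 // must no longer be used past this call
+ */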
+
+#include <string.h> /* memcpy(), memset() */
+
+#include "include/clear_regs_mem.h"
+
+/*
+ * JOBS() and ADV_JOBS() moved into mb_mgr_code.h
+ * get_next_job() and get_completed_job() APIs are no longer inlines.
+ * For binary compatibility they have been made proper symbols.
+ */
+__forceinline
+JOB_AES_HMAC *JOBS(MB_MGR *state, const int offset)
+{
+ char *cp = (char *)state->jobs;
+
+ return (JOB_AES_HMAC *)(cp + offset);
+}
+
+__forceinline
+void ADV_JOBS(int *ptr)
+{
+ *ptr += sizeof(JOB_AES_HMAC);
+ if (*ptr >= (int) (MAX_JOBS * sizeof(JOB_AES_HMAC)))
+ *ptr = 0;
+}
+
+/* ========================================================================= */
+/* Lower level "out of order" schedulers */
+/* ========================================================================= */
+
+__forceinline
+JOB_AES_HMAC *
+SUBMIT_JOB_AES128_DEC(JOB_AES_HMAC *job)
+{
+ AES_CBC_DEC_128(job->src + job->cipher_start_src_offset_in_bytes,
+ job->iv,
+ job->aes_dec_key_expanded,
+ job->dst,
+ job->msg_len_to_cipher_in_bytes & (~15));
+ job->status |= STS_COMPLETED_AES;
+ return job;
+}
+
+__forceinline
+JOB_AES_HMAC *
+SUBMIT_JOB_AES192_DEC(JOB_AES_HMAC *job)
+{
+ AES_CBC_DEC_192(job->src + job->cipher_start_src_offset_in_bytes,
+ job->iv,
+ job->aes_dec_key_expanded,
+ job->dst,
+ job->msg_len_to_cipher_in_bytes);
+ job->status |= STS_COMPLETED_AES;
+ return job;
+}
+
+__forceinline
+JOB_AES_HMAC *
+SUBMIT_JOB_AES256_DEC(JOB_AES_HMAC *job)
+{
+ AES_CBC_DEC_256(job->src + job->cipher_start_src_offset_in_bytes,
+ job->iv,
+ job->aes_dec_key_expanded,
+ job->dst,
+ job->msg_len_to_cipher_in_bytes);
+ job->status |= STS_COMPLETED_AES;
+ return job;
+}
+
+__forceinline
+JOB_AES_HMAC *
+SUBMIT_JOB_AES_ECB_128_ENC(JOB_AES_HMAC *job)
+{
+ AES_ECB_ENC_128(job->src + job->cipher_start_src_offset_in_bytes,
+ job->aes_enc_key_expanded,
+ job->dst,
+ job->msg_len_to_cipher_in_bytes & (~15));
+ job->status |= STS_COMPLETED_AES;
+ return job;
+}
+
+__forceinline
+JOB_AES_HMAC *
+SUBMIT_JOB_AES_ECB_192_ENC(JOB_AES_HMAC *job)
+{
+ AES_ECB_ENC_192(job->src + job->cipher_start_src_offset_in_bytes,
+ job->aes_enc_key_expanded,
+ job->dst,
+ job->msg_len_to_cipher_in_bytes & (~15));
+ job->status |= STS_COMPLETED_AES;
+ return job;
+}
+
+__forceinline
+JOB_AES_HMAC *
+SUBMIT_JOB_AES_ECB_256_ENC(JOB_AES_HMAC *job)
+{
+ AES_ECB_ENC_256(job->src + job->cipher_start_src_offset_in_bytes,
+ job->aes_enc_key_expanded,
+ job->dst,
+ job->msg_len_to_cipher_in_bytes & (~15));
+ job->status |= STS_COMPLETED_AES;
+ return job;
+}
+
+__forceinline
+JOB_AES_HMAC *
+SUBMIT_JOB_AES_ECB_128_DEC(JOB_AES_HMAC *job)
+{
+ AES_ECB_DEC_128(job->src + job->cipher_start_src_offset_in_bytes,
+ job->aes_dec_key_expanded,
+ job->dst,
+ job->msg_len_to_cipher_in_bytes & (~15));
+ job->status |= STS_COMPLETED_AES;
+ return job;
+}
+
+__forceinline
+JOB_AES_HMAC *
+SUBMIT_JOB_AES_ECB_192_DEC(JOB_AES_HMAC *job)
+{
+ AES_ECB_DEC_192(job->src + job->cipher_start_src_offset_in_bytes,
+ job->aes_dec_key_expanded,
+ job->dst,
+ job->msg_len_to_cipher_in_bytes & (~15));
+ job->status |= STS_COMPLETED_AES;
+ return job;
+}
+
+__forceinline
+JOB_AES_HMAC *
+SUBMIT_JOB_AES_ECB_256_DEC(JOB_AES_HMAC *job)
+{
+ AES_ECB_DEC_256(job->src + job->cipher_start_src_offset_in_bytes,
+ job->aes_dec_key_expanded,
+ job->dst,
+ job->msg_len_to_cipher_in_bytes & (~15));
+ job->status |= STS_COMPLETED_AES;
+ return job;
+}
+
+/* ========================================================================= */
+/* Custom hash / cipher */
+/* ========================================================================= */
+
+__forceinline
+JOB_AES_HMAC *
+JOB_CUSTOM_CIPHER(JOB_AES_HMAC *job)
+{
+ if (!(job->status & STS_COMPLETED_AES)) {
+ if (job->cipher_func(job))
+ job->status = STS_INTERNAL_ERROR;
+ else
+ job->status |= STS_COMPLETED_AES;
+ }
+ return job;
+}
+
+__forceinline
+JOB_AES_HMAC *
+SUBMIT_JOB_CUSTOM_CIPHER(JOB_AES_HMAC *job)
+{
+ return JOB_CUSTOM_CIPHER(job);
+}
+
+__forceinline
+JOB_AES_HMAC *
+FLUSH_JOB_CUSTOM_CIPHER(JOB_AES_HMAC *job)
+{
+ return JOB_CUSTOM_CIPHER(job);
+}
+
+__forceinline
+JOB_AES_HMAC *
+JOB_CUSTOM_HASH(JOB_AES_HMAC *job)
+{
+ if (!(job->status & STS_COMPLETED_HMAC)) {
+ if (job->hash_func(job))
+ job->status = STS_INTERNAL_ERROR;
+ else
+ job->status |= STS_COMPLETED_HMAC;
+ }
+ return job;
+}
+
+__forceinline
+JOB_AES_HMAC *
+SUBMIT_JOB_CUSTOM_HASH(JOB_AES_HMAC *job)
+{
+ return JOB_CUSTOM_HASH(job);
+}
+
+__forceinline
+JOB_AES_HMAC *
+FLUSH_JOB_CUSTOM_HASH(JOB_AES_HMAC *job)
+{
+ return JOB_CUSTOM_HASH(job);
+}
+
+/* ========================================================================= */
+/* DOCSIS AES (AES128 CBC + AES128 CFB) */
+/* ========================================================================= */
+
+#define AES_BLOCK_SIZE 16
+
+/**
+ * @brief Encrypts/decrypts the last partial block for DOCSIS SEC v3.1 BPI
+ *
+ * The last partial block is encrypted/decrypted using AES CFB128.
+ * The IV is always the last full ciphered block preceding the partial one.
+ *
+ * @note It is assumed that length is bigger than one AES 128 block.
+ *
+ * @param job descriptor of the performed crypto operation
+ * @return It always returns value passed in \a job
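+ *
+ * Worked example (illustration only): for a 30 byte message,
+ * partial_bytes = 14 and offset = 16, so bytes [16..29] are ciphered with
+ * CFB128 and the IV is the full ciphered block occupying bytes [0..15].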
+ */
+__forceinline
+JOB_AES_HMAC *
+DOCSIS_LAST_BLOCK(JOB_AES_HMAC *job)
+{
+ const void *iv = NULL;
+ uint64_t offset = 0;
+ uint64_t partial_bytes = 0;
+
+ if (job == NULL)
+ return job;
+
+ IMB_ASSERT((job->cipher_direction == DECRYPT) ||
+ (job->status & STS_COMPLETED_AES));
+
+ partial_bytes = job->msg_len_to_cipher_in_bytes & (AES_BLOCK_SIZE - 1);
+ offset = job->msg_len_to_cipher_in_bytes & (~(AES_BLOCK_SIZE - 1));
+
+ if (!partial_bytes)
+ return job;
+
+ /* in either case IV has to be next last ciphered block */
+ if (job->cipher_direction == ENCRYPT)
+ iv = job->dst + offset - AES_BLOCK_SIZE;
+ else
+ iv = job->src + job->cipher_start_src_offset_in_bytes +
+ offset - AES_BLOCK_SIZE;
+
+ IMB_ASSERT(partial_bytes <= AES_BLOCK_SIZE);
+ AES_CFB_128_ONE(job->dst + offset,
+ job->src + job->cipher_start_src_offset_in_bytes +
+ offset,
+ iv, job->aes_enc_key_expanded, partial_bytes);
+
+ return job;
+}
+
+/**
+ * @brief Encrypts/decrypts the first and only partial block for
+ * DOCSIS SEC v3.1 BPI
+ *
+ * The first partial block is encrypted/decrypted using AES CFB128.
+ *
+ * @param job descriptor of the performed crypto operation
+ * @return It always returns value passed in \a job
+ */
+__forceinline
+JOB_AES_HMAC *
+DOCSIS_FIRST_BLOCK(JOB_AES_HMAC *job)
+{
+ IMB_ASSERT(!(job->status & STS_COMPLETED_AES));
+ IMB_ASSERT(job->msg_len_to_cipher_in_bytes <= AES_BLOCK_SIZE);
+ AES_CFB_128_ONE(job->dst,
+ job->src + job->cipher_start_src_offset_in_bytes,
+ job->iv, job->aes_enc_key_expanded,
+ job->msg_len_to_cipher_in_bytes);
+ job->status |= STS_COMPLETED_AES;
+ return job;
+}
+
+/* ========================================================================= */
+/* DES, 3DES and DOCSIS DES (DES CBC + DES CFB) */
+/* ========================================================================= */
+
+/**
+ * @brief DOCSIS DES cipher encryption
+ *
+ * @param job descriptor of the performed crypto operation
+ * @return It always returns value passed in \a job
+ */
+__forceinline
+JOB_AES_HMAC *
+DOCSIS_DES_ENC(JOB_AES_HMAC *job)
+{
+ IMB_ASSERT(!(job->status & STS_COMPLETED_AES));
+ docsis_des_enc_basic(job->src + job->cipher_start_src_offset_in_bytes,
+ job->dst,
+ (int) job->msg_len_to_cipher_in_bytes,
+ job->aes_enc_key_expanded,
+ (const uint64_t *)job->iv);
+ job->status |= STS_COMPLETED_AES;
+ return job;
+}
+
+/**
+ * @brief DOCSIS DES cipher decryption
+ *
+ * @param job descriptor of the performed crypto operation
+ * @return It always returns value passed in \a job
+ */
+__forceinline
+JOB_AES_HMAC *
+DOCSIS_DES_DEC(JOB_AES_HMAC *job)
+{
+ IMB_ASSERT(!(job->status & STS_COMPLETED_AES));
+ docsis_des_dec_basic(job->src + job->cipher_start_src_offset_in_bytes,
+ job->dst,
+ (int) job->msg_len_to_cipher_in_bytes,
+ job->aes_dec_key_expanded,
+ (const uint64_t *)job->iv);
+ job->status |= STS_COMPLETED_AES;
+ return job;
+}
+
+/**
+ * @brief DES cipher encryption
+ *
+ * @param job descriptor of the performed crypto operation
+ * @return It always returns value passed in \a job
+ */
+__forceinline
+JOB_AES_HMAC *
+DES_CBC_ENC(JOB_AES_HMAC *job)
+{
+ IMB_ASSERT(!(job->status & STS_COMPLETED_AES));
+ des_enc_cbc_basic(job->src + job->cipher_start_src_offset_in_bytes,
+ job->dst,
+ job->msg_len_to_cipher_in_bytes &
+ (~(DES_BLOCK_SIZE - 1)),
+ job->aes_enc_key_expanded, (const uint64_t *)job->iv);
+ job->status |= STS_COMPLETED_AES;
+ return job;
+}
+
+/**
+ * @brief DES cipher decryption
+ *
+ * @param job descriptor of the performed crypto operation
+ * @return It always returns value passed in \a job
+ */
+__forceinline
+JOB_AES_HMAC *
+DES_CBC_DEC(JOB_AES_HMAC *job)
+{
+ IMB_ASSERT(!(job->status & STS_COMPLETED_AES));
+ des_dec_cbc_basic(job->src + job->cipher_start_src_offset_in_bytes,
+ job->dst,
+ job->msg_len_to_cipher_in_bytes &
+ (~(DES_BLOCK_SIZE - 1)),
+ job->aes_dec_key_expanded, (const uint64_t *)job->iv);
+ job->status |= STS_COMPLETED_AES;
+ return job;
+}
+
+/**
+ * @brief 3DES cipher encryption
+ *
+ * @param job descriptor of the performed crypto operation
+ * @return It always returns value passed in \a job
+ */
+__forceinline
+JOB_AES_HMAC *
+DES3_CBC_ENC(JOB_AES_HMAC *job)
+{
+ const void * const *ks_ptr =
+ (const void * const *)job->aes_enc_key_expanded;
+
+ IMB_ASSERT(!(job->status & STS_COMPLETED_AES));
+ des3_enc_cbc_basic(job->src + job->cipher_start_src_offset_in_bytes,
+ job->dst,
+ job->msg_len_to_cipher_in_bytes &
+ (~(DES_BLOCK_SIZE - 1)),
+ ks_ptr[0], ks_ptr[1], ks_ptr[2],
+ (const uint64_t *)job->iv);
+ job->status |= STS_COMPLETED_AES;
+ return job;
+}
+
+/**
+ * @brief 3DES cipher decryption
+ *
+ * @param job descriptor of the performed crypto operation
+ * @return It always returns value passed in \a job
+ */
+__forceinline
+JOB_AES_HMAC *
+DES3_CBC_DEC(JOB_AES_HMAC *job)
+{
+ const void * const *ks_ptr =
+ (const void * const *)job->aes_dec_key_expanded;
+
+ IMB_ASSERT(!(job->status & STS_COMPLETED_AES));
+ des3_dec_cbc_basic(job->src + job->cipher_start_src_offset_in_bytes,
+ job->dst,
+ job->msg_len_to_cipher_in_bytes &
+ (~(DES_BLOCK_SIZE - 1)),
+ ks_ptr[0], ks_ptr[1], ks_ptr[2],
+ (const uint64_t *)job->iv);
+ job->status |= STS_COMPLETED_AES;
+ return job;
+}
+
+/* ========================================================================= */
+/* Cipher submit & flush functions */
+/* ========================================================================= */
+__forceinline
+JOB_AES_HMAC *
+SUBMIT_JOB_AES_ENC(MB_MGR *state, JOB_AES_HMAC *job)
+{
+ if (CBC == job->cipher_mode) {
+ if (16 == job->aes_key_len_in_bytes) {
+ return SUBMIT_JOB_AES128_ENC(&state->aes128_ooo, job);
+ } else if (24 == job->aes_key_len_in_bytes) {
+ return SUBMIT_JOB_AES192_ENC(&state->aes192_ooo, job);
+ } else { /* assume 32 */
+ return SUBMIT_JOB_AES256_ENC(&state->aes256_ooo, job);
+ }
+ } else if (CNTR == job->cipher_mode) {
+ return SUBMIT_JOB_AES_CNTR(job);
+ } else if (CNTR_BITLEN == job->cipher_mode) {
+ return SUBMIT_JOB_AES_CNTR_BIT(job);
+ } else if (ECB == job->cipher_mode) {
+ if (16 == job->aes_key_len_in_bytes) {
+ return SUBMIT_JOB_AES_ECB_128_ENC(job);
+ } else if (24 == job->aes_key_len_in_bytes) {
+ return SUBMIT_JOB_AES_ECB_192_ENC(job);
+ } else { /* assume 32 */
+ return SUBMIT_JOB_AES_ECB_256_ENC(job);
+ }
+ } else if (DOCSIS_SEC_BPI == job->cipher_mode) {
+ if (job->msg_len_to_cipher_in_bytes >= AES_BLOCK_SIZE) {
+ JOB_AES_HMAC *tmp;
+
+ tmp = SUBMIT_JOB_AES128_ENC(&state->docsis_sec_ooo,
+ job);
+ return DOCSIS_LAST_BLOCK(tmp);
+ } else
+ return DOCSIS_FIRST_BLOCK(job);
+ } else if (PON_AES_CNTR == job->cipher_mode) {
+ if (job->msg_len_to_cipher_in_bytes == 0)
+ return SUBMIT_JOB_PON_ENC_NO_CTR(job);
+ else
+ return SUBMIT_JOB_PON_ENC(job);
+#ifndef NO_GCM
+ } else if (GCM == job->cipher_mode) {
+ return SUBMIT_JOB_AES_GCM_ENC(state, job);
+#endif /* NO_GCM */
+ } else if (CUSTOM_CIPHER == job->cipher_mode) {
+ return SUBMIT_JOB_CUSTOM_CIPHER(job);
+ } else if (DES == job->cipher_mode) {
+#ifdef SUBMIT_JOB_DES_CBC_ENC
+ return SUBMIT_JOB_DES_CBC_ENC(&state->des_enc_ooo, job);
+#else
+ return DES_CBC_ENC(job);
+#endif /* SUBMIT_JOB_DES_CBC_ENC */
+ } else if (DOCSIS_DES == job->cipher_mode) {
+#ifdef SUBMIT_JOB_DOCSIS_DES_ENC
+ return SUBMIT_JOB_DOCSIS_DES_ENC(&state->docsis_des_enc_ooo,
+ job);
+#else
+ return DOCSIS_DES_ENC(job);
+#endif /* SUBMIT_JOB_DOCSIS_DES_ENC */
+ } else if (DES3 == job->cipher_mode) {
+#ifdef SUBMIT_JOB_3DES_CBC_ENC
+ return SUBMIT_JOB_3DES_CBC_ENC(&state->des3_enc_ooo, job);
+#else
+ return DES3_CBC_ENC(job);
+#endif
+ } else if (CCM == job->cipher_mode) {
+ return AES_CNTR_CCM_128(job);
+ } else { /* assume NULL_CIPHER */
+ job->status |= STS_COMPLETED_AES;
+ return job;
+ }
+}
+
+__forceinline
+JOB_AES_HMAC *
+FLUSH_JOB_AES_ENC(MB_MGR *state, JOB_AES_HMAC *job)
+{
+ if (CBC == job->cipher_mode) {
+ if (16 == job->aes_key_len_in_bytes) {
+ return FLUSH_JOB_AES128_ENC(&state->aes128_ooo);
+ } else if (24 == job->aes_key_len_in_bytes) {
+ return FLUSH_JOB_AES192_ENC(&state->aes192_ooo);
+ } else { /* assume 32 */
+ return FLUSH_JOB_AES256_ENC(&state->aes256_ooo);
+ }
+#ifndef NO_GCM
+ } else if (GCM == job->cipher_mode) {
+ return FLUSH_JOB_AES_GCM_ENC(state, job);
+#endif /* NO_GCM */
+ } else if (DOCSIS_SEC_BPI == job->cipher_mode) {
+ JOB_AES_HMAC *tmp;
+
+ tmp = FLUSH_JOB_AES128_ENC(&state->docsis_sec_ooo);
+ return DOCSIS_LAST_BLOCK(tmp);
+#ifdef FLUSH_JOB_DES_CBC_ENC
+ } else if (DES == job->cipher_mode) {
+ return FLUSH_JOB_DES_CBC_ENC(&state->des_enc_ooo);
+#endif /* FLUSH_JOB_DES_CBC_ENC */
+#ifdef FLUSH_JOB_3DES_CBC_ENC
+ } else if (DES3 == job->cipher_mode) {
+ return FLUSH_JOB_3DES_CBC_ENC(&state->des3_enc_ooo);
+#endif /* FLUSH_JOB_3DES_CBC_ENC */
+#ifdef FLUSH_JOB_DOCSIS_DES_ENC
+ } else if (DOCSIS_DES == job->cipher_mode) {
+ return FLUSH_JOB_DOCSIS_DES_ENC(&state->docsis_des_enc_ooo);
+#endif /* FLUSH_JOB_DOCSIS_DES_ENC */
+ } else if (CUSTOM_CIPHER == job->cipher_mode) {
+ return FLUSH_JOB_CUSTOM_CIPHER(job);
+ } else { /* assume CNTR/CNTR_BITLEN, ECB, CCM or NULL_CIPHER */
+ return NULL;
+ }
+}
+
+__forceinline
+JOB_AES_HMAC *
+SUBMIT_JOB_AES_DEC(MB_MGR *state, JOB_AES_HMAC *job)
+{
+ if (CBC == job->cipher_mode) {
+ if (16 == job->aes_key_len_in_bytes) {
+ return SUBMIT_JOB_AES128_DEC(job);
+ } else if (24 == job->aes_key_len_in_bytes) {
+ return SUBMIT_JOB_AES192_DEC(job);
+ } else { /* assume 32 */
+ return SUBMIT_JOB_AES256_DEC(job);
+ }
+ } else if (CNTR == job->cipher_mode) {
+ return SUBMIT_JOB_AES_CNTR(job);
+ } else if (CNTR_BITLEN == job->cipher_mode) {
+ return SUBMIT_JOB_AES_CNTR_BIT(job);
+ } else if (ECB == job->cipher_mode) {
+ if (16 == job->aes_key_len_in_bytes) {
+ return SUBMIT_JOB_AES_ECB_128_DEC(job);
+ } else if (24 == job->aes_key_len_in_bytes) {
+ return SUBMIT_JOB_AES_ECB_192_DEC(job);
+ } else { /* assume 32 */
+ return SUBMIT_JOB_AES_ECB_256_DEC(job);
+ }
+ } else if (DOCSIS_SEC_BPI == job->cipher_mode) {
+ if (job->msg_len_to_cipher_in_bytes >= AES_BLOCK_SIZE) {
+ DOCSIS_LAST_BLOCK(job);
+ return SUBMIT_JOB_AES128_DEC(job);
+ } else {
+ return DOCSIS_FIRST_BLOCK(job);
+ }
+ } else if (PON_AES_CNTR == job->cipher_mode) {
+ if (job->msg_len_to_cipher_in_bytes == 0)
+ return SUBMIT_JOB_PON_DEC_NO_CTR(job);
+ else
+ return SUBMIT_JOB_PON_DEC(job);
+#ifndef NO_GCM
+ } else if (GCM == job->cipher_mode) {
+ return SUBMIT_JOB_AES_GCM_DEC(state, job);
+#endif /* NO_GCM */
+ } else if (DES == job->cipher_mode) {
+#ifdef SUBMIT_JOB_DES_CBC_DEC
+ return SUBMIT_JOB_DES_CBC_DEC(&state->des_dec_ooo, job);
+#else
+ (void) state;
+ return DES_CBC_DEC(job);
+#endif /* SUBMIT_JOB_DES_CBC_DEC */
+ } else if (DOCSIS_DES == job->cipher_mode) {
+#ifdef SUBMIT_JOB_DOCSIS_DES_DEC
+ return SUBMIT_JOB_DOCSIS_DES_DEC(&state->docsis_des_dec_ooo,
+ job);
+#else
+ return DOCSIS_DES_DEC(job);
+#endif /* SUBMIT_JOB_DOCSIS_DES_DEC */
+ } else if (DES3 == job->cipher_mode) {
+#ifdef SUBMIT_JOB_3DES_CBC_DEC
+ return SUBMIT_JOB_3DES_CBC_DEC(&state->des3_dec_ooo, job);
+#else
+ return DES3_CBC_DEC(job);
+#endif
+ } else if (CUSTOM_CIPHER == job->cipher_mode) {
+ return SUBMIT_JOB_CUSTOM_CIPHER(job);
+ } else if (CCM == job->cipher_mode) {
+ return AES_CNTR_CCM_128(job);
+ } else {
+ /* assume NULL_CIPHER */
+ job->status |= STS_COMPLETED_AES;
+ return job;
+ }
+}
+
+__forceinline
+JOB_AES_HMAC *
+FLUSH_JOB_AES_DEC(MB_MGR *state, JOB_AES_HMAC *job)
+{
+#ifndef NO_GCM
+ if (GCM == job->cipher_mode)
+ return FLUSH_JOB_AES_GCM_DEC(state, job);
+#endif /* NO_GCM */
+#ifdef FLUSH_JOB_DES_CBC_DEC
+ if (DES == job->cipher_mode)
+ return FLUSH_JOB_DES_CBC_DEC(&state->des_dec_ooo);
+#endif /* FLUSH_JOB_DES_CBC_DEC */
+#ifdef FLUSH_JOB_3DES_CBC_DEC
+ if (DES3 == job->cipher_mode)
+ return FLUSH_JOB_3DES_CBC_DEC(&state->des3_dec_ooo);
+#endif /* FLUSH_JOB_3DES_CBC_DEC */
+#ifdef FLUSH_JOB_DOCSIS_DES_DEC
+ if (DOCSIS_DES == job->cipher_mode)
+ return FLUSH_JOB_DOCSIS_DES_DEC(&state->docsis_des_dec_ooo);
+#endif /* FLUSH_JOB_DOCSIS_DES_DEC */
+ (void) state;
+ return NULL;
+}
+
+/* ========================================================================= */
+/* Hash submit & flush functions */
+/* ========================================================================= */
+
+__forceinline
+JOB_AES_HMAC *
+SUBMIT_JOB_HASH(MB_MGR *state, JOB_AES_HMAC *job)
+{
+#ifdef VERBOSE
+ printf("--------Enter SUBMIT_JOB_HASH --------------\n");
+#endif
+ switch (job->hash_alg) {
+ case SHA1:
+#ifdef HASH_USE_SHAEXT
+ if (state->features & IMB_FEATURE_SHANI)
+ return SUBMIT_JOB_HMAC_NI(&state->hmac_sha_1_ooo, job);
+#endif
+ return SUBMIT_JOB_HMAC(&state->hmac_sha_1_ooo, job);
+ case SHA_224:
+#ifdef HASH_USE_SHAEXT
+ if (state->features & IMB_FEATURE_SHANI)
+ return SUBMIT_JOB_HMAC_SHA_224_NI
+ (&state->hmac_sha_224_ooo, job);
+#endif
+ return SUBMIT_JOB_HMAC_SHA_224(&state->hmac_sha_224_ooo, job);
+ case SHA_256:
+#ifdef HASH_USE_SHAEXT
+ if (state->features & IMB_FEATURE_SHANI)
+ return SUBMIT_JOB_HMAC_SHA_256_NI
+ (&state->hmac_sha_256_ooo, job);
+#endif
+ return SUBMIT_JOB_HMAC_SHA_256(&state->hmac_sha_256_ooo, job);
+ case SHA_384:
+ return SUBMIT_JOB_HMAC_SHA_384(&state->hmac_sha_384_ooo, job);
+ case SHA_512:
+ return SUBMIT_JOB_HMAC_SHA_512(&state->hmac_sha_512_ooo, job);
+ case AES_XCBC:
+ return SUBMIT_JOB_AES_XCBC(&state->aes_xcbc_ooo, job);
+ case MD5:
+ return SUBMIT_JOB_HMAC_MD5(&state->hmac_md5_ooo, job);
+ case CUSTOM_HASH:
+ return SUBMIT_JOB_CUSTOM_HASH(job);
+ case AES_CCM:
+ return SUBMIT_JOB_AES_CCM_AUTH(&state->aes_ccm_ooo, job);
+ case AES_CMAC:
+ /*
+ * CMAC OOO MGR assumes job len in bits
+ * (for CMAC length is provided in bytes)
+ */
+ job->msg_len_to_hash_in_bits =
+ job->msg_len_to_hash_in_bytes * 8;
+ return SUBMIT_JOB_AES_CMAC_AUTH(&state->aes_cmac_ooo, job);
+ case AES_CMAC_BITLEN:
+ return SUBMIT_JOB_AES_CMAC_AUTH(&state->aes_cmac_ooo, job);
+ case PLAIN_SHA1:
+ IMB_SHA1(state,
+ job->src + job->hash_start_src_offset_in_bytes,
+ job->msg_len_to_hash_in_bytes, job->auth_tag_output);
+ job->status |= STS_COMPLETED_HMAC;
+ return job;
+ case PLAIN_SHA_224:
+ IMB_SHA224(state,
+ job->src + job->hash_start_src_offset_in_bytes,
+ job->msg_len_to_hash_in_bytes, job->auth_tag_output);
+ job->status |= STS_COMPLETED_HMAC;
+ return job;
+ case PLAIN_SHA_256:
+ IMB_SHA256(state,
+ job->src + job->hash_start_src_offset_in_bytes,
+ job->msg_len_to_hash_in_bytes, job->auth_tag_output);
+ job->status |= STS_COMPLETED_HMAC;
+ return job;
+ case PLAIN_SHA_384:
+ IMB_SHA384(state,
+ job->src + job->hash_start_src_offset_in_bytes,
+ job->msg_len_to_hash_in_bytes, job->auth_tag_output);
+ job->status |= STS_COMPLETED_HMAC;
+ return job;
+ case PLAIN_SHA_512:
+ IMB_SHA512(state,
+ job->src + job->hash_start_src_offset_in_bytes,
+ job->msg_len_to_hash_in_bytes, job->auth_tag_output);
+ job->status |= STS_COMPLETED_HMAC;
+ return job;
+ default: /* assume GCM, PON_CRC_BIP or NULL_HASH */
+ job->status |= STS_COMPLETED_HMAC;
+ return job;
+ }
+}
+
+__forceinline
+JOB_AES_HMAC *
+FLUSH_JOB_HASH(MB_MGR *state, JOB_AES_HMAC *job)
+{
+ switch (job->hash_alg) {
+ case SHA1:
+#ifdef HASH_USE_SHAEXT
+ if (state->features & IMB_FEATURE_SHANI)
+ return FLUSH_JOB_HMAC_NI(&state->hmac_sha_1_ooo);
+#endif
+ return FLUSH_JOB_HMAC(&state->hmac_sha_1_ooo);
+ case SHA_224:
+#ifdef HASH_USE_SHAEXT
+ if (state->features & IMB_FEATURE_SHANI)
+ return FLUSH_JOB_HMAC_SHA_224_NI
+ (&state->hmac_sha_224_ooo);
+#endif
+ return FLUSH_JOB_HMAC_SHA_224(&state->hmac_sha_224_ooo);
+ case SHA_256:
+#ifdef HASH_USE_SHAEXT
+ if (state->features & IMB_FEATURE_SHANI)
+ return FLUSH_JOB_HMAC_SHA_256_NI
+ (&state->hmac_sha_256_ooo);
+#endif
+ return FLUSH_JOB_HMAC_SHA_256(&state->hmac_sha_256_ooo);
+ case SHA_384:
+ return FLUSH_JOB_HMAC_SHA_384(&state->hmac_sha_384_ooo);
+ case SHA_512:
+ return FLUSH_JOB_HMAC_SHA_512(&state->hmac_sha_512_ooo);
+ case AES_XCBC:
+ return FLUSH_JOB_AES_XCBC(&state->aes_xcbc_ooo);
+ case MD5:
+ return FLUSH_JOB_HMAC_MD5(&state->hmac_md5_ooo);
+ case CUSTOM_HASH:
+ return FLUSH_JOB_CUSTOM_HASH(job);
+ case AES_CCM:
+ return FLUSH_JOB_AES_CCM_AUTH(&state->aes_ccm_ooo);
+ case AES_CMAC:
+ case AES_CMAC_BITLEN:
+ return FLUSH_JOB_AES_CMAC_AUTH(&state->aes_cmac_ooo);
+ default: /* assume GCM or NULL_HASH */
+ if (!(job->status & STS_COMPLETED_HMAC)) {
+ job->status |= STS_COMPLETED_HMAC;
+ return job;
+ }
+ /* if HMAC is complete then return NULL */
+ return NULL;
+ }
+}
+
+
+/* ========================================================================= */
+/* Job submit & flush functions */
+/* ========================================================================= */
+
+#ifdef DEBUG
+#define DEBUG_PUTS(s) \
+ fputs(s, stderr)
+#ifdef _WIN32
+#define INVALID_PRN(_fmt, ...) \
+ fprintf(stderr, "%s():%d: " _fmt, __FUNCTION__, __LINE__, __VA_ARGS__)
+
+#else
+#define INVALID_PRN(_fmt, ...) \
+ fprintf(stderr, "%s():%d: " _fmt, __func__, __LINE__, __VA_ARGS__)
+#endif
+#else
+#define INVALID_PRN(_fmt, ...)
+#define DEBUG_PUTS(s)
+#endif
+
+__forceinline int
+is_job_invalid(const JOB_AES_HMAC *job)
+{
+ const uint64_t auth_tag_len_fips[] = {
+ 0, /* INVALID selection */
+ 20, /* SHA1 */
+ 28, /* SHA_224 */
+ 32, /* SHA_256 */
+ 48, /* SHA_384 */
+ 64, /* SHA_512 */
+ 12, /* AES_XCBC */
+ 16, /* MD5 */
+ 0, /* NULL_HASH */
+#ifndef NO_GCM
+ 16, /* AES_GMAC */
+#endif
+ 0, /* CUSTOM HASH */
+ 0, /* AES_CCM */
+ 16, /* AES_CMAC */
+ };
+ const uint64_t auth_tag_len_ipsec[] = {
+ 0, /* INVALID selection */
+ 12, /* SHA1 */
+ 14, /* SHA_224 */
+ 16, /* SHA_256 */
+ 24, /* SHA_384 */
+ 32, /* SHA_512 */
+ 12, /* AES_XCBC */
+ 12, /* MD5 */
+ 0, /* NULL_HASH */
+#ifndef NO_GCM
+ 16, /* AES_GMAC */
+#endif
+ 0, /* CUSTOM HASH */
+ 0, /* AES_CCM */
+ 16, /* AES_CMAC */
+ 20, /* PLAIN_SHA1 */
+ 28, /* PLAIN_SHA_224 */
+ 32, /* PLAIN_SHA_256 */
+ 48, /* PLAIN_SHA_384 */
+ 64, /* PLAIN_SHA_512 */
+ 4, /* AES_CMAC 3GPP */
+ };
+
+ /* Maximum length of buffer in PON is 2^14 + 8, since maximum
+ * PLI value is 2^14 - 1 + 1 extra byte of padding + 8 bytes
+ * of XGEM header */
+ const uint64_t max_pon_len = (1 << 14) + 8;
+
+ switch (job->cipher_mode) {
+ case CBC:
+ if (job->src == NULL) {
+ INVALID_PRN("cipher_mode:%d\n", job->cipher_mode);
+ return 1;
+ }
+ if (job->dst == NULL) {
+ INVALID_PRN("cipher_mode:%d\n", job->cipher_mode);
+ return 1;
+ }
+ if (job->iv == NULL) {
+ INVALID_PRN("cipher_mode:%d\n", job->cipher_mode);
+ return 1;
+ }
+ if (job->cipher_direction == ENCRYPT &&
+ job->aes_enc_key_expanded == NULL) {
+ INVALID_PRN("cipher_mode:%d\n", job->cipher_mode);
+ return 1;
+ }
+ if (job->cipher_direction == DECRYPT &&
+ job->aes_dec_key_expanded == NULL) {
+ INVALID_PRN("cipher_mode:%d\n", job->cipher_mode);
+ return 1;
+ }
+ if (job->aes_key_len_in_bytes != UINT64_C(16) &&
+ job->aes_key_len_in_bytes != UINT64_C(24) &&
+ job->aes_key_len_in_bytes != UINT64_C(32)) {
+ INVALID_PRN("cipher_mode:%d\n", job->cipher_mode);
+ return 1;
+ }
+ if (job->msg_len_to_cipher_in_bytes == 0) {
+ INVALID_PRN("cipher_mode:%d\n", job->cipher_mode);
+ return 1;
+ }
+ if (job->msg_len_to_cipher_in_bytes & UINT64_C(15)) {
+ INVALID_PRN("cipher_mode:%d\n", job->cipher_mode);
+ return 1;
+ }
+ if (job->iv_len_in_bytes != UINT64_C(16)) {
+ INVALID_PRN("cipher_mode:%d\n", job->cipher_mode);
+ return 1;
+ }
+ break;
+ case ECB:
+ if (job->src == NULL) {
+ INVALID_PRN("cipher_mode:%d\n", job->cipher_mode);
+ return 1;
+ }
+ if (job->dst == NULL) {
+ INVALID_PRN("cipher_mode:%d\n", job->cipher_mode);
+ return 1;
+ }
+ if (job->aes_enc_key_expanded == NULL) {
+ INVALID_PRN("cipher_mode:%d\n", job->cipher_mode);
+ return 1;
+ }
+ if (job->aes_key_len_in_bytes != UINT64_C(16) &&
+ job->aes_key_len_in_bytes != UINT64_C(24) &&
+ job->aes_key_len_in_bytes != UINT64_C(32)) {
+ INVALID_PRN("cipher_mode:%d\n", job->cipher_mode);
+ return 1;
+ }
+ if (job->msg_len_to_cipher_in_bytes == 0) {
+ INVALID_PRN("cipher_mode:%d\n", job->cipher_mode);
+ return 1;
+ }
+ if (job->msg_len_to_cipher_in_bytes & UINT64_C(15)) {
+ INVALID_PRN("cipher_mode:%d\n", job->cipher_mode);
+ return 1;
+ }
+ if (job->iv_len_in_bytes != UINT64_C(0)) {
+ INVALID_PRN("cipher_mode:%d\n", job->cipher_mode);
+ return 1;
+ }
+ break;
+ case CNTR:
+ case CNTR_BITLEN:
+ if (job->src == NULL) {
+ INVALID_PRN("cipher_mode:%d\n", job->cipher_mode);
+ return 1;
+ }
+ if (job->dst == NULL) {
+ INVALID_PRN("cipher_mode:%d\n", job->cipher_mode);
+ return 1;
+ }
+ if (job->iv == NULL) {
+ INVALID_PRN("cipher_mode:%d\n", job->cipher_mode);
+ return 1;
+ }
+ if (job->aes_enc_key_expanded == NULL) {
+ INVALID_PRN("cipher_mode:%d\n", job->cipher_mode);
+ return 1;
+ }
+ if (job->aes_key_len_in_bytes != UINT64_C(16) &&
+ job->aes_key_len_in_bytes != UINT64_C(24) &&
+ job->aes_key_len_in_bytes != UINT64_C(32)) {
+ INVALID_PRN("cipher_mode:%d\n", job->cipher_mode);
+ return 1;
+ }
+ if (job->iv_len_in_bytes != UINT64_C(16) &&
+ job->iv_len_in_bytes != UINT64_C(12)) {
+ INVALID_PRN("cipher_mode:%d\n", job->cipher_mode);
+ return 1;
+ }
+ /*
+ * msg_len_to_cipher_in_bits is used with CNTR_BITLEN, but it is
+ * effectively the same field as msg_len_to_cipher_in_bytes,
+ * since it is part of the same union
+ */
+ if (job->msg_len_to_cipher_in_bytes == 0) {
+ INVALID_PRN("cipher_mode:%d\n", job->cipher_mode);
+ return 1;
+ }
+ break;
+ case NULL_CIPHER:
+ /*
+ * No checks required for this mode
+ * @note NULL cipher doesn't perform memory copy operation
+ * from source to destination
+ */
+ break;
+ case DOCSIS_SEC_BPI:
+ if (job->src == NULL) {
+ INVALID_PRN("cipher_mode:%d\n", job->cipher_mode);
+ return 1;
+ }
+ if (job->dst == NULL) {
+ INVALID_PRN("cipher_mode:%d\n", job->cipher_mode);
+ return 1;
+ }
+ if (job->iv == NULL) {
+ INVALID_PRN("cipher_mode:%d\n", job->cipher_mode);
+ return 1;
+ }
+ if (job->aes_enc_key_expanded == NULL) {
+ /* it has to be set regardless of direction (AES-CFB) */
+ INVALID_PRN("cipher_mode:%d\n", job->cipher_mode);
+ return 1;
+ }
+ if (job->cipher_direction == DECRYPT &&
+ job->aes_dec_key_expanded == NULL) {
+ INVALID_PRN("cipher_mode:%d\n", job->cipher_mode);
+ return 1;
+ }
+ if (job->aes_key_len_in_bytes != UINT64_C(16)) {
+ INVALID_PRN("cipher_mode:%d\n", job->cipher_mode);
+ return 1;
+ }
+ if (job->iv_len_in_bytes != UINT64_C(16)) {
+ INVALID_PRN("cipher_mode:%d\n", job->cipher_mode);
+ return 1;
+ }
+ if (job->msg_len_to_cipher_in_bytes == 0) {
+ INVALID_PRN("cipher_mode:%d\n", job->cipher_mode);
+ return 1;
+ }
+ break;
+#ifndef NO_GCM
+ case GCM:
+ if (job->msg_len_to_cipher_in_bytes != 0 && job->src == NULL) {
+ INVALID_PRN("cipher_mode:%d\n", job->cipher_mode);
+ return 1;
+ }
+ if (job->msg_len_to_cipher_in_bytes != 0 && job->dst == NULL) {
+ INVALID_PRN("cipher_mode:%d\n", job->cipher_mode);
+ return 1;
+ }
+ if (job->iv == NULL) {
+ INVALID_PRN("cipher_mode:%d\n", job->cipher_mode);
+ return 1;
+ }
+ /* Same key structure used for encrypt and decrypt */
+ if (job->cipher_direction == ENCRYPT &&
+ job->aes_enc_key_expanded == NULL) {
+ INVALID_PRN("cipher_mode:%d\n", job->cipher_mode);
+ return 1;
+ }
+ if (job->cipher_direction == DECRYPT &&
+ job->aes_dec_key_expanded == NULL) {
+ INVALID_PRN("cipher_mode:%d\n", job->cipher_mode);
+ return 1;
+ }
+ if (job->aes_key_len_in_bytes != UINT64_C(16) &&
+ job->aes_key_len_in_bytes != UINT64_C(24) &&
+ job->aes_key_len_in_bytes != UINT64_C(32)) {
+ INVALID_PRN("cipher_mode:%d\n", job->cipher_mode);
+ return 1;
+ }
+ if (job->iv_len_in_bytes != UINT64_C(12)) {
+ INVALID_PRN("cipher_mode:%d\n", job->cipher_mode);
+ return 1;
+ }
+ if (job->hash_alg != AES_GMAC) {
+ INVALID_PRN("cipher_mode:%d\n", job->cipher_mode);
+ return 1;
+ }
+ break;
+#endif /* !NO_GCM */
+ case CUSTOM_CIPHER:
+ /* no checks here */
+ if (job->cipher_func == NULL) {
+ INVALID_PRN("cipher_mode:%d\n", job->cipher_mode);
+ return 1;
+ }
+ break;
+ case DES:
+ if (job->src == NULL) {
+ INVALID_PRN("cipher_mode:%d\n", job->cipher_mode);
+ return 1;
+ }
+ if (job->dst == NULL) {
+ INVALID_PRN("cipher_mode:%d\n", job->cipher_mode);
+ return 1;
+ }
+ if (job->iv == NULL) {
+ INVALID_PRN("cipher_mode:%d\n", job->cipher_mode);
+ return 1;
+ }
+ if (job->cipher_direction == ENCRYPT &&
+ job->aes_enc_key_expanded == NULL) {
+ INVALID_PRN("cipher_mode:%d\n", job->cipher_mode);
+ return 1;
+ }
+ if (job->cipher_direction == DECRYPT &&
+ job->aes_dec_key_expanded == NULL) {
+ INVALID_PRN("cipher_mode:%d\n", job->cipher_mode);
+ return 1;
+ }
+ if (job->aes_key_len_in_bytes != UINT64_C(8)) {
+ INVALID_PRN("cipher_mode:%d\n", job->cipher_mode);
+ return 1;
+ }
+ if (job->msg_len_to_cipher_in_bytes == 0) {
+ INVALID_PRN("cipher_mode:%d\n", job->cipher_mode);
+ return 1;
+ }
+ if (job->msg_len_to_cipher_in_bytes & UINT64_C(7)) {
+ INVALID_PRN("cipher_mode:%d\n", job->cipher_mode);
+ return 1;
+ }
+ if (job->iv_len_in_bytes != UINT64_C(8)) {
+ INVALID_PRN("cipher_mode:%d\n", job->cipher_mode);
+ return 1;
+ }
+ break;
+ case DOCSIS_DES:
+ if (job->src == NULL) {
+ INVALID_PRN("cipher_mode:%d\n", job->cipher_mode);
+ return 1;
+ }
+ if (job->dst == NULL) {
+ INVALID_PRN("cipher_mode:%d\n", job->cipher_mode);
+ return 1;
+ }
+ if (job->iv == NULL) {
+ INVALID_PRN("cipher_mode:%d\n", job->cipher_mode);
+ return 1;
+ }
+ if (job->cipher_direction == ENCRYPT &&
+ job->aes_enc_key_expanded == NULL) {
+ INVALID_PRN("cipher_mode:%d\n", job->cipher_mode);
+ return 1;
+ }
+ if (job->cipher_direction == DECRYPT &&
+ job->aes_dec_key_expanded == NULL) {
+ INVALID_PRN("cipher_mode:%d\n", job->cipher_mode);
+ return 1;
+ }
+ if (job->aes_key_len_in_bytes != UINT64_C(8)) {
+ INVALID_PRN("cipher_mode:%d\n", job->cipher_mode);
+ return 1;
+ }
+ if (job->msg_len_to_cipher_in_bytes == 0) {
+ INVALID_PRN("cipher_mode:%d\n", job->cipher_mode);
+ return 1;
+ }
+ if (job->iv_len_in_bytes != UINT64_C(8)) {
+ INVALID_PRN("cipher_mode:%d\n", job->cipher_mode);
+ return 1;
+ }
+ break;
+ case CCM:
+ if (job->msg_len_to_cipher_in_bytes != 0) {
+ if (job->src == NULL) {
+ INVALID_PRN("cipher_mode:%d\n",
+ job->cipher_mode);
+ return 1;
+ }
+ if (job->dst == NULL) {
+ INVALID_PRN("cipher_mode:%d\n",
+ job->cipher_mode);
+ return 1;
+ }
+ }
+ if (job->iv == NULL) {
+ INVALID_PRN("cipher_mode:%d\n", job->cipher_mode);
+ return 1;
+ }
+ if (job->aes_enc_key_expanded == NULL) {
+ /* AES-CTR and CBC-MAC use only encryption keys */
+ INVALID_PRN("cipher_mode:%d\n", job->cipher_mode);
+ return 1;
+ }
+ /* currently only AES-CCM-128 is supported */
+ if (job->aes_key_len_in_bytes != UINT64_C(16)) {
+ INVALID_PRN("cipher_mode:%d\n", job->cipher_mode);
+ return 1;
+ }
+ /*
+ * From RFC3610:
+ * Nonce length = 15 - L
+		 * Valid L values are 2 to 8,
+		 * so valid nonce lengths are 13 to 7 (inclusive).
+ */
+ if (job->iv_len_in_bytes > UINT64_C(13) ||
+ job->iv_len_in_bytes < UINT64_C(7)) {
+ INVALID_PRN("cipher_mode:%d\n", job->cipher_mode);
+ return 1;
+ }
+ if (job->hash_alg != AES_CCM) {
+ INVALID_PRN("cipher_mode:%d\n", job->cipher_mode);
+ return 1;
+ }
+ break;
+ case DES3:
+ if (job->src == NULL) {
+ INVALID_PRN("cipher_mode:%d\n", job->cipher_mode);
+ return 1;
+ }
+ if (job->dst == NULL) {
+ INVALID_PRN("cipher_mode:%d\n", job->cipher_mode);
+ return 1;
+ }
+ if (job->iv == NULL) {
+ INVALID_PRN("cipher_mode:%d\n", job->cipher_mode);
+ return 1;
+ }
+ if (job->aes_key_len_in_bytes != UINT64_C(24)) {
+ INVALID_PRN("cipher_mode:%d\n", job->cipher_mode);
+ return 1;
+ }
+ if (job->msg_len_to_cipher_in_bytes == 0) {
+ INVALID_PRN("cipher_mode:%d\n", job->cipher_mode);
+ return 1;
+ }
+ if (job->msg_len_to_cipher_in_bytes & UINT64_C(7)) {
+ INVALID_PRN("cipher_mode:%d\n", job->cipher_mode);
+ return 1;
+ }
+ if (job->iv_len_in_bytes != UINT64_C(8)) {
+ INVALID_PRN("cipher_mode:%d\n", job->cipher_mode);
+ return 1;
+ }
+ if (job->cipher_direction == ENCRYPT) {
+ const void * const *ks_ptr =
+ (const void * const *)job->aes_enc_key_expanded;
+
+ if (ks_ptr == NULL) {
+ INVALID_PRN("cipher_mode:%d\n",
+ job->cipher_mode);
+ return 1;
+ }
+ if (ks_ptr[0] == NULL || ks_ptr[1] == NULL ||
+ ks_ptr[2] == NULL) {
+ INVALID_PRN("cipher_mode:%d\n",
+ job->cipher_mode);
+ return 1;
+ }
+ } else {
+ const void * const *ks_ptr =
+ (const void * const *)job->aes_dec_key_expanded;
+
+ if (ks_ptr == NULL) {
+ INVALID_PRN("cipher_mode:%d\n",
+ job->cipher_mode);
+ return 1;
+ }
+ if (ks_ptr[0] == NULL || ks_ptr[1] == NULL ||
+ ks_ptr[2] == NULL) {
+ INVALID_PRN("cipher_mode:%d\n",
+ job->cipher_mode);
+ return 1;
+ }
+ }
+ break;
+ case PON_AES_CNTR:
+ /*
+ * CRC and cipher are done together. A few assumptions:
+ * - CRC and cipher start offsets are the same
+ * - last 4 bytes (32 bits) of the buffer is CRC
+ * - updated CRC value is put into the source buffer
+ * (encryption only)
+ * - CRC length is msg_len_to_cipher_in_bytes - 4 bytes
+ * - msg_len_to_cipher_in_bytes is aligned to 4 bytes
+ * - If msg_len_to_cipher_in_bytes is 0, IV and key pointers
+ * are not required, as encryption is not done
+ */
+ if (job->src == NULL) {
+ INVALID_PRN("cipher_mode:%d\n", job->cipher_mode);
+ return 1;
+ }
+ if (job->dst == NULL) {
+ INVALID_PRN("cipher_mode:%d\n", job->cipher_mode);
+ return 1;
+ }
+
+ /* source and destination buffer pointers cannot be the same,
+ * as there are always 8 bytes that are not ciphered */
+ if (job->src == job->dst) {
+ INVALID_PRN("cipher_mode:%d\n", job->cipher_mode);
+ return 1;
+ }
+ if (job->hash_alg != PON_CRC_BIP) {
+ INVALID_PRN("cipher_mode:%d\n", job->cipher_mode);
+ return 1;
+ }
+ /*
+ * If message length to cipher != 0, AES-CTR is performed and
+		 * key and IV need to be set properly
+ */
+ if (job->msg_len_to_cipher_in_bytes != UINT64_C(0)) {
+
+ /* message size needs to be aligned to 4 bytes */
+ if ((job->msg_len_to_cipher_in_bytes & 3) != 0) {
+ INVALID_PRN("cipher_mode:%d\n",
+ job->cipher_mode);
+ return 1;
+ }
+
+			/* Subtract 8 bytes from the maximum length since
+			 * the XGEM header is not ciphered */
+ if ((job->msg_len_to_cipher_in_bytes >
+ (max_pon_len - 8))) {
+ INVALID_PRN("cipher_mode:%d\n",
+ job->cipher_mode);
+ return 1;
+ }
+
+ if (job->aes_key_len_in_bytes != UINT64_C(16)) {
+ INVALID_PRN("cipher_mode:%d\n",
+ job->cipher_mode);
+ return 1;
+ }
+ if (job->iv_len_in_bytes != UINT64_C(16)) {
+ INVALID_PRN("cipher_mode:%d\n",
+ job->cipher_mode);
+ return 1;
+ }
+ }
+ break;
+ default:
+ INVALID_PRN("cipher_mode:%d\n", job->cipher_mode);
+ return 1;
+ }
+
+ switch (job->hash_alg) {
+ case SHA1:
+ case AES_XCBC:
+ case MD5:
+ case SHA_224:
+ case SHA_256:
+ case SHA_384:
+ case SHA_512:
+ if (job->src == NULL) {
+ INVALID_PRN("hash_alg:%d\n", job->hash_alg);
+ return 1;
+ }
+ if (job->auth_tag_output_len_in_bytes !=
+ auth_tag_len_ipsec[job->hash_alg] &&
+ job->auth_tag_output_len_in_bytes !=
+ auth_tag_len_fips[job->hash_alg]) {
+ INVALID_PRN("hash_alg:%d\n", job->hash_alg);
+ return 1;
+ }
+ if (job->msg_len_to_hash_in_bytes == 0) {
+ INVALID_PRN("hash_alg:%d\n", job->hash_alg);
+ return 1;
+ }
+ if (job->auth_tag_output == NULL) {
+ INVALID_PRN("hash_alg:%d\n", job->hash_alg);
+ return 1;
+ }
+ break;
+ case NULL_HASH:
+ break;
+#ifndef NO_GCM
+ case AES_GMAC:
+ if (job->auth_tag_output_len_in_bytes < UINT64_C(1) ||
+ job->auth_tag_output_len_in_bytes > UINT64_C(16)) {
+ INVALID_PRN("hash_alg:%d\n", job->hash_alg);
+ return 1;
+ }
+ if ((job->u.GCM.aad_len_in_bytes > 0) &&
+ (job->u.GCM.aad == NULL)) {
+ INVALID_PRN("hash_alg:%d\n", job->hash_alg);
+ return 1;
+ }
+ if (job->cipher_mode != GCM) {
+ INVALID_PRN("hash_alg:%d\n", job->hash_alg);
+ return 1;
+ }
+ if (job->auth_tag_output == NULL) {
+ INVALID_PRN("hash_alg:%d\n", job->hash_alg);
+ return 1;
+ }
+ /*
+ * msg_len_to_hash_in_bytes not checked against zero.
+ * It is not used for AES-GCM & GMAC - see
+ * SUBMIT_JOB_AES_GCM_ENC and SUBMIT_JOB_AES_GCM_DEC functions.
+ */
+ break;
+#endif /* !NO_GCM */
+ case CUSTOM_HASH:
+ if (job->hash_func == NULL) {
+ INVALID_PRN("hash_alg:%d\n", job->hash_alg);
+ return 1;
+ }
+ break;
+ case AES_CCM:
+ if (job->msg_len_to_hash_in_bytes != 0 && job->src == NULL) {
+ INVALID_PRN("hash_alg:%d\n", job->hash_alg);
+ return 1;
+ }
+ if (job->u.CCM.aad_len_in_bytes > 46) {
+ /* 3 x AES_BLOCK - 2 bytes for AAD len */
+ INVALID_PRN("hash_alg:%d\n", job->hash_alg);
+ return 1;
+ }
+ if ((job->u.CCM.aad_len_in_bytes > 0) &&
+ (job->u.CCM.aad == NULL)) {
+ INVALID_PRN("hash_alg:%d\n", job->hash_alg);
+ return 1;
+ }
+ /* M can be any even number from 4 to 16 */
+ if (job->auth_tag_output_len_in_bytes < UINT64_C(4) ||
+ job->auth_tag_output_len_in_bytes > UINT64_C(16) ||
+ ((job->auth_tag_output_len_in_bytes & 1) != 0)) {
+ INVALID_PRN("hash_alg:%d\n", job->hash_alg);
+ return 1;
+ }
+ if (job->cipher_mode != CCM) {
+ INVALID_PRN("hash_alg:%d\n", job->hash_alg);
+ return 1;
+ }
+ /*
+ * AES-CCM allows for only one message for
+		 * cipher and authentication.
+ * AAD can be used to extend authentication over
+ * clear text fields.
+ */
+ if (job->msg_len_to_cipher_in_bytes !=
+ job->msg_len_to_hash_in_bytes) {
+ INVALID_PRN("hash_alg:%d\n", job->hash_alg);
+ return 1;
+ }
+ if (job->cipher_start_src_offset_in_bytes !=
+ job->hash_start_src_offset_in_bytes) {
+ INVALID_PRN("hash_alg:%d\n", job->hash_alg);
+ return 1;
+ }
+ break;
+ case AES_CMAC:
+ case AES_CMAC_BITLEN:
+ /*
+ * WARNING: When using AES_CMAC_BITLEN, length of message
+ * is passed in bits, using job->msg_len_to_hash_in_bits
+	 * (unlike "normal" AES_CMAC, where it is passed in bytes,
+ * using job->msg_len_to_hash_in_bytes).
+ */
+ if (job->src == NULL) {
+ INVALID_PRN("hash_alg:%d\n", job->hash_alg);
+ return 1;
+ }
+ if ((job->u.CMAC._key_expanded == NULL) ||
+ (job->u.CMAC._skey1 == NULL) ||
+ (job->u.CMAC._skey2 == NULL)) {
+ INVALID_PRN("hash_alg:%d\n", job->hash_alg);
+ return 1;
+ }
+ /* T is 128 bits but 96 bits is also allowed due to
+ * IPsec use case (RFC 4494) and 32 bits for CMAC 3GPP.
+ */
+ if (job->auth_tag_output_len_in_bytes < UINT64_C(4) ||
+ job->auth_tag_output_len_in_bytes > UINT64_C(16)) {
+ INVALID_PRN("hash_alg:%d\n", job->hash_alg);
+ return 1;
+ }
+ if (job->auth_tag_output == NULL) {
+ INVALID_PRN("hash_alg:%d\n", job->hash_alg);
+ return 1;
+ }
+ break;
+ case PLAIN_SHA1:
+ case PLAIN_SHA_224:
+ case PLAIN_SHA_256:
+ case PLAIN_SHA_384:
+ case PLAIN_SHA_512:
+ if (job->auth_tag_output_len_in_bytes !=
+ auth_tag_len_ipsec[job->hash_alg]) {
+ INVALID_PRN("hash_alg:%d\n", job->hash_alg);
+ return 1;
+ }
+ if (job->src == NULL) {
+ INVALID_PRN("hash_alg:%d\n", job->hash_alg);
+ return 1;
+ }
+ if (job->auth_tag_output == NULL) {
+ INVALID_PRN("hash_alg:%d\n", job->hash_alg);
+ return 1;
+ }
+ break;
+ case PON_CRC_BIP:
+ /*
+ * Authentication tag in PON is BIP 32-bit value only
+ * CRC is done together with cipher,
+ * its initial value is read from the source buffer and
+ * updated value put into the destination buffer.
+ * - msg_len_to_hash_in_bytes is aligned to 4 bytes
+ */
+ if (((job->msg_len_to_hash_in_bytes & UINT64_C(3)) != 0) ||
+ (job->msg_len_to_hash_in_bytes < UINT64_C(8)) ||
+ (job->msg_len_to_hash_in_bytes > max_pon_len)) {
+ /*
+ * Length aligned to 4 bytes (and at least 8 bytes,
+ * including 8-byte XGEM header and no more
+ * than max length)
+ */
+ INVALID_PRN("hash_alg:%d\n", job->hash_alg);
+ return 1;
+ }
+ if (job->auth_tag_output_len_in_bytes != UINT64_C(8)) {
+ /* 64-bits:
+ * - BIP 32-bits
+ * - CRC 32-bits
+ */
+ INVALID_PRN("hash_alg:%d\n", job->hash_alg);
+ return 1;
+ }
+ if (job->cipher_mode != PON_AES_CNTR) {
+ INVALID_PRN("hash_alg:%d\n", job->hash_alg);
+ return 1;
+ }
+ if (job->auth_tag_output == NULL) {
+ INVALID_PRN("hash_alg:%d\n", job->hash_alg);
+ return 1;
+ }
+ break;
+ default:
+ INVALID_PRN("hash_alg:%d\n", job->hash_alg);
+ return 1;
+ }
+ return 0;
+}
+
+__forceinline
+JOB_AES_HMAC *SUBMIT_JOB_AES(MB_MGR *state, JOB_AES_HMAC *job)
+{
+ if (job->cipher_direction == ENCRYPT)
+ job = SUBMIT_JOB_AES_ENC(state, job);
+ else
+ job = SUBMIT_JOB_AES_DEC(state, job);
+
+ return job;
+}
+
+__forceinline
+JOB_AES_HMAC *FLUSH_JOB_AES(MB_MGR *state, JOB_AES_HMAC *job)
+{
+ if (job->cipher_direction == ENCRYPT)
+ job = FLUSH_JOB_AES_ENC(state, job);
+ else
+ job = FLUSH_JOB_AES_DEC(state, job);
+
+ return job;
+}
+
+/* submit a half-completed job, based on the status */
+__forceinline
+JOB_AES_HMAC *RESUBMIT_JOB(MB_MGR *state, JOB_AES_HMAC *job)
+{
+ while (job != NULL && job->status < STS_COMPLETED) {
+ if (job->status == STS_COMPLETED_HMAC)
+ job = SUBMIT_JOB_AES(state, job);
+ else /* assumed job->status = STS_COMPLETED_AES */
+ job = SUBMIT_JOB_HASH(state, job);
+ }
+
+ return job;
+}
+
+__forceinline
+JOB_AES_HMAC *submit_new_job(MB_MGR *state, JOB_AES_HMAC *job)
+{
+ if (job->chain_order == CIPHER_HASH)
+ job = SUBMIT_JOB_AES(state, job);
+ else
+ job = SUBMIT_JOB_HASH(state, job);
+
+ job = RESUBMIT_JOB(state, job);
+ return job;
+}
+
+__forceinline
+void complete_job(MB_MGR *state, JOB_AES_HMAC *job)
+{
+ if (job->chain_order == CIPHER_HASH) {
+ /* while() loop optimized for cipher_hash order */
+ while (job->status < STS_COMPLETED) {
+ JOB_AES_HMAC *tmp = FLUSH_JOB_AES(state, job);
+
+ if (tmp == NULL)
+ tmp = FLUSH_JOB_HASH(state, job);
+
+ (void) RESUBMIT_JOB(state, tmp);
+ }
+ } else {
+ /* while() loop optimized for hash_cipher order */
+ while (job->status < STS_COMPLETED) {
+ JOB_AES_HMAC *tmp = FLUSH_JOB_HASH(state, job);
+
+ if (tmp == NULL)
+ tmp = FLUSH_JOB_AES(state, job);
+
+ (void) RESUBMIT_JOB(state, tmp);
+ }
+ }
+}
+
+__forceinline
+JOB_AES_HMAC *
+submit_job_and_check(MB_MGR *state, const int run_check)
+{
+#ifdef SAFE_PARAM
+ if (state == NULL) {
+ DEBUG_PUTS("submit job and check\n");
+ return NULL;
+ }
+#endif
+
+ JOB_AES_HMAC *job = NULL;
+#ifndef LINUX
+ DECLARE_ALIGNED(uint128_t xmm_save[10], 16);
+
+ SAVE_XMMS(xmm_save);
+#endif
+
+ job = JOBS(state, state->next_job);
+
+ if (run_check) {
+ if (is_job_invalid(job)) {
+ job->status = STS_INVALID_ARGS;
+ } else {
+ job->status = STS_BEING_PROCESSED;
+ job = submit_new_job(state, job);
+ }
+ } else {
+ job->status = STS_BEING_PROCESSED;
+ job = submit_new_job(state, job);
+ }
+
+ if (state->earliest_job < 0) {
+ /* state was previously empty */
+ if (job == NULL)
+ state->earliest_job = state->next_job;
+ ADV_JOBS(&state->next_job);
+ goto exit;
+ }
+
+ ADV_JOBS(&state->next_job);
+
+ if (state->earliest_job == state->next_job) {
+ /* Full */
+ job = JOBS(state, state->earliest_job);
+ complete_job(state, job);
+ ADV_JOBS(&state->earliest_job);
+ goto exit;
+ }
+
+ /* not full */
+ job = JOBS(state, state->earliest_job);
+ if (job->status < STS_COMPLETED) {
+ job = NULL;
+ goto exit;
+ }
+
+ ADV_JOBS(&state->earliest_job);
+exit:
+#ifdef SAFE_DATA
+ CLEAR_SCRATCH_GPS();
+ CLEAR_SCRATCH_SIMD_REGS();
+#endif /* SAFE_DATA */
+
+#ifndef LINUX
+ RESTORE_XMMS(xmm_save);
+#endif
+ return job;
+}
+
+JOB_AES_HMAC *
+SUBMIT_JOB(MB_MGR *state)
+{
+ return submit_job_and_check(state, 1);
+}
+
+JOB_AES_HMAC *
+SUBMIT_JOB_NOCHECK(MB_MGR *state)
+{
+ return submit_job_and_check(state, 0);
+}
+
+JOB_AES_HMAC *
+FLUSH_JOB(MB_MGR *state)
+{
+#ifdef SAFE_PARAM
+ if (state == NULL) {
+ DEBUG_PUTS("flush job\n");
+ return NULL;
+ }
+#endif
+ JOB_AES_HMAC *job;
+#ifndef LINUX
+ DECLARE_ALIGNED(uint128_t xmm_save[10], 16);
+#endif
+
+ if (state->earliest_job < 0)
+ return NULL; /* empty */
+
+#ifndef LINUX
+ SAVE_XMMS(xmm_save);
+#endif
+ job = JOBS(state, state->earliest_job);
+ complete_job(state, job);
+
+ ADV_JOBS(&state->earliest_job);
+
+ if (state->earliest_job == state->next_job)
+ state->earliest_job = -1; /* becomes empty */
+
+#ifdef SAFE_DATA
+ CLEAR_SCRATCH_GPS();
+ CLEAR_SCRATCH_SIMD_REGS();
+#endif /* SAFE_DATA */
+
+#ifndef LINUX
+ RESTORE_XMMS(xmm_save);
+#endif
+ return job;
+}
+
+/* ========================================================================= */
+/* ========================================================================= */
+
+uint32_t
+QUEUE_SIZE(MB_MGR *state)
+{
+#ifdef SAFE_PARAM
+ if (state == NULL) {
+ DEBUG_PUTS("queue size\n");
+ return 0;
+ }
+#endif
+ int a, b;
+
+ if (state->earliest_job < 0)
+ return 0;
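+	/*
+	 * next_job and earliest_job hold byte offsets into the job ring;
+	 * convert them to job indices and return the ring distance
+	 * (MAX_JOBS is a power of two, hence the mask).
+	 */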
+ a = state->next_job / sizeof(JOB_AES_HMAC);
+ b = state->earliest_job / sizeof(JOB_AES_HMAC);
+ return ((a-b) & (MAX_JOBS-1));
+}
+
+JOB_AES_HMAC *
+GET_COMPLETED_JOB(MB_MGR *state)
+{
+#ifdef SAFE_PARAM
+ if (state == NULL) {
+ DEBUG_PUTS("get completed job\n");
+ return NULL;
+ }
+#endif
+ JOB_AES_HMAC *job;
+
+ if (state->earliest_job < 0)
+ return NULL;
+
+ job = JOBS(state, state->earliest_job);
+ if (job->status < STS_COMPLETED)
+ return NULL;
+
+ ADV_JOBS(&state->earliest_job);
+
+ if (state->earliest_job == state->next_job)
+ state->earliest_job = -1;
+
+ return job;
+}
+
+JOB_AES_HMAC *
+GET_NEXT_JOB(MB_MGR *state)
+{
+#ifdef SAFE_PARAM
+ if (state == NULL) {
+ DEBUG_PUTS("get next job\n");
+ return NULL;
+ }
+#endif
+ return JOBS(state, state->next_job);
+}
diff --git a/src/spdk/intel-ipsec-mb/mb_mgr_datastruct.asm b/src/spdk/intel-ipsec-mb/mb_mgr_datastruct.asm
new file mode 100644
index 000000000..adc6a2f90
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/mb_mgr_datastruct.asm
@@ -0,0 +1,330 @@
+;;
+;; Copyright (c) 2012-2019, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%include "include/datastruct.asm"
+%include "constants.asm"
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;; Define constants
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+%define MAX_AES_JOBS 128
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;; Define AES_ARGS and AES Out of Order Data Structures
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+START_FIELDS ; AES_ARGS
+;; name size align
+FIELD _aesarg_in, 8*16, 8 ; array of 16 pointers to in text
+FIELD _aesarg_out, 8*16, 8 ; array of 16 pointers to out text
+FIELD _aesarg_keys, 8*16, 8 ; array of 16 pointers to keys
+FIELD _aesarg_IV, 16*16, 64 ; array of 16 128-bit IV's
+FIELD _aesarg_key_tab,16*16*15, 64 ; array of 128-bit round keys
+END_FIELDS
+%assign _AES_ARGS_size _FIELD_OFFSET
+%assign _AES_ARGS_align _STRUCT_ALIGN
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+START_FIELDS ; MB_MGR_AES_OOO
+;; name size align
+FIELD _aes_args, _AES_ARGS_size, _AES_ARGS_align
+FIELD _aes_lens, 16*2, 16
+FIELD _aes_unused_lanes, 8, 8
+FIELD _aes_job_in_lane, 16*8, 8
+FIELD _aes_lanes_in_use, 8, 8
+END_FIELDS
+%assign _MB_MGR_AES_OOO_size _FIELD_OFFSET
+%assign _MB_MGR_AES_OOO_align _STRUCT_ALIGN
+
+_aes_args_in equ _aes_args + _aesarg_in
+_aes_args_out equ _aes_args + _aesarg_out
+_aes_args_keys equ _aes_args + _aesarg_keys
+_aes_args_IV equ _aes_args + _aesarg_IV
+_aes_args_key_tab equ _aes_args + _aesarg_key_tab
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;; Define XCBC Out of Order Data Structures
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+START_FIELDS ; AES_XCBC_ARGS_X8
+;; name size align
+FIELD _aesxcbcarg_in, 8*8, 8 ; array of 8 pointers to in text
+FIELD _aesxcbcarg_keys, 8*8, 8 ; array of 8 pointers to keys
+FIELD _aesxcbcarg_ICV, 16*8, 32 ; array of 8 128-bit ICV's
+END_FIELDS
+%assign _AES_XCBC_ARGS_X8_size _FIELD_OFFSET
+%assign _AES_XCBC_ARGS_X8_align _STRUCT_ALIGN
+
+START_FIELDS ; XCBC_LANE_DATA
+;;; name size align
+FIELD _xcbc_final_block, 2*16, 32 ; final block with padding
+FIELD _xcbc_job_in_lane, 8, 8 ; pointer to job object
+FIELD _xcbc_final_done, 8, 8 ; offset to start of data
+END_FIELDS
+%assign _XCBC_LANE_DATA_size _FIELD_OFFSET
+%assign _XCBC_LANE_DATA_align _STRUCT_ALIGN
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+START_FIELDS ; MB_MGR_AES_XCBC_OOO
+;; name size align
+FIELD _aes_xcbc_args, _AES_XCBC_ARGS_X8_size, _AES_XCBC_ARGS_X8_align
+FIELD _aes_xcbc_lens, 16, 16
+FIELD _aes_xcbc_unused_lanes, 8, 8
+FIELD _aes_xcbc_ldata, _XCBC_LANE_DATA_size*8, _XCBC_LANE_DATA_align
+END_FIELDS
+%assign _MB_MGR_AES_XCBC_OOO_size _FIELD_OFFSET
+%assign _MB_MGR_AES_XCBC_OOO_align _STRUCT_ALIGN
+
+_aes_xcbc_args_in equ _aes_xcbc_args + _aesxcbcarg_in
+_aes_xcbc_args_keys equ _aes_xcbc_args + _aesxcbcarg_keys
+_aes_xcbc_args_ICV equ _aes_xcbc_args + _aesxcbcarg_ICV
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;; Define CMAC Out of Order Data Structures
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+START_FIELDS ; MB_MGR_CMAC_OOO
+;; name size align
+FIELD _aes_cmac_args, _AES_ARGS_size, _AES_ARGS_align
+FIELD _aes_cmac_lens, 8*2, 16
+FIELD _aes_cmac_init_done, 8*2, 16
+FIELD _aes_cmac_unused_lanes, 8, 8
+FIELD _aes_cmac_job_in_lane, 8*8, 8
+FIELD _aes_cmac_scratch, 8*16, 32
+END_FIELDS
+%assign _MB_MGR_CMAC_OOO_size _FIELD_OFFSET
+%assign _MB_MGR_CMAC_OOO_align _STRUCT_ALIGN
+
+_aes_cmac_args_in equ _aes_cmac_args + _aesarg_in
+_aes_cmac_args_keys equ _aes_cmac_args + _aesarg_keys
+_aes_cmac_args_IV equ _aes_cmac_args + _aesarg_IV
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;; Define CCM Out of Order Data Structures
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+START_FIELDS ; MB_MGR_CCM_OOO
+;; name size align
+FIELD _aes_ccm_args, _AES_ARGS_size, _AES_ARGS_align
+FIELD _aes_ccm_lens, 8*2, 16
+FIELD _aes_ccm_init_done, 8*2, 16
+FIELD _aes_ccm_unused_lanes, 8, 8
+FIELD _aes_ccm_job_in_lane, 8*8, 8
+FIELD _aes_ccm_init_blocks, 8*4*16, 32
+END_FIELDS
+%assign _MB_MGR_CCM_OOO_size _FIELD_OFFSET
+%assign _MB_MGR_CCM_OOO_align _STRUCT_ALIGN
+
+_aes_ccm_args_in equ _aes_ccm_args + _aesarg_in
+_aes_ccm_args_keys equ _aes_ccm_args + _aesarg_keys
+_aes_ccm_args_IV equ _aes_ccm_args + _aesarg_IV
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;; Define DES Out of Order Data Structures
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+START_FIELDS ; DES_ARGS_X16
+;; name size align
+FIELD _desarg_in, 16*8, 8 ; array of 16 pointers to in text
+FIELD _desarg_out, 16*8, 8 ; array of 16 pointers to out text
+FIELD _desarg_keys, 16*8, 8 ; array of 16 pointers to keys
+FIELD _desarg_IV, 16*8, 32 ; array of 16 64-bit IV's
+FIELD _desarg_plen, 16*4, 32 ; array of 16 32-bit partial lens
+FIELD _desarg_blen, 16*4, 32 ; array of 16 32-bit block lens
+FIELD _desarg_lin, 16*8, 8 ; array of 16 pointers to last (block) in text
+FIELD _desarg_lout, 16*8, 8 ; array of 16 pointers to last (block) out text
+END_FIELDS
+%assign _DES_ARGS_X16_size _FIELD_OFFSET
+%assign _DES_ARGS_X16_align _STRUCT_ALIGN
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+START_FIELDS ; MB_MGR_DES_OOO
+;; name size align
+FIELD _des_args, _DES_ARGS_X16_size, _DES_ARGS_X16_align
+FIELD _des_lens, 16*2, 16
+FIELD _des_unused_lanes, 8, 8
+FIELD _des_job_in_lane, 16*8, 8
+FIELD _des_lanes_in_use, 8, 8
+END_FIELDS
+%assign _MB_MGR_DES_OOO_size _FIELD_OFFSET
+%assign _MB_MGR_DES_OOO_align _STRUCT_ALIGN
+
+_des_args_in equ _des_args + _desarg_in
+_des_args_out equ _des_args + _desarg_out
+_des_args_keys equ _des_args + _desarg_keys
+_des_args_IV equ _des_args + _desarg_IV
+_des_args_PLen equ _des_args + _desarg_plen
+_des_args_BLen equ _des_args + _desarg_blen
+_des_args_LIn equ _des_args + _desarg_lin
+_des_args_LOut equ _des_args + _desarg_lout
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;; Define HMAC Out Of Order Data Structures
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+START_FIELDS ; HMAC_SHA1_LANE_DATA
+;;; name size align
+FIELD _extra_block, 2*64+8, 32 ; final block with padding
+FIELD _job_in_lane, 8, 8 ; pointer to job object
+FIELD _outer_block, 64, 1 ; block containing hash
+FIELD _outer_done, 4, 4 ; boolean flag
+FIELD _extra_blocks, 4, 4 ; num extra blocks (1 or 2)
+FIELD _size_offset, 4, 4 ; offset in extra_block to start of size
+FIELD _start_offset, 4, 4 ; offset to start of data
+END_FIELDS
+
+%assign _HMAC_SHA1_LANE_DATA_size _FIELD_OFFSET
+%assign _HMAC_SHA1_LANE_DATA_align _STRUCT_ALIGN
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+START_FIELDS ; SHA512_LANE_DATA
+;;; name size align
+FIELD _extra_block_sha512, 2* SHA512_BLK_SZ + 16, 32 ; final block with padding, alignment 16 to read in XMM chunks
+FIELD _outer_block_sha512, SHA512_BLK_SZ, 1 ; block containing hash
+FIELD _job_in_lane_sha512, 8, 8 ; pointer to job object
+FIELD _outer_done_sha512, 4, 4 ; boolean flag
+FIELD _extra_blocks_sha512, 4, 4 ; num extra blocks (1 or 2)
+FIELD _size_offset_sha512, 4, 4 ; offset in extra_block to start of size
+FIELD _start_offset_sha512, 4, 4 ; offset to start of data
+END_FIELDS
+%assign _SHA512_LANE_DATA_size _FIELD_OFFSET
+%assign _SHA512_LANE_DATA_align _STRUCT_ALIGN
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+START_FIELDS ; SHA1_ARGS
+;;; name size align
+FIELD _digest, SHA1_DIGEST_SIZE, 32 ; transposed digest
+FIELD _data_ptr_sha1, PTR_SZ*MAX_SHA1_LANES, 8 ; array of pointers to data
+END_FIELDS
+%assign _SHA1_ARGS_size _FIELD_OFFSET
+%assign _SHA1_ARGS_align _STRUCT_ALIGN
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+START_FIELDS ; MB_MGR_HMAC_SHA_1_OOO
+;;; name size align
+FIELD _args, _SHA1_ARGS_size, _SHA1_ARGS_align
+FIELD _lens, 32, 32
+FIELD _unused_lanes, 8, 8
+FIELD _ldata, _HMAC_SHA1_LANE_DATA_size*MAX_SHA1_LANES, _HMAC_SHA1_LANE_DATA_align
+FIELD _num_lanes_inuse_sha1, 4, 4
+END_FIELDS
+%assign _MB_MGR_HMAC_SHA_1_OOO_size _FIELD_OFFSET
+%assign _MB_MGR_HMAC_SHA_1_OOO_align _STRUCT_ALIGN
+
+_args_digest equ _args + _digest
+_args_data_ptr equ _args + _data_ptr_sha1
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+START_FIELDS ; SHA256_ARGS
+;;; name size align
+FIELD _digest_sha256, SHA256_DIGEST_SIZE, 32 ; transposed digest
+FIELD _data_ptr_sha256, PTR_SZ*MAX_SHA256_LANES, 8 ; array of pointers to data
+END_FIELDS
+%assign _SHA256_ARGS_size _FIELD_OFFSET
+%assign _SHA256_ARGS_align _STRUCT_ALIGN
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+START_FIELDS ; MB_MGR_HMAC_SHA_256_OOO
+;;; name size align
+FIELD _args_sha256, _SHA256_ARGS_size, _SHA256_ARGS_align
+FIELD _lens_sha256, 16*2, 16
+FIELD _unused_lanes_sha256, 8, 8
+FIELD _ldata_sha256, _HMAC_SHA1_LANE_DATA_size * MAX_SHA256_LANES, _HMAC_SHA1_LANE_DATA_align
+FIELD _num_lanes_inuse_sha256, 4, 4
+END_FIELDS
+%assign _MB_MGR_HMAC_SHA_256_OOO_size _FIELD_OFFSET
+%assign _MB_MGR_HMAC_SHA_256_OOO_align _STRUCT_ALIGN
+
+_args_digest_sha256 equ _args_sha256 + _digest_sha256
+_args_data_ptr_sha256 equ _args_sha256 + _data_ptr_sha256
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;; Define HMAC SHA512 Out Of Order Data Structures
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+START_FIELDS ; SHA512_ARGS
+;;; name size align
+FIELD _digest_sha512, SHA512_DIGEST_SIZE, 32 ; transposed digest. 2 lanes, 8 digest words, each 8 bytes long
+FIELD _data_ptr_sha512, MAX_SHA512_LANES * PTR_SZ, 8 ; array of pointers to data
+END_FIELDS
+%assign _SHA512_ARGS_size _FIELD_OFFSET
+%assign _SHA512_ARGS_align _STRUCT_ALIGN
+
+
+;; ---------------------------------------
+START_FIELDS ; MB_MGR_HMAC_SHA512_OOO
+;;; name size align
+FIELD _args_sha512, _SHA512_ARGS_size, _SHA512_ARGS_align
+FIELD _lens_sha512, 16, 16
+FIELD _unused_lanes_sha512, 8, 8
+FIELD _ldata_sha512, _SHA512_LANE_DATA_size * MAX_SHA512_LANES, _SHA512_LANE_DATA_align
+END_FIELDS
+%assign _MB_MGR_HMAC_SHA_512_OOO_size _FIELD_OFFSET
+%assign _MB_MGR_HMAC_SHA_512_OOO_align _STRUCT_ALIGN
+
+_args_digest_sha512 equ _args_sha512 + _digest_sha512
+_args_data_ptr_sha512 equ _args_sha512 + _data_ptr_sha512
+
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;; Define HMAC MD5 Out Of Order Data Structures
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+START_FIELDS ; MD5_ARGS
+;;; name size align
+FIELD _digest_md5, MD5_DIGEST_SIZE, 32 ; transposed digest
+FIELD _data_ptr_md5, MAX_MD5_LANES*PTR_SZ, 8 ; array of pointers to data
+END_FIELDS
+%assign _MD5_ARGS_size _FIELD_OFFSET
+%assign _MD5_ARGS_align _STRUCT_ALIGN
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+START_FIELDS ; MB_MGR_HMAC_MD5_OOO
+;;; name size align
+FIELD _args_md5, _MD5_ARGS_size, _MD5_ARGS_align
+FIELD _lens_md5, MAX_MD5_LANES*2, 16
+FIELD _unused_lanes_md5, 8, 8
+FIELD _ldata_md5, _HMAC_SHA1_LANE_DATA_size * MAX_MD5_LANES, _HMAC_SHA1_LANE_DATA_align
+FIELD _num_lanes_inuse_md5, 4, 8
+END_FIELDS
+%assign _MB_MGR_HMAC_MD5_OOO_size _FIELD_OFFSET
+%assign _MB_MGR_HMAC_MD5_OOO_align _STRUCT_ALIGN
+
+_args_digest_md5 equ _args_md5 + _digest_md5
+_args_data_ptr_md5 equ _args_md5 + _data_ptr_md5
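
The START_FIELDS/FIELD/END_FIELDS blocks above build up structure offsets: each FIELD aligns a running offset, assigns it to the field name, and advances past the member, and END_FIELDS leaves the total size in _FIELD_OFFSET and the largest member alignment in _STRUCT_ALIGN; the trailing equ lines then precompute parent-plus-member offsets. Below is a minimal C sketch of that arithmetic only — the function and variable names are invented for illustration and are not part of the library.

    #include <stdio.h>
    #include <stddef.h>

    static size_t field_offset;     /* mirrors _FIELD_OFFSET */
    static size_t struct_align = 1; /* mirrors _STRUCT_ALIGN */

    /* FIELD name, size, align: align the running offset up, record it,
     * advance past the member and keep the largest alignment seen. */
    static size_t field(size_t size, size_t align)
    {
            size_t off;

            field_offset = (field_offset + align - 1) & ~(align - 1);
            off = field_offset;
            field_offset += size;
            if (align > struct_align)
                    struct_align = align;
            return off;
    }

    int main(void)
    {
            /* the first DES_ARGS_X16 members defined above */
            size_t in   = field(16 * 8, 8);  /* _desarg_in   */
            size_t out  = field(16 * 8, 8);  /* _desarg_out  */
            size_t keys = field(16 * 8, 8);  /* _desarg_keys */
            size_t iv   = field(16 * 8, 32); /* _desarg_IV   */

            /* "_des_args_IV equ _des_args + _desarg_IV" is the same idea as
             * offsetof(MB_MGR_DES_OOO, args) + offsetof(DES_ARGS_X16, IV). */
            printf("in=%zu out=%zu keys=%zu IV=%zu size=%zu align=%zu\n",
                   in, out, keys, iv, field_offset, struct_align);
            return 0;
    }
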
diff --git a/src/spdk/intel-ipsec-mb/md5_one_block.c b/src/spdk/intel-ipsec-mb/md5_one_block.c
new file mode 100644
index 000000000..064bf0400
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/md5_one_block.c
@@ -0,0 +1,232 @@
+/*******************************************************************************
+ Copyright (c) 2012-2018, Intel Corporation
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*******************************************************************************/
+
+#include <stdio.h>
+#include <stdint.h>
+#include "intel-ipsec-mb.h"
+#include "include/clear_regs_mem.h"
+
+#ifdef LINUX
+#define ROTATE(a, n) (((a) << (n)) ^ ((a) >> (32 - (n))))
+#else
+#include <intrin.h>
+#define ROTATE(a, n) _rotl(a, n)
+#endif
+
+#define H0 0x67452301
+#define H1 0xefcdab89
+#define H2 0x98badcfe
+#define H3 0x10325476
+
+#define F1(b, c, d) ((((c) ^ (d)) & (b)) ^ (d))
+#define F2(b, c, d) ((((b) ^ (c)) & (d)) ^ (c))
+#define F3(b, c, d) ((b) ^ (c) ^ (d))
+#define F4(b, c, d) (((~(d)) | (b)) ^ (c))
+
+#define STEP1(a, b, c, d, k, w, r) { \
+ a += w + k + F1(b, c, d); \
+ a = ROTATE(a, r); \
+ a += b; \
+ }
+#define STEP2(a, b, c, d, k, w, r) { \
+ a += w + k + F2(b, c, d); \
+ a = ROTATE(a, r); \
+ a += b; \
+ }
+#define STEP3(a, b, c, d, k, w, r) { \
+ a += w + k + F3(b, c, d); \
+ a = ROTATE(a, r); \
+ a += b; \
+ }
+#define STEP4(a, b, c, d, k, w, r) { \
+ a += w + k + F4(b, c, d); \
+ a = ROTATE(a, r); \
+ a += b; \
+ }
+
+enum arch_type {
+ ARCH_SSE = 0,
+ ARCH_AVX,
+ ARCH_AVX2,
+ ARCH_AVX512,
+};
+
+__forceinline
+void
+md5_one_block_common(const uint8_t *data, uint32_t digest[4],
+ const enum arch_type arch)
+{
+#ifdef SAFE_PARAM
+ if (data == NULL || digest == NULL)
+ return;
+#endif
+ uint32_t a, b, c, d;
+ uint32_t w[16];
+ const uint32_t *data32 = (const uint32_t *)data;
+
+ a = H0;
+ b = H1;
+ c = H2;
+ d = H3;
+
+ w[0] = data32[0];
+ w[1] = data32[1];
+
+ STEP1(a, b, c, d, 0xd76aa478, w[0], 7);
+ w[2] = data32[2];
+ STEP1(d, a, b, c, 0xe8c7b756, w[1], 12);
+ w[3] = data32[3];
+ STEP1(c, d, a, b, 0x242070db, w[2], 17);
+ w[4] = data32[4];
+ STEP1(b, c, d, a, 0xc1bdceee, w[3], 22);
+ w[5] = data32[5];
+ STEP1(a, b, c, d, 0xf57c0faf, w[4], 7);
+ w[6] = data32[6];
+ STEP1(d, a, b, c, 0x4787c62a, w[5], 12);
+ w[7] = data32[7];
+ STEP1(c, d, a, b, 0xa8304613, w[6], 17);
+ w[8] = data32[8];
+ STEP1(b, c, d, a, 0xfd469501, w[7], 22);
+ w[9] = data32[9];
+ STEP1(a, b, c, d, 0x698098d8, w[8], 7);
+ w[10] = data32[10];
+ STEP1(d, a, b, c, 0x8b44f7af, w[9], 12);
+ w[11] = data32[11];
+ STEP1(c, d, a, b, 0xffff5bb1, w[10], 17);
+ w[12] = data32[12];
+ STEP1(b, c, d, a, 0x895cd7be, w[11], 22);
+ w[13] = data32[13];
+ STEP1(a, b, c, d, 0x6b901122, w[12], 7);
+ w[14] = data32[14];
+ STEP1(d, a, b, c, 0xfd987193, w[13], 12);
+ w[15] = data32[15];
+ STEP1(c, d, a, b, 0xa679438e, w[14], 17);
+ STEP1(b, c, d, a, 0x49b40821, w[15], 22);
+ STEP2(a, b, c, d, 0xf61e2562, w[1], 5);
+ STEP2(d, a, b, c, 0xc040b340, w[6], 9);
+ STEP2(c, d, a, b, 0x265e5a51, w[11], 14);
+ STEP2(b, c, d, a, 0xe9b6c7aa, w[0], 20);
+ STEP2(a, b, c, d, 0xd62f105d, w[5], 5);
+ STEP2(d, a, b, c, 0x02441453, w[10], 9);
+ STEP2(c, d, a, b, 0xd8a1e681, w[15], 14);
+ STEP2(b, c, d, a, 0xe7d3fbc8, w[4], 20);
+ STEP2(a, b, c, d, 0x21e1cde6, w[9], 5);
+ STEP2(d, a, b, c, 0xc33707d6, w[14], 9);
+ STEP2(c, d, a, b, 0xf4d50d87, w[3], 14);
+ STEP2(b, c, d, a, 0x455a14ed, w[8], 20);
+ STEP2(a, b, c, d, 0xa9e3e905, w[13], 5);
+ STEP2(d, a, b, c, 0xfcefa3f8, w[2], 9);
+ STEP2(c, d, a, b, 0x676f02d9, w[7], 14);
+ STEP2(b, c, d, a, 0x8d2a4c8a, w[12], 20);
+ STEP3(a, b, c, d, 0xfffa3942, w[5], 4);
+ STEP3(d, a, b, c, 0x8771f681, w[8], 11);
+ STEP3(c, d, a, b, 0x6d9d6122, w[11], 16);
+ STEP3(b, c, d, a, 0xfde5380c, w[14], 23);
+ STEP3(a, b, c, d, 0xa4beea44, w[1], 4);
+ STEP3(d, a, b, c, 0x4bdecfa9, w[4], 11);
+ STEP3(c, d, a, b, 0xf6bb4b60, w[7], 16);
+ STEP3(b, c, d, a, 0xbebfbc70, w[10], 23);
+ STEP3(a, b, c, d, 0x289b7ec6, w[13], 4);
+ STEP3(d, a, b, c, 0xeaa127fa, w[0], 11);
+ STEP3(c, d, a, b, 0xd4ef3085, w[3], 16);
+ STEP3(b, c, d, a, 0x04881d05, w[6], 23);
+ STEP3(a, b, c, d, 0xd9d4d039, w[9], 4);
+ STEP3(d, a, b, c, 0xe6db99e5, w[12], 11);
+ STEP3(c, d, a, b, 0x1fa27cf8, w[15], 16);
+ STEP3(b, c, d, a, 0xc4ac5665, w[2], 23);
+ STEP4(a, b, c, d, 0xf4292244, w[0], 6);
+ STEP4(d, a, b, c, 0x432aff97, w[7], 10);
+ STEP4(c, d, a, b, 0xab9423a7, w[14], 15);
+ STEP4(b, c, d, a, 0xfc93a039, w[5], 21);
+ STEP4(a, b, c, d, 0x655b59c3, w[12], 6);
+ STEP4(d, a, b, c, 0x8f0ccc92, w[3], 10);
+ STEP4(c, d, a, b, 0xffeff47d, w[10], 15);
+ STEP4(b, c, d, a, 0x85845dd1, w[1], 21);
+ STEP4(a, b, c, d, 0x6fa87e4f, w[8], 6);
+ STEP4(d, a, b, c, 0xfe2ce6e0, w[15], 10);
+ STEP4(c, d, a, b, 0xa3014314, w[6], 15);
+ STEP4(b, c, d, a, 0x4e0811a1, w[13], 21);
+ STEP4(a, b, c, d, 0xf7537e82, w[4], 6);
+ STEP4(d, a, b, c, 0xbd3af235, w[11], 10);
+ STEP4(c, d, a, b, 0x2ad7d2bb, w[2], 15);
+ STEP4(b, c, d, a, 0xeb86d391, w[9], 21);
+
+ digest[0] = a + H0;
+ digest[1] = b + H1;
+ digest[2] = c + H2;
+ digest[3] = d + H3;
+#ifdef SAFE_DATA
+ clear_var(&a, sizeof(a));
+ clear_var(&b, sizeof(b));
+ clear_var(&c, sizeof(c));
+ clear_var(&d, sizeof(d));
+ clear_mem(w, sizeof(w));
+ clear_scratch_gps();
+ switch(arch) {
+ case ARCH_SSE:
+ clear_scratch_xmms_sse();
+ break;
+ case ARCH_AVX:
+ clear_scratch_xmms_avx();
+ break;
+ case ARCH_AVX2:
+ clear_scratch_ymms();
+ break;
+ case ARCH_AVX512:
+ clear_scratch_zmms();
+ break;
+ default:
+ break;
+ }
+#else
+ (void) arch; /* unused */
+#endif
+}
+
+void
+md5_one_block_sse(const void *data, void *digest)
+{
+ md5_one_block_common(data, digest, ARCH_SSE);
+}
+
+void
+md5_one_block_avx(const void *data, void *digest)
+{
+ md5_one_block_common(data, digest, ARCH_AVX);
+}
+
+void
+md5_one_block_avx2(const void *data, void *digest)
+{
+ md5_one_block_common(data, digest, ARCH_AVX2);
+}
+
+void
+md5_one_block_avx512(const void *data, void *digest)
+{
+ md5_one_block_common(data, digest, ARCH_AVX512);
+}
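
md5_one_block_common() runs the 64 MD5 steps over exactly one 64-byte block starting from the standard H0..H3 values; it does no padding and no length encoding, so the caller must supply an already padded block. Below is a minimal usage sketch under that assumption — the prototype is taken from the functions above and the test vector is MD5("abc") from RFC 1321; the sketch itself is not part of the library sources.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* prototype as defined in this file */
    void md5_one_block_sse(const void *data, void *digest);

    int main(void)
    {
            uint8_t block[64];
            uint32_t digest[4];
            const uint8_t *p = (const uint8_t *)digest;
            int i;

            /* MD5("abc"): message, 0x80 pad byte, zeroes, then the 64-bit
             * little-endian bit length (24) in bytes 56..63 */
            memset(block, 0, sizeof(block));
            memcpy(block, "abc", 3);
            block[3] = 0x80;
            block[56] = 24;

            md5_one_block_sse(block, digest);

            for (i = 0; i < 16; i++)
                    printf("%02x", p[i]);
            printf("\n"); /* expect 900150983cd24fb0d6963f7d28e17f72 (RFC 1321) */
            return 0;
    }
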
diff --git a/src/spdk/intel-ipsec-mb/no-aesni/aes128_cbc_dec_by4_sse_no_aesni.asm b/src/spdk/intel-ipsec-mb/no-aesni/aes128_cbc_dec_by4_sse_no_aesni.asm
new file mode 100644
index 000000000..84c89753a
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/no-aesni/aes128_cbc_dec_by4_sse_no_aesni.asm
@@ -0,0 +1,30 @@
+;;
+;; Copyright (c) 2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%include "include/aesni_emu.inc"
+%define AES_CBC_DEC_128 aes_cbc_dec_128_sse_no_aesni
+%include "sse/aes128_cbc_dec_by4_sse.asm"
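
This wrapper, like the other no-aesni/ files that follow, only renames the exported symbol and re-assembles the SSE implementation with the AESNI instructions replaced by the emulation macros from include/aesni_emu.inc. Whether the *_no_aesni code path should be used at all is ultimately a CPUID question; the sketch below shows a plain AESNI feature test using GCC/clang's <cpuid.h> — it is not the library's own dispatch logic, just an illustration of the decision.

    #include <cpuid.h>
    #include <stdio.h>

    static int cpu_has_aesni(void)
    {
            unsigned int eax, ebx, ecx, edx;

            if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
                    return 0;
            return (ecx >> 25) & 1; /* CPUID.01H:ECX bit 25 = AESNI */
    }

    int main(void)
    {
            printf("prefer the %s variants\n",
                   cpu_has_aesni() ? "sse" : "sse_no_aesni");
            return 0;
    }
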
diff --git a/src/spdk/intel-ipsec-mb/no-aesni/aes128_cbc_mac_x4_no_aesni.asm b/src/spdk/intel-ipsec-mb/no-aesni/aes128_cbc_mac_x4_no_aesni.asm
new file mode 100644
index 000000000..885955509
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/no-aesni/aes128_cbc_mac_x4_no_aesni.asm
@@ -0,0 +1,33 @@
+;;
+;; Copyright (c) 2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+;;; Routine to compute CBC-MAC based on 128 bit CBC AES encryption code
+
+%include "include/aesni_emu.inc"
+%define AES_CBC_ENC_X4
+%define CBC_MAC
+%include "sse/aes_cbc_enc_128_x4.asm"
diff --git a/src/spdk/intel-ipsec-mb/no-aesni/aes128_cntr_by4_sse_no_aesni.asm b/src/spdk/intel-ipsec-mb/no-aesni/aes128_cntr_by4_sse_no_aesni.asm
new file mode 100644
index 000000000..b255da320
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/no-aesni/aes128_cntr_by4_sse_no_aesni.asm
@@ -0,0 +1,31 @@
+;;
+;; Copyright (c) 2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%include "include/aesni_emu.inc"
+%define AES_CNTR_128 aes_cntr_128_sse_no_aesni
+%define AES_CNTR_BIT_128 aes_cntr_bit_128_sse_no_aesni
+%include "sse/aes128_cntr_by4_sse.asm"
diff --git a/src/spdk/intel-ipsec-mb/no-aesni/aes128_cntr_ccm_by4_sse_no_aesni.asm b/src/spdk/intel-ipsec-mb/no-aesni/aes128_cntr_ccm_by4_sse_no_aesni.asm
new file mode 100644
index 000000000..5c6662093
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/no-aesni/aes128_cntr_ccm_by4_sse_no_aesni.asm
@@ -0,0 +1,30 @@
+;;
+;; Copyright (c) 2019, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%include "include/aesni_emu.inc"
+%define AES_CNTR_CCM_128 aes_cntr_ccm_128_sse_no_aesni
+%include "sse/aes128_cntr_ccm_by4_sse.asm"
diff --git a/src/spdk/intel-ipsec-mb/no-aesni/aes192_cbc_dec_by4_sse_no_aesni.asm b/src/spdk/intel-ipsec-mb/no-aesni/aes192_cbc_dec_by4_sse_no_aesni.asm
new file mode 100644
index 000000000..59300fbe9
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/no-aesni/aes192_cbc_dec_by4_sse_no_aesni.asm
@@ -0,0 +1,30 @@
+;;
+;; Copyright (c) 2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%include "include/aesni_emu.inc"
+%define AES_CBC_DEC_192 aes_cbc_dec_192_sse_no_aesni
+%include "sse/aes192_cbc_dec_by4_sse.asm"
diff --git a/src/spdk/intel-ipsec-mb/no-aesni/aes192_cntr_by4_sse_no_aesni.asm b/src/spdk/intel-ipsec-mb/no-aesni/aes192_cntr_by4_sse_no_aesni.asm
new file mode 100644
index 000000000..a0d07339b
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/no-aesni/aes192_cntr_by4_sse_no_aesni.asm
@@ -0,0 +1,31 @@
+;;
+;; Copyright (c) 2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%include "include/aesni_emu.inc"
+%define AES_CNTR_192 aes_cntr_192_sse_no_aesni
+%define AES_CNTR_BIT_192 aes_cntr_bit_192_sse_no_aesni
+%include "sse/aes192_cntr_by4_sse.asm"
diff --git a/src/spdk/intel-ipsec-mb/no-aesni/aes256_cbc_dec_by4_sse_no_aesni.asm b/src/spdk/intel-ipsec-mb/no-aesni/aes256_cbc_dec_by4_sse_no_aesni.asm
new file mode 100644
index 000000000..9f61da5f4
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/no-aesni/aes256_cbc_dec_by4_sse_no_aesni.asm
@@ -0,0 +1,30 @@
+;;
+;; Copyright (c) 2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%include "include/aesni_emu.inc"
+%define AES_CBC_DEC_256 aes_cbc_dec_256_sse_no_aesni
+%include "sse/aes256_cbc_dec_by4_sse.asm"
diff --git a/src/spdk/intel-ipsec-mb/no-aesni/aes256_cntr_by4_sse_no_aesni.asm b/src/spdk/intel-ipsec-mb/no-aesni/aes256_cntr_by4_sse_no_aesni.asm
new file mode 100644
index 000000000..a99faff80
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/no-aesni/aes256_cntr_by4_sse_no_aesni.asm
@@ -0,0 +1,31 @@
+;;
+;; Copyright (c) 2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%include "include/aesni_emu.inc"
+%define AES_CNTR_256 aes_cntr_256_sse_no_aesni
+%define AES_CNTR_BIT_256 aes_cntr_bit_256_sse_no_aesni
+%include "sse/aes256_cntr_by4_sse.asm"
diff --git a/src/spdk/intel-ipsec-mb/no-aesni/aes_cbc_enc_128_x4_no_aesni.asm b/src/spdk/intel-ipsec-mb/no-aesni/aes_cbc_enc_128_x4_no_aesni.asm
new file mode 100644
index 000000000..7ac65842a
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/no-aesni/aes_cbc_enc_128_x4_no_aesni.asm
@@ -0,0 +1,30 @@
+;;
+;; Copyright (c) 2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%include "include/aesni_emu.inc"
+%define AES_CBC_ENC_X4
+%include "sse/aes_cbc_enc_128_x4.asm"
diff --git a/src/spdk/intel-ipsec-mb/no-aesni/aes_cbc_enc_192_x4_no_aesni.asm b/src/spdk/intel-ipsec-mb/no-aesni/aes_cbc_enc_192_x4_no_aesni.asm
new file mode 100644
index 000000000..795af8d2b
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/no-aesni/aes_cbc_enc_192_x4_no_aesni.asm
@@ -0,0 +1,30 @@
+;;
+;; Copyright (c) 2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%include "include/aesni_emu.inc"
+%define AES_CBC_ENC_X4 aes_cbc_enc_192_x4_no_aesni
+%include "sse/aes_cbc_enc_192_x4.asm"
diff --git a/src/spdk/intel-ipsec-mb/no-aesni/aes_cbc_enc_256_x4_no_aesni.asm b/src/spdk/intel-ipsec-mb/no-aesni/aes_cbc_enc_256_x4_no_aesni.asm
new file mode 100644
index 000000000..31fd45670
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/no-aesni/aes_cbc_enc_256_x4_no_aesni.asm
@@ -0,0 +1,30 @@
+;;
+;; Copyright (c) 2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%include "include/aesni_emu.inc"
+%define AES_CBC_ENC_X4 aes_cbc_enc_256_x4_no_aesni
+%include "sse/aes_cbc_enc_256_x4.asm"
diff --git a/src/spdk/intel-ipsec-mb/no-aesni/aes_cfb_128_sse_no_aesni.asm b/src/spdk/intel-ipsec-mb/no-aesni/aes_cfb_128_sse_no_aesni.asm
new file mode 100644
index 000000000..7cd19cbb7
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/no-aesni/aes_cfb_128_sse_no_aesni.asm
@@ -0,0 +1,30 @@
+;;
+;; Copyright (c) 2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%include "include/aesni_emu.inc"
+%define AES_CFB_128_ONE aes_cfb_128_one_sse_no_aesni
+%include "sse/aes_cfb_128_sse.asm"
diff --git a/src/spdk/intel-ipsec-mb/no-aesni/aes_ecb_by4_sse_no_aesni.asm b/src/spdk/intel-ipsec-mb/no-aesni/aes_ecb_by4_sse_no_aesni.asm
new file mode 100644
index 000000000..56f8db502
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/no-aesni/aes_ecb_by4_sse_no_aesni.asm
@@ -0,0 +1,35 @@
+;;
+;; Copyright (c) 2019, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%include "include/aesni_emu.inc"
+%define AES_ECB_ENC_128 aes_ecb_enc_128_sse_no_aesni
+%define AES_ECB_ENC_192 aes_ecb_enc_192_sse_no_aesni
+%define AES_ECB_ENC_256 aes_ecb_enc_256_sse_no_aesni
+%define AES_ECB_DEC_128 aes_ecb_dec_128_sse_no_aesni
+%define AES_ECB_DEC_192 aes_ecb_dec_192_sse_no_aesni
+%define AES_ECB_DEC_256 aes_ecb_dec_256_sse_no_aesni
+%include "sse/aes_ecb_by4_sse.asm"
diff --git a/src/spdk/intel-ipsec-mb/no-aesni/aes_xcbc_mac_128_x4_no_aesni.asm b/src/spdk/intel-ipsec-mb/no-aesni/aes_xcbc_mac_128_x4_no_aesni.asm
new file mode 100644
index 000000000..0450e58e8
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/no-aesni/aes_xcbc_mac_128_x4_no_aesni.asm
@@ -0,0 +1,30 @@
+;;
+;; Copyright (c) 2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%include "include/aesni_emu.inc"
+%define AES_XCBC_X4 aes_xcbc_mac_128_x4_no_aesni
+%include "sse/aes_xcbc_mac_128_x4.asm"
diff --git a/src/spdk/intel-ipsec-mb/no-aesni/aesni_emu.c b/src/spdk/intel-ipsec-mb/no-aesni/aesni_emu.c
new file mode 100644
index 000000000..908f7bfd0
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/no-aesni/aesni_emu.c
@@ -0,0 +1,375 @@
+/*******************************************************************************
+ Copyright (c) 2018, Intel Corporation
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*******************************************************************************/
+
+/* ========================================================================== */
+/* AESNI emulation API and helper functions */
+/* ========================================================================== */
+
+#include "intel-ipsec-mb.h"
+#include "aesni_emu.h"
+#include "include/constant_lookup.h"
+
+typedef union {
+ uint32_t i;
+ uint8_t byte[4];
+} byte_split_t;
+
+static const uint8_t aes_sbox[16][16] = {
+ { 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5,
+ 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76 },
+ { 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0,
+ 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0 },
+ { 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc,
+ 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15 },
+ { 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a,
+ 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75 },
+ { 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0,
+ 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84 },
+ { 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b,
+ 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf },
+ { 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85,
+ 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8 },
+ { 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5,
+ 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2 },
+ { 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17,
+ 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73 },
+ { 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88,
+ 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb },
+ { 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c,
+ 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79 },
+ { 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9,
+ 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08 },
+ { 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6,
+ 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a },
+ { 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e,
+ 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e },
+ { 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94,
+ 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf },
+ { 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68,
+ 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16 }
+};
+
+static const uint8_t aes_isbox[16][16] = {
+ { 0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38,
+ 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb },
+ { 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87,
+ 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb },
+ { 0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d,
+ 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e },
+ { 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2,
+ 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25 },
+ { 0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16,
+ 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92 },
+ { 0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda,
+ 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84 },
+ { 0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a,
+ 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06 },
+ { 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02,
+ 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b },
+ { 0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea,
+ 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73 },
+ { 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85,
+ 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e },
+ { 0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89,
+ 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b },
+ { 0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20,
+ 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4 },
+ { 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31,
+ 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f },
+ { 0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d,
+ 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef },
+ { 0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0,
+ 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61 },
+ { 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26,
+ 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d }
+};
+
+/* ========================================================================== */
+/* Emulation API helper functions */
+/* ========================================================================== */
+
+static uint8_t aes_get_sbox(const uint32_t x)
+{
+#ifdef SAFE_LOOKUP
+ return lookup_8bit_sse(aes_sbox, (x & 0xFF), 256);
+#else
+ uint32_t i = (x>>4) & 0xF;
+ uint32_t j = x&0xF;
+
+ return aes_sbox[i][j];
+#endif
+}
+
+static uint8_t aes_get_isbox(const uint32_t x)
+{
+#ifdef SAFE_LOOKUP
+ return lookup_8bit_sse(aes_isbox, (x & 0xFF), 256);
+#else
+ uint32_t i = (x>>4) & 0xF;
+ uint32_t j = x&0xF;
+
+ return aes_isbox[i][j];
+#endif
+}
+
+static void xor_xmm(union xmm_reg *d,
+ const union xmm_reg *s1,
+ const union xmm_reg *s2)
+{
+ uint32_t i;
+
+ for (i = 0; i < MAX_QWORDS_PER_XMM; i++)
+ d->qword[i] = s1->qword[i] ^ s2->qword[i];
+}
+
+static uint32_t rot(const uint32_t x)
+{
+ uint32_t y = (x>>8) | (x<<24);
+
+ return y;
+}
+
+static uint32_t sbox4(const uint32_t x)
+{
+ uint32_t i;
+ byte_split_t b, o;
+
+ b.i = x;
+
+ for (i = 0; i < 4; i++)
+ o.byte[i] = aes_get_sbox(b.byte[i]);
+
+ return o.i;
+}
+
+static void substitute_bytes(union xmm_reg *dst, const union xmm_reg *src)
+{
+ uint32_t i;
+
+ for (i = 0; i < MAX_BYTES_PER_XMM; i++)
+ dst->byte[i] = aes_get_sbox(src->byte[i]);
+}
+
+static void inverse_substitute_bytes(union xmm_reg *dst,
+ const union xmm_reg *src)
+{
+ uint32_t i;
+
+ for (i = 0; i < MAX_BYTES_PER_XMM; i++)
+ dst->byte[i] = aes_get_isbox(src->byte[i]);
+}
+
+static uint8_t gfmul(const uint8_t x, const uint8_t y)
+{
+ uint32_t i;
+ uint8_t multiplier = y;
+ uint8_t out = 0;
+
+ for (i = 0; i < 7; i++) {
+ if (i >= 1) {
+ /* GFMUL by 2: the "xtime" operation from FIPS-197 */
+ uint8_t t = multiplier << 1; /* lop off the high bit */
+
+ if (multiplier >> 7) /* look at the old high bit */
+ multiplier = t ^ 0x1B; /* polynomial division */
+ else
+ multiplier = t;
+ }
+ if ((x >> i) & 1)
+ out = out ^ multiplier;
+ }
+
+ return out;
+}
+
+static void mix_columns(union xmm_reg *dst, const union xmm_reg *src)
+{
+ uint32_t c;
+
+ for (c = 0; c < MAX_DWORDS_PER_XMM; c++) {
+ uint8_t s0c = src->byte[c*4+0];
+ uint8_t s1c = src->byte[c*4+1];
+ uint8_t s2c = src->byte[c*4+2];
+ uint8_t s3c = src->byte[c*4+3];
+
+ dst->byte[c*4+0] = gfmul(2, s0c) ^ gfmul(3, s1c) ^ s2c ^ s3c;
+ dst->byte[c*4+1] = s0c ^ gfmul(2, s1c) ^ gfmul(3, s2c) ^ s3c;
+ dst->byte[c*4+2] = s0c ^ s1c ^ gfmul(2, s2c) ^ gfmul(3, s3c);
+ dst->byte[c*4+3] = gfmul(3, s0c) ^ s1c ^ s2c ^ gfmul(2, s3c);
+ }
+}
+
+static void inverse_mix_columns(union xmm_reg *dst,
+ const union xmm_reg *src)
+{
+ uint32_t c;
+
+ for (c = 0; c < MAX_DWORDS_PER_XMM; c++) {
+ uint8_t s0c = src->byte[c*4+0];
+ uint8_t s1c = src->byte[c*4+1];
+ uint8_t s2c = src->byte[c*4+2];
+ uint8_t s3c = src->byte[c*4+3];
+
+ dst->byte[c*4+0] = gfmul(0xe, s0c) ^ gfmul(0xb, s1c) ^
+ gfmul(0xd, s2c) ^ gfmul(0x9, s3c);
+ dst->byte[c*4+1] = gfmul(0x9, s0c) ^ gfmul(0xe, s1c) ^
+ gfmul(0xb, s2c) ^ gfmul(0xd, s3c);
+ dst->byte[c*4+2] = gfmul(0xd, s0c) ^ gfmul(0x9, s1c) ^
+ gfmul(0xe, s2c) ^ gfmul(0xb, s3c);
+ dst->byte[c*4+3] = gfmul(0xb, s0c) ^ gfmul(0xd, s1c) ^
+ gfmul(0x9, s2c) ^ gfmul(0xe, s3c);
+ }
+}
+
+static uint32_t wrap_neg(const int x)
+{
+ /* make sure we stay in 0..3 */
+ return (x >= 0) ? x : (x + 4);
+}
+
+static uint32_t wrap_pos(const int x)
+{
+ /* make sure we stay in 0..3 */
+ return (x <= 3) ? x : (x - 4);
+}
+
+static void shift_rows(union xmm_reg *dst, const union xmm_reg *src)
+{
+ /* cyclic shift last 3 rows of the input */
+ int j;
+ union xmm_reg tmp = *src;
+
+ /* bytes to matrix:
+ 0 1 2 3 < columns (i)
+ ----------+
+ 0 4 8 C | 0 < rows (j)
+ 1 5 9 D | 1
+ 2 6 A E | 2
+ 3 7 B F | 3
+
+ THIS IS THE KEY: progressively move elements to HIGHER
+ numbered columnar values within a row.
+
+ Each dword is a column with the MSB as the bottom element
+ i is the column index, selects the dword
+ j is the row index,
+ we shift row zero by zero, row 1 by 1 and row 2 by 2 and
+ row 3 by 3, cyclically */
+ for (j = 0; j < MAX_DWORDS_PER_XMM; j++) {
+ int i;
+
+ for (i = 0; i < MAX_DWORDS_PER_XMM; i++)
+ dst->byte[i*4+j] = tmp.byte[wrap_pos(i+j)*4+j];
+ }
+
+}
+
+static void inverse_shift_rows(union xmm_reg *dst, const union xmm_reg *src)
+{
+ uint32_t j;
+ union xmm_reg tmp = *src;
+
+ /* THIS IS THE KEY: progressively move elements to LOWER
+ numbered columnar values within a row.
+
+ Each dword is a column with the MSB as the bottom element
+ i is the column index, selects the dword
+ j is the row index,
+ we shift row zero by zero, row 1 by 1 and row 2 by 2 and
+ row 3 by 3, cyclically */
+ for (j = 0; j < MAX_DWORDS_PER_XMM; j++) {
+ uint32_t i;
+
+ for (i = 0; i < MAX_DWORDS_PER_XMM; i++)
+ dst->byte[i*4+j] = tmp.byte[wrap_neg(i - j) * 4 + j];
+ }
+}
+
+/* ========================================================================== */
+/* AESNI emulation functions */
+/* ========================================================================== */
+
+IMB_DLL_LOCAL void emulate_AESKEYGENASSIST(union xmm_reg *dst,
+ const union xmm_reg *src,
+ const uint32_t imm8)
+{
+ union xmm_reg tmp = *src;
+ uint32_t rcon = (imm8 & 0xFF);
+
+ dst->dword[3] = rot(sbox4(tmp.dword[3])) ^ rcon;
+ dst->dword[2] = sbox4(tmp.dword[3]);
+ dst->dword[1] = rot(sbox4(tmp.dword[1])) ^ rcon;
+ dst->dword[0] = sbox4(tmp.dword[1]);
+}
+
+IMB_DLL_LOCAL void emulate_AESENC(union xmm_reg *dst,
+ const union xmm_reg *src)
+{
+ union xmm_reg tmp = *dst;
+
+ shift_rows(&tmp, &tmp);
+ substitute_bytes(&tmp, &tmp);
+ mix_columns(&tmp, &tmp);
+ xor_xmm(dst, &tmp, src);
+}
+
+IMB_DLL_LOCAL void emulate_AESENCLAST(union xmm_reg *dst,
+ const union xmm_reg *src)
+{
+ union xmm_reg tmp = *dst;
+
+ shift_rows(&tmp, &tmp);
+ substitute_bytes(&tmp, &tmp);
+ xor_xmm(dst, &tmp, src);
+}
+
+IMB_DLL_LOCAL void emulate_AESDEC(union xmm_reg *dst,
+ const union xmm_reg *src)
+{
+ union xmm_reg tmp = *dst;
+
+ inverse_shift_rows(&tmp, &tmp);
+ inverse_substitute_bytes(&tmp, &tmp);
+ inverse_mix_columns(&tmp, &tmp);
+ xor_xmm(dst, &tmp, src);
+}
+
+IMB_DLL_LOCAL void emulate_AESDECLAST(union xmm_reg *dst,
+ const union xmm_reg *src)
+{
+ union xmm_reg tmp = *dst;
+
+ inverse_shift_rows(&tmp, &tmp);
+ inverse_substitute_bytes(&tmp, &tmp);
+ xor_xmm(dst, &tmp, src);
+}
+
+IMB_DLL_LOCAL void emulate_AESIMC(union xmm_reg *dst,
+ const union xmm_reg *src)
+{
+ inverse_mix_columns(dst, src);
+}
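
gfmul() above performs GF(2^8) multiplication out of repeated xtime doublings; its 7-iteration loop is sufficient because the only multipliers passed in by (inverse) MixColumns are 2, 3, 9, 0xb, 0xd and 0xe. The standalone sketch below reproduces the same shift-and-reduce arithmetic and checks it against the FIPS-197 worked example {57}*{13} = {fe}; it is independent of the static helpers above and not part of the library sources.

    #include <stdint.h>
    #include <stdio.h>
    #include <assert.h>

    /* xtime: multiply by x and reduce modulo x^8 + x^4 + x^3 + x + 1 */
    static uint8_t xtime(uint8_t v)
    {
            uint8_t t = (uint8_t)(v << 1);

            return (v & 0x80) ? (uint8_t)(t ^ 0x1b) : t;
    }

    /* schoolbook GF(2^8) multiply built from xtime doublings */
    static uint8_t gf_mul(uint8_t x, uint8_t y)
    {
            uint8_t out = 0;
            int i;

            for (i = 0; i < 8; i++) {
                    if ((x >> i) & 1)
                            out ^= y;
                    y = xtime(y);
            }
            return out;
    }

    int main(void)
    {
            assert(gf_mul(0x57, 0x13) == 0xfe); /* FIPS-197 worked example */
            assert(gf_mul(0x02, 0x87) == 0x15); /* one doubling with reduction */
            printf("GF(2^8) checks passed\n");
            return 0;
    }
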
diff --git a/src/spdk/intel-ipsec-mb/no-aesni/gcm128_sse_no_aesni.asm b/src/spdk/intel-ipsec-mb/no-aesni/gcm128_sse_no_aesni.asm
new file mode 100644
index 000000000..a77d88b89
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/no-aesni/gcm128_sse_no_aesni.asm
@@ -0,0 +1,33 @@
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; Copyright(c) 2018 Intel Corporation All rights reserved.
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions
+; are met:
+; * Redistributions of source code must retain the above copyright
+; notice, this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright
+; notice, this list of conditions and the following disclaimer in
+; the documentation and/or other materials provided with the
+; distribution.
+; * Neither the name of Intel Corporation nor the names of its
+; contributors may be used to endorse or promote products derived
+; from this software without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+%include "include/aesni_emu.inc"
+%define NO_AESNI
+%define GCM128_MODE 1
+%include "sse/gcm_sse.asm"
diff --git a/src/spdk/intel-ipsec-mb/no-aesni/gcm192_sse_no_aesni.asm b/src/spdk/intel-ipsec-mb/no-aesni/gcm192_sse_no_aesni.asm
new file mode 100644
index 000000000..f8fa79849
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/no-aesni/gcm192_sse_no_aesni.asm
@@ -0,0 +1,33 @@
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; Copyright(c) 2018, Intel Corporation All rights reserved.
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions
+; are met:
+; * Redistributions of source code must retain the above copyright
+; notice, this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright
+; notice, this list of conditions and the following disclaimer in
+; the documentation and/or other materials provided with the
+; distribution.
+; * Neither the name of Intel Corporation nor the names of its
+; contributors may be used to endorse or promote products derived
+; from this software without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+%include "include/aesni_emu.inc"
+%define NO_AESNI
+%define GCM192_MODE 1
+%include "sse/gcm_sse.asm"
diff --git a/src/spdk/intel-ipsec-mb/no-aesni/gcm256_sse_no_aesni.asm b/src/spdk/intel-ipsec-mb/no-aesni/gcm256_sse_no_aesni.asm
new file mode 100644
index 000000000..18105c656
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/no-aesni/gcm256_sse_no_aesni.asm
@@ -0,0 +1,33 @@
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; Copyright(c) 2018 Intel Corporation All rights reserved.
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions
+; are met:
+; * Redistributions of source code must retain the above copyright
+; notice, this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright
+; notice, this list of conditions and the following disclaimer in
+; the documentation and/or other materials provided with the
+; distribution.
+; * Neither the name of Intel Corporation nor the names of its
+; contributors may be used to endorse or promote products derived
+; from this software without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+%include "include/aesni_emu.inc"
+%define NO_AESNI
+%define GCM256_MODE 1
+%include "sse/gcm_sse.asm"
diff --git a/src/spdk/intel-ipsec-mb/no-aesni/mb_mgr_aes192_flush_sse_no_aesni.asm b/src/spdk/intel-ipsec-mb/no-aesni/mb_mgr_aes192_flush_sse_no_aesni.asm
new file mode 100644
index 000000000..27216e222
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/no-aesni/mb_mgr_aes192_flush_sse_no_aesni.asm
@@ -0,0 +1,30 @@
+;;
+;; Copyright (c) 2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%define AES_CBC_ENC_X4 aes_cbc_enc_192_x4_no_aesni
+%define FLUSH_JOB_AES_ENC flush_job_aes192_enc_sse_no_aesni
+%include "sse/mb_mgr_aes_flush_sse.asm"
diff --git a/src/spdk/intel-ipsec-mb/no-aesni/mb_mgr_aes192_submit_sse_no_aesni.asm b/src/spdk/intel-ipsec-mb/no-aesni/mb_mgr_aes192_submit_sse_no_aesni.asm
new file mode 100644
index 000000000..a7774a96b
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/no-aesni/mb_mgr_aes192_submit_sse_no_aesni.asm
@@ -0,0 +1,30 @@
+;;
+;; Copyright (c) 2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%define AES_CBC_ENC_X4 aes_cbc_enc_192_x4_no_aesni
+%define SUBMIT_JOB_AES_ENC submit_job_aes192_enc_sse_no_aesni
+%include "sse/mb_mgr_aes_submit_sse.asm"
diff --git a/src/spdk/intel-ipsec-mb/no-aesni/mb_mgr_aes256_flush_sse_no_aesni.asm b/src/spdk/intel-ipsec-mb/no-aesni/mb_mgr_aes256_flush_sse_no_aesni.asm
new file mode 100644
index 000000000..942b11c51
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/no-aesni/mb_mgr_aes256_flush_sse_no_aesni.asm
@@ -0,0 +1,30 @@
+;;
+;; Copyright (c) 2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%define AES_CBC_ENC_X4 aes_cbc_enc_256_x4_no_aesni
+%define FLUSH_JOB_AES_ENC flush_job_aes256_enc_sse_no_aesni
+%include "sse/mb_mgr_aes_flush_sse.asm"
diff --git a/src/spdk/intel-ipsec-mb/no-aesni/mb_mgr_aes256_submit_sse_no_aesni.asm b/src/spdk/intel-ipsec-mb/no-aesni/mb_mgr_aes256_submit_sse_no_aesni.asm
new file mode 100644
index 000000000..359e4b46d
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/no-aesni/mb_mgr_aes256_submit_sse_no_aesni.asm
@@ -0,0 +1,30 @@
+;;
+;; Copyright (c) 2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%define AES_CBC_ENC_X4 aes_cbc_enc_256_x4_no_aesni
+%define SUBMIT_JOB_AES_ENC submit_job_aes256_enc_sse_no_aesni
+%include "sse/mb_mgr_aes_submit_sse.asm"
diff --git a/src/spdk/intel-ipsec-mb/no-aesni/mb_mgr_aes_ccm_auth_submit_flush_sse_no_aesni.asm b/src/spdk/intel-ipsec-mb/no-aesni/mb_mgr_aes_ccm_auth_submit_flush_sse_no_aesni.asm
new file mode 100644
index 000000000..0c00ee430
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/no-aesni/mb_mgr_aes_ccm_auth_submit_flush_sse_no_aesni.asm
@@ -0,0 +1,32 @@
+;;
+;; Copyright (c) 2019, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%include "include/aesni_emu.inc"
+%define AES128_CBC_MAC aes128_cbc_mac_x4_no_aesni
+%define SUBMIT_JOB_AES_CCM_AUTH submit_job_aes_ccm_auth_sse_no_aesni
+%define FLUSH_JOB_AES_CCM_AUTH flush_job_aes_ccm_auth_sse_no_aesni
+%include "sse/mb_mgr_aes_ccm_auth_submit_flush_sse.asm"
diff --git a/src/spdk/intel-ipsec-mb/no-aesni/mb_mgr_aes_cmac_submit_flush_sse_no_aesni.asm b/src/spdk/intel-ipsec-mb/no-aesni/mb_mgr_aes_cmac_submit_flush_sse_no_aesni.asm
new file mode 100644
index 000000000..4b59ded8a
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/no-aesni/mb_mgr_aes_cmac_submit_flush_sse_no_aesni.asm
@@ -0,0 +1,31 @@
+;;
+;; Copyright (c) 2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%define AES128_CBC_MAC aes128_cbc_mac_x4_no_aesni
+%define SUBMIT_JOB_AES_CMAC_AUTH submit_job_aes_cmac_auth_sse_no_aesni
+%define FLUSH_JOB_AES_CMAC_AUTH flush_job_aes_cmac_auth_sse_no_aesni
+%include "sse/mb_mgr_aes_cmac_submit_flush_sse.asm"
diff --git a/src/spdk/intel-ipsec-mb/no-aesni/mb_mgr_aes_flush_sse_no_aesni.asm b/src/spdk/intel-ipsec-mb/no-aesni/mb_mgr_aes_flush_sse_no_aesni.asm
new file mode 100644
index 000000000..fff86a321
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/no-aesni/mb_mgr_aes_flush_sse_no_aesni.asm
@@ -0,0 +1,30 @@
+;;
+;; Copyright (c) 2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%define AES_CBC_ENC_X4 aes_cbc_enc_128_x4_no_aesni
+%define FLUSH_JOB_AES_ENC flush_job_aes128_enc_sse_no_aesni
+%include "sse/mb_mgr_aes_flush_sse.asm"
diff --git a/src/spdk/intel-ipsec-mb/no-aesni/mb_mgr_aes_submit_sse_no_aesni.asm b/src/spdk/intel-ipsec-mb/no-aesni/mb_mgr_aes_submit_sse_no_aesni.asm
new file mode 100644
index 000000000..de460549f
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/no-aesni/mb_mgr_aes_submit_sse_no_aesni.asm
@@ -0,0 +1,30 @@
+;;
+;; Copyright (c) 2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%define AES_CBC_ENC_X4 aes_cbc_enc_128_x4_no_aesni
+%define SUBMIT_JOB_AES_ENC submit_job_aes128_enc_sse_no_aesni
+%include "sse/mb_mgr_aes_submit_sse.asm"
diff --git a/src/spdk/intel-ipsec-mb/no-aesni/mb_mgr_aes_xcbc_flush_sse_no_aesni.asm b/src/spdk/intel-ipsec-mb/no-aesni/mb_mgr_aes_xcbc_flush_sse_no_aesni.asm
new file mode 100644
index 000000000..02748d811
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/no-aesni/mb_mgr_aes_xcbc_flush_sse_no_aesni.asm
@@ -0,0 +1,30 @@
+;;
+;; Copyright (c) 2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%define AES_XCBC_X4 aes_xcbc_mac_128_x4_no_aesni
+%define FLUSH_JOB_AES_XCBC flush_job_aes_xcbc_sse_no_aesni
+%include "sse/mb_mgr_aes_xcbc_flush_sse.asm"
diff --git a/src/spdk/intel-ipsec-mb/no-aesni/mb_mgr_aes_xcbc_submit_sse_no_aesni.asm b/src/spdk/intel-ipsec-mb/no-aesni/mb_mgr_aes_xcbc_submit_sse_no_aesni.asm
new file mode 100644
index 000000000..cc7c3f4f8
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/no-aesni/mb_mgr_aes_xcbc_submit_sse_no_aesni.asm
@@ -0,0 +1,30 @@
+;;
+;; Copyright (c) 2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%define AES_XCBC_X4 aes_xcbc_mac_128_x4_no_aesni
+%define SUBMIT_JOB_AES_XCBC submit_job_aes_xcbc_sse_no_aesni
+%include "sse/mb_mgr_aes_xcbc_submit_sse.asm"
diff --git a/src/spdk/intel-ipsec-mb/no-aesni/mb_mgr_sse_no_aesni.c b/src/spdk/intel-ipsec-mb/no-aesni/mb_mgr_sse_no_aesni.c
new file mode 100644
index 000000000..947dfe92c
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/no-aesni/mb_mgr_sse_no_aesni.c
@@ -0,0 +1,734 @@
+/*******************************************************************************
+ Copyright (c) 2018, Intel Corporation
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*******************************************************************************/
+
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#define CLEAR_SCRATCH_SIMD_REGS clear_scratch_xmms_sse
+
+#include "intel-ipsec-mb.h"
+#include "include/kasumi_internal.h"
+#include "include/zuc_internal.h"
+#include "include/snow3g.h"
+
+#include "save_xmms.h"
+#include "asm.h"
+#include "des.h"
+#include "gcm.h"
+#include "noaesni.h"
+
+/* ====================================================================== */
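+/*
+ * This file instantiates the architecture-independent multi-buffer
+ * manager code (mb_mgr_code.h, included at the bottom) for SSE without
+ * AES-NI by mapping its expected symbols onto the *_sse_no_aesni
+ * implementations declared below.
+ */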
+
+JOB_AES_HMAC *submit_job_aes128_enc_sse_no_aesni(MB_MGR_AES_OOO *state,
+ JOB_AES_HMAC *job);
+JOB_AES_HMAC *flush_job_aes128_enc_sse_no_aesni(MB_MGR_AES_OOO *state);
+
+JOB_AES_HMAC *submit_job_aes192_enc_sse_no_aesni(MB_MGR_AES_OOO *state,
+ JOB_AES_HMAC *job);
+JOB_AES_HMAC *flush_job_aes192_enc_sse_no_aesni(MB_MGR_AES_OOO *state);
+
+JOB_AES_HMAC *submit_job_aes256_enc_sse_no_aesni(MB_MGR_AES_OOO *state,
+ JOB_AES_HMAC *job);
+JOB_AES_HMAC *flush_job_aes256_enc_sse_no_aesni(MB_MGR_AES_OOO *state);
+
+JOB_AES_HMAC *submit_job_hmac_sse(MB_MGR_HMAC_SHA_1_OOO *state,
+ JOB_AES_HMAC *job);
+JOB_AES_HMAC *flush_job_hmac_sse(MB_MGR_HMAC_SHA_1_OOO *state);
+
+JOB_AES_HMAC *submit_job_hmac_sha_224_sse(MB_MGR_HMAC_SHA_256_OOO *state,
+ JOB_AES_HMAC *job);
+JOB_AES_HMAC *flush_job_hmac_sha_224_sse(MB_MGR_HMAC_SHA_256_OOO *state);
+
+JOB_AES_HMAC *submit_job_hmac_sha_256_sse(MB_MGR_HMAC_SHA_256_OOO *state,
+ JOB_AES_HMAC *job);
+JOB_AES_HMAC *flush_job_hmac_sha_256_sse(MB_MGR_HMAC_SHA_256_OOO *state);
+
+JOB_AES_HMAC *submit_job_hmac_sha_384_sse(MB_MGR_HMAC_SHA_512_OOO *state,
+ JOB_AES_HMAC *job);
+JOB_AES_HMAC *flush_job_hmac_sha_384_sse(MB_MGR_HMAC_SHA_512_OOO *state);
+
+JOB_AES_HMAC *submit_job_hmac_sha_512_sse(MB_MGR_HMAC_SHA_512_OOO *state,
+ JOB_AES_HMAC *job);
+JOB_AES_HMAC *flush_job_hmac_sha_512_sse(MB_MGR_HMAC_SHA_512_OOO *state);
+
+JOB_AES_HMAC *submit_job_hmac_md5_sse(MB_MGR_HMAC_MD5_OOO *state,
+ JOB_AES_HMAC *job);
+JOB_AES_HMAC *flush_job_hmac_md5_sse(MB_MGR_HMAC_MD5_OOO *state);
+
+JOB_AES_HMAC *submit_job_aes_xcbc_sse_no_aesni(MB_MGR_AES_XCBC_OOO *state,
+ JOB_AES_HMAC *job);
+JOB_AES_HMAC *flush_job_aes_xcbc_sse_no_aesni(MB_MGR_AES_XCBC_OOO *state);
+
+JOB_AES_HMAC *submit_job_aes_cmac_auth_sse_no_aesni(MB_MGR_CMAC_OOO *state,
+ JOB_AES_HMAC *job);
+JOB_AES_HMAC *flush_job_aes_cmac_auth_sse_no_aesni(MB_MGR_CMAC_OOO *state);
+
+JOB_AES_HMAC *submit_job_aes_ccm_auth_sse_no_aesni(MB_MGR_CCM_OOO *state,
+ JOB_AES_HMAC *job);
+
+JOB_AES_HMAC *flush_job_aes_ccm_auth_sse_no_aesni(MB_MGR_CCM_OOO *state);
+
+JOB_AES_HMAC *submit_job_aes_cntr_sse_no_aesni(JOB_AES_HMAC *job);
+
+JOB_AES_HMAC *submit_job_aes_cntr_bit_sse_no_aesni(JOB_AES_HMAC *job);
+
+#define SAVE_XMMS save_xmms
+#define RESTORE_XMMS restore_xmms
+
+#define SUBMIT_JOB_AES128_ENC submit_job_aes128_enc_sse_no_aesni
+#define SUBMIT_JOB_AES128_DEC submit_job_aes128_dec_sse_no_aesni
+#define FLUSH_JOB_AES128_ENC flush_job_aes128_enc_sse_no_aesni
+#define SUBMIT_JOB_AES192_ENC submit_job_aes192_enc_sse_no_aesni
+#define SUBMIT_JOB_AES192_DEC submit_job_aes192_dec_sse_no_aesni
+#define FLUSH_JOB_AES192_ENC flush_job_aes192_enc_sse_no_aesni
+#define SUBMIT_JOB_AES256_ENC submit_job_aes256_enc_sse_no_aesni
+#define SUBMIT_JOB_AES256_DEC submit_job_aes256_dec_sse_no_aesni
+#define FLUSH_JOB_AES256_ENC flush_job_aes256_enc_sse_no_aesni
+#define SUBMIT_JOB_AES_ECB_128_ENC submit_job_aes_ecb_128_enc_sse_no_aesni
+#define SUBMIT_JOB_AES_ECB_128_DEC submit_job_aes_ecb_128_dec_sse_no_aesni
+#define SUBMIT_JOB_AES_ECB_192_ENC submit_job_aes_ecb_192_enc_sse_no_aesni
+#define SUBMIT_JOB_AES_ECB_192_DEC submit_job_aes_ecb_192_dec_sse_no_aesni
+#define SUBMIT_JOB_AES_ECB_256_ENC submit_job_aes_ecb_256_enc_sse_no_aesni
+#define SUBMIT_JOB_AES_ECB_256_DEC submit_job_aes_ecb_256_dec_sse_no_aesni
+#define SUBMIT_JOB_HMAC submit_job_hmac_sse
+#define FLUSH_JOB_HMAC flush_job_hmac_sse
+#define SUBMIT_JOB_HMAC_NI submit_job_hmac_sse
+#define FLUSH_JOB_HMAC_NI flush_job_hmac_sse
+#define SUBMIT_JOB_HMAC_SHA_224 submit_job_hmac_sha_224_sse
+#define FLUSH_JOB_HMAC_SHA_224 flush_job_hmac_sha_224_sse
+#define SUBMIT_JOB_HMAC_SHA_224_NI submit_job_hmac_sha_224_sse
+#define FLUSH_JOB_HMAC_SHA_224_NI flush_job_hmac_sha_224_sse
+#define SUBMIT_JOB_HMAC_SHA_256 submit_job_hmac_sha_256_sse
+#define FLUSH_JOB_HMAC_SHA_256 flush_job_hmac_sha_256_sse
+#define SUBMIT_JOB_HMAC_SHA_256_NI submit_job_hmac_sha_256_sse
+#define FLUSH_JOB_HMAC_SHA_256_NI flush_job_hmac_sha_256_sse
+#define SUBMIT_JOB_HMAC_SHA_384 submit_job_hmac_sha_384_sse
+#define FLUSH_JOB_HMAC_SHA_384 flush_job_hmac_sha_384_sse
+#define SUBMIT_JOB_HMAC_SHA_512 submit_job_hmac_sha_512_sse
+#define FLUSH_JOB_HMAC_SHA_512 flush_job_hmac_sha_512_sse
+#define SUBMIT_JOB_HMAC_MD5 submit_job_hmac_md5_sse
+#define FLUSH_JOB_HMAC_MD5 flush_job_hmac_md5_sse
+#define SUBMIT_JOB_AES_XCBC submit_job_aes_xcbc_sse_no_aesni
+#define FLUSH_JOB_AES_XCBC flush_job_aes_xcbc_sse_no_aesni
+
+#define SUBMIT_JOB_AES_CNTR submit_job_aes_cntr_sse_no_aesni
+#define SUBMIT_JOB_AES_CNTR_BIT submit_job_aes_cntr_bit_sse_no_aesni
+
+#define AES_CBC_DEC_128 aes_cbc_dec_128_sse_no_aesni
+#define AES_CBC_DEC_192 aes_cbc_dec_192_sse_no_aesni
+#define AES_CBC_DEC_256 aes_cbc_dec_256_sse_no_aesni
+
+#define AES_CNTR_128 aes_cntr_128_sse_no_aesni
+#define AES_CNTR_192 aes_cntr_192_sse_no_aesni
+#define AES_CNTR_256 aes_cntr_256_sse_no_aesni
+
+#define AES_CNTR_CCM_128 aes_cntr_ccm_128_sse_no_aesni
+
+#define AES_ECB_ENC_128 aes_ecb_enc_128_sse_no_aesni
+#define AES_ECB_ENC_192 aes_ecb_enc_192_sse_no_aesni
+#define AES_ECB_ENC_256 aes_ecb_enc_256_sse_no_aesni
+#define AES_ECB_DEC_128 aes_ecb_dec_128_sse_no_aesni
+#define AES_ECB_DEC_192 aes_ecb_dec_192_sse_no_aesni
+#define AES_ECB_DEC_256 aes_ecb_dec_256_sse_no_aesni
+
+#define SUBMIT_JOB_PON_ENC submit_job_pon_enc_sse_no_aesni
+#define SUBMIT_JOB_PON_DEC submit_job_pon_dec_sse_no_aesni
+#define SUBMIT_JOB_PON_ENC_NO_CTR submit_job_pon_enc_no_ctr_sse_no_aesni
+#define SUBMIT_JOB_PON_DEC_NO_CTR submit_job_pon_dec_no_ctr_sse_no_aesni
+
+#ifndef NO_GCM
+#define AES_GCM_DEC_128 aes_gcm_dec_128_sse_no_aesni
+#define AES_GCM_ENC_128 aes_gcm_enc_128_sse_no_aesni
+#define AES_GCM_DEC_192 aes_gcm_dec_192_sse_no_aesni
+#define AES_GCM_ENC_192 aes_gcm_enc_192_sse_no_aesni
+#define AES_GCM_DEC_256 aes_gcm_dec_256_sse_no_aesni
+#define AES_GCM_ENC_256 aes_gcm_enc_256_sse_no_aesni
+
+#define SUBMIT_JOB_AES_GCM_DEC submit_job_aes_gcm_dec_sse_no_aesni
+#define FLUSH_JOB_AES_GCM_DEC flush_job_aes_gcm_dec_sse_no_aesni
+#define SUBMIT_JOB_AES_GCM_ENC submit_job_aes_gcm_enc_sse_no_aesni
+#define FLUSH_JOB_AES_GCM_ENC flush_job_aes_gcm_enc_sse_no_aesni
+#endif /* NO_GCM */
+
+/* ====================================================================== */
+
+#define SUBMIT_JOB submit_job_sse_no_aesni
+#define FLUSH_JOB flush_job_sse_no_aesni
+#define SUBMIT_JOB_NOCHECK submit_job_nocheck_sse_no_aesni
+#define GET_NEXT_JOB get_next_job_sse_no_aesni
+#define GET_COMPLETED_JOB get_completed_job_sse_no_aesni
+
+#define SUBMIT_JOB_AES128_DEC submit_job_aes128_dec_sse_no_aesni
+#define SUBMIT_JOB_AES192_DEC submit_job_aes192_dec_sse_no_aesni
+#define SUBMIT_JOB_AES256_DEC submit_job_aes256_dec_sse_no_aesni
+#define QUEUE_SIZE queue_size_sse_no_aesni
+
+/* ====================================================================== */
+
+#define SUBMIT_JOB_AES_ENC SUBMIT_JOB_AES_ENC_SSE
+#define FLUSH_JOB_AES_ENC FLUSH_JOB_AES_ENC_SSE
+#define SUBMIT_JOB_AES_DEC SUBMIT_JOB_AES_DEC_SSE
+#define SUBMIT_JOB_HASH SUBMIT_JOB_HASH_SSE
+#define FLUSH_JOB_HASH FLUSH_JOB_HASH_SSE
+
+/* ====================================================================== */
+
+#define AES_CFB_128_ONE aes_cfb_128_one_sse_no_aesni
+
+void aes128_cbc_mac_x4_no_aesni(AES_ARGS *args, uint64_t len);
+
+#define AES128_CBC_MAC aes128_cbc_mac_x4_no_aesni
+
+#define FLUSH_JOB_AES_CCM_AUTH flush_job_aes_ccm_auth_sse_no_aesni
+#define SUBMIT_JOB_AES_CCM_AUTH submit_job_aes_ccm_auth_sse_no_aesni
+
+#define FLUSH_JOB_AES_CMAC_AUTH flush_job_aes_cmac_auth_sse_no_aesni
+#define SUBMIT_JOB_AES_CMAC_AUTH submit_job_aes_cmac_auth_sse_no_aesni
+
+
+/* ====================================================================== */
+
+/*
+ * GCM submit / flush API for SSE arch without AESNI
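+ *
+ * These wrappers process the whole job synchronously with the
+ * single-buffer GCM code, so there is no out-of-order state to drain
+ * and the flush functions simply return NULL.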
+ */
+#ifndef NO_GCM
+static JOB_AES_HMAC *
+submit_job_aes_gcm_dec_sse_no_aesni(MB_MGR *state, JOB_AES_HMAC *job)
+{
+ DECLARE_ALIGNED(struct gcm_context_data ctx, 16);
+ (void) state;
+
+ if (16 == job->aes_key_len_in_bytes)
+ AES_GCM_DEC_128(job->aes_dec_key_expanded, &ctx, job->dst,
+ job->src +
+ job->cipher_start_src_offset_in_bytes,
+ job->msg_len_to_cipher_in_bytes,
+ job->iv,
+ job->u.GCM.aad, job->u.GCM.aad_len_in_bytes,
+ job->auth_tag_output,
+ job->auth_tag_output_len_in_bytes);
+ else if (24 == job->aes_key_len_in_bytes)
+ AES_GCM_DEC_192(job->aes_dec_key_expanded, &ctx, job->dst,
+ job->src +
+ job->cipher_start_src_offset_in_bytes,
+ job->msg_len_to_cipher_in_bytes,
+ job->iv,
+ job->u.GCM.aad, job->u.GCM.aad_len_in_bytes,
+ job->auth_tag_output,
+ job->auth_tag_output_len_in_bytes);
+ else /* assume 32 bytes */
+ AES_GCM_DEC_256(job->aes_dec_key_expanded, &ctx, job->dst,
+ job->src +
+ job->cipher_start_src_offset_in_bytes,
+ job->msg_len_to_cipher_in_bytes,
+ job->iv,
+ job->u.GCM.aad, job->u.GCM.aad_len_in_bytes,
+ job->auth_tag_output,
+ job->auth_tag_output_len_in_bytes);
+
+ job->status = STS_COMPLETED;
+ return job;
+}
+
+static JOB_AES_HMAC *
+flush_job_aes_gcm_dec_sse_no_aesni(MB_MGR *state, JOB_AES_HMAC *job)
+{
+ (void) state;
+ (void) job;
+ return NULL;
+}
+
+static JOB_AES_HMAC *
+submit_job_aes_gcm_enc_sse_no_aesni(MB_MGR *state, JOB_AES_HMAC *job)
+{
+ DECLARE_ALIGNED(struct gcm_context_data ctx, 16);
+ (void) state;
+
+ if (16 == job->aes_key_len_in_bytes)
+ AES_GCM_ENC_128(job->aes_enc_key_expanded, &ctx, job->dst,
+ job->src +
+ job->cipher_start_src_offset_in_bytes,
+ job->msg_len_to_cipher_in_bytes, job->iv,
+ job->u.GCM.aad, job->u.GCM.aad_len_in_bytes,
+ job->auth_tag_output,
+ job->auth_tag_output_len_in_bytes);
+ else if (24 == job->aes_key_len_in_bytes)
+ AES_GCM_ENC_192(job->aes_enc_key_expanded, &ctx, job->dst,
+ job->src +
+ job->cipher_start_src_offset_in_bytes,
+ job->msg_len_to_cipher_in_bytes, job->iv,
+ job->u.GCM.aad, job->u.GCM.aad_len_in_bytes,
+ job->auth_tag_output,
+ job->auth_tag_output_len_in_bytes);
+ else /* assume 32 bytes */
+ AES_GCM_ENC_256(job->aes_enc_key_expanded, &ctx, job->dst,
+ job->src +
+ job->cipher_start_src_offset_in_bytes,
+ job->msg_len_to_cipher_in_bytes, job->iv,
+ job->u.GCM.aad, job->u.GCM.aad_len_in_bytes,
+ job->auth_tag_output,
+ job->auth_tag_output_len_in_bytes);
+
+ job->status = STS_COMPLETED;
+ return job;
+}
+
+static JOB_AES_HMAC *
+flush_job_aes_gcm_enc_sse_no_aesni(MB_MGR *state, JOB_AES_HMAC *job)
+{
+ (void) state;
+ (void) job;
+ return NULL;
+}
+#endif /* NO_GCM */
+
+IMB_DLL_LOCAL JOB_AES_HMAC *
+submit_job_aes_cntr_sse_no_aesni(JOB_AES_HMAC *job)
+{
+ if (16 == job->aes_key_len_in_bytes)
+ AES_CNTR_128(job->src + job->cipher_start_src_offset_in_bytes,
+ job->iv,
+ job->aes_enc_key_expanded,
+ job->dst,
+ job->msg_len_to_cipher_in_bytes,
+ job->iv_len_in_bytes);
+ else if (24 == job->aes_key_len_in_bytes)
+ AES_CNTR_192(job->src + job->cipher_start_src_offset_in_bytes,
+ job->iv,
+ job->aes_enc_key_expanded,
+ job->dst,
+ job->msg_len_to_cipher_in_bytes,
+ job->iv_len_in_bytes);
+ else /* assume 32 bytes */
+ AES_CNTR_256(job->src + job->cipher_start_src_offset_in_bytes,
+ job->iv,
+ job->aes_enc_key_expanded,
+ job->dst,
+ job->msg_len_to_cipher_in_bytes,
+ job->iv_len_in_bytes);
+
+ job->status |= STS_COMPLETED_AES;
+ return job;
+}
+
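+/*
+ * Same as submit_job_aes_cntr_sse_no_aesni() above, but the cipher
+ * length is taken from job->msg_len_to_cipher_in_bits (length in bits
+ * rather than bytes).
+ */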
+IMB_DLL_LOCAL JOB_AES_HMAC *
+submit_job_aes_cntr_bit_sse_no_aesni(JOB_AES_HMAC *job)
+{
+ const uint64_t offset = job->cipher_start_src_offset_in_bytes;
+
+ if (16 == job->aes_key_len_in_bytes)
+ aes_cntr_bit_128_sse_no_aesni(job->src + offset,
+ job->iv,
+ job->aes_enc_key_expanded,
+ job->dst,
+ job->msg_len_to_cipher_in_bits,
+ job->iv_len_in_bytes);
+ else if (24 == job->aes_key_len_in_bytes)
+ aes_cntr_bit_192_sse_no_aesni(job->src + offset,
+ job->iv,
+ job->aes_enc_key_expanded,
+ job->dst,
+ job->msg_len_to_cipher_in_bits,
+ job->iv_len_in_bytes);
+ else /* assume 32 bytes */
+ aes_cntr_bit_256_sse_no_aesni(job->src + offset,
+ job->iv,
+ job->aes_enc_key_expanded,
+ job->dst,
+ job->msg_len_to_cipher_in_bits,
+ job->iv_len_in_bytes);
+
+ job->status |= STS_COMPLETED_AES;
+ return job;
+}
+
+/* ====================================================================== */
+
+void
+init_mb_mgr_sse_no_aesni(MB_MGR *state)
+{
+ unsigned int j;
+ uint8_t *p;
+ size_t size;
+
+ /* Init AES out-of-order fields */
+ memset(state->aes128_ooo.lens, 0xFF,
+ sizeof(state->aes128_ooo.lens));
+ memset(&state->aes128_ooo.lens[0], 0,
+ sizeof(state->aes128_ooo.lens[0]) * 4);
+ memset(state->aes128_ooo.job_in_lane, 0,
+ sizeof(state->aes128_ooo.job_in_lane));
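+	/*
+	 * unused_lanes packs the free lane ids into a single word that the
+	 * submit/flush code pops from the low end; the 0xFF part marks the
+	 * end of the free-lane list.
+	 */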
+ state->aes128_ooo.unused_lanes = 0xFF03020100;
+ state->aes128_ooo.num_lanes_inuse = 0;
+
+
+ memset(state->aes192_ooo.lens, 0xFF,
+ sizeof(state->aes192_ooo.lens));
+ memset(&state->aes192_ooo.lens[0], 0,
+ sizeof(state->aes192_ooo.lens[0]) * 4);
+ memset(state->aes192_ooo.job_in_lane, 0,
+ sizeof(state->aes192_ooo.job_in_lane));
+ state->aes192_ooo.unused_lanes = 0xFF03020100;
+ state->aes192_ooo.num_lanes_inuse = 0;
+
+
+ memset(state->aes256_ooo.lens, 0xFF,
+ sizeof(state->aes256_ooo.lens));
+ memset(&state->aes256_ooo.lens[0], 0,
+ sizeof(state->aes256_ooo.lens[0]) * 4);
+ memset(state->aes256_ooo.job_in_lane, 0,
+ sizeof(state->aes256_ooo.job_in_lane));
+ state->aes256_ooo.unused_lanes = 0xFF03020100;
+ state->aes256_ooo.num_lanes_inuse = 0;
+
+
+ /* DOCSIS SEC BPI uses same settings as AES128 CBC */
+ memset(state->docsis_sec_ooo.lens, 0xFF,
+ sizeof(state->docsis_sec_ooo.lens));
+ memset(&state->docsis_sec_ooo.lens[0], 0,
+ sizeof(state->docsis_sec_ooo.lens[0]) * 4);
+ memset(state->docsis_sec_ooo.job_in_lane, 0,
+ sizeof(state->docsis_sec_ooo.job_in_lane));
+ state->docsis_sec_ooo.unused_lanes = 0xFF03020100;
+ state->docsis_sec_ooo.num_lanes_inuse = 0;
+
+
+ /* Init HMAC/SHA1 out-of-order fields */
+ state->hmac_sha_1_ooo.lens[0] = 0;
+ state->hmac_sha_1_ooo.lens[1] = 0;
+ state->hmac_sha_1_ooo.lens[2] = 0;
+ state->hmac_sha_1_ooo.lens[3] = 0;
+ state->hmac_sha_1_ooo.lens[4] = 0xFFFF;
+ state->hmac_sha_1_ooo.lens[5] = 0xFFFF;
+ state->hmac_sha_1_ooo.lens[6] = 0xFFFF;
+ state->hmac_sha_1_ooo.lens[7] = 0xFFFF;
+ state->hmac_sha_1_ooo.unused_lanes = 0xFF03020100;
+ for (j = 0; j < SSE_NUM_SHA1_LANES; j++) {
+ state->hmac_sha_1_ooo.ldata[j].job_in_lane = NULL;
+ state->hmac_sha_1_ooo.ldata[j].extra_block[64] = 0x80;
+ memset(state->hmac_sha_1_ooo.ldata[j].extra_block + 65,
+ 0x00,
+ 64+7);
+ p = state->hmac_sha_1_ooo.ldata[j].outer_block;
+ memset(p + 5*4 + 1,
+ 0x00,
+ 64 - 5*4 - 1 - 2);
+		p[5*4] = 0x80;          /* digest is 5*4 bytes long */
+		p[64-2] = 0x02;         /* length 512 + 160 = 672 bits, */
+		p[64-1] = 0xA0;         /* i.e. 0x02A0, stored big endian */
+ }
+
+ /* Init HMAC/SHA224 out-of-order fields */
+ state->hmac_sha_224_ooo.lens[0] = 0;
+ state->hmac_sha_224_ooo.lens[1] = 0;
+ state->hmac_sha_224_ooo.lens[2] = 0;
+ state->hmac_sha_224_ooo.lens[3] = 0;
+ state->hmac_sha_224_ooo.lens[4] = 0xFFFF;
+ state->hmac_sha_224_ooo.lens[5] = 0xFFFF;
+ state->hmac_sha_224_ooo.lens[6] = 0xFFFF;
+ state->hmac_sha_224_ooo.lens[7] = 0xFFFF;
+ state->hmac_sha_224_ooo.unused_lanes = 0xFF03020100;
+ for (j = 0; j < SSE_NUM_SHA256_LANES; j++) {
+ state->hmac_sha_224_ooo.ldata[j].job_in_lane = NULL;
+
+ p = state->hmac_sha_224_ooo.ldata[j].extra_block;
+ size = sizeof(state->hmac_sha_224_ooo.ldata[j].extra_block);
+ memset (p, 0x00, size);
+ p[64] = 0x80;
+
+ p = state->hmac_sha_224_ooo.ldata[j].outer_block;
+ size = sizeof(state->hmac_sha_224_ooo.ldata[j].outer_block);
+ memset(p, 0x00, size);
+ p[7*4] = 0x80; /* digest 7 words long */
+		p[64-2] = 0x02; /* length 512 + 224 = 736 bits = 0x02E0, big endian */
+		p[64-1] = 0xE0;
+ }
+
+ /* Init HMAC/SHA_256 out-of-order fields */
+ state->hmac_sha_256_ooo.lens[0] = 0;
+ state->hmac_sha_256_ooo.lens[1] = 0;
+ state->hmac_sha_256_ooo.lens[2] = 0;
+ state->hmac_sha_256_ooo.lens[3] = 0;
+ state->hmac_sha_256_ooo.lens[4] = 0xFFFF;
+ state->hmac_sha_256_ooo.lens[5] = 0xFFFF;
+ state->hmac_sha_256_ooo.lens[6] = 0xFFFF;
+ state->hmac_sha_256_ooo.lens[7] = 0xFFFF;
+ state->hmac_sha_256_ooo.unused_lanes = 0xFF03020100;
+ for (j = 0; j < SSE_NUM_SHA256_LANES; j++) {
+ state->hmac_sha_256_ooo.ldata[j].job_in_lane = NULL;
+ state->hmac_sha_256_ooo.ldata[j].extra_block[64] = 0x80;
+ memset(state->hmac_sha_256_ooo.ldata[j].extra_block + 65,
+ 0x00,
+ 64+7);
+ p = state->hmac_sha_256_ooo.ldata[j].outer_block;
+ memset(p + 8*4 + 1,
+ 0x00,
+ 64 - 8*4 - 1 - 2); /* digest is 8*4 bytes long */
+ p[8*4] = 0x80;
+ p[64-2] = 0x03; /* length of (opad (64*8) bits + 256 bits)
+ * in hex is 0x300 */
+ p[64-1] = 0x00;
+ }
+
+ /* Init HMAC/SHA384 out-of-order fields */
+ state->hmac_sha_384_ooo.lens[0] = 0;
+ state->hmac_sha_384_ooo.lens[1] = 0;
+ state->hmac_sha_384_ooo.lens[2] = 0xFFFF;
+ state->hmac_sha_384_ooo.lens[3] = 0xFFFF;
+ state->hmac_sha_384_ooo.lens[4] = 0xFFFF;
+ state->hmac_sha_384_ooo.lens[5] = 0xFFFF;
+ state->hmac_sha_384_ooo.lens[6] = 0xFFFF;
+ state->hmac_sha_384_ooo.lens[7] = 0xFFFF;
+ state->hmac_sha_384_ooo.unused_lanes = 0xFF0100;
+ for (j = 0; j < SSE_NUM_SHA512_LANES; j++) {
+ MB_MGR_HMAC_SHA_512_OOO *ctx = &state->hmac_sha_384_ooo;
+
+ ctx->ldata[j].job_in_lane = NULL;
+ ctx->ldata[j].extra_block[SHA_384_BLOCK_SIZE] = 0x80;
+ memset(ctx->ldata[j].extra_block + (SHA_384_BLOCK_SIZE + 1),
+ 0x00, SHA_384_BLOCK_SIZE + 7);
+
+ p = ctx->ldata[j].outer_block;
+ memset(p + SHA384_DIGEST_SIZE_IN_BYTES + 1, 0x00,
+ /* special end point because this length is constant */
+ SHA_384_BLOCK_SIZE -
+ SHA384_DIGEST_SIZE_IN_BYTES - 1 - 2);
+ p[SHA384_DIGEST_SIZE_IN_BYTES] = 0x80; /* mark the end */
+ /*
+		 * The HMAC outer block length is fixed: the opad-processed
+		 * key occupies one whole message block (1024 bits) and the
+		 * inner digest appended to it adds 384 bits, giving
+		 * 1408 bits == 0x0580. The input message block needs to be
+		 * converted to big endian within the sha implementation
+		 * before use.
+ */
+ p[SHA_384_BLOCK_SIZE - 2] = 0x05;
+ p[SHA_384_BLOCK_SIZE - 1] = 0x80;
+ }
+
+ /* Init HMAC/SHA512 out-of-order fields */
+ state->hmac_sha_512_ooo.lens[0] = 0;
+ state->hmac_sha_512_ooo.lens[1] = 0;
+ state->hmac_sha_512_ooo.lens[2] = 0xFFFF;
+ state->hmac_sha_512_ooo.lens[3] = 0xFFFF;
+ state->hmac_sha_512_ooo.lens[4] = 0xFFFF;
+ state->hmac_sha_512_ooo.lens[5] = 0xFFFF;
+ state->hmac_sha_512_ooo.lens[6] = 0xFFFF;
+ state->hmac_sha_512_ooo.lens[7] = 0xFFFF;
+ state->hmac_sha_512_ooo.unused_lanes = 0xFF0100;
+ for (j = 0; j < SSE_NUM_SHA512_LANES; j++) {
+ MB_MGR_HMAC_SHA_512_OOO *ctx = &state->hmac_sha_512_ooo;
+
+ ctx->ldata[j].job_in_lane = NULL;
+ ctx->ldata[j].extra_block[SHA_512_BLOCK_SIZE] = 0x80;
+ memset(ctx->ldata[j].extra_block + (SHA_512_BLOCK_SIZE + 1),
+ 0x00, SHA_512_BLOCK_SIZE + 7);
+
+ p = ctx->ldata[j].outer_block;
+ memset(p + SHA512_DIGEST_SIZE_IN_BYTES + 1, 0x00,
+ /* special end point because this length is constant */
+ SHA_512_BLOCK_SIZE -
+ SHA512_DIGEST_SIZE_IN_BYTES - 1 - 2);
+ p[SHA512_DIGEST_SIZE_IN_BYTES] = 0x80; /* mark the end */
+ /*
+		 * The HMAC outer block length is fixed: the opad-processed
+		 * key occupies one whole message block (1024 bits) and the
+		 * inner digest appended to it adds 512 bits, giving
+		 * 1536 bits == 0x0600. The input message block needs to be
+		 * converted to big endian within the sha implementation
+		 * before use.
+ */
+ p[SHA_512_BLOCK_SIZE - 2] = 0x06;
+ p[SHA_512_BLOCK_SIZE - 1] = 0x00;
+ }
+
+ /* Init HMAC/MD5 out-of-order fields */
+ state->hmac_md5_ooo.lens[0] = 0;
+ state->hmac_md5_ooo.lens[1] = 0;
+ state->hmac_md5_ooo.lens[2] = 0;
+ state->hmac_md5_ooo.lens[3] = 0;
+ state->hmac_md5_ooo.lens[4] = 0;
+ state->hmac_md5_ooo.lens[5] = 0;
+ state->hmac_md5_ooo.lens[6] = 0;
+ state->hmac_md5_ooo.lens[7] = 0;
+ state->hmac_md5_ooo.lens[8] = 0xFFFF;
+ state->hmac_md5_ooo.lens[9] = 0xFFFF;
+ state->hmac_md5_ooo.lens[10] = 0xFFFF;
+ state->hmac_md5_ooo.lens[11] = 0xFFFF;
+ state->hmac_md5_ooo.lens[12] = 0xFFFF;
+ state->hmac_md5_ooo.lens[13] = 0xFFFF;
+ state->hmac_md5_ooo.lens[14] = 0xFFFF;
+ state->hmac_md5_ooo.lens[15] = 0xFFFF;
+ state->hmac_md5_ooo.unused_lanes = 0xF76543210;
+ for (j = 0; j < SSE_NUM_MD5_LANES; j++) {
+ state->hmac_md5_ooo.ldata[j].job_in_lane = NULL;
+
+ p = state->hmac_md5_ooo.ldata[j].extra_block;
+ size = sizeof(state->hmac_md5_ooo.ldata[j].extra_block);
+ memset (p, 0x00, size);
+ p[64] = 0x80;
+
+ p = state->hmac_md5_ooo.ldata[j].outer_block;
+ size = sizeof(state->hmac_md5_ooo.ldata[j].outer_block);
+ memset(p, 0x00, size);
+		p[4*4] = 0x80;          /* digest is 4*4 bytes long */
+		p[64-7] = 0x02;         /* length 512 + 128 = 640 bits = 0x0280, */
+		p[64-8] = 0x80;         /* stored little endian (MD5 convention) */
+ }
+
+ /* Init AES/XCBC OOO fields */
+ state->aes_xcbc_ooo.lens[0] = 0;
+ state->aes_xcbc_ooo.lens[1] = 0;
+ state->aes_xcbc_ooo.lens[2] = 0;
+ state->aes_xcbc_ooo.lens[3] = 0;
+ state->aes_xcbc_ooo.lens[4] = 0xFFFF;
+ state->aes_xcbc_ooo.lens[5] = 0xFFFF;
+ state->aes_xcbc_ooo.lens[6] = 0xFFFF;
+ state->aes_xcbc_ooo.lens[7] = 0xFFFF;
+ state->aes_xcbc_ooo.unused_lanes = 0xFF03020100;
+ for (j = 0; j < 4; j++) {
+ state->aes_xcbc_ooo.ldata[j].job_in_lane = NULL;
+ state->aes_xcbc_ooo.ldata[j].final_block[16] = 0x80;
+ memset(state->aes_xcbc_ooo.ldata[j].final_block + 17, 0x00, 15);
+ }
+
+ /* Init AES-CCM auth out-of-order fields */
+ memset(&state->aes_ccm_ooo, 0, sizeof(MB_MGR_CCM_OOO));
+ for (j = 4; j < 8; j++)
+ state->aes_ccm_ooo.lens[j] = 0xFFFF;
+ state->aes_ccm_ooo.unused_lanes = 0xF3210;
+
+ /* Init AES-CMAC auth out-of-order fields */
+ state->aes_cmac_ooo.lens[0] = 0;
+ state->aes_cmac_ooo.lens[1] = 0;
+ state->aes_cmac_ooo.lens[2] = 0;
+ state->aes_cmac_ooo.lens[3] = 0;
+ state->aes_cmac_ooo.lens[4] = 0xFFFF;
+ state->aes_cmac_ooo.lens[5] = 0xFFFF;
+ state->aes_cmac_ooo.lens[6] = 0xFFFF;
+ state->aes_cmac_ooo.lens[7] = 0xFFFF;
+ for (j = 0; j < 4; j++) {
+ state->aes_cmac_ooo.init_done[j] = 0;
+ state->aes_cmac_ooo.job_in_lane[j] = NULL;
+ }
+ state->aes_cmac_ooo.unused_lanes = 0xF3210;
+
+ /* Init "in order" components */
+ state->next_job = 0;
+ state->earliest_job = -1;
+
+ /* set SSE NO AESNI handlers */
+ state->get_next_job = get_next_job_sse_no_aesni;
+ state->submit_job = submit_job_sse_no_aesni;
+ state->submit_job_nocheck = submit_job_nocheck_sse_no_aesni;
+ state->get_completed_job = get_completed_job_sse_no_aesni;
+ state->flush_job = flush_job_sse_no_aesni;
+ state->queue_size = queue_size_sse_no_aesni;
+ state->keyexp_128 = aes_keyexp_128_sse_no_aesni;
+ state->keyexp_192 = aes_keyexp_192_sse_no_aesni;
+ state->keyexp_256 = aes_keyexp_256_sse_no_aesni;
+ state->cmac_subkey_gen_128 = aes_cmac_subkey_gen_sse_no_aesni;
+ state->xcbc_keyexp = aes_xcbc_expand_key_sse_no_aesni;
+ state->des_key_sched = des_key_schedule;
+ state->sha1_one_block = sha1_one_block_sse;
+ state->sha1 = sha1_sse;
+ state->sha224_one_block = sha224_one_block_sse;
+ state->sha224 = sha224_sse;
+ state->sha256_one_block = sha256_one_block_sse;
+ state->sha256 = sha256_sse;
+ state->sha384_one_block = sha384_one_block_sse;
+ state->sha384 = sha384_sse;
+ state->sha512_one_block = sha512_one_block_sse;
+ state->sha512 = sha512_sse;
+ state->md5_one_block = md5_one_block_sse;
+ state->aes128_cfb_one = aes_cfb_128_one_sse_no_aesni;
+
+ state->eea3_1_buffer = zuc_eea3_1_buffer_sse;
+ state->eea3_4_buffer = zuc_eea3_4_buffer_sse;
+ state->eea3_n_buffer = zuc_eea3_n_buffer_sse;
+ state->eia3_1_buffer = zuc_eia3_1_buffer_sse;
+
+ state->f8_1_buffer = kasumi_f8_1_buffer_sse;
+ state->f8_1_buffer_bit = kasumi_f8_1_buffer_bit_sse;
+ state->f8_2_buffer = kasumi_f8_2_buffer_sse;
+ state->f8_3_buffer = kasumi_f8_3_buffer_sse;
+ state->f8_4_buffer = kasumi_f8_4_buffer_sse;
+ state->f8_n_buffer = kasumi_f8_n_buffer_sse;
+ state->f9_1_buffer = kasumi_f9_1_buffer_sse;
+ state->f9_1_buffer_user = kasumi_f9_1_buffer_user_sse;
+ state->kasumi_init_f8_key_sched = kasumi_init_f8_key_sched_sse;
+ state->kasumi_init_f9_key_sched = kasumi_init_f9_key_sched_sse;
+ state->kasumi_key_sched_size = kasumi_key_sched_size_sse;
+
+ state->snow3g_f8_1_buffer_bit = snow3g_f8_1_buffer_bit_sse_no_aesni;
+ state->snow3g_f8_1_buffer = snow3g_f8_1_buffer_sse_no_aesni;
+ state->snow3g_f8_2_buffer = snow3g_f8_2_buffer_sse_no_aesni;
+ state->snow3g_f8_4_buffer = snow3g_f8_4_buffer_sse_no_aesni;
+ state->snow3g_f8_8_buffer = snow3g_f8_8_buffer_sse_no_aesni;
+ state->snow3g_f8_n_buffer = snow3g_f8_n_buffer_sse_no_aesni;
+ state->snow3g_f8_8_buffer_multikey =
+ snow3g_f8_8_buffer_multikey_sse_no_aesni;
+ state->snow3g_f8_n_buffer_multikey =
+ snow3g_f8_n_buffer_multikey_sse_no_aesni;
+ state->snow3g_f9_1_buffer = snow3g_f9_1_buffer_sse_no_aesni;
+ state->snow3g_init_key_sched = snow3g_init_key_sched_sse_no_aesni;
+ state->snow3g_key_sched_size = snow3g_key_sched_size_sse_no_aesni;
+
+#ifndef NO_GCM
+ state->gcm128_enc = aes_gcm_enc_128_sse_no_aesni;
+ state->gcm192_enc = aes_gcm_enc_192_sse_no_aesni;
+ state->gcm256_enc = aes_gcm_enc_256_sse_no_aesni;
+ state->gcm128_dec = aes_gcm_dec_128_sse_no_aesni;
+ state->gcm192_dec = aes_gcm_dec_192_sse_no_aesni;
+ state->gcm256_dec = aes_gcm_dec_256_sse_no_aesni;
+ state->gcm128_init = aes_gcm_init_128_sse_no_aesni;
+ state->gcm192_init = aes_gcm_init_192_sse_no_aesni;
+ state->gcm256_init = aes_gcm_init_256_sse_no_aesni;
+ state->gcm128_enc_update = aes_gcm_enc_128_update_sse_no_aesni;
+ state->gcm192_enc_update = aes_gcm_enc_192_update_sse_no_aesni;
+ state->gcm256_enc_update = aes_gcm_enc_256_update_sse_no_aesni;
+ state->gcm128_dec_update = aes_gcm_dec_128_update_sse_no_aesni;
+ state->gcm192_dec_update = aes_gcm_dec_192_update_sse_no_aesni;
+ state->gcm256_dec_update = aes_gcm_dec_256_update_sse_no_aesni;
+ state->gcm128_enc_finalize = aes_gcm_enc_128_finalize_sse_no_aesni;
+ state->gcm192_enc_finalize = aes_gcm_enc_192_finalize_sse_no_aesni;
+ state->gcm256_enc_finalize = aes_gcm_enc_256_finalize_sse_no_aesni;
+ state->gcm128_dec_finalize = aes_gcm_dec_128_finalize_sse_no_aesni;
+ state->gcm192_dec_finalize = aes_gcm_dec_192_finalize_sse_no_aesni;
+ state->gcm256_dec_finalize = aes_gcm_dec_256_finalize_sse_no_aesni;
+ state->gcm128_precomp = aes_gcm_precomp_128_sse_no_aesni;
+ state->gcm192_precomp = aes_gcm_precomp_192_sse_no_aesni;
+ state->gcm256_precomp = aes_gcm_precomp_256_sse_no_aesni;
+ state->gcm128_pre = aes_gcm_pre_128_sse_no_aesni;
+ state->gcm192_pre = aes_gcm_pre_192_sse_no_aesni;
+ state->gcm256_pre = aes_gcm_pre_256_sse_no_aesni;
+#endif
+}
+
+#include "mb_mgr_code.h"
diff --git a/src/spdk/intel-ipsec-mb/no-aesni/pon_sse_no_aesni.asm b/src/spdk/intel-ipsec-mb/no-aesni/pon_sse_no_aesni.asm
new file mode 100644
index 000000000..a8a8c2e8e
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/no-aesni/pon_sse_no_aesni.asm
@@ -0,0 +1,33 @@
+;;
+;; Copyright (c) 2019, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%include "include/aesni_emu.inc"
+%define DEC_FN_NAME submit_job_pon_dec_sse_no_aesni
+%define ENC_FN_NAME submit_job_pon_enc_sse_no_aesni
+%define DEC_NO_CTR_FN_NAME submit_job_pon_dec_no_ctr_sse_no_aesni
+%define ENC_NO_CTR_FN_NAME submit_job_pon_enc_no_ctr_sse_no_aesni
+%include "sse/pon_sse.asm"
diff --git a/src/spdk/intel-ipsec-mb/no-aesni/snow3g_sse_no_aesni.c b/src/spdk/intel-ipsec-mb/no-aesni/snow3g_sse_no_aesni.c
new file mode 100644
index 000000000..a30c941fc
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/no-aesni/snow3g_sse_no_aesni.c
@@ -0,0 +1,43 @@
+/*******************************************************************************
+ Copyright (c) 2019, Intel Corporation
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*******************************************************************************/
+
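+/*
+ * Instantiate the common SNOW 3G implementation (include/snow3g_common.h)
+ * for SSE without AES-NI: the defines below map its API names onto the
+ * *_sse_no_aesni symbols.
+ */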
+#define SSE
+#define NO_AESNI
+#define SNOW3G_F8_1_BUFFER_BIT snow3g_f8_1_buffer_bit_sse_no_aesni
+#define SNOW3G_F8_1_BUFFER snow3g_f8_1_buffer_sse_no_aesni
+#define SNOW3G_F8_2_BUFFER snow3g_f8_2_buffer_sse_no_aesni
+#define SNOW3G_F8_4_BUFFER snow3g_f8_4_buffer_sse_no_aesni
+#define SNOW3G_F8_8_BUFFER snow3g_f8_8_buffer_sse_no_aesni
+#define SNOW3G_F8_N_BUFFER snow3g_f8_n_buffer_sse_no_aesni
+#define SNOW3G_F8_8_BUFFER_MULTIKEY snow3g_f8_8_buffer_multikey_sse_no_aesni
+#define SNOW3G_F8_N_BUFFER_MULTIKEY snow3g_f8_n_buffer_multikey_sse_no_aesni
+#define SNOW3G_F9_1_BUFFER snow3g_f9_1_buffer_sse_no_aesni
+#define SNOW3G_INIT_KEY_SCHED snow3g_init_key_sched_sse_no_aesni
+#define SNOW3G_KEY_SCHED_SIZE snow3g_key_sched_size_sse_no_aesni
+#define CLEAR_SCRATCH_SIMD_REGS clear_scratch_xmms_sse
+
+#include "include/snow3g_common.h"
diff --git a/src/spdk/intel-ipsec-mb/sha_one_block.c b/src/spdk/intel-ipsec-mb/sha_one_block.c
new file mode 100644
index 000000000..4458fad4b
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/sha_one_block.c
@@ -0,0 +1,575 @@
+/*******************************************************************************
+ Copyright (c) 2018, Intel Corporation
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*******************************************************************************/
+
+#include <stdio.h>
+#include <stdint.h>
+#include <string.h>
+
+#include "intel-ipsec-mb.h"
+#include "constants.h"
+#include "include/clear_regs_mem.h"
+
+extern void sha1_block_sse(const void *, void *);
+extern void sha1_block_avx(const void *, void *);
+
+extern void sha224_block_sse(const void *, void *);
+extern void sha224_block_avx(const void *, void *);
+
+extern void sha256_block_sse(const void *, void *);
+extern void sha256_block_avx(const void *, void *);
+
+extern void sha384_block_sse(const void *, void *);
+extern void sha384_block_avx(const void *, void *);
+
+extern void sha512_block_sse(const void *, void *);
+extern void sha512_block_avx(const void *, void *);
+
+
+/* ========================================================================== */
+/*
+ * Various utility functions for SHA API
+ */
+
+__forceinline
+uint32_t bswap4(const uint32_t val)
+{
+ return ((val >> 24) | /* byte 3 -> byte 0 */
+ ((val & 0xff0000) >> 8) | /* byte 2 -> byte 1 */
+ ((val & 0xff00) << 8) | /* byte 1 -> byte 2 */
+ (val << 24)); /* byte 0 -> byte 3 */
+}
+
+__forceinline
+uint64_t bswap8(const uint64_t val)
+{
+ return (((uint64_t) bswap4((uint32_t) val)) << 32) |
+ (((uint64_t) bswap4((uint32_t) (val >> 32))));
+}
+
+__forceinline
+void store8_be(void *outp, const uint64_t val)
+{
+ *((uint64_t *)outp) = bswap8(val);
+}
+
+__forceinline
+void var_memcpy(void *dst, const void *src, const uint64_t len)
+{
+ uint64_t i;
+ const uint8_t *src8 = (const uint8_t *)src;
+ uint8_t *dst8 = (uint8_t *)dst;
+
+ for (i = 0; i < len; i++)
+ dst8[i] = src8[i];
+}
+
+__forceinline
+void copy_bswap4_array(void *dst, const void *src, const size_t num)
+{
+ uint32_t *outp = (uint32_t *) dst;
+ const uint32_t *inp = (const uint32_t *) src;
+ size_t i;
+
+ for (i = 0; i < num; i++)
+ outp[i] = bswap4(inp[i]);
+}
+
+__forceinline
+void copy_bswap8_array(void *dst, const void *src, const size_t num)
+{
+ uint64_t *outp = (uint64_t *) dst;
+ const uint64_t *inp = (const uint64_t *) src;
+ size_t i;
+
+ for (i = 0; i < num; i++)
+ outp[i] = bswap8(inp[i]);
+}
+
+__forceinline
+void
+sha_generic_one_block(const void *inp, void *digest,
+ const int is_avx, const int sha_type)
+{
+ if (sha_type == 1) {
+ if (is_avx)
+ sha1_block_avx(inp, digest);
+ else
+ sha1_block_sse(inp, digest);
+ } else if (sha_type == 224) {
+ if (is_avx)
+ sha224_block_avx(inp, digest);
+ else
+ sha224_block_sse(inp, digest);
+ } else if (sha_type == 256) {
+ if (is_avx)
+ sha256_block_avx(inp, digest);
+ else
+ sha256_block_sse(inp, digest);
+ } else if (sha_type == 384) {
+ if (is_avx)
+ sha384_block_avx(inp, digest);
+ else
+ sha384_block_sse(inp, digest);
+ } else if (sha_type == 512) {
+ if (is_avx)
+ sha512_block_avx(inp, digest);
+ else
+ sha512_block_sse(inp, digest);
+ }
+}
+
+__forceinline
+void sha1_init_digest(void *p)
+{
+ uint32_t *p_digest = (uint32_t *)p;
+
+ p_digest[0] = H0;
+ p_digest[1] = H1;
+ p_digest[2] = H2;
+ p_digest[3] = H3;
+ p_digest[4] = H4;
+}
+
+__forceinline
+void sha224_init_digest(void *p)
+{
+ uint32_t *p_digest = (uint32_t *)p;
+
+ p_digest[0] = SHA224_H0;
+ p_digest[1] = SHA224_H1;
+ p_digest[2] = SHA224_H2;
+ p_digest[3] = SHA224_H3;
+ p_digest[4] = SHA224_H4;
+ p_digest[5] = SHA224_H5;
+ p_digest[6] = SHA224_H6;
+ p_digest[7] = SHA224_H7;
+}
+
+__forceinline
+void sha256_init_digest(void *p)
+{
+ uint32_t *p_digest = (uint32_t *)p;
+
+ p_digest[0] = SHA256_H0;
+ p_digest[1] = SHA256_H1;
+ p_digest[2] = SHA256_H2;
+ p_digest[3] = SHA256_H3;
+ p_digest[4] = SHA256_H4;
+ p_digest[5] = SHA256_H5;
+ p_digest[6] = SHA256_H6;
+ p_digest[7] = SHA256_H7;
+}
+
+__forceinline
+void sha384_init_digest(void *p)
+{
+ uint64_t *p_digest = (uint64_t *)p;
+
+ p_digest[0] = SHA384_H0;
+ p_digest[1] = SHA384_H1;
+ p_digest[2] = SHA384_H2;
+ p_digest[3] = SHA384_H3;
+ p_digest[4] = SHA384_H4;
+ p_digest[5] = SHA384_H5;
+ p_digest[6] = SHA384_H6;
+ p_digest[7] = SHA384_H7;
+}
+
+__forceinline
+void sha512_init_digest(void *p)
+{
+ uint64_t *p_digest = (uint64_t *)p;
+
+ p_digest[0] = SHA512_H0;
+ p_digest[1] = SHA512_H1;
+ p_digest[2] = SHA512_H2;
+ p_digest[3] = SHA512_H3;
+ p_digest[4] = SHA512_H4;
+ p_digest[5] = SHA512_H5;
+ p_digest[6] = SHA512_H6;
+ p_digest[7] = SHA512_H7;
+}
+
+__forceinline
+void
+sha_generic_init(void *digest, const int sha_type)
+{
+ if (sha_type == 1)
+ sha1_init_digest(digest);
+ else if (sha_type == 224)
+ sha224_init_digest(digest);
+ else if (sha_type == 256)
+ sha256_init_digest(digest);
+ else if (sha_type == 384)
+ sha384_init_digest(digest);
+ else if (sha_type == 512)
+ sha512_init_digest(digest);
+}
+
+__forceinline
+void sha_generic_write_digest(void *dst, const void *src, const int sha_type)
+{
+ if (sha_type == 1)
+ copy_bswap4_array(dst, src, NUM_SHA_DIGEST_WORDS);
+ else if (sha_type == 224)
+ copy_bswap4_array(dst, src, NUM_SHA_224_DIGEST_WORDS);
+ else if (sha_type == 256)
+ copy_bswap4_array(dst, src, NUM_SHA_256_DIGEST_WORDS);
+ else if (sha_type == 384)
+ copy_bswap8_array(dst, src, NUM_SHA_384_DIGEST_WORDS);
+ else if (sha_type == 512)
+ copy_bswap8_array(dst, src, NUM_SHA_512_DIGEST_WORDS);
+}
+
+__forceinline
+void
+sha_generic(const void *data, const uint64_t length, void *digest,
+ const int is_avx, const int sha_type, const uint64_t blk_size,
+ const uint64_t pad_size)
+{
+#ifdef SAFE_PARAM
+ if (data == NULL || digest == NULL)
+ return;
+#endif
+
+ uint8_t cb[SHA_512_BLOCK_SIZE]; /* biggest possible */
+ union {
+ uint32_t digest1[NUM_SHA_256_DIGEST_WORDS];
+ uint64_t digest2[NUM_SHA_512_DIGEST_WORDS];
+ } local_digest;
+ void *ld = (void *) &local_digest;
+ const uint8_t *inp = (const uint8_t *) data;
+ uint64_t idx, r;
+
+ sha_generic_init(ld, sha_type);
+
+ for (idx = 0; (idx + blk_size) <= length; idx += blk_size)
+ sha_generic_one_block(&inp[idx], ld, is_avx, sha_type);
+
+ r = length % blk_size;
+
+ memset(cb, 0, sizeof(cb));
+ var_memcpy(cb, &inp[idx], r);
+ cb[r] = 0x80;
+
+ if (r >= (blk_size - pad_size)) {
+ /* length will be encoded in the next block */
+ sha_generic_one_block(cb, ld, is_avx, sha_type);
+ memset(cb, 0, sizeof(cb));
+ }
+
+ store8_be(&cb[blk_size - 8], length * 8 /* bit length */);
+ sha_generic_one_block(cb, ld, is_avx, sha_type);
+
+ sha_generic_write_digest(digest, ld, sha_type);
+#ifdef SAFE_DATA
+ clear_mem(cb, sizeof(cb));
+ clear_mem(&local_digest, sizeof(local_digest));
+ clear_scratch_gps();
+ if (is_avx)
+ clear_scratch_xmms_avx();
+ else
+ clear_scratch_xmms_sse();
+#endif
+}
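+
+/*
+ * Worked padding example for sha_generic() above, assuming a 64-byte block
+ * (SHA-1/224/256) and a pad_size covering the trailing 8-byte length field:
+ * for a 70-byte message one full block is hashed in the loop, the
+ * r = 70 % 64 = 6 remainder bytes are copied into cb[], cb[6] = 0x80 is
+ * appended and, since 6 < blk_size - pad_size, the bit length
+ * (70 * 8 = 560) is stored big-endian at cb[56..63] before the final block
+ * is hashed. When r >= blk_size - pad_size the 0x80 marker and the length
+ * no longer fit together, so the partial block is hashed first and a
+ * zeroed block carries the length.
+ */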
+
+__forceinline
+void sha_generic_1block(const void *data, void *digest,
+ const int is_avx, const int sha_type)
+{
+#ifdef SAFE_PARAM
+ if (data == NULL || digest == NULL)
+ return;
+#endif
+ sha_generic_init(digest, sha_type);
+ sha_generic_one_block(data, digest, is_avx, sha_type);
+#ifdef SAFE_DATA
+ clear_scratch_gps();
+ if (is_avx)
+ clear_scratch_xmms_avx();
+ else
+ clear_scratch_xmms_sse();
+#endif
+}
+
+
+/* ========================================================================== */
+/* One block SHA1 computation for IPAD / OPAD usage only */
+
+void sha1_one_block_sse(const void *data, void *digest)
+{
+ sha_generic_1block(data, digest, 0 /* SSE */, 1 /* SHA1 */);
+}
+
+void sha1_one_block_avx(const void *data, void *digest)
+{
+ sha_generic_1block(data, digest, 1 /* AVX */, 1 /* SHA1 */);
+}
+
+void sha1_one_block_avx2(const void *data, void *digest)
+{
+ sha_generic_1block(data, digest, 1 /* AVX */, 1 /* SHA1 */);
+}
+
+void sha1_one_block_avx512(const void *data, void *digest)
+{
+ sha_generic_1block(data, digest, 1 /* AVX */, 1 /* SHA1 */);
+}
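+
+/*
+ * Minimal HMAC pre-compute sketch using the one-block API above (variable
+ * names are illustrative, not part of the library): the inner digest is
+ * obtained by hashing the 64-byte key block XORed with 0x36. Note the
+ * output is the raw SHA-1 state (five 32-bit words), not a byte-swapped
+ * digest.
+ *
+ * uint8_t ipad[64];
+ * uint32_t ipad_state[5];
+ * size_t i;
+ *
+ * for (i = 0; i < 64; i++)
+ * ipad[i] = key_block[i] ^ 0x36;
+ * sha1_one_block_sse(ipad, ipad_state);
+ */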
+
+
+/* ========================================================================== */
+/*
+ * SHA1 API for use in HMAC-SHA1 when key is longer than the block size
+ */
+
+void sha1_sse(const void *data, const uint64_t length, void *digest)
+{
+ sha_generic(data, length, digest, 0 /* SSE */, 1, SHA1_BLOCK_SIZE,
+ SHA1_PAD_SIZE);
+}
+
+void sha1_avx(const void *data, const uint64_t length, void *digest)
+{
+ sha_generic(data, length, digest, 1 /* AVX */, 1, SHA1_BLOCK_SIZE,
+ SHA1_PAD_SIZE);
+}
+
+void sha1_avx2(const void *data, const uint64_t length, void *digest)
+{
+ sha_generic(data, length, digest, 1 /* AVX */, 1, SHA1_BLOCK_SIZE,
+ SHA1_PAD_SIZE);
+}
+
+void sha1_avx512(const void *data, const uint64_t length, void *digest)
+{
+ sha_generic(data, length, digest, 1 /* AVX */, 1, SHA1_BLOCK_SIZE,
+ SHA1_PAD_SIZE);
+}
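+
+/*
+ * Illustrative use of the arbitrary-length API above (the message is just
+ * an example value): unlike the one-block functions, these produce the
+ * final digest in standard big-endian byte order, 20 bytes for SHA-1.
+ *
+ * const char *msg = "abc";
+ * uint8_t digest[20];
+ *
+ * sha1_sse(msg, strlen(msg), digest);
+ */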
+
+/* ========================================================================== */
+/* One block SHA224 computation for IPAD / OPAD usage only */
+
+void sha224_one_block_sse(const void *data, void *digest)
+{
+ sha_generic_1block(data, digest, 0 /* SSE */, 224 /* SHA224 */);
+}
+
+void sha224_one_block_avx(const void *data, void *digest)
+{
+ sha_generic_1block(data, digest, 1 /* AVX */, 224 /* SHA224 */);
+}
+
+void sha224_one_block_avx2(const void *data, void *digest)
+{
+ sha_generic_1block(data, digest, 1 /* AVX */, 224 /* SHA224 */);
+}
+
+void sha224_one_block_avx512(const void *data, void *digest)
+{
+ sha_generic_1block(data, digest, 1 /* AVX */, 224 /* SHA224 */);
+}
+
+/* ========================================================================== */
+/*
+ * SHA224 API for use in HMAC-SHA224 when key is longer than the block size
+ */
+void sha224_sse(const void *data, const uint64_t length, void *digest)
+{
+ sha_generic(data, length, digest, 0 /* SSE */, 224, SHA_256_BLOCK_SIZE,
+ SHA224_PAD_SIZE);
+}
+
+void sha224_avx(const void *data, const uint64_t length, void *digest)
+{
+ sha_generic(data, length, digest, 1 /* AVX */, 224, SHA_256_BLOCK_SIZE,
+ SHA224_PAD_SIZE);
+}
+
+void sha224_avx2(const void *data, const uint64_t length, void *digest)
+{
+ sha_generic(data, length, digest, 1 /* AVX */, 224, SHA_256_BLOCK_SIZE,
+ SHA224_PAD_SIZE);
+}
+
+void sha224_avx512(const void *data, const uint64_t length, void *digest)
+{
+ sha_generic(data, length, digest, 1 /* AVX */, 224, SHA_256_BLOCK_SIZE,
+ SHA224_PAD_SIZE);
+}
+
+/* ========================================================================== */
+/* One block SHA256 computation for IPAD / OPAD usage only */
+
+void sha256_one_block_sse(const void *data, void *digest)
+{
+ sha_generic_1block(data, digest, 0 /* SSE */, 256 /* SHA256 */);
+}
+
+void sha256_one_block_avx(const void *data, void *digest)
+{
+ sha_generic_1block(data, digest, 1 /* AVX */, 256 /* SHA256 */);
+}
+
+void sha256_one_block_avx2(const void *data, void *digest)
+{
+ sha_generic_1block(data, digest, 1 /* AVX */, 256 /* SHA256 */);
+}
+
+void sha256_one_block_avx512(const void *data, void *digest)
+{
+ sha_generic_1block(data, digest, 1 /* AVX */, 256 /* SHA256 */);
+}
+
+/* ========================================================================== */
+/*
+ * SHA256 API for use in HMAC-SHA256 when key is longer than the block size
+ */
+void sha256_sse(const void *data, const uint64_t length, void *digest)
+{
+ sha_generic(data, length, digest, 0 /* SSE */, 256, SHA_256_BLOCK_SIZE,
+ SHA256_PAD_SIZE);
+}
+
+void sha256_avx(const void *data, const uint64_t length, void *digest)
+{
+ sha_generic(data, length, digest, 1 /* AVX */, 256, SHA_256_BLOCK_SIZE,
+ SHA256_PAD_SIZE);
+}
+
+void sha256_avx2(const void *data, const uint64_t length, void *digest)
+{
+ sha_generic(data, length, digest, 1 /* AVX */, 256, SHA_256_BLOCK_SIZE,
+ SHA256_PAD_SIZE);
+}
+
+void sha256_avx512(const void *data, const uint64_t length, void *digest)
+{
+ sha_generic(data, length, digest, 1 /* AVX */, 256, SHA_256_BLOCK_SIZE,
+ SHA256_PAD_SIZE);
+}
+
+/* ========================================================================== */
+/* One block SHA384 computation for IPAD / OPAD usage only */
+
+void sha384_one_block_sse(const void *data, void *digest)
+{
+ sha_generic_1block(data, digest, 0 /* SSE */, 384 /* SHA384 */);
+}
+
+void sha384_one_block_avx(const void *data, void *digest)
+{
+ sha_generic_1block(data, digest, 1 /* AVX */, 384 /* SHA384 */);
+}
+
+void sha384_one_block_avx2(const void *data, void *digest)
+{
+ sha_generic_1block(data, digest, 1 /* AVX */, 384 /* SHA384 */);
+}
+
+void sha384_one_block_avx512(const void *data, void *digest)
+{
+ sha_generic_1block(data, digest, 1 /* AVX */, 384 /* SHA384 */);
+}
+
+/* ========================================================================== */
+/*
+ * SHA384 API for use in HMAC-SHA384 when key is longer than the block size
+ */
+void sha384_sse(const void *data, const uint64_t length, void *digest)
+{
+ sha_generic(data, length, digest, 0 /* SSE */, 384, SHA_384_BLOCK_SIZE,
+ SHA384_PAD_SIZE);
+}
+
+void sha384_avx(const void *data, const uint64_t length, void *digest)
+{
+ sha_generic(data, length, digest, 1 /* AVX */, 384, SHA_384_BLOCK_SIZE,
+ SHA384_PAD_SIZE);
+}
+
+void sha384_avx2(const void *data, const uint64_t length, void *digest)
+{
+ sha_generic(data, length, digest, 1 /* AVX */, 384, SHA_384_BLOCK_SIZE,
+ SHA384_PAD_SIZE);
+}
+
+void sha384_avx512(const void *data, const uint64_t length, void *digest)
+{
+ sha_generic(data, length, digest, 1 /* AVX */, 384, SHA_384_BLOCK_SIZE,
+ SHA384_PAD_SIZE);
+}
+
+/* ========================================================================== */
+/* One block SHA512 computation for IPAD / OPAD usage only */
+
+void sha512_one_block_sse(const void *data, void *digest)
+{
+ sha_generic_1block(data, digest, 0 /* SSE */, 512 /* SHA512 */);
+}
+
+void sha512_one_block_avx(const void *data, void *digest)
+{
+ sha_generic_1block(data, digest, 1 /* AVX */, 512 /* SHA512 */);
+}
+
+void sha512_one_block_avx2(const void *data, void *digest)
+{
+ sha_generic_1block(data, digest, 1 /* AVX */, 512 /* SHA512 */);
+}
+
+void sha512_one_block_avx512(const void *data, void *digest)
+{
+ sha_generic_1block(data, digest, 1 /* AVX */, 512 /* SHA512 */);
+}
+
+/* ========================================================================== */
+/*
+ * SHA512 API for use in HMAC-SHA512 when key is longer than the block size
+ */
+void sha512_sse(const void *data, const uint64_t length, void *digest)
+{
+ sha_generic(data, length, digest, 0 /* SSE */, 512, SHA_512_BLOCK_SIZE,
+ SHA512_PAD_SIZE);
+}
+
+void sha512_avx(const void *data, const uint64_t length, void *digest)
+{
+ sha_generic(data, length, digest, 1 /* AVX */, 512, SHA_512_BLOCK_SIZE,
+ SHA512_PAD_SIZE);
+}
+
+void sha512_avx2(const void *data, const uint64_t length, void *digest)
+{
+ sha_generic(data, length, digest, 1 /* AVX */, 512, SHA_512_BLOCK_SIZE,
+ SHA512_PAD_SIZE);
+}
+
+void sha512_avx512(const void *data, const uint64_t length, void *digest)
+{
+ sha_generic(data, length, digest, 1 /* AVX */, 512, SHA_512_BLOCK_SIZE,
+ SHA512_PAD_SIZE);
+}
diff --git a/src/spdk/intel-ipsec-mb/snow3g_iv.c b/src/spdk/intel-ipsec-mb/snow3g_iv.c
new file mode 100644
index 000000000..d1f48881c
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/snow3g_iv.c
@@ -0,0 +1,97 @@
+/*******************************************************************************
+ Copyright (c) 2019, Intel Corporation
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*******************************************************************************/
+
+#include <string.h>
+
+#include "intel-ipsec-mb.h"
+#include "include/wireless_common.h"
+
+int
+snow3g_f8_iv_gen(const uint32_t count, const uint8_t bearer,
+ const uint8_t dir, void *iv_ptr)
+{
+ uint32_t *iv32 = (uint32_t *) iv_ptr;
+
+ if (iv_ptr == NULL)
+ return -1;
+
+ /* Bearer must contain 5 bits only */
+ if (bearer >= (1<<5))
+ return -1;
+
+ /* Direction must contain 1 bit only */
+ if (dir > 1)
+ return -1;
+ /**
+ * Parameters are passed in Little Endian format
+ * and reversed to generate the IV in Big Endian format
+ */
+ /* IV[3] = BEARER || DIRECTION || 0s */
+ iv32[3] = bswap4((bearer << 27) | (dir << 26));
+
+ /* IV[2] = COUNT */
+ iv32[2] = bswap4(count);
+
+ /* IV[1] = BEARER || DIRECTION || 0s */
+ iv32[1] = iv32[3];
+
+ /* IV[0] = COUNT */
+ iv32[0] = iv32[2];
+
+ return 0;
+}
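+
+/*
+ * Usage sketch (parameter values are arbitrary examples): generate the
+ * 16-byte F8 IV and pass it, together with the expanded key schedule, to
+ * one of the snow3g_f8_*_buffer() functions.
+ *
+ * uint8_t iv[16];
+ *
+ * snow3g_f8_iv_gen(0x72A4F545, 0x0C, 1, iv);
+ *
+ * A non-zero return indicates an out-of-range bearer (>= 32), an invalid
+ * direction (> 1) or a NULL iv pointer.
+ */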
+
+int
+snow3g_f9_iv_gen(const uint32_t count, const uint32_t fresh,
+ const uint8_t dir, void *iv_ptr)
+{
+ uint32_t *iv32 = (uint32_t *) iv_ptr;
+
+ if (iv_ptr == NULL)
+ return -1;
+
+ /* Direction must contain 1 bit only */
+ if (dir > 1)
+ return -1;
+ /**
+ * Parameters are passed in Little Endian format
+ * and reversed to generate the IV in Big Endian format
+ */
+ /* IV[3] = FRESH ^ (DIRECTION[0] << 15) */
+ iv32[3] = bswap4(fresh ^ (dir << 15));
+
+ /* IV[2] = DIRECTION[0] ^ COUNT[0-31] */
+ iv32[2] = bswap4(count ^ (dir << 31));
+
+ /* IV[1] = FRESH */
+ iv32[1] = bswap4(fresh);
+
+ /* IV[0] = COUNT */
+ iv32[0] = bswap4(count);
+
+ return 0;
+}
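+
+/*
+ * The F9 IV generated above is consumed by snow3g_f9_1_buffer() in the same
+ * way, e.g. (example values only):
+ *
+ * uint8_t iv[16];
+ *
+ * snow3g_f9_iv_gen(0x38A6F056, 0xB8AAA65F, 0, iv);
+ */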
diff --git a/src/spdk/intel-ipsec-mb/snow3g_tables.c b/src/spdk/intel-ipsec-mb/snow3g_tables.c
new file mode 100644
index 000000000..eabf21f07
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/snow3g_tables.c
@@ -0,0 +1,757 @@
+/*******************************************************************************
+ Copyright (c) 2009-2019, Intel Corporation
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*******************************************************************************/
+
+#include "snow3g_internal.h"
+
+/*--------------------------------------------------------------------
+ *
+ * An implementation of SNOW 3G, the core algorithm for the
+ * 3GPP Confidentiality and Integrity algorithms.
+ *
+ *--------------------------------------------------------------------*/
+
+IMB_DLL_LOCAL
+DECLARE_ALIGNED(const int snow3g_table_A_mul[256], 32) = {
+ 0x00000000, 0xe19fcf13, 0x6b973726, 0x8a08f835, 0xd6876e4c, 0x3718a15f,
+ 0xbd10596a, 0x5c8f9679, 0x05a7dc98, 0xe438138b, 0x6e30ebbe, 0x8faf24ad,
+ 0xd320b2d4, 0x32bf7dc7, 0xb8b785f2, 0x59284ae1, 0x0ae71199, 0xeb78de8a,
+ 0x617026bf, 0x80efe9ac, 0xdc607fd5, 0x3dffb0c6, 0xb7f748f3, 0x566887e0,
+ 0x0f40cd01, 0xeedf0212, 0x64d7fa27, 0x85483534, 0xd9c7a34d, 0x38586c5e,
+ 0xb250946b, 0x53cf5b78, 0x1467229b, 0xf5f8ed88, 0x7ff015bd, 0x9e6fdaae,
+ 0xc2e04cd7, 0x237f83c4, 0xa9777bf1, 0x48e8b4e2, 0x11c0fe03, 0xf05f3110,
+ 0x7a57c925, 0x9bc80636, 0xc747904f, 0x26d85f5c, 0xacd0a769, 0x4d4f687a,
+ 0x1e803302, 0xff1ffc11, 0x75170424, 0x9488cb37, 0xc8075d4e, 0x2998925d,
+ 0xa3906a68, 0x420fa57b, 0x1b27ef9a, 0xfab82089, 0x70b0d8bc, 0x912f17af,
+ 0xcda081d6, 0x2c3f4ec5, 0xa637b6f0, 0x47a879e3, 0x28ce449f, 0xc9518b8c,
+ 0x435973b9, 0xa2c6bcaa, 0xfe492ad3, 0x1fd6e5c0, 0x95de1df5, 0x7441d2e6,
+ 0x2d699807, 0xccf65714, 0x46feaf21, 0xa7616032, 0xfbeef64b, 0x1a713958,
+ 0x9079c16d, 0x71e60e7e, 0x22295506, 0xc3b69a15, 0x49be6220, 0xa821ad33,
+ 0xf4ae3b4a, 0x1531f459, 0x9f390c6c, 0x7ea6c37f, 0x278e899e, 0xc611468d,
+ 0x4c19beb8, 0xad8671ab, 0xf109e7d2, 0x109628c1, 0x9a9ed0f4, 0x7b011fe7,
+ 0x3ca96604, 0xdd36a917, 0x573e5122, 0xb6a19e31, 0xea2e0848, 0x0bb1c75b,
+ 0x81b93f6e, 0x6026f07d, 0x390eba9c, 0xd891758f, 0x52998dba, 0xb30642a9,
+ 0xef89d4d0, 0x0e161bc3, 0x841ee3f6, 0x65812ce5, 0x364e779d, 0xd7d1b88e,
+ 0x5dd940bb, 0xbc468fa8, 0xe0c919d1, 0x0156d6c2, 0x8b5e2ef7, 0x6ac1e1e4,
+ 0x33e9ab05, 0xd2766416, 0x587e9c23, 0xb9e15330, 0xe56ec549, 0x04f10a5a,
+ 0x8ef9f26f, 0x6f663d7c, 0x50358897, 0xb1aa4784, 0x3ba2bfb1, 0xda3d70a2,
+ 0x86b2e6db, 0x672d29c8, 0xed25d1fd, 0x0cba1eee, 0x5592540f, 0xb40d9b1c,
+ 0x3e056329, 0xdf9aac3a, 0x83153a43, 0x628af550, 0xe8820d65, 0x091dc276,
+ 0x5ad2990e, 0xbb4d561d, 0x3145ae28, 0xd0da613b, 0x8c55f742, 0x6dca3851,
+ 0xe7c2c064, 0x065d0f77, 0x5f754596, 0xbeea8a85, 0x34e272b0, 0xd57dbda3,
+ 0x89f22bda, 0x686de4c9, 0xe2651cfc, 0x03fad3ef, 0x4452aa0c, 0xa5cd651f,
+ 0x2fc59d2a, 0xce5a5239, 0x92d5c440, 0x734a0b53, 0xf942f366, 0x18dd3c75,
+ 0x41f57694, 0xa06ab987, 0x2a6241b2, 0xcbfd8ea1, 0x977218d8, 0x76edd7cb,
+ 0xfce52ffe, 0x1d7ae0ed, 0x4eb5bb95, 0xaf2a7486, 0x25228cb3, 0xc4bd43a0,
+ 0x9832d5d9, 0x79ad1aca, 0xf3a5e2ff, 0x123a2dec, 0x4b12670d, 0xaa8da81e,
+ 0x2085502b, 0xc11a9f38, 0x9d950941, 0x7c0ac652, 0xf6023e67, 0x179df174,
+ 0x78fbcc08, 0x9964031b, 0x136cfb2e, 0xf2f3343d, 0xae7ca244, 0x4fe36d57,
+ 0xc5eb9562, 0x24745a71, 0x7d5c1090, 0x9cc3df83, 0x16cb27b6, 0xf754e8a5,
+ 0xabdb7edc, 0x4a44b1cf, 0xc04c49fa, 0x21d386e9, 0x721cdd91, 0x93831282,
+ 0x198beab7, 0xf81425a4, 0xa49bb3dd, 0x45047cce, 0xcf0c84fb, 0x2e934be8,
+ 0x77bb0109, 0x9624ce1a, 0x1c2c362f, 0xfdb3f93c, 0xa13c6f45, 0x40a3a056,
+ 0xcaab5863, 0x2b349770, 0x6c9cee93, 0x8d032180, 0x070bd9b5, 0xe69416a6,
+ 0xba1b80df, 0x5b844fcc, 0xd18cb7f9, 0x301378ea, 0x693b320b, 0x88a4fd18,
+ 0x02ac052d, 0xe333ca3e, 0xbfbc5c47, 0x5e239354, 0xd42b6b61, 0x35b4a472,
+ 0x667bff0a, 0x87e43019, 0x0decc82c, 0xec73073f, 0xb0fc9146, 0x51635e55,
+ 0xdb6ba660, 0x3af46973, 0x63dc2392, 0x8243ec81, 0x084b14b4, 0xe9d4dba7,
+ 0xb55b4dde, 0x54c482cd, 0xdecc7af8, 0x3f53b5eb};
+
+IMB_DLL_LOCAL
+DECLARE_ALIGNED(const int snow3g_table_A_div[256],32) = {
+ 0x00000000, 0x180f40cd, 0x301e8033, 0x2811c0fe, 0x603ca966, 0x7833e9ab,
+ 0x50222955, 0x482d6998, 0xc078fbcc, 0xd877bb01, 0xf0667bff, 0xe8693b32,
+ 0xa04452aa, 0xb84b1267, 0x905ad299, 0x88559254, 0x29f05f31, 0x31ff1ffc,
+ 0x19eedf02, 0x01e19fcf, 0x49ccf657, 0x51c3b69a, 0x79d27664, 0x61dd36a9,
+ 0xe988a4fd, 0xf187e430, 0xd99624ce, 0xc1996403, 0x89b40d9b, 0x91bb4d56,
+ 0xb9aa8da8, 0xa1a5cd65, 0x5249be62, 0x4a46feaf, 0x62573e51, 0x7a587e9c,
+ 0x32751704, 0x2a7a57c9, 0x026b9737, 0x1a64d7fa, 0x923145ae, 0x8a3e0563,
+ 0xa22fc59d, 0xba208550, 0xf20decc8, 0xea02ac05, 0xc2136cfb, 0xda1c2c36,
+ 0x7bb9e153, 0x63b6a19e, 0x4ba76160, 0x53a821ad, 0x1b854835, 0x038a08f8,
+ 0x2b9bc806, 0x339488cb, 0xbbc11a9f, 0xa3ce5a52, 0x8bdf9aac, 0x93d0da61,
+ 0xdbfdb3f9, 0xc3f2f334, 0xebe333ca, 0xf3ec7307, 0xa492d5c4, 0xbc9d9509,
+ 0x948c55f7, 0x8c83153a, 0xc4ae7ca2, 0xdca13c6f, 0xf4b0fc91, 0xecbfbc5c,
+ 0x64ea2e08, 0x7ce56ec5, 0x54f4ae3b, 0x4cfbeef6, 0x04d6876e, 0x1cd9c7a3,
+ 0x34c8075d, 0x2cc74790, 0x8d628af5, 0x956dca38, 0xbd7c0ac6, 0xa5734a0b,
+ 0xed5e2393, 0xf551635e, 0xdd40a3a0, 0xc54fe36d, 0x4d1a7139, 0x551531f4,
+ 0x7d04f10a, 0x650bb1c7, 0x2d26d85f, 0x35299892, 0x1d38586c, 0x053718a1,
+ 0xf6db6ba6, 0xeed42b6b, 0xc6c5eb95, 0xdecaab58, 0x96e7c2c0, 0x8ee8820d,
+ 0xa6f942f3, 0xbef6023e, 0x36a3906a, 0x2eacd0a7, 0x06bd1059, 0x1eb25094,
+ 0x569f390c, 0x4e9079c1, 0x6681b93f, 0x7e8ef9f2, 0xdf2b3497, 0xc724745a,
+ 0xef35b4a4, 0xf73af469, 0xbf179df1, 0xa718dd3c, 0x8f091dc2, 0x97065d0f,
+ 0x1f53cf5b, 0x075c8f96, 0x2f4d4f68, 0x37420fa5, 0x7f6f663d, 0x676026f0,
+ 0x4f71e60e, 0x577ea6c3, 0xe18d0321, 0xf98243ec, 0xd1938312, 0xc99cc3df,
+ 0x81b1aa47, 0x99beea8a, 0xb1af2a74, 0xa9a06ab9, 0x21f5f8ed, 0x39fab820,
+ 0x11eb78de, 0x09e43813, 0x41c9518b, 0x59c61146, 0x71d7d1b8, 0x69d89175,
+ 0xc87d5c10, 0xd0721cdd, 0xf863dc23, 0xe06c9cee, 0xa841f576, 0xb04eb5bb,
+ 0x985f7545, 0x80503588, 0x0805a7dc, 0x100ae711, 0x381b27ef, 0x20146722,
+ 0x68390eba, 0x70364e77, 0x58278e89, 0x4028ce44, 0xb3c4bd43, 0xabcbfd8e,
+ 0x83da3d70, 0x9bd57dbd, 0xd3f81425, 0xcbf754e8, 0xe3e69416, 0xfbe9d4db,
+ 0x73bc468f, 0x6bb30642, 0x43a2c6bc, 0x5bad8671, 0x1380efe9, 0x0b8faf24,
+ 0x239e6fda, 0x3b912f17, 0x9a34e272, 0x823ba2bf, 0xaa2a6241, 0xb225228c,
+ 0xfa084b14, 0xe2070bd9, 0xca16cb27, 0xd2198bea, 0x5a4c19be, 0x42435973,
+ 0x6a52998d, 0x725dd940, 0x3a70b0d8, 0x227ff015, 0x0a6e30eb, 0x12617026,
+ 0x451fd6e5, 0x5d109628, 0x750156d6, 0x6d0e161b, 0x25237f83, 0x3d2c3f4e,
+ 0x153dffb0, 0x0d32bf7d, 0x85672d29, 0x9d686de4, 0xb579ad1a, 0xad76edd7,
+ 0xe55b844f, 0xfd54c482, 0xd545047c, 0xcd4a44b1, 0x6cef89d4, 0x74e0c919,
+ 0x5cf109e7, 0x44fe492a, 0x0cd320b2, 0x14dc607f, 0x3ccda081, 0x24c2e04c,
+ 0xac977218, 0xb49832d5, 0x9c89f22b, 0x8486b2e6, 0xccabdb7e, 0xd4a49bb3,
+ 0xfcb55b4d, 0xe4ba1b80, 0x17566887, 0x0f59284a, 0x2748e8b4, 0x3f47a879,
+ 0x776ac1e1, 0x6f65812c, 0x477441d2, 0x5f7b011f, 0xd72e934b, 0xcf21d386,
+ 0xe7301378, 0xff3f53b5, 0xb7123a2d, 0xaf1d7ae0, 0x870cba1e, 0x9f03fad3,
+ 0x3ea637b6, 0x26a9777b, 0x0eb8b785, 0x16b7f748, 0x5e9a9ed0, 0x4695de1d,
+ 0x6e841ee3, 0x768b5e2e, 0xfedecc7a, 0xe6d18cb7, 0xcec04c49, 0xd6cf0c84,
+ 0x9ee2651c, 0x86ed25d1, 0xaefce52f, 0xb6f3a5e2};
+
+IMB_DLL_LOCAL
+snow3gTableEntry_t snow3g_table_S1[256] = {
+ {.v = 0xc6a56363c6a56363ULL}, {.v = 0xf8847c7cf8847c7cULL},
+ {.v = 0xee997777ee997777ULL}, {.v = 0xf68d7b7bf68d7b7bULL},
+ {.v = 0xff0df2f2ff0df2f2ULL}, {.v = 0xd6bd6b6bd6bd6b6bULL},
+ {.v = 0xdeb16f6fdeb16f6fULL}, {.v = 0x9154c5c59154c5c5ULL},
+ {.v = 0x6050303060503030ULL}, {.v = 0x0203010102030101ULL},
+ {.v = 0xcea96767cea96767ULL}, {.v = 0x567d2b2b567d2b2bULL},
+ {.v = 0xe719fefee719fefeULL}, {.v = 0xb562d7d7b562d7d7ULL},
+ {.v = 0x4de6abab4de6ababULL}, {.v = 0xec9a7676ec9a7676ULL},
+ {.v = 0x8f45caca8f45cacaULL}, {.v = 0x1f9d82821f9d8282ULL},
+ {.v = 0x8940c9c98940c9c9ULL}, {.v = 0xfa877d7dfa877d7dULL},
+ {.v = 0xef15fafaef15fafaULL}, {.v = 0xb2eb5959b2eb5959ULL},
+ {.v = 0x8ec947478ec94747ULL}, {.v = 0xfb0bf0f0fb0bf0f0ULL},
+ {.v = 0x41ecadad41ecadadULL}, {.v = 0xb367d4d4b367d4d4ULL},
+ {.v = 0x5ffda2a25ffda2a2ULL}, {.v = 0x45eaafaf45eaafafULL},
+ {.v = 0x23bf9c9c23bf9c9cULL}, {.v = 0x53f7a4a453f7a4a4ULL},
+ {.v = 0xe4967272e4967272ULL}, {.v = 0x9b5bc0c09b5bc0c0ULL},
+ {.v = 0x75c2b7b775c2b7b7ULL}, {.v = 0xe11cfdfde11cfdfdULL},
+ {.v = 0x3dae93933dae9393ULL}, {.v = 0x4c6a26264c6a2626ULL},
+ {.v = 0x6c5a36366c5a3636ULL}, {.v = 0x7e413f3f7e413f3fULL},
+ {.v = 0xf502f7f7f502f7f7ULL}, {.v = 0x834fcccc834fccccULL},
+ {.v = 0x685c3434685c3434ULL}, {.v = 0x51f4a5a551f4a5a5ULL},
+ {.v = 0xd134e5e5d134e5e5ULL}, {.v = 0xf908f1f1f908f1f1ULL},
+ {.v = 0xe2937171e2937171ULL}, {.v = 0xab73d8d8ab73d8d8ULL},
+ {.v = 0x6253313162533131ULL}, {.v = 0x2a3f15152a3f1515ULL},
+ {.v = 0x080c0404080c0404ULL}, {.v = 0x9552c7c79552c7c7ULL},
+ {.v = 0x4665232346652323ULL}, {.v = 0x9d5ec3c39d5ec3c3ULL},
+ {.v = 0x3028181830281818ULL}, {.v = 0x37a1969637a19696ULL},
+ {.v = 0x0a0f05050a0f0505ULL}, {.v = 0x2fb59a9a2fb59a9aULL},
+ {.v = 0x0e0907070e090707ULL}, {.v = 0x2436121224361212ULL},
+ {.v = 0x1b9b80801b9b8080ULL}, {.v = 0xdf3de2e2df3de2e2ULL},
+ {.v = 0xcd26ebebcd26ebebULL}, {.v = 0x4e6927274e692727ULL},
+ {.v = 0x7fcdb2b27fcdb2b2ULL}, {.v = 0xea9f7575ea9f7575ULL},
+ {.v = 0x121b0909121b0909ULL}, {.v = 0x1d9e83831d9e8383ULL},
+ {.v = 0x58742c2c58742c2cULL}, {.v = 0x342e1a1a342e1a1aULL},
+ {.v = 0x362d1b1b362d1b1bULL}, {.v = 0xdcb26e6edcb26e6eULL},
+ {.v = 0xb4ee5a5ab4ee5a5aULL}, {.v = 0x5bfba0a05bfba0a0ULL},
+ {.v = 0xa4f65252a4f65252ULL}, {.v = 0x764d3b3b764d3b3bULL},
+ {.v = 0xb761d6d6b761d6d6ULL}, {.v = 0x7dceb3b37dceb3b3ULL},
+ {.v = 0x527b2929527b2929ULL}, {.v = 0xdd3ee3e3dd3ee3e3ULL},
+ {.v = 0x5e712f2f5e712f2fULL}, {.v = 0x1397848413978484ULL},
+ {.v = 0xa6f55353a6f55353ULL}, {.v = 0xb968d1d1b968d1d1ULL},
+ {.v = 0x0000000000000000ULL}, {.v = 0xc12cededc12cededULL},
+ {.v = 0x4060202040602020ULL}, {.v = 0xe31ffcfce31ffcfcULL},
+ {.v = 0x79c8b1b179c8b1b1ULL}, {.v = 0xb6ed5b5bb6ed5b5bULL},
+ {.v = 0xd4be6a6ad4be6a6aULL}, {.v = 0x8d46cbcb8d46cbcbULL},
+ {.v = 0x67d9bebe67d9bebeULL}, {.v = 0x724b3939724b3939ULL},
+ {.v = 0x94de4a4a94de4a4aULL}, {.v = 0x98d44c4c98d44c4cULL},
+ {.v = 0xb0e85858b0e85858ULL}, {.v = 0x854acfcf854acfcfULL},
+ {.v = 0xbb6bd0d0bb6bd0d0ULL}, {.v = 0xc52aefefc52aefefULL},
+ {.v = 0x4fe5aaaa4fe5aaaaULL}, {.v = 0xed16fbfbed16fbfbULL},
+ {.v = 0x86c5434386c54343ULL}, {.v = 0x9ad74d4d9ad74d4dULL},
+ {.v = 0x6655333366553333ULL}, {.v = 0x1194858511948585ULL},
+ {.v = 0x8acf45458acf4545ULL}, {.v = 0xe910f9f9e910f9f9ULL},
+ {.v = 0x0406020204060202ULL}, {.v = 0xfe817f7ffe817f7fULL},
+ {.v = 0xa0f05050a0f05050ULL}, {.v = 0x78443c3c78443c3cULL},
+ {.v = 0x25ba9f9f25ba9f9fULL}, {.v = 0x4be3a8a84be3a8a8ULL},
+ {.v = 0xa2f35151a2f35151ULL}, {.v = 0x5dfea3a35dfea3a3ULL},
+ {.v = 0x80c0404080c04040ULL}, {.v = 0x058a8f8f058a8f8fULL},
+ {.v = 0x3fad92923fad9292ULL}, {.v = 0x21bc9d9d21bc9d9dULL},
+ {.v = 0x7048383870483838ULL}, {.v = 0xf104f5f5f104f5f5ULL},
+ {.v = 0x63dfbcbc63dfbcbcULL}, {.v = 0x77c1b6b677c1b6b6ULL},
+ {.v = 0xaf75dadaaf75dadaULL}, {.v = 0x4263212142632121ULL},
+ {.v = 0x2030101020301010ULL}, {.v = 0xe51affffe51affffULL},
+ {.v = 0xfd0ef3f3fd0ef3f3ULL}, {.v = 0xbf6dd2d2bf6dd2d2ULL},
+ {.v = 0x814ccdcd814ccdcdULL}, {.v = 0x18140c0c18140c0cULL},
+ {.v = 0x2635131326351313ULL}, {.v = 0xc32fececc32fececULL},
+ {.v = 0xbee15f5fbee15f5fULL}, {.v = 0x35a2979735a29797ULL},
+ {.v = 0x88cc444488cc4444ULL}, {.v = 0x2e3917172e391717ULL},
+ {.v = 0x9357c4c49357c4c4ULL}, {.v = 0x55f2a7a755f2a7a7ULL},
+ {.v = 0xfc827e7efc827e7eULL}, {.v = 0x7a473d3d7a473d3dULL},
+ {.v = 0xc8ac6464c8ac6464ULL}, {.v = 0xbae75d5dbae75d5dULL},
+ {.v = 0x322b1919322b1919ULL}, {.v = 0xe6957373e6957373ULL},
+ {.v = 0xc0a06060c0a06060ULL}, {.v = 0x1998818119988181ULL},
+ {.v = 0x9ed14f4f9ed14f4fULL}, {.v = 0xa37fdcdca37fdcdcULL},
+ {.v = 0x4466222244662222ULL}, {.v = 0x547e2a2a547e2a2aULL},
+ {.v = 0x3bab90903bab9090ULL}, {.v = 0x0b8388880b838888ULL},
+ {.v = 0x8cca46468cca4646ULL}, {.v = 0xc729eeeec729eeeeULL},
+ {.v = 0x6bd3b8b86bd3b8b8ULL}, {.v = 0x283c1414283c1414ULL},
+ {.v = 0xa779dedea779dedeULL}, {.v = 0xbce25e5ebce25e5eULL},
+ {.v = 0x161d0b0b161d0b0bULL}, {.v = 0xad76dbdbad76dbdbULL},
+ {.v = 0xdb3be0e0db3be0e0ULL}, {.v = 0x6456323264563232ULL},
+ {.v = 0x744e3a3a744e3a3aULL}, {.v = 0x141e0a0a141e0a0aULL},
+ {.v = 0x92db494992db4949ULL}, {.v = 0x0c0a06060c0a0606ULL},
+ {.v = 0x486c2424486c2424ULL}, {.v = 0xb8e45c5cb8e45c5cULL},
+ {.v = 0x9f5dc2c29f5dc2c2ULL}, {.v = 0xbd6ed3d3bd6ed3d3ULL},
+ {.v = 0x43efacac43efacacULL}, {.v = 0xc4a66262c4a66262ULL},
+ {.v = 0x39a8919139a89191ULL}, {.v = 0x31a4959531a49595ULL},
+ {.v = 0xd337e4e4d337e4e4ULL}, {.v = 0xf28b7979f28b7979ULL},
+ {.v = 0xd532e7e7d532e7e7ULL}, {.v = 0x8b43c8c88b43c8c8ULL},
+ {.v = 0x6e5937376e593737ULL}, {.v = 0xdab76d6ddab76d6dULL},
+ {.v = 0x018c8d8d018c8d8dULL}, {.v = 0xb164d5d5b164d5d5ULL},
+ {.v = 0x9cd24e4e9cd24e4eULL}, {.v = 0x49e0a9a949e0a9a9ULL},
+ {.v = 0xd8b46c6cd8b46c6cULL}, {.v = 0xacfa5656acfa5656ULL},
+ {.v = 0xf307f4f4f307f4f4ULL}, {.v = 0xcf25eaeacf25eaeaULL},
+ {.v = 0xcaaf6565caaf6565ULL}, {.v = 0xf48e7a7af48e7a7aULL},
+ {.v = 0x47e9aeae47e9aeaeULL}, {.v = 0x1018080810180808ULL},
+ {.v = 0x6fd5baba6fd5babaULL}, {.v = 0xf0887878f0887878ULL},
+ {.v = 0x4a6f25254a6f2525ULL}, {.v = 0x5c722e2e5c722e2eULL},
+ {.v = 0x38241c1c38241c1cULL}, {.v = 0x57f1a6a657f1a6a6ULL},
+ {.v = 0x73c7b4b473c7b4b4ULL}, {.v = 0x9751c6c69751c6c6ULL},
+ {.v = 0xcb23e8e8cb23e8e8ULL}, {.v = 0xa17cdddda17cddddULL},
+ {.v = 0xe89c7474e89c7474ULL}, {.v = 0x3e211f1f3e211f1fULL},
+ {.v = 0x96dd4b4b96dd4b4bULL}, {.v = 0x61dcbdbd61dcbdbdULL},
+ {.v = 0x0d868b8b0d868b8bULL}, {.v = 0x0f858a8a0f858a8aULL},
+ {.v = 0xe0907070e0907070ULL}, {.v = 0x7c423e3e7c423e3eULL},
+ {.v = 0x71c4b5b571c4b5b5ULL}, {.v = 0xccaa6666ccaa6666ULL},
+ {.v = 0x90d8484890d84848ULL}, {.v = 0x0605030306050303ULL},
+ {.v = 0xf701f6f6f701f6f6ULL}, {.v = 0x1c120e0e1c120e0eULL},
+ {.v = 0xc2a36161c2a36161ULL}, {.v = 0x6a5f35356a5f3535ULL},
+ {.v = 0xaef95757aef95757ULL}, {.v = 0x69d0b9b969d0b9b9ULL},
+ {.v = 0x1791868617918686ULL}, {.v = 0x9958c1c19958c1c1ULL},
+ {.v = 0x3a271d1d3a271d1dULL}, {.v = 0x27b99e9e27b99e9eULL},
+ {.v = 0xd938e1e1d938e1e1ULL}, {.v = 0xeb13f8f8eb13f8f8ULL},
+ {.v = 0x2bb398982bb39898ULL}, {.v = 0x2233111122331111ULL},
+ {.v = 0xd2bb6969d2bb6969ULL}, {.v = 0xa970d9d9a970d9d9ULL},
+ {.v = 0x07898e8e07898e8eULL}, {.v = 0x33a7949433a79494ULL},
+ {.v = 0x2db69b9b2db69b9bULL}, {.v = 0x3c221e1e3c221e1eULL},
+ {.v = 0x1592878715928787ULL}, {.v = 0xc920e9e9c920e9e9ULL},
+ {.v = 0x8749cece8749ceceULL}, {.v = 0xaaff5555aaff5555ULL},
+ {.v = 0x5078282850782828ULL}, {.v = 0xa57adfdfa57adfdfULL},
+ {.v = 0x038f8c8c038f8c8cULL}, {.v = 0x59f8a1a159f8a1a1ULL},
+ {.v = 0x0980898909808989ULL}, {.v = 0x1a170d0d1a170d0dULL},
+ {.v = 0x65dabfbf65dabfbfULL}, {.v = 0xd731e6e6d731e6e6ULL},
+ {.v = 0x84c6424284c64242ULL}, {.v = 0xd0b86868d0b86868ULL},
+ {.v = 0x82c3414182c34141ULL}, {.v = 0x29b0999929b09999ULL},
+ {.v = 0x5a772d2d5a772d2dULL}, {.v = 0x1e110f0f1e110f0fULL},
+ {.v = 0x7bcbb0b07bcbb0b0ULL}, {.v = 0xa8fc5454a8fc5454ULL},
+ {.v = 0x6dd6bbbb6dd6bbbbULL}, {.v = 0x2c3a16162c3a1616ULL}};
+
+IMB_DLL_LOCAL
+snow3gTableEntry_t snow3g_table_S2[256] = {
+ {.v = 0x4a6f25254a6f2525ULL}, {.v = 0x486c2424486c2424ULL},
+ {.v = 0xe6957373e6957373ULL}, {.v = 0xcea96767cea96767ULL},
+ {.v = 0xc710d7d7c710d7d7ULL}, {.v = 0x359baeae359baeaeULL},
+ {.v = 0xb8e45c5cb8e45c5cULL}, {.v = 0x6050303060503030ULL},
+ {.v = 0x2185a4a42185a4a4ULL}, {.v = 0xb55beeeeb55beeeeULL},
+ {.v = 0xdcb26e6edcb26e6eULL}, {.v = 0xff34cbcbff34cbcbULL},
+ {.v = 0xfa877d7dfa877d7dULL}, {.v = 0x03b6b5b503b6b5b5ULL},
+ {.v = 0x6def82826def8282ULL}, {.v = 0xdf04dbdbdf04dbdbULL},
+ {.v = 0xa145e4e4a145e4e4ULL}, {.v = 0x75fb8e8e75fb8e8eULL},
+ {.v = 0x90d8484890d84848ULL}, {.v = 0x92db494992db4949ULL},
+ {.v = 0x9ed14f4f9ed14f4fULL}, {.v = 0xbae75d5dbae75d5dULL},
+ {.v = 0xd4be6a6ad4be6a6aULL}, {.v = 0xf0887878f0887878ULL},
+ {.v = 0xe0907070e0907070ULL}, {.v = 0x79f1888879f18888ULL},
+ {.v = 0xb951e8e8b951e8e8ULL}, {.v = 0xbee15f5fbee15f5fULL},
+ {.v = 0xbce25e5ebce25e5eULL}, {.v = 0x61e5848461e58484ULL},
+ {.v = 0xcaaf6565caaf6565ULL}, {.v = 0xad4fe2e2ad4fe2e2ULL},
+ {.v = 0xd901d8d8d901d8d8ULL}, {.v = 0xbb52e9e9bb52e9e9ULL},
+ {.v = 0xf13dccccf13dccccULL}, {.v = 0xb35eededb35eededULL},
+ {.v = 0x80c0404080c04040ULL}, {.v = 0x5e712f2f5e712f2fULL},
+ {.v = 0x2233111122331111ULL}, {.v = 0x5078282850782828ULL},
+ {.v = 0xaef95757aef95757ULL}, {.v = 0xcd1fd2d2cd1fd2d2ULL},
+ {.v = 0x319dacac319dacacULL}, {.v = 0xaf4ce3e3af4ce3e3ULL},
+ {.v = 0x94de4a4a94de4a4aULL}, {.v = 0x2a3f15152a3f1515ULL},
+ {.v = 0x362d1b1b362d1b1bULL}, {.v = 0x1ba2b9b91ba2b9b9ULL},
+ {.v = 0x0dbfb2b20dbfb2b2ULL}, {.v = 0x69e9808069e98080ULL},
+ {.v = 0x63e6858563e68585ULL}, {.v = 0x2583a6a62583a6a6ULL},
+ {.v = 0x5c722e2e5c722e2eULL}, {.v = 0x0406020204060202ULL},
+ {.v = 0x8ec947478ec94747ULL}, {.v = 0x527b2929527b2929ULL},
+ {.v = 0x0e0907070e090707ULL}, {.v = 0x96dd4b4b96dd4b4bULL},
+ {.v = 0x1c120e0e1c120e0eULL}, {.v = 0xeb2ac1c1eb2ac1c1ULL},
+ {.v = 0xa2f35151a2f35151ULL}, {.v = 0x3d97aaaa3d97aaaaULL},
+ {.v = 0x7bf289897bf28989ULL}, {.v = 0xc115d4d4c115d4d4ULL},
+ {.v = 0xfd37cacafd37cacaULL}, {.v = 0x0203010102030101ULL},
+ {.v = 0x8cca46468cca4646ULL}, {.v = 0x0fbcb3b30fbcb3b3ULL},
+ {.v = 0xb758efefb758efefULL}, {.v = 0xd30eddddd30eddddULL},
+ {.v = 0x88cc444488cc4444ULL}, {.v = 0xf68d7b7bf68d7b7bULL},
+ {.v = 0xed2fc2c2ed2fc2c2ULL}, {.v = 0xfe817f7ffe817f7fULL},
+ {.v = 0x15abbebe15abbebeULL}, {.v = 0xef2cc3c3ef2cc3c3ULL},
+ {.v = 0x57c89f9f57c89f9fULL}, {.v = 0x4060202040602020ULL},
+ {.v = 0x98d44c4c98d44c4cULL}, {.v = 0xc8ac6464c8ac6464ULL},
+ {.v = 0x6fec83836fec8383ULL}, {.v = 0x2d8fa2a22d8fa2a2ULL},
+ {.v = 0xd0b86868d0b86868ULL}, {.v = 0x84c6424284c64242ULL},
+ {.v = 0x2635131326351313ULL}, {.v = 0x01b5b4b401b5b4b4ULL},
+ {.v = 0x82c3414182c34141ULL}, {.v = 0xf33ecdcdf33ecdcdULL},
+ {.v = 0x1da7baba1da7babaULL}, {.v = 0xe523c6c6e523c6c6ULL},
+ {.v = 0x1fa4bbbb1fa4bbbbULL}, {.v = 0xdab76d6ddab76d6dULL},
+ {.v = 0x9ad74d4d9ad74d4dULL}, {.v = 0xe2937171e2937171ULL},
+ {.v = 0x4263212142632121ULL}, {.v = 0x8175f4f48175f4f4ULL},
+ {.v = 0x73fe8d8d73fe8d8dULL}, {.v = 0x09b9b0b009b9b0b0ULL},
+ {.v = 0xa346e5e5a346e5e5ULL}, {.v = 0x4fdc93934fdc9393ULL},
+ {.v = 0x956bfefe956bfefeULL}, {.v = 0x77f88f8f77f88f8fULL},
+ {.v = 0xa543e6e6a543e6e6ULL}, {.v = 0xf738cfcff738cfcfULL},
+ {.v = 0x86c5434386c54343ULL}, {.v = 0x8acf45458acf4545ULL},
+ {.v = 0x6253313162533131ULL}, {.v = 0x4466222244662222ULL},
+ {.v = 0x6e5937376e593737ULL}, {.v = 0x6c5a36366c5a3636ULL},
+ {.v = 0x45d3969645d39696ULL}, {.v = 0x9d67fafa9d67fafaULL},
+ {.v = 0x11adbcbc11adbcbcULL}, {.v = 0x1e110f0f1e110f0fULL},
+ {.v = 0x1018080810180808ULL}, {.v = 0xa4f65252a4f65252ULL},
+ {.v = 0x3a271d1d3a271d1dULL}, {.v = 0xaaff5555aaff5555ULL},
+ {.v = 0x342e1a1a342e1a1aULL}, {.v = 0xe326c5c5e326c5c5ULL},
+ {.v = 0x9cd24e4e9cd24e4eULL}, {.v = 0x4665232346652323ULL},
+ {.v = 0xd2bb6969d2bb6969ULL}, {.v = 0xf48e7a7af48e7a7aULL},
+ {.v = 0x4ddf92924ddf9292ULL}, {.v = 0x9768ffff9768ffffULL},
+ {.v = 0xb6ed5b5bb6ed5b5bULL}, {.v = 0xb4ee5a5ab4ee5a5aULL},
+ {.v = 0xbf54ebebbf54ebebULL}, {.v = 0x5dc79a9a5dc79a9aULL},
+ {.v = 0x38241c1c38241c1cULL}, {.v = 0x3b92a9a93b92a9a9ULL},
+ {.v = 0xcb1ad1d1cb1ad1d1ULL}, {.v = 0xfc827e7efc827e7eULL},
+ {.v = 0x1a170d0d1a170d0dULL}, {.v = 0x916dfcfc916dfcfcULL},
+ {.v = 0xa0f05050a0f05050ULL}, {.v = 0x7df78a8a7df78a8aULL},
+ {.v = 0x05b3b6b605b3b6b6ULL}, {.v = 0xc4a66262c4a66262ULL},
+ {.v = 0x8376f5f58376f5f5ULL}, {.v = 0x141e0a0a141e0a0aULL},
+ {.v = 0x9961f8f89961f8f8ULL}, {.v = 0xd10ddcdcd10ddcdcULL},
+ {.v = 0x0605030306050303ULL}, {.v = 0x78443c3c78443c3cULL},
+ {.v = 0x18140c0c18140c0cULL}, {.v = 0x724b3939724b3939ULL},
+ {.v = 0x8b7af1f18b7af1f1ULL}, {.v = 0x19a1b8b819a1b8b8ULL},
+ {.v = 0x8f7cf3f38f7cf3f3ULL}, {.v = 0x7a473d3d7a473d3dULL},
+ {.v = 0x8d7ff2f28d7ff2f2ULL}, {.v = 0xc316d5d5c316d5d5ULL},
+ {.v = 0x47d0979747d09797ULL}, {.v = 0xccaa6666ccaa6666ULL},
+ {.v = 0x6bea81816bea8181ULL}, {.v = 0x6456323264563232ULL},
+ {.v = 0x2989a0a02989a0a0ULL}, {.v = 0x0000000000000000ULL},
+ {.v = 0x0c0a06060c0a0606ULL}, {.v = 0xf53bcecef53bceceULL},
+ {.v = 0x8573f6f68573f6f6ULL}, {.v = 0xbd57eaeabd57eaeaULL},
+ {.v = 0x07b0b7b707b0b7b7ULL}, {.v = 0x2e3917172e391717ULL},
+ {.v = 0x8770f7f78770f7f7ULL}, {.v = 0x71fd8c8c71fd8c8cULL},
+ {.v = 0xf28b7979f28b7979ULL}, {.v = 0xc513d6d6c513d6d6ULL},
+ {.v = 0x2780a7a72780a7a7ULL}, {.v = 0x17a8bfbf17a8bfbfULL},
+ {.v = 0x7ff48b8b7ff48b8bULL}, {.v = 0x7e413f3f7e413f3fULL},
+ {.v = 0x3e211f1f3e211f1fULL}, {.v = 0xa6f55353a6f55353ULL},
+ {.v = 0xc6a56363c6a56363ULL}, {.v = 0xea9f7575ea9f7575ULL},
+ {.v = 0x6a5f35356a5f3535ULL}, {.v = 0x58742c2c58742c2cULL},
+ {.v = 0xc0a06060c0a06060ULL}, {.v = 0x936efdfd936efdfdULL},
+ {.v = 0x4e6927274e692727ULL}, {.v = 0xcf1cd3d3cf1cd3d3ULL},
+ {.v = 0x41d5949441d59494ULL}, {.v = 0x2386a5a52386a5a5ULL},
+ {.v = 0xf8847c7cf8847c7cULL}, {.v = 0x2b8aa1a12b8aa1a1ULL},
+ {.v = 0x0a0f05050a0f0505ULL}, {.v = 0xb0e85858b0e85858ULL},
+ {.v = 0x5a772d2d5a772d2dULL}, {.v = 0x13aebdbd13aebdbdULL},
+ {.v = 0xdb02d9d9db02d9d9ULL}, {.v = 0xe720c7c7e720c7c7ULL},
+ {.v = 0x3798afaf3798afafULL}, {.v = 0xd6bd6b6bd6bd6b6bULL},
+ {.v = 0xa8fc5454a8fc5454ULL}, {.v = 0x161d0b0b161d0b0bULL},
+ {.v = 0xa949e0e0a949e0e0ULL}, {.v = 0x7048383870483838ULL},
+ {.v = 0x080c0404080c0404ULL}, {.v = 0xf931c8c8f931c8c8ULL},
+ {.v = 0x53ce9d9d53ce9d9dULL}, {.v = 0xa740e7e7a740e7e7ULL},
+ {.v = 0x283c1414283c1414ULL}, {.v = 0x0bbab1b10bbab1b1ULL},
+ {.v = 0x67e0878767e08787ULL}, {.v = 0x51cd9c9c51cd9c9cULL},
+ {.v = 0xd708dfdfd708dfdfULL}, {.v = 0xdeb16f6fdeb16f6fULL},
+ {.v = 0x9b62f9f99b62f9f9ULL}, {.v = 0xdd07dadadd07dadaULL},
+ {.v = 0x547e2a2a547e2a2aULL}, {.v = 0xe125c4c4e125c4c4ULL},
+ {.v = 0xb2eb5959b2eb5959ULL}, {.v = 0x2c3a16162c3a1616ULL},
+ {.v = 0xe89c7474e89c7474ULL}, {.v = 0x4bda91914bda9191ULL},
+ {.v = 0x3f94abab3f94ababULL}, {.v = 0x4c6a26264c6a2626ULL},
+ {.v = 0xc2a36161c2a36161ULL}, {.v = 0xec9a7676ec9a7676ULL},
+ {.v = 0x685c3434685c3434ULL}, {.v = 0x567d2b2b567d2b2bULL},
+ {.v = 0x339eadad339eadadULL}, {.v = 0x5bc299995bc29999ULL},
+ {.v = 0x9f64fbfb9f64fbfbULL}, {.v = 0xe4967272e4967272ULL},
+ {.v = 0xb15dececb15dececULL}, {.v = 0x6655333366553333ULL},
+ {.v = 0x2436121224361212ULL}, {.v = 0xd50bdeded50bdedeULL},
+ {.v = 0x59c1989859c19898ULL}, {.v = 0x764d3b3b764d3b3bULL},
+ {.v = 0xe929c0c0e929c0c0ULL}, {.v = 0x5fc49b9b5fc49b9bULL},
+ {.v = 0x7c423e3e7c423e3eULL}, {.v = 0x3028181830281818ULL},
+ {.v = 0x2030101020301010ULL}, {.v = 0x744e3a3a744e3a3aULL},
+ {.v = 0xacfa5656acfa5656ULL}, {.v = 0xab4ae1e1ab4ae1e1ULL},
+ {.v = 0xee997777ee997777ULL}, {.v = 0xfb32c9c9fb32c9c9ULL},
+ {.v = 0x3c221e1e3c221e1eULL}, {.v = 0x55cb9e9e55cb9e9eULL},
+ {.v = 0x43d6959543d69595ULL}, {.v = 0x2f8ca3a32f8ca3a3ULL},
+ {.v = 0x49d9909049d99090ULL}, {.v = 0x322b1919322b1919ULL},
+ {.v = 0x3991a8a83991a8a8ULL}, {.v = 0xd8b46c6cd8b46c6cULL},
+ {.v = 0x121b0909121b0909ULL}, {.v = 0xc919d0d0c919d0d0ULL},
+ {.v = 0x8979f0f08979f0f0ULL}, {.v = 0x65e3868665e38686ULL}};
+
+IMB_DLL_LOCAL
+DECLARE_ALIGNED(const int S1_T0[256], 32) = {
+ 0xA56363C6, 0x847C7CF8, 0x997777EE, 0x8D7B7BF6, 0xDF2F2FF, 0xBD6B6BD6,
+ 0xB16F6FDE, 0x54C5C591, 0x50303060, 0x3010102, 0xA96767CE, 0x7D2B2B56,
+ 0x19FEFEE7, 0x62D7D7B5, 0xE6ABAB4D, 0x9A7676EC, 0x45CACA8F, 0x9D82821F,
+ 0x40C9C989, 0x877D7DFA, 0x15FAFAEF, 0xEB5959B2, 0xC947478E, 0xBF0F0FB,
+ 0xECADAD41, 0x67D4D4B3, 0xFDA2A25F, 0xEAAFAF45, 0xBF9C9C23, 0xF7A4A453,
+ 0x967272E4, 0x5BC0C09B, 0xC2B7B775, 0x1CFDFDE1, 0xAE93933D, 0x6A26264C,
+ 0x5A36366C, 0x413F3F7E, 0x2F7F7F5, 0x4FCCCC83, 0x5C343468, 0xF4A5A551,
+ 0x34E5E5D1, 0x8F1F1F9, 0x937171E2, 0x73D8D8AB, 0x53313162, 0x3F15152A,
+ 0xC040408, 0x52C7C795, 0x65232346, 0x5EC3C39D, 0x28181830, 0xA1969637,
+ 0xF05050A, 0xB59A9A2F, 0x907070E, 0x36121224, 0x9B80801B, 0x3DE2E2DF,
+ 0x26EBEBCD, 0x6927274E, 0xCDB2B27F, 0x9F7575EA, 0x1B090912, 0x9E83831D,
+ 0x742C2C58, 0x2E1A1A34, 0x2D1B1B36, 0xB26E6EDC, 0xEE5A5AB4, 0xFBA0A05B,
+ 0xF65252A4, 0x4D3B3B76, 0x61D6D6B7, 0xCEB3B37D, 0x7B292952, 0x3EE3E3DD,
+ 0x712F2F5E, 0x97848413, 0xF55353A6, 0x68D1D1B9, 0x0, 0x2CEDEDC1,
+ 0x60202040, 0x1FFCFCE3, 0xC8B1B179, 0xED5B5BB6, 0xBE6A6AD4, 0x46CBCB8D,
+ 0xD9BEBE67, 0x4B393972, 0xDE4A4A94, 0xD44C4C98, 0xE85858B0, 0x4ACFCF85,
+ 0x6BD0D0BB, 0x2AEFEFC5, 0xE5AAAA4F, 0x16FBFBED, 0xC5434386, 0xD74D4D9A,
+ 0x55333366, 0x94858511, 0xCF45458A, 0x10F9F9E9, 0x6020204, 0x817F7FFE,
+ 0xF05050A0, 0x443C3C78, 0xBA9F9F25, 0xE3A8A84B, 0xF35151A2, 0xFEA3A35D,
+ 0xC0404080, 0x8A8F8F05, 0xAD92923F, 0xBC9D9D21, 0x48383870, 0x4F5F5F1,
+ 0xDFBCBC63, 0xC1B6B677, 0x75DADAAF, 0x63212142, 0x30101020, 0x1AFFFFE5,
+ 0xEF3F3FD, 0x6DD2D2BF, 0x4CCDCD81, 0x140C0C18, 0x35131326, 0x2FECECC3,
+ 0xE15F5FBE, 0xA2979735, 0xCC444488, 0x3917172E, 0x57C4C493, 0xF2A7A755,
+ 0x827E7EFC, 0x473D3D7A, 0xAC6464C8, 0xE75D5DBA, 0x2B191932, 0x957373E6,
+ 0xA06060C0, 0x98818119, 0xD14F4F9E, 0x7FDCDCA3, 0x66222244, 0x7E2A2A54,
+ 0xAB90903B, 0x8388880B, 0xCA46468C, 0x29EEEEC7, 0xD3B8B86B, 0x3C141428,
+ 0x79DEDEA7, 0xE25E5EBC, 0x1D0B0B16, 0x76DBDBAD, 0x3BE0E0DB, 0x56323264,
+ 0x4E3A3A74, 0x1E0A0A14, 0xDB494992, 0xA06060C, 0x6C242448, 0xE45C5CB8,
+ 0x5DC2C29F, 0x6ED3D3BD, 0xEFACAC43, 0xA66262C4, 0xA8919139, 0xA4959531,
+ 0x37E4E4D3, 0x8B7979F2, 0x32E7E7D5, 0x43C8C88B, 0x5937376E, 0xB76D6DDA,
+ 0x8C8D8D01, 0x64D5D5B1, 0xD24E4E9C, 0xE0A9A949, 0xB46C6CD8, 0xFA5656AC,
+ 0x7F4F4F3, 0x25EAEACF, 0xAF6565CA, 0x8E7A7AF4, 0xE9AEAE47, 0x18080810,
+ 0xD5BABA6F, 0x887878F0, 0x6F25254A, 0x722E2E5C, 0x241C1C38, 0xF1A6A657,
+ 0xC7B4B473, 0x51C6C697, 0x23E8E8CB, 0x7CDDDDA1, 0x9C7474E8, 0x211F1F3E,
+ 0xDD4B4B96, 0xDCBDBD61, 0x868B8B0D, 0x858A8A0F, 0x907070E0, 0x423E3E7C,
+ 0xC4B5B571, 0xAA6666CC, 0xD8484890, 0x5030306, 0x1F6F6F7, 0x120E0E1C,
+ 0xA36161C2, 0x5F35356A, 0xF95757AE, 0xD0B9B969, 0x91868617, 0x58C1C199,
+ 0x271D1D3A, 0xB99E9E27, 0x38E1E1D9, 0x13F8F8EB, 0xB398982B, 0x33111122,
+ 0xBB6969D2, 0x70D9D9A9, 0x898E8E07, 0xA7949433, 0xB69B9B2D, 0x221E1E3C,
+ 0x92878715, 0x20E9E9C9, 0x49CECE87, 0xFF5555AA, 0x78282850, 0x7ADFDFA5,
+ 0x8F8C8C03, 0xF8A1A159, 0x80898909, 0x170D0D1A, 0xDABFBF65, 0x31E6E6D7,
+ 0xC6424284, 0xB86868D0, 0xC3414182, 0xB0999929, 0x772D2D5A, 0x110F0F1E,
+ 0xCBB0B07B, 0xFC5454A8, 0xD6BBBB6D, 0x3A16162C};
+
+IMB_DLL_LOCAL
+DECLARE_ALIGNED(const int S1_T1[256], 32) = {
+ 0x6363C6A5, 0x7C7CF884, 0x7777EE99, 0x7B7BF68D, 0xF2F2FF0D, 0x6B6BD6BD,
+ 0x6F6FDEB1, 0xC5C59154, 0x30306050, 0x1010203, 0x6767CEA9, 0x2B2B567D,
+ 0xFEFEE719, 0xD7D7B562, 0xABAB4DE6, 0x7676EC9A, 0xCACA8F45, 0x82821F9D,
+ 0xC9C98940, 0x7D7DFA87, 0xFAFAEF15, 0x5959B2EB, 0x47478EC9, 0xF0F0FB0B,
+ 0xADAD41EC, 0xD4D4B367, 0xA2A25FFD, 0xAFAF45EA, 0x9C9C23BF, 0xA4A453F7,
+ 0x7272E496, 0xC0C09B5B, 0xB7B775C2, 0xFDFDE11C, 0x93933DAE, 0x26264C6A,
+ 0x36366C5A, 0x3F3F7E41, 0xF7F7F502, 0xCCCC834F, 0x3434685C, 0xA5A551F4,
+ 0xE5E5D134, 0xF1F1F908, 0x7171E293, 0xD8D8AB73, 0x31316253, 0x15152A3F,
+ 0x404080C, 0xC7C79552, 0x23234665, 0xC3C39D5E, 0x18183028, 0x969637A1,
+ 0x5050A0F, 0x9A9A2FB5, 0x7070E09, 0x12122436, 0x80801B9B, 0xE2E2DF3D,
+ 0xEBEBCD26, 0x27274E69, 0xB2B27FCD, 0x7575EA9F, 0x909121B, 0x83831D9E,
+ 0x2C2C5874, 0x1A1A342E, 0x1B1B362D, 0x6E6EDCB2, 0x5A5AB4EE, 0xA0A05BFB,
+ 0x5252A4F6, 0x3B3B764D, 0xD6D6B761, 0xB3B37DCE, 0x2929527B, 0xE3E3DD3E,
+ 0x2F2F5E71, 0x84841397, 0x5353A6F5, 0xD1D1B968, 0x0, 0xEDEDC12C,
+ 0x20204060, 0xFCFCE31F, 0xB1B179C8, 0x5B5BB6ED, 0x6A6AD4BE, 0xCBCB8D46,
+ 0xBEBE67D9, 0x3939724B, 0x4A4A94DE, 0x4C4C98D4, 0x5858B0E8, 0xCFCF854A,
+ 0xD0D0BB6B, 0xEFEFC52A, 0xAAAA4FE5, 0xFBFBED16, 0x434386C5, 0x4D4D9AD7,
+ 0x33336655, 0x85851194, 0x45458ACF, 0xF9F9E910, 0x2020406, 0x7F7FFE81,
+ 0x5050A0F0, 0x3C3C7844, 0x9F9F25BA, 0xA8A84BE3, 0x5151A2F3, 0xA3A35DFE,
+ 0x404080C0, 0x8F8F058A, 0x92923FAD, 0x9D9D21BC, 0x38387048, 0xF5F5F104,
+ 0xBCBC63DF, 0xB6B677C1, 0xDADAAF75, 0x21214263, 0x10102030, 0xFFFFE51A,
+ 0xF3F3FD0E, 0xD2D2BF6D, 0xCDCD814C, 0xC0C1814, 0x13132635, 0xECECC32F,
+ 0x5F5FBEE1, 0x979735A2, 0x444488CC, 0x17172E39, 0xC4C49357, 0xA7A755F2,
+ 0x7E7EFC82, 0x3D3D7A47, 0x6464C8AC, 0x5D5DBAE7, 0x1919322B, 0x7373E695,
+ 0x6060C0A0, 0x81811998, 0x4F4F9ED1, 0xDCDCA37F, 0x22224466, 0x2A2A547E,
+ 0x90903BAB, 0x88880B83, 0x46468CCA, 0xEEEEC729, 0xB8B86BD3, 0x1414283C,
+ 0xDEDEA779, 0x5E5EBCE2, 0xB0B161D, 0xDBDBAD76, 0xE0E0DB3B, 0x32326456,
+ 0x3A3A744E, 0xA0A141E, 0x494992DB, 0x6060C0A, 0x2424486C, 0x5C5CB8E4,
+ 0xC2C29F5D, 0xD3D3BD6E, 0xACAC43EF, 0x6262C4A6, 0x919139A8, 0x959531A4,
+ 0xE4E4D337, 0x7979F28B, 0xE7E7D532, 0xC8C88B43, 0x37376E59, 0x6D6DDAB7,
+ 0x8D8D018C, 0xD5D5B164, 0x4E4E9CD2, 0xA9A949E0, 0x6C6CD8B4, 0x5656ACFA,
+ 0xF4F4F307, 0xEAEACF25, 0x6565CAAF, 0x7A7AF48E, 0xAEAE47E9, 0x8081018,
+ 0xBABA6FD5, 0x7878F088, 0x25254A6F, 0x2E2E5C72, 0x1C1C3824, 0xA6A657F1,
+ 0xB4B473C7, 0xC6C69751, 0xE8E8CB23, 0xDDDDA17C, 0x7474E89C, 0x1F1F3E21,
+ 0x4B4B96DD, 0xBDBD61DC, 0x8B8B0D86, 0x8A8A0F85, 0x7070E090, 0x3E3E7C42,
+ 0xB5B571C4, 0x6666CCAA, 0x484890D8, 0x3030605, 0xF6F6F701, 0xE0E1C12,
+ 0x6161C2A3, 0x35356A5F, 0x5757AEF9, 0xB9B969D0, 0x86861791, 0xC1C19958,
+ 0x1D1D3A27, 0x9E9E27B9, 0xE1E1D938, 0xF8F8EB13, 0x98982BB3, 0x11112233,
+ 0x6969D2BB, 0xD9D9A970, 0x8E8E0789, 0x949433A7, 0x9B9B2DB6, 0x1E1E3C22,
+ 0x87871592, 0xE9E9C920, 0xCECE8749, 0x5555AAFF, 0x28285078, 0xDFDFA57A,
+ 0x8C8C038F, 0xA1A159F8, 0x89890980, 0xD0D1A17, 0xBFBF65DA, 0xE6E6D731,
+ 0x424284C6, 0x6868D0B8, 0x414182C3, 0x999929B0, 0x2D2D5A77, 0xF0F1E11,
+ 0xB0B07BCB, 0x5454A8FC, 0xBBBB6DD6, 0x16162C3A};
+
+IMB_DLL_LOCAL
+DECLARE_ALIGNED(const int S1_T2[256], 32) = {
+ 0x63C6A563, 0x7CF8847C, 0x77EE9977, 0x7BF68D7B, 0xF2FF0DF2, 0x6BD6BD6B,
+ 0x6FDEB16F, 0xC59154C5, 0x30605030, 0x1020301, 0x67CEA967, 0x2B567D2B,
+ 0xFEE719FE, 0xD7B562D7, 0xAB4DE6AB, 0x76EC9A76, 0xCA8F45CA, 0x821F9D82,
+ 0xC98940C9, 0x7DFA877D, 0xFAEF15FA, 0x59B2EB59, 0x478EC947, 0xF0FB0BF0,
+ 0xAD41ECAD, 0xD4B367D4, 0xA25FFDA2, 0xAF45EAAF, 0x9C23BF9C, 0xA453F7A4,
+ 0x72E49672, 0xC09B5BC0, 0xB775C2B7, 0xFDE11CFD, 0x933DAE93, 0x264C6A26,
+ 0x366C5A36, 0x3F7E413F, 0xF7F502F7, 0xCC834FCC, 0x34685C34, 0xA551F4A5,
+ 0xE5D134E5, 0xF1F908F1, 0x71E29371, 0xD8AB73D8, 0x31625331, 0x152A3F15,
+ 0x4080C04, 0xC79552C7, 0x23466523, 0xC39D5EC3, 0x18302818, 0x9637A196,
+ 0x50A0F05, 0x9A2FB59A, 0x70E0907, 0x12243612, 0x801B9B80, 0xE2DF3DE2,
+ 0xEBCD26EB, 0x274E6927, 0xB27FCDB2, 0x75EA9F75, 0x9121B09, 0x831D9E83,
+ 0x2C58742C, 0x1A342E1A, 0x1B362D1B, 0x6EDCB26E, 0x5AB4EE5A, 0xA05BFBA0,
+ 0x52A4F652, 0x3B764D3B, 0xD6B761D6, 0xB37DCEB3, 0x29527B29, 0xE3DD3EE3,
+ 0x2F5E712F, 0x84139784, 0x53A6F553, 0xD1B968D1, 0x0, 0xEDC12CED,
+ 0x20406020, 0xFCE31FFC, 0xB179C8B1, 0x5BB6ED5B, 0x6AD4BE6A, 0xCB8D46CB,
+ 0xBE67D9BE, 0x39724B39, 0x4A94DE4A, 0x4C98D44C, 0x58B0E858, 0xCF854ACF,
+ 0xD0BB6BD0, 0xEFC52AEF, 0xAA4FE5AA, 0xFBED16FB, 0x4386C543, 0x4D9AD74D,
+ 0x33665533, 0x85119485, 0x458ACF45, 0xF9E910F9, 0x2040602, 0x7FFE817F,
+ 0x50A0F050, 0x3C78443C, 0x9F25BA9F, 0xA84BE3A8, 0x51A2F351, 0xA35DFEA3,
+ 0x4080C040, 0x8F058A8F, 0x923FAD92, 0x9D21BC9D, 0x38704838, 0xF5F104F5,
+ 0xBC63DFBC, 0xB677C1B6, 0xDAAF75DA, 0x21426321, 0x10203010, 0xFFE51AFF,
+ 0xF3FD0EF3, 0xD2BF6DD2, 0xCD814CCD, 0xC18140C, 0x13263513, 0xECC32FEC,
+ 0x5FBEE15F, 0x9735A297, 0x4488CC44, 0x172E3917, 0xC49357C4, 0xA755F2A7,
+ 0x7EFC827E, 0x3D7A473D, 0x64C8AC64, 0x5DBAE75D, 0x19322B19, 0x73E69573,
+ 0x60C0A060, 0x81199881, 0x4F9ED14F, 0xDCA37FDC, 0x22446622, 0x2A547E2A,
+ 0x903BAB90, 0x880B8388, 0x468CCA46, 0xEEC729EE, 0xB86BD3B8, 0x14283C14,
+ 0xDEA779DE, 0x5EBCE25E, 0xB161D0B, 0xDBAD76DB, 0xE0DB3BE0, 0x32645632,
+ 0x3A744E3A, 0xA141E0A, 0x4992DB49, 0x60C0A06, 0x24486C24, 0x5CB8E45C,
+ 0xC29F5DC2, 0xD3BD6ED3, 0xAC43EFAC, 0x62C4A662, 0x9139A891, 0x9531A495,
+ 0xE4D337E4, 0x79F28B79, 0xE7D532E7, 0xC88B43C8, 0x376E5937, 0x6DDAB76D,
+ 0x8D018C8D, 0xD5B164D5, 0x4E9CD24E, 0xA949E0A9, 0x6CD8B46C, 0x56ACFA56,
+ 0xF4F307F4, 0xEACF25EA, 0x65CAAF65, 0x7AF48E7A, 0xAE47E9AE, 0x8101808,
+ 0xBA6FD5BA, 0x78F08878, 0x254A6F25, 0x2E5C722E, 0x1C38241C, 0xA657F1A6,
+ 0xB473C7B4, 0xC69751C6, 0xE8CB23E8, 0xDDA17CDD, 0x74E89C74, 0x1F3E211F,
+ 0x4B96DD4B, 0xBD61DCBD, 0x8B0D868B, 0x8A0F858A, 0x70E09070, 0x3E7C423E,
+ 0xB571C4B5, 0x66CCAA66, 0x4890D848, 0x3060503, 0xF6F701F6, 0xE1C120E,
+ 0x61C2A361, 0x356A5F35, 0x57AEF957, 0xB969D0B9, 0x86179186, 0xC19958C1,
+ 0x1D3A271D, 0x9E27B99E, 0xE1D938E1, 0xF8EB13F8, 0x982BB398, 0x11223311,
+ 0x69D2BB69, 0xD9A970D9, 0x8E07898E, 0x9433A794, 0x9B2DB69B, 0x1E3C221E,
+ 0x87159287, 0xE9C920E9, 0xCE8749CE, 0x55AAFF55, 0x28507828, 0xDFA57ADF,
+ 0x8C038F8C, 0xA159F8A1, 0x89098089, 0xD1A170D, 0xBF65DABF, 0xE6D731E6,
+ 0x4284C642, 0x68D0B868, 0x4182C341, 0x9929B099, 0x2D5A772D, 0xF1E110F,
+ 0xB07BCBB0, 0x54A8FC54, 0xBB6DD6BB, 0x162C3A16};
+
+IMB_DLL_LOCAL
+DECLARE_ALIGNED(const int S1_T3[256], 32) = {
+ 0xC6A56363, 0xF8847C7C, 0xEE997777, 0xF68D7B7B, 0xFF0DF2F2, 0xD6BD6B6B,
+ 0xDEB16F6F, 0x9154C5C5, 0x60503030, 0x2030101, 0xCEA96767, 0x567D2B2B,
+ 0xE719FEFE, 0xB562D7D7, 0x4DE6ABAB, 0xEC9A7676, 0x8F45CACA, 0x1F9D8282,
+ 0x8940C9C9, 0xFA877D7D, 0xEF15FAFA, 0xB2EB5959, 0x8EC94747, 0xFB0BF0F0,
+ 0x41ECADAD, 0xB367D4D4, 0x5FFDA2A2, 0x45EAAFAF, 0x23BF9C9C, 0x53F7A4A4,
+ 0xE4967272, 0x9B5BC0C0, 0x75C2B7B7, 0xE11CFDFD, 0x3DAE9393, 0x4C6A2626,
+ 0x6C5A3636, 0x7E413F3F, 0xF502F7F7, 0x834FCCCC, 0x685C3434, 0x51F4A5A5,
+ 0xD134E5E5, 0xF908F1F1, 0xE2937171, 0xAB73D8D8, 0x62533131, 0x2A3F1515,
+ 0x80C0404, 0x9552C7C7, 0x46652323, 0x9D5EC3C3, 0x30281818, 0x37A19696,
+ 0xA0F0505, 0x2FB59A9A, 0xE090707, 0x24361212, 0x1B9B8080, 0xDF3DE2E2,
+ 0xCD26EBEB, 0x4E692727, 0x7FCDB2B2, 0xEA9F7575, 0x121B0909, 0x1D9E8383,
+ 0x58742C2C, 0x342E1A1A, 0x362D1B1B, 0xDCB26E6E, 0xB4EE5A5A, 0x5BFBA0A0,
+ 0xA4F65252, 0x764D3B3B, 0xB761D6D6, 0x7DCEB3B3, 0x527B2929, 0xDD3EE3E3,
+ 0x5E712F2F, 0x13978484, 0xA6F55353, 0xB968D1D1, 0x0, 0xC12CEDED,
+ 0x40602020, 0xE31FFCFC, 0x79C8B1B1, 0xB6ED5B5B, 0xD4BE6A6A, 0x8D46CBCB,
+ 0x67D9BEBE, 0x724B3939, 0x94DE4A4A, 0x98D44C4C, 0xB0E85858, 0x854ACFCF,
+ 0xBB6BD0D0, 0xC52AEFEF, 0x4FE5AAAA, 0xED16FBFB, 0x86C54343, 0x9AD74D4D,
+ 0x66553333, 0x11948585, 0x8ACF4545, 0xE910F9F9, 0x4060202, 0xFE817F7F,
+ 0xA0F05050, 0x78443C3C, 0x25BA9F9F, 0x4BE3A8A8, 0xA2F35151, 0x5DFEA3A3,
+ 0x80C04040, 0x58A8F8F, 0x3FAD9292, 0x21BC9D9D, 0x70483838, 0xF104F5F5,
+ 0x63DFBCBC, 0x77C1B6B6, 0xAF75DADA, 0x42632121, 0x20301010, 0xE51AFFFF,
+ 0xFD0EF3F3, 0xBF6DD2D2, 0x814CCDCD, 0x18140C0C, 0x26351313, 0xC32FECEC,
+ 0xBEE15F5F, 0x35A29797, 0x88CC4444, 0x2E391717, 0x9357C4C4, 0x55F2A7A7,
+ 0xFC827E7E, 0x7A473D3D, 0xC8AC6464, 0xBAE75D5D, 0x322B1919, 0xE6957373,
+ 0xC0A06060, 0x19988181, 0x9ED14F4F, 0xA37FDCDC, 0x44662222, 0x547E2A2A,
+ 0x3BAB9090, 0xB838888, 0x8CCA4646, 0xC729EEEE, 0x6BD3B8B8, 0x283C1414,
+ 0xA779DEDE, 0xBCE25E5E, 0x161D0B0B, 0xAD76DBDB, 0xDB3BE0E0, 0x64563232,
+ 0x744E3A3A, 0x141E0A0A, 0x92DB4949, 0xC0A0606, 0x486C2424, 0xB8E45C5C,
+ 0x9F5DC2C2, 0xBD6ED3D3, 0x43EFACAC, 0xC4A66262, 0x39A89191, 0x31A49595,
+ 0xD337E4E4, 0xF28B7979, 0xD532E7E7, 0x8B43C8C8, 0x6E593737, 0xDAB76D6D,
+ 0x18C8D8D, 0xB164D5D5, 0x9CD24E4E, 0x49E0A9A9, 0xD8B46C6C, 0xACFA5656,
+ 0xF307F4F4, 0xCF25EAEA, 0xCAAF6565, 0xF48E7A7A, 0x47E9AEAE, 0x10180808,
+ 0x6FD5BABA, 0xF0887878, 0x4A6F2525, 0x5C722E2E, 0x38241C1C, 0x57F1A6A6,
+ 0x73C7B4B4, 0x9751C6C6, 0xCB23E8E8, 0xA17CDDDD, 0xE89C7474, 0x3E211F1F,
+ 0x96DD4B4B, 0x61DCBDBD, 0xD868B8B, 0xF858A8A, 0xE0907070, 0x7C423E3E,
+ 0x71C4B5B5, 0xCCAA6666, 0x90D84848, 0x6050303, 0xF701F6F6, 0x1C120E0E,
+ 0xC2A36161, 0x6A5F3535, 0xAEF95757, 0x69D0B9B9, 0x17918686, 0x9958C1C1,
+ 0x3A271D1D, 0x27B99E9E, 0xD938E1E1, 0xEB13F8F8, 0x2BB39898, 0x22331111,
+ 0xD2BB6969, 0xA970D9D9, 0x7898E8E, 0x33A79494, 0x2DB69B9B, 0x3C221E1E,
+ 0x15928787, 0xC920E9E9, 0x8749CECE, 0xAAFF5555, 0x50782828, 0xA57ADFDF,
+ 0x38F8C8C, 0x59F8A1A1, 0x9808989, 0x1A170D0D, 0x65DABFBF, 0xD731E6E6,
+ 0x84C64242, 0xD0B86868, 0x82C34141, 0x29B09999, 0x5A772D2D, 0x1E110F0F,
+ 0x7BCBB0B0, 0xA8FC5454, 0x6DD6BBBB, 0x2C3A1616};
+
+IMB_DLL_LOCAL
+DECLARE_ALIGNED(const int S2_T0[256], 32) = {
+ 0x6f25254a, 0x6c242448, 0x957373e6, 0xa96767ce, 0x10d7d7c7, 0x9baeae35,
+ 0xe45c5cb8, 0x50303060, 0x85a4a421, 0x5beeeeb5, 0xb26e6edc, 0x34cbcbff,
+ 0x877d7dfa, 0xb6b5b503, 0xef82826d, 0x04dbdbdf, 0x45e4e4a1, 0xfb8e8e75,
+ 0xd8484890, 0xdb494992, 0xd14f4f9e, 0xe75d5dba, 0xbe6a6ad4, 0x887878f0,
+ 0x907070e0, 0xf1888879, 0x51e8e8b9, 0xe15f5fbe, 0xe25e5ebc, 0xe5848461,
+ 0xaf6565ca, 0x4fe2e2ad, 0x01d8d8d9, 0x52e9e9bb, 0x3dccccf1, 0x5eededb3,
+ 0xc0404080, 0x712f2f5e, 0x33111122, 0x78282850, 0xf95757ae, 0x1fd2d2cd,
+ 0x9dacac31, 0x4ce3e3af, 0xde4a4a94, 0x3f15152a, 0x2d1b1b36, 0xa2b9b91b,
+ 0xbfb2b20d, 0xe9808069, 0xe6858563, 0x83a6a625, 0x722e2e5c, 0x06020204,
+ 0xc947478e, 0x7b292952, 0x0907070e, 0xdd4b4b96, 0x120e0e1c, 0x2ac1c1eb,
+ 0xf35151a2, 0x97aaaa3d, 0xf289897b, 0x15d4d4c1, 0x37cacafd, 0x03010102,
+ 0xca46468c, 0xbcb3b30f, 0x58efefb7, 0x0eddddd3, 0xcc444488, 0x8d7b7bf6,
+ 0x2fc2c2ed, 0x817f7ffe, 0xabbebe15, 0x2cc3c3ef, 0xc89f9f57, 0x60202040,
+ 0xd44c4c98, 0xac6464c8, 0xec83836f, 0x8fa2a22d, 0xb86868d0, 0xc6424284,
+ 0x35131326, 0xb5b4b401, 0xc3414182, 0x3ecdcdf3, 0xa7baba1d, 0x23c6c6e5,
+ 0xa4bbbb1f, 0xb76d6dda, 0xd74d4d9a, 0x937171e2, 0x63212142, 0x75f4f481,
+ 0xfe8d8d73, 0xb9b0b009, 0x46e5e5a3, 0xdc93934f, 0x6bfefe95, 0xf88f8f77,
+ 0x43e6e6a5, 0x38cfcff7, 0xc5434386, 0xcf45458a, 0x53313162, 0x66222244,
+ 0x5937376e, 0x5a36366c, 0xd3969645, 0x67fafa9d, 0xadbcbc11, 0x110f0f1e,
+ 0x18080810, 0xf65252a4, 0x271d1d3a, 0xff5555aa, 0x2e1a1a34, 0x26c5c5e3,
+ 0xd24e4e9c, 0x65232346, 0xbb6969d2, 0x8e7a7af4, 0xdf92924d, 0x68ffff97,
+ 0xed5b5bb6, 0xee5a5ab4, 0x54ebebbf, 0xc79a9a5d, 0x241c1c38, 0x92a9a93b,
+ 0x1ad1d1cb, 0x827e7efc, 0x170d0d1a, 0x6dfcfc91, 0xf05050a0, 0xf78a8a7d,
+ 0xb3b6b605, 0xa66262c4, 0x76f5f583, 0x1e0a0a14, 0x61f8f899, 0x0ddcdcd1,
+ 0x05030306, 0x443c3c78, 0x140c0c18, 0x4b393972, 0x7af1f18b, 0xa1b8b819,
+ 0x7cf3f38f, 0x473d3d7a, 0x7ff2f28d, 0x16d5d5c3, 0xd0979747, 0xaa6666cc,
+ 0xea81816b, 0x56323264, 0x89a0a029, 0x00000000, 0x0a06060c, 0x3bcecef5,
+ 0x73f6f685, 0x57eaeabd, 0xb0b7b707, 0x3917172e, 0x70f7f787, 0xfd8c8c71,
+ 0x8b7979f2, 0x13d6d6c5, 0x80a7a727, 0xa8bfbf17, 0xf48b8b7f, 0x413f3f7e,
+ 0x211f1f3e, 0xf55353a6, 0xa56363c6, 0x9f7575ea, 0x5f35356a, 0x742c2c58,
+ 0xa06060c0, 0x6efdfd93, 0x6927274e, 0x1cd3d3cf, 0xd5949441, 0x86a5a523,
+ 0x847c7cf8, 0x8aa1a12b, 0x0f05050a, 0xe85858b0, 0x772d2d5a, 0xaebdbd13,
+ 0x02d9d9db, 0x20c7c7e7, 0x98afaf37, 0xbd6b6bd6, 0xfc5454a8, 0x1d0b0b16,
+ 0x49e0e0a9, 0x48383870, 0x0c040408, 0x31c8c8f9, 0xce9d9d53, 0x40e7e7a7,
+ 0x3c141428, 0xbab1b10b, 0xe0878767, 0xcd9c9c51, 0x08dfdfd7, 0xb16f6fde,
+ 0x62f9f99b, 0x07dadadd, 0x7e2a2a54, 0x25c4c4e1, 0xeb5959b2, 0x3a16162c,
+ 0x9c7474e8, 0xda91914b, 0x94abab3f, 0x6a26264c, 0xa36161c2, 0x9a7676ec,
+ 0x5c343468, 0x7d2b2b56, 0x9eadad33, 0xc299995b, 0x64fbfb9f, 0x967272e4,
+ 0x5dececb1, 0x55333366, 0x36121224, 0x0bdeded5, 0xc1989859, 0x4d3b3b76,
+ 0x29c0c0e9, 0xc49b9b5f, 0x423e3e7c, 0x28181830, 0x30101020, 0x4e3a3a74,
+ 0xfa5656ac, 0x4ae1e1ab, 0x997777ee, 0x32c9c9fb, 0x221e1e3c, 0xcb9e9e55,
+ 0xd6959543, 0x8ca3a32f, 0xd9909049, 0x2b191932, 0x91a8a839, 0xb46c6cd8,
+ 0x1b090912, 0x19d0d0c9, 0x79f0f089, 0xe3868665};
+
+IMB_DLL_LOCAL
+DECLARE_ALIGNED(const int S2_T1[256], 32) = {
+ 0x25254a6f, 0x2424486c, 0x7373e695, 0x6767cea9, 0xd7d7c710, 0xaeae359b,
+ 0x5c5cb8e4, 0x30306050, 0xa4a42185, 0xeeeeb55b, 0x6e6edcb2, 0xcbcbff34,
+ 0x7d7dfa87, 0xb5b503b6, 0x82826def, 0xdbdbdf04, 0xe4e4a145, 0x8e8e75fb,
+ 0x484890d8, 0x494992db, 0x4f4f9ed1, 0x5d5dbae7, 0x6a6ad4be, 0x7878f088,
+ 0x7070e090, 0x888879f1, 0xe8e8b951, 0x5f5fbee1, 0x5e5ebce2, 0x848461e5,
+ 0x6565caaf, 0xe2e2ad4f, 0xd8d8d901, 0xe9e9bb52, 0xccccf13d, 0xededb35e,
+ 0x404080c0, 0x2f2f5e71, 0x11112233, 0x28285078, 0x5757aef9, 0xd2d2cd1f,
+ 0xacac319d, 0xe3e3af4c, 0x4a4a94de, 0x15152a3f, 0x1b1b362d, 0xb9b91ba2,
+ 0xb2b20dbf, 0x808069e9, 0x858563e6, 0xa6a62583, 0x2e2e5c72, 0x02020406,
+ 0x47478ec9, 0x2929527b, 0x07070e09, 0x4b4b96dd, 0x0e0e1c12, 0xc1c1eb2a,
+ 0x5151a2f3, 0xaaaa3d97, 0x89897bf2, 0xd4d4c115, 0xcacafd37, 0x01010203,
+ 0x46468cca, 0xb3b30fbc, 0xefefb758, 0xddddd30e, 0x444488cc, 0x7b7bf68d,
+ 0xc2c2ed2f, 0x7f7ffe81, 0xbebe15ab, 0xc3c3ef2c, 0x9f9f57c8, 0x20204060,
+ 0x4c4c98d4, 0x6464c8ac, 0x83836fec, 0xa2a22d8f, 0x6868d0b8, 0x424284c6,
+ 0x13132635, 0xb4b401b5, 0x414182c3, 0xcdcdf33e, 0xbaba1da7, 0xc6c6e523,
+ 0xbbbb1fa4, 0x6d6ddab7, 0x4d4d9ad7, 0x7171e293, 0x21214263, 0xf4f48175,
+ 0x8d8d73fe, 0xb0b009b9, 0xe5e5a346, 0x93934fdc, 0xfefe956b, 0x8f8f77f8,
+ 0xe6e6a543, 0xcfcff738, 0x434386c5, 0x45458acf, 0x31316253, 0x22224466,
+ 0x37376e59, 0x36366c5a, 0x969645d3, 0xfafa9d67, 0xbcbc11ad, 0x0f0f1e11,
+ 0x08081018, 0x5252a4f6, 0x1d1d3a27, 0x5555aaff, 0x1a1a342e, 0xc5c5e326,
+ 0x4e4e9cd2, 0x23234665, 0x6969d2bb, 0x7a7af48e, 0x92924ddf, 0xffff9768,
+ 0x5b5bb6ed, 0x5a5ab4ee, 0xebebbf54, 0x9a9a5dc7, 0x1c1c3824, 0xa9a93b92,
+ 0xd1d1cb1a, 0x7e7efc82, 0x0d0d1a17, 0xfcfc916d, 0x5050a0f0, 0x8a8a7df7,
+ 0xb6b605b3, 0x6262c4a6, 0xf5f58376, 0x0a0a141e, 0xf8f89961, 0xdcdcd10d,
+ 0x03030605, 0x3c3c7844, 0x0c0c1814, 0x3939724b, 0xf1f18b7a, 0xb8b819a1,
+ 0xf3f38f7c, 0x3d3d7a47, 0xf2f28d7f, 0xd5d5c316, 0x979747d0, 0x6666ccaa,
+ 0x81816bea, 0x32326456, 0xa0a02989, 0x00000000, 0x06060c0a, 0xcecef53b,
+ 0xf6f68573, 0xeaeabd57, 0xb7b707b0, 0x17172e39, 0xf7f78770, 0x8c8c71fd,
+ 0x7979f28b, 0xd6d6c513, 0xa7a72780, 0xbfbf17a8, 0x8b8b7ff4, 0x3f3f7e41,
+ 0x1f1f3e21, 0x5353a6f5, 0x6363c6a5, 0x7575ea9f, 0x35356a5f, 0x2c2c5874,
+ 0x6060c0a0, 0xfdfd936e, 0x27274e69, 0xd3d3cf1c, 0x949441d5, 0xa5a52386,
+ 0x7c7cf884, 0xa1a12b8a, 0x05050a0f, 0x5858b0e8, 0x2d2d5a77, 0xbdbd13ae,
+ 0xd9d9db02, 0xc7c7e720, 0xafaf3798, 0x6b6bd6bd, 0x5454a8fc, 0x0b0b161d,
+ 0xe0e0a949, 0x38387048, 0x0404080c, 0xc8c8f931, 0x9d9d53ce, 0xe7e7a740,
+ 0x1414283c, 0xb1b10bba, 0x878767e0, 0x9c9c51cd, 0xdfdfd708, 0x6f6fdeb1,
+ 0xf9f99b62, 0xdadadd07, 0x2a2a547e, 0xc4c4e125, 0x5959b2eb, 0x16162c3a,
+ 0x7474e89c, 0x91914bda, 0xabab3f94, 0x26264c6a, 0x6161c2a3, 0x7676ec9a,
+ 0x3434685c, 0x2b2b567d, 0xadad339e, 0x99995bc2, 0xfbfb9f64, 0x7272e496,
+ 0xececb15d, 0x33336655, 0x12122436, 0xdeded50b, 0x989859c1, 0x3b3b764d,
+ 0xc0c0e929, 0x9b9b5fc4, 0x3e3e7c42, 0x18183028, 0x10102030, 0x3a3a744e,
+ 0x5656acfa, 0xe1e1ab4a, 0x7777ee99, 0xc9c9fb32, 0x1e1e3c22, 0x9e9e55cb,
+ 0x959543d6, 0xa3a32f8c, 0x909049d9, 0x1919322b, 0xa8a83991, 0x6c6cd8b4,
+ 0x0909121b, 0xd0d0c919, 0xf0f08979, 0x868665e3};
+
+IMB_DLL_LOCAL
+DECLARE_ALIGNED(const int S2_T2[256], 32) = {
+ 0x254a6f25, 0x24486c24, 0x73e69573, 0x67cea967, 0xd7c710d7, 0xae359bae,
+ 0x5cb8e45c, 0x30605030, 0xa42185a4, 0xeeb55bee, 0x6edcb26e, 0xcbff34cb,
+ 0x7dfa877d, 0xb503b6b5, 0x826def82, 0xdbdf04db, 0xe4a145e4, 0x8e75fb8e,
+ 0x4890d848, 0x4992db49, 0x4f9ed14f, 0x5dbae75d, 0x6ad4be6a, 0x78f08878,
+ 0x70e09070, 0x8879f188, 0xe8b951e8, 0x5fbee15f, 0x5ebce25e, 0x8461e584,
+ 0x65caaf65, 0xe2ad4fe2, 0xd8d901d8, 0xe9bb52e9, 0xccf13dcc, 0xedb35eed,
+ 0x4080c040, 0x2f5e712f, 0x11223311, 0x28507828, 0x57aef957, 0xd2cd1fd2,
+ 0xac319dac, 0xe3af4ce3, 0x4a94de4a, 0x152a3f15, 0x1b362d1b, 0xb91ba2b9,
+ 0xb20dbfb2, 0x8069e980, 0x8563e685, 0xa62583a6, 0x2e5c722e, 0x02040602,
+ 0x478ec947, 0x29527b29, 0x070e0907, 0x4b96dd4b, 0x0e1c120e, 0xc1eb2ac1,
+ 0x51a2f351, 0xaa3d97aa, 0x897bf289, 0xd4c115d4, 0xcafd37ca, 0x01020301,
+ 0x468cca46, 0xb30fbcb3, 0xefb758ef, 0xddd30edd, 0x4488cc44, 0x7bf68d7b,
+ 0xc2ed2fc2, 0x7ffe817f, 0xbe15abbe, 0xc3ef2cc3, 0x9f57c89f, 0x20406020,
+ 0x4c98d44c, 0x64c8ac64, 0x836fec83, 0xa22d8fa2, 0x68d0b868, 0x4284c642,
+ 0x13263513, 0xb401b5b4, 0x4182c341, 0xcdf33ecd, 0xba1da7ba, 0xc6e523c6,
+ 0xbb1fa4bb, 0x6ddab76d, 0x4d9ad74d, 0x71e29371, 0x21426321, 0xf48175f4,
+ 0x8d73fe8d, 0xb009b9b0, 0xe5a346e5, 0x934fdc93, 0xfe956bfe, 0x8f77f88f,
+ 0xe6a543e6, 0xcff738cf, 0x4386c543, 0x458acf45, 0x31625331, 0x22446622,
+ 0x376e5937, 0x366c5a36, 0x9645d396, 0xfa9d67fa, 0xbc11adbc, 0x0f1e110f,
+ 0x08101808, 0x52a4f652, 0x1d3a271d, 0x55aaff55, 0x1a342e1a, 0xc5e326c5,
+ 0x4e9cd24e, 0x23466523, 0x69d2bb69, 0x7af48e7a, 0x924ddf92, 0xff9768ff,
+ 0x5bb6ed5b, 0x5ab4ee5a, 0xebbf54eb, 0x9a5dc79a, 0x1c38241c, 0xa93b92a9,
+ 0xd1cb1ad1, 0x7efc827e, 0x0d1a170d, 0xfc916dfc, 0x50a0f050, 0x8a7df78a,
+ 0xb605b3b6, 0x62c4a662, 0xf58376f5, 0x0a141e0a, 0xf89961f8, 0xdcd10ddc,
+ 0x03060503, 0x3c78443c, 0x0c18140c, 0x39724b39, 0xf18b7af1, 0xb819a1b8,
+ 0xf38f7cf3, 0x3d7a473d, 0xf28d7ff2, 0xd5c316d5, 0x9747d097, 0x66ccaa66,
+ 0x816bea81, 0x32645632, 0xa02989a0, 0x00000000, 0x060c0a06, 0xcef53bce,
+ 0xf68573f6, 0xeabd57ea, 0xb707b0b7, 0x172e3917, 0xf78770f7, 0x8c71fd8c,
+ 0x79f28b79, 0xd6c513d6, 0xa72780a7, 0xbf17a8bf, 0x8b7ff48b, 0x3f7e413f,
+ 0x1f3e211f, 0x53a6f553, 0x63c6a563, 0x75ea9f75, 0x356a5f35, 0x2c58742c,
+ 0x60c0a060, 0xfd936efd, 0x274e6927, 0xd3cf1cd3, 0x9441d594, 0xa52386a5,
+ 0x7cf8847c, 0xa12b8aa1, 0x050a0f05, 0x58b0e858, 0x2d5a772d, 0xbd13aebd,
+ 0xd9db02d9, 0xc7e720c7, 0xaf3798af, 0x6bd6bd6b, 0x54a8fc54, 0x0b161d0b,
+ 0xe0a949e0, 0x38704838, 0x04080c04, 0xc8f931c8, 0x9d53ce9d, 0xe7a740e7,
+ 0x14283c14, 0xb10bbab1, 0x8767e087, 0x9c51cd9c, 0xdfd708df, 0x6fdeb16f,
+ 0xf99b62f9, 0xdadd07da, 0x2a547e2a, 0xc4e125c4, 0x59b2eb59, 0x162c3a16,
+ 0x74e89c74, 0x914bda91, 0xab3f94ab, 0x264c6a26, 0x61c2a361, 0x76ec9a76,
+ 0x34685c34, 0x2b567d2b, 0xad339ead, 0x995bc299, 0xfb9f64fb, 0x72e49672,
+ 0xecb15dec, 0x33665533, 0x12243612, 0xded50bde, 0x9859c198, 0x3b764d3b,
+ 0xc0e929c0, 0x9b5fc49b, 0x3e7c423e, 0x18302818, 0x10203010, 0x3a744e3a,
+ 0x56acfa56, 0xe1ab4ae1, 0x77ee9977, 0xc9fb32c9, 0x1e3c221e, 0x9e55cb9e,
+ 0x9543d695, 0xa32f8ca3, 0x9049d990, 0x19322b19, 0xa83991a8, 0x6cd8b46c,
+ 0x09121b09, 0xd0c919d0, 0xf08979f0, 0x8665e386};
+
+IMB_DLL_LOCAL
+DECLARE_ALIGNED(const int S2_T3[256], 32) = {
+ 0x4a6f2525, 0x486c2424, 0xe6957373, 0xcea96767, 0xc710d7d7, 0x359baeae,
+ 0xb8e45c5c, 0x60503030, 0x2185a4a4, 0xb55beeee, 0xdcb26e6e, 0xff34cbcb,
+ 0xfa877d7d, 0x03b6b5b5, 0x6def8282, 0xdf04dbdb, 0xa145e4e4, 0x75fb8e8e,
+ 0x90d84848, 0x92db4949, 0x9ed14f4f, 0xbae75d5d, 0xd4be6a6a, 0xf0887878,
+ 0xe0907070, 0x79f18888, 0xb951e8e8, 0xbee15f5f, 0xbce25e5e, 0x61e58484,
+ 0xcaaf6565, 0xad4fe2e2, 0xd901d8d8, 0xbb52e9e9, 0xf13dcccc, 0xb35eeded,
+ 0x80c04040, 0x5e712f2f, 0x22331111, 0x50782828, 0xaef95757, 0xcd1fd2d2,
+ 0x319dacac, 0xaf4ce3e3, 0x94de4a4a, 0x2a3f1515, 0x362d1b1b, 0x1ba2b9b9,
+ 0x0dbfb2b2, 0x69e98080, 0x63e68585, 0x2583a6a6, 0x5c722e2e, 0x04060202,
+ 0x8ec94747, 0x527b2929, 0x0e090707, 0x96dd4b4b, 0x1c120e0e, 0xeb2ac1c1,
+ 0xa2f35151, 0x3d97aaaa, 0x7bf28989, 0xc115d4d4, 0xfd37caca, 0x02030101,
+ 0x8cca4646, 0x0fbcb3b3, 0xb758efef, 0xd30edddd, 0x88cc4444, 0xf68d7b7b,
+ 0xed2fc2c2, 0xfe817f7f, 0x15abbebe, 0xef2cc3c3, 0x57c89f9f, 0x40602020,
+ 0x98d44c4c, 0xc8ac6464, 0x6fec8383, 0x2d8fa2a2, 0xd0b86868, 0x84c64242,
+ 0x26351313, 0x01b5b4b4, 0x82c34141, 0xf33ecdcd, 0x1da7baba, 0xe523c6c6,
+ 0x1fa4bbbb, 0xdab76d6d, 0x9ad74d4d, 0xe2937171, 0x42632121, 0x8175f4f4,
+ 0x73fe8d8d, 0x09b9b0b0, 0xa346e5e5, 0x4fdc9393, 0x956bfefe, 0x77f88f8f,
+ 0xa543e6e6, 0xf738cfcf, 0x86c54343, 0x8acf4545, 0x62533131, 0x44662222,
+ 0x6e593737, 0x6c5a3636, 0x45d39696, 0x9d67fafa, 0x11adbcbc, 0x1e110f0f,
+ 0x10180808, 0xa4f65252, 0x3a271d1d, 0xaaff5555, 0x342e1a1a, 0xe326c5c5,
+ 0x9cd24e4e, 0x46652323, 0xd2bb6969, 0xf48e7a7a, 0x4ddf9292, 0x9768ffff,
+ 0xb6ed5b5b, 0xb4ee5a5a, 0xbf54ebeb, 0x5dc79a9a, 0x38241c1c, 0x3b92a9a9,
+ 0xcb1ad1d1, 0xfc827e7e, 0x1a170d0d, 0x916dfcfc, 0xa0f05050, 0x7df78a8a,
+ 0x05b3b6b6, 0xc4a66262, 0x8376f5f5, 0x141e0a0a, 0x9961f8f8, 0xd10ddcdc,
+ 0x06050303, 0x78443c3c, 0x18140c0c, 0x724b3939, 0x8b7af1f1, 0x19a1b8b8,
+ 0x8f7cf3f3, 0x7a473d3d, 0x8d7ff2f2, 0xc316d5d5, 0x47d09797, 0xccaa6666,
+ 0x6bea8181, 0x64563232, 0x2989a0a0, 0x00000000, 0x0c0a0606, 0xf53bcece,
+ 0x8573f6f6, 0xbd57eaea, 0x07b0b7b7, 0x2e391717, 0x8770f7f7, 0x71fd8c8c,
+ 0xf28b7979, 0xc513d6d6, 0x2780a7a7, 0x17a8bfbf, 0x7ff48b8b, 0x7e413f3f,
+ 0x3e211f1f, 0xa6f55353, 0xc6a56363, 0xea9f7575, 0x6a5f3535, 0x58742c2c,
+ 0xc0a06060, 0x936efdfd, 0x4e692727, 0xcf1cd3d3, 0x41d59494, 0x2386a5a5,
+ 0xf8847c7c, 0x2b8aa1a1, 0x0a0f0505, 0xb0e85858, 0x5a772d2d, 0x13aebdbd,
+ 0xdb02d9d9, 0xe720c7c7, 0x3798afaf, 0xd6bd6b6b, 0xa8fc5454, 0x161d0b0b,
+ 0xa949e0e0, 0x70483838, 0x080c0404, 0xf931c8c8, 0x53ce9d9d, 0xa740e7e7,
+ 0x283c1414, 0x0bbab1b1, 0x67e08787, 0x51cd9c9c, 0xd708dfdf, 0xdeb16f6f,
+ 0x9b62f9f9, 0xdd07dada, 0x547e2a2a, 0xe125c4c4, 0xb2eb5959, 0x2c3a1616,
+ 0xe89c7474, 0x4bda9191, 0x3f94abab, 0x4c6a2626, 0xc2a36161, 0xec9a7676,
+ 0x685c3434, 0x567d2b2b, 0x339eadad, 0x5bc29999, 0x9f64fbfb, 0xe4967272,
+ 0xb15decec, 0x66553333, 0x24361212, 0xd50bdede, 0x59c19898, 0x764d3b3b,
+ 0xe929c0c0, 0x5fc49b9b, 0x7c423e3e, 0x30281818, 0x20301010, 0x744e3a3a,
+ 0xacfa5656, 0xab4ae1e1, 0xee997777, 0xfb32c9c9, 0x3c221e1e, 0x55cb9e9e,
+ 0x43d69595, 0x2f8ca3a3, 0x49d99090, 0x322b1919, 0x3991a8a8, 0xd8b46c6c,
+ 0x121b0909, 0xc919d0d0, 0x8979f0f0, 0x65e38686};
diff --git a/src/spdk/intel-ipsec-mb/sse/aes128_cbc_dec_by4_sse.asm b/src/spdk/intel-ipsec-mb/sse/aes128_cbc_dec_by4_sse.asm
new file mode 100644
index 000000000..7c57688ff
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/sse/aes128_cbc_dec_by4_sse.asm
@@ -0,0 +1,532 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+; routine to do AES cbc decrypt on 16n bytes doing AES by 4
+
+; XMM registers are clobbered. Saving/restoring must be done at a higher level
+
+; void aes_cbc_dec_128_sse(void *in,
+; UINT128 *IV,
+; UINT128 keys[11],
+; void *out,
+; UINT64 len_bytes);
+;
+; arg 1: IN: pointer to input (cipher text)
+; arg 2: IV: pointer to IV
+; arg 3: KEYS: pointer to keys
+; arg 4: OUT: pointer to output (plain text)
+; arg 5: LEN: length in bytes (multiple of 16)
+;
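+; A minimal C-side usage sketch (the aes_keyexp_128() call and the buffer
+; declarations are illustrative assumptions, not part of this file):
+;
+;     UINT128 enc_keys[11], dec_keys[11];
+;     aes_keyexp_128(key, enc_keys, dec_keys);
+;     aes_cbc_dec_128_sse(cipher_text, &iv, dec_keys, plain_text, len);
+;     /* len must be a multiple of 16 bytes; the IV is not written back */
+;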
+%include "include/os.asm"
+
+%ifndef AES_CBC_DEC_128
+%define AES_CBC_DEC_128 aes_cbc_dec_128_sse
+%endif
+
+%define MOVDQ movdqu
+
+%ifdef LINUX
+%define IN rdi
+%define IV rsi
+%define KEYS rdx
+%define OUT rcx
+%define LEN r8
+%else
+%define IN rcx
+%define IV rdx
+%define KEYS r8
+%define OUT r9
+%define LEN r10
+%endif
+
+%define IDX rax
+%define TMP IDX
+%define XDATA0 xmm0
+%define XDATA1 xmm1
+%define XDATA2 xmm2
+%define XDATA3 xmm3
+%define XKEY0 xmm4
+%define XKEY2 xmm5
+%define XKEY4 xmm6
+%define XKEY6 xmm7
+%define XKEY8 xmm8
+%define XKEY10 xmm9
+%define XIV xmm10
+%define XSAVED0 xmm11
+%define XSAVED1 xmm12
+%define XSAVED2 xmm13
+%define XSAVED3 xmm14
+%define XKEY xmm15
+
+%define IV_TMP XSAVED3
+
+section .text
+
+MKGLOBAL(AES_CBC_DEC_128,function,internal)
+AES_CBC_DEC_128:
+%ifndef LINUX
+ mov LEN, [rsp + 8*5]
+%endif
+
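+	; LEN mod (4*16) selects the size of the first pass: 1, 2 or 3 blocks,
+	; or a full 4 blocks when LEN is a multiple of 64; main_loop then
+	; handles the remaining data 4 blocks at a time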
+ mov TMP, LEN
+ and TMP, 3*16
+ jz initial_4
+ cmp TMP, 2*16
+ jb initial_1
+ ja initial_3
+
+initial_2:
+ ; load cipher text
+ movdqu XDATA0, [IN + 0*16]
+ movdqu XDATA1, [IN + 1*16]
+
+ movdqa XKEY0, [KEYS + 0*16]
+
+ ; save cipher text
+ movdqa XSAVED0, XDATA0
+ movdqa XIV, XDATA1
+
+ pxor XDATA0, XKEY0 ; 0. ARK
+ pxor XDATA1, XKEY0
+
+ movdqa XKEY2, [KEYS + 2*16]
+
+ aesdec XDATA0, [KEYS + 1*16] ; 1. DEC
+ aesdec XDATA1, [KEYS + 1*16]
+
+ mov IDX, 2*16
+
+ aesdec XDATA0, XKEY2 ; 2. DEC
+ aesdec XDATA1, XKEY2
+
+ movdqa XKEY4, [KEYS + 4*16]
+
+ aesdec XDATA0, [KEYS + 3*16] ; 3. DEC
+ aesdec XDATA1, [KEYS + 3*16]
+
+ movdqu IV_TMP, [IV]
+
+ aesdec XDATA0, XKEY4 ; 4. DEC
+ aesdec XDATA1, XKEY4
+
+ movdqa XKEY6, [KEYS + 6*16]
+
+ aesdec XDATA0, [KEYS + 5*16] ; 5. DEC
+ aesdec XDATA1, [KEYS + 5*16]
+
+ aesdec XDATA0, XKEY6 ; 6. DEC
+ aesdec XDATA1, XKEY6
+
+ movdqa XKEY8, [KEYS + 8*16]
+
+ aesdec XDATA0, [KEYS + 7*16] ; 7. DEC
+ aesdec XDATA1, [KEYS + 7*16]
+
+ aesdec XDATA0, XKEY8 ; 8. DEC
+ aesdec XDATA1, XKEY8
+
+ movdqa XKEY10, [KEYS + 10*16]
+
+ aesdec XDATA0, [KEYS + 9*16] ; 9. DEC
+ aesdec XDATA1, [KEYS + 9*16]
+
+ aesdeclast XDATA0, XKEY10 ; 10. DEC
+ aesdeclast XDATA1, XKEY10
+
+ pxor XDATA0, IV_TMP
+ pxor XDATA1, XSAVED0
+
+ movdqu [OUT + 0*16], XDATA0
+ movdqu [OUT + 1*16], XDATA1
+
+ cmp LEN, 2*16
+ je done
+ jmp main_loop
+
+
+ align 16
+initial_1:
+ ; load cipher text
+ movdqu XDATA0, [IN + 0*16]
+
+ movdqa XKEY0, [KEYS + 0*16]
+
+ ; save cipher text
+ movdqa XIV, XDATA0
+
+ pxor XDATA0, XKEY0 ; 0. ARK
+
+ movdqa XKEY2, [KEYS + 2*16]
+
+ aesdec XDATA0, [KEYS + 1*16] ; 1. DEC
+
+ mov IDX, 1*16
+
+ aesdec XDATA0, XKEY2 ; 2. DEC
+
+ movdqa XKEY4, [KEYS + 4*16]
+
+ aesdec XDATA0, [KEYS + 3*16] ; 3. DEC
+
+ movdqu IV_TMP, [IV]
+
+ aesdec XDATA0, XKEY4 ; 4. DEC
+
+ movdqa XKEY6, [KEYS + 6*16]
+
+ aesdec XDATA0, [KEYS + 5*16] ; 5. DEC
+
+ aesdec XDATA0, XKEY6 ; 6. DEC
+
+ movdqa XKEY8, [KEYS + 8*16]
+
+ aesdec XDATA0, [KEYS + 7*16] ; 7. DEC
+
+ aesdec XDATA0, XKEY8 ; 8. DEC
+
+ movdqa XKEY10, [KEYS + 10*16]
+
+ aesdec XDATA0, [KEYS + 9*16] ; 9. DEC
+
+ aesdeclast XDATA0, XKEY10 ; 10. DEC
+
+ pxor XDATA0, IV_TMP
+
+ movdqu [OUT + 0*16], XDATA0
+
+ cmp LEN, 1*16
+ je done
+ jmp main_loop
+
+
+initial_3:
+ ; load cipher text
+ movdqu XDATA0, [IN + 0*16]
+ movdqu XDATA1, [IN + 1*16]
+ movdqu XDATA2, [IN + 2*16]
+
+ movdqa XKEY0, [KEYS + 0*16]
+
+ ; save cipher text
+ movdqa XSAVED0, XDATA0
+ movdqa XSAVED1, XDATA1
+ movdqa XIV, XDATA2
+
+ movdqa XKEY, [KEYS + 1*16]
+
+ pxor XDATA0, XKEY0 ; 0. ARK
+ pxor XDATA1, XKEY0
+ pxor XDATA2, XKEY0
+
+ movdqa XKEY2, [KEYS + 2*16]
+
+ aesdec XDATA0, XKEY ; 1. DEC
+ aesdec XDATA1, XKEY
+ aesdec XDATA2, XKEY
+
+ movdqa XKEY, [KEYS + 3*16]
+
+ mov IDX, 3*16
+
+ aesdec XDATA0, XKEY2 ; 2. DEC
+ aesdec XDATA1, XKEY2
+ aesdec XDATA2, XKEY2
+
+ movdqa XKEY4, [KEYS + 4*16]
+
+ aesdec XDATA0, XKEY ; 3. DEC
+ aesdec XDATA1, XKEY
+ aesdec XDATA2, XKEY
+
+ movdqa XKEY, [KEYS + 5*16]
+ movdqu IV_TMP, [IV]
+
+ aesdec XDATA0, XKEY4 ; 4. DEC
+ aesdec XDATA1, XKEY4
+ aesdec XDATA2, XKEY4
+
+ movdqa XKEY6, [KEYS + 6*16]
+
+ aesdec XDATA0, XKEY ; 5. DEC
+ aesdec XDATA1, XKEY
+ aesdec XDATA2, XKEY
+
+ movdqa XKEY, [KEYS + 7*16]
+
+ aesdec XDATA0, XKEY6 ; 6. DEC
+ aesdec XDATA1, XKEY6
+ aesdec XDATA2, XKEY6
+
+ movdqa XKEY8, [KEYS + 8*16]
+
+ aesdec XDATA0, XKEY ; 7. DEC
+ aesdec XDATA1, XKEY
+ aesdec XDATA2, XKEY
+
+ movdqa XKEY, [KEYS + 9*16]
+
+ aesdec XDATA0, XKEY8 ; 8. DEC
+ aesdec XDATA1, XKEY8
+ aesdec XDATA2, XKEY8
+
+ movdqa XKEY10, [KEYS + 10*16]
+
+ aesdec XDATA0, XKEY ; 9. DEC
+ aesdec XDATA1, XKEY
+ aesdec XDATA2, XKEY
+
+ aesdeclast XDATA0, XKEY10 ; 10. DEC
+ aesdeclast XDATA1, XKEY10
+ aesdeclast XDATA2, XKEY10
+
+ pxor XDATA0, IV_TMP
+ pxor XDATA1, XSAVED0
+ pxor XDATA2, XSAVED1
+
+ movdqu [OUT + 0*16], XDATA0
+ movdqu [OUT + 1*16], XDATA1
+ movdqu [OUT + 2*16], XDATA2
+
+ cmp LEN, 3*16
+ je done
+ jmp main_loop
+
+
+ align 16
+initial_4:
+ ; load cipher text
+ movdqu XDATA0, [IN + 0*16]
+ movdqu XDATA1, [IN + 1*16]
+ movdqu XDATA2, [IN + 2*16]
+ movdqu XDATA3, [IN + 3*16]
+
+ movdqa XKEY0, [KEYS + 0*16]
+
+ ; save cipher text
+ movdqa XSAVED0, XDATA0
+ movdqa XSAVED1, XDATA1
+ movdqa XSAVED2, XDATA2
+ movdqa XIV, XDATA3
+
+ movdqa XKEY, [KEYS + 1*16]
+
+ pxor XDATA0, XKEY0 ; 0. ARK
+ pxor XDATA1, XKEY0
+ pxor XDATA2, XKEY0
+ pxor XDATA3, XKEY0
+
+ movdqa XKEY2, [KEYS + 2*16]
+
+ aesdec XDATA0, XKEY ; 1. DEC
+ aesdec XDATA1, XKEY
+ aesdec XDATA2, XKEY
+ aesdec XDATA3, XKEY
+
+ movdqa XKEY, [KEYS + 3*16]
+
+ mov IDX, 4*16
+
+ aesdec XDATA0, XKEY2 ; 2. DEC
+ aesdec XDATA1, XKEY2
+ aesdec XDATA2, XKEY2
+ aesdec XDATA3, XKEY2
+
+ movdqa XKEY4, [KEYS + 4*16]
+
+ aesdec XDATA0, XKEY ; 3. DEC
+ aesdec XDATA1, XKEY
+ aesdec XDATA2, XKEY
+ aesdec XDATA3, XKEY
+
+ movdqa XKEY, [KEYS + 5*16]
+
+ movdqu IV_TMP, [IV]
+
+ aesdec XDATA0, XKEY4 ; 4. DEC
+ aesdec XDATA1, XKEY4
+ aesdec XDATA2, XKEY4
+ aesdec XDATA3, XKEY4
+
+ movdqa XKEY6, [KEYS + 6*16]
+
+ aesdec XDATA0, XKEY ; 5. DEC
+ aesdec XDATA1, XKEY
+ aesdec XDATA2, XKEY
+ aesdec XDATA3, XKEY
+
+ movdqa XKEY, [KEYS + 7*16]
+
+ aesdec XDATA0, XKEY6 ; 6. DEC
+ aesdec XDATA1, XKEY6
+ aesdec XDATA2, XKEY6
+ aesdec XDATA3, XKEY6
+
+ movdqa XKEY8, [KEYS + 8*16]
+
+ aesdec XDATA0, XKEY ; 7. DEC
+ aesdec XDATA1, XKEY
+ aesdec XDATA2, XKEY
+ aesdec XDATA3, XKEY
+
+ movdqa XKEY, [KEYS + 9*16]
+
+ aesdec XDATA0, XKEY8 ; 8. DEC
+ aesdec XDATA1, XKEY8
+ aesdec XDATA2, XKEY8
+ aesdec XDATA3, XKEY8
+
+ movdqa XKEY10, [KEYS + 10*16]
+
+ aesdec XDATA0, XKEY ; 9. DEC
+ aesdec XDATA1, XKEY
+ aesdec XDATA2, XKEY
+ aesdec XDATA3, XKEY
+
+ aesdeclast XDATA0, XKEY10 ; 10. DEC
+ aesdeclast XDATA1, XKEY10
+ aesdeclast XDATA2, XKEY10
+ aesdeclast XDATA3, XKEY10
+
+ pxor XDATA0, IV_TMP
+ pxor XDATA1, XSAVED0
+ pxor XDATA2, XSAVED1
+ pxor XDATA3, XSAVED2
+
+ movdqu [OUT + 0*16], XDATA0
+ movdqu [OUT + 1*16], XDATA1
+ movdqu [OUT + 2*16], XDATA2
+ movdqu [OUT + 3*16], XDATA3
+
+ cmp LEN, 4*16
+ jz done
+ jmp main_loop
+
+ align 16
+main_loop:
+ ; load cipher text
+ movdqu XDATA0, [IN + IDX + 0*16]
+ movdqu XDATA1, [IN + IDX + 1*16]
+ movdqu XDATA2, [IN + IDX + 2*16]
+ movdqu XDATA3, [IN + IDX + 3*16]
+
+ ; save cipher text
+ movdqa XSAVED0, XDATA0
+ movdqa XSAVED1, XDATA1
+ movdqa XSAVED2, XDATA2
+ movdqa XSAVED3, XDATA3
+
+ movdqa XKEY, [KEYS + 1*16]
+
+ pxor XDATA0, XKEY0 ; 0. ARK
+ pxor XDATA1, XKEY0
+ pxor XDATA2, XKEY0
+ pxor XDATA3, XKEY0
+
+ add IDX, 4*16
+
+ aesdec XDATA0, XKEY ; 1. DEC
+ aesdec XDATA1, XKEY
+ aesdec XDATA2, XKEY
+ aesdec XDATA3, XKEY
+
+ movdqa XKEY, [KEYS + 3*16]
+
+ aesdec XDATA0, XKEY2 ; 2. DEC
+ aesdec XDATA1, XKEY2
+ aesdec XDATA2, XKEY2
+ aesdec XDATA3, XKEY2
+
+ aesdec XDATA0, XKEY ; 3. DEC
+ aesdec XDATA1, XKEY
+ aesdec XDATA2, XKEY
+ aesdec XDATA3, XKEY
+
+ movdqa XKEY, [KEYS + 5*16]
+
+ aesdec XDATA0, XKEY4 ; 4. DEC
+ aesdec XDATA1, XKEY4
+ aesdec XDATA2, XKEY4
+ aesdec XDATA3, XKEY4
+
+ aesdec XDATA0, XKEY ; 5. DEC
+ aesdec XDATA1, XKEY
+ aesdec XDATA2, XKEY
+ aesdec XDATA3, XKEY
+
+ movdqa XKEY, [KEYS + 7*16]
+
+ aesdec XDATA0, XKEY6 ; 6. DEC
+ aesdec XDATA1, XKEY6
+ aesdec XDATA2, XKEY6
+ aesdec XDATA3, XKEY6
+
+ aesdec XDATA0, XKEY ; 7. DEC
+ aesdec XDATA1, XKEY
+ aesdec XDATA2, XKEY
+ aesdec XDATA3, XKEY
+
+ movdqa XKEY, [KEYS + 9*16]
+
+ aesdec XDATA0, XKEY8 ; 8. DEC
+ aesdec XDATA1, XKEY8
+ aesdec XDATA2, XKEY8
+ aesdec XDATA3, XKEY8
+
+ aesdec XDATA0, XKEY ; 9. DEC
+ aesdec XDATA1, XKEY
+ aesdec XDATA2, XKEY
+ aesdec XDATA3, XKEY
+
+ aesdeclast XDATA0, XKEY10 ; 10. DEC
+ aesdeclast XDATA1, XKEY10
+ aesdeclast XDATA2, XKEY10
+ aesdeclast XDATA3, XKEY10
+
+ pxor XDATA0, XIV
+ pxor XDATA1, XSAVED0
+ pxor XDATA2, XSAVED1
+ pxor XDATA3, XSAVED2
+
+ movdqu [OUT + IDX + 0*16 - 4*16], XDATA0
+ movdqu [OUT + IDX + 1*16 - 4*16], XDATA1
+ movdqu [OUT + IDX + 2*16 - 4*16], XDATA2
+ movdqu [OUT + IDX + 3*16 - 4*16], XDATA3
+
+ movdqa XIV, XSAVED3
+
+	cmp	IDX, LEN
+ jne main_loop
+
+done:
+; Don't write back IV
+; movdqu [IV], XIV
+
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/sse/aes128_cbc_mac_x4.asm b/src/spdk/intel-ipsec-mb/sse/aes128_cbc_mac_x4.asm
new file mode 100644
index 000000000..72e19f482
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/sse/aes128_cbc_mac_x4.asm
@@ -0,0 +1,31 @@
+;;
+;; Copyright (c) 2017-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+;;; Routine to compute CBC-MAC based on 128-bit CBC AES encryption code
+
+%define CBC_MAC
+%include "sse/aes_cbc_enc_128_x4.asm"
diff --git a/src/spdk/intel-ipsec-mb/sse/aes128_cntr_by4_sse.asm b/src/spdk/intel-ipsec-mb/sse/aes128_cntr_by4_sse.asm
new file mode 100644
index 000000000..11356afae
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/sse/aes128_cntr_by4_sse.asm
@@ -0,0 +1,545 @@
+;;
+;; Copyright (c) 2012-2019, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%include "include/os.asm"
+%include "job_aes_hmac.asm"
+%include "include/memcpy.asm"
+%include "include/const.inc"
+%include "include/reg_sizes.asm"
+
+; routine to do AES128 CNTR enc/decrypt "by4"
+; XMM registers are clobbered. Saving/restoring must be done at a higher level
+
+%ifndef AES_CNTR_128
+%define AES_CNTR_128 aes_cntr_128_sse
+%define AES_CNTR_BIT_128 aes_cntr_bit_128_sse
+%endif
+
+extern byteswap_const, set_byte15, ddq_add_1, ddq_add_2, ddq_add_3, ddq_add_4
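+; byteswap_const is the endianness shuffle mask, set_byte15 sets the last
+; counter byte to 1 (used in the CCM flow) and ddq_add_1..ddq_add_4 are the
+; 128-bit increments used to derive counter+1 .. counter+4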
+
+%define CONCAT(a,b) a %+ b
+%define MOVDQ movdqu
+
+%define xdata0 xmm0
+%define xdata1 xmm1
+%define xpart xmm1
+%define xdata2 xmm2
+%define xdata3 xmm3
+%define xdata4 xmm4
+%define xdata5 xmm5
+%define xdata6 xmm6
+%define xdata7 xmm7
+%define xcounter xmm8
+%define xtmp xmm8
+%define xbyteswap xmm9
+%define xtmp2 xmm9
+%define xkey0 xmm10
+%define xtmp3 xmm10
+%define xkey3 xmm11
+%define xkey6 xmm12
+%define xkey9 xmm13
+%define xkeyA xmm14
+%define xkeyB xmm15
+
+%ifdef CNTR_CCM_SSE
+%ifdef LINUX
+%define job rdi
+%define p_in rsi
+%define p_keys rdx
+%define p_out rcx
+%define num_bytes r8
+%define p_ivlen r9
+%else ;; LINUX
+%define job rcx
+%define p_in rdx
+%define p_keys r8
+%define p_out r9
+%define num_bytes r10
+%define p_ivlen rax
+%endif ;; LINUX
+%define p_IV r11
+%else ;; CNTR_CCM_SSE
+%ifdef LINUX
+%define p_in rdi
+%define p_IV rsi
+%define p_keys rdx
+%define p_out rcx
+%define num_bytes r8
+%define num_bits r8
+%define p_ivlen r9
+%else ;; LINUX
+%define p_in rcx
+%define p_IV rdx
+%define p_keys r8
+%define p_out r9
+%define num_bytes r10
+%define num_bits r10
+%define p_ivlen qword [rsp + 8*6]
+%endif ;; LINUX
+%endif ;; CNTR_CCM_SSE
+
+%define tmp r11
+%define flags r11
+
+%define r_bits r12
+%define tmp2 r13
+%define mask r14
+
+%macro do_aes_load 2
+ do_aes %1, %2, 1
+%endmacro
+
+%macro do_aes_noload 2
+ do_aes %1, %2, 0
+%endmacro
+
+; do_aes num_in_par, cntr_type, load_keys
+; This increments p_in, but not p_out
+%macro do_aes 3
+%define %%by %1
+%define %%cntr_type %2
+%define %%load_keys %3
+
+%if (%%load_keys)
+ movdqa xkey0, [p_keys + 0*16]
+%endif
+
+ movdqa xdata0, xcounter
+ pshufb xdata0, xbyteswap
+%assign i 1
+%rep (%%by - 1)
+ movdqa CONCAT(xdata,i), xcounter
+ paddd CONCAT(xdata,i), [rel CONCAT(ddq_add_,i)]
+ pshufb CONCAT(xdata,i), xbyteswap
+%assign i (i + 1)
+%endrep
+
+ movdqa xkeyA, [p_keys + 1*16]
+
+ pxor xdata0, xkey0
+%ifidn %%cntr_type, CNTR_BIT
+ paddq xcounter, [rel CONCAT(ddq_add_,%%by)]
+%else
+ paddd xcounter, [rel CONCAT(ddq_add_,%%by)]
+%endif
+
+%assign i 1
+%rep (%%by - 1)
+ pxor CONCAT(xdata,i), xkey0
+%assign i (i + 1)
+%endrep
+
+ movdqa xkeyB, [p_keys + 2*16]
+%assign i 0
+%rep %%by
+ aesenc CONCAT(xdata,i), xkeyA ; key 1
+%assign i (i+1)
+%endrep
+
+%if (%%load_keys)
+ movdqa xkey3, [p_keys + 3*16]
+%endif
+%assign i 0
+%rep %%by
+ aesenc CONCAT(xdata,i), xkeyB ; key 2
+%assign i (i+1)
+%endrep
+
+ add p_in, 16*%%by
+
+ movdqa xkeyB, [p_keys + 4*16]
+%assign i 0
+%rep %%by
+ aesenc CONCAT(xdata,i), xkey3 ; key 3
+%assign i (i+1)
+%endrep
+
+ movdqa xkeyA, [p_keys + 5*16]
+%assign i 0
+%rep %%by
+ aesenc CONCAT(xdata,i), xkeyB ; key 4
+%assign i (i+1)
+%endrep
+
+%if (%%load_keys)
+ movdqa xkey6, [p_keys + 6*16]
+%endif
+%assign i 0
+%rep %%by
+ aesenc CONCAT(xdata,i), xkeyA ; key 5
+%assign i (i+1)
+%endrep
+
+ movdqa xkeyA, [p_keys + 7*16]
+%assign i 0
+%rep %%by
+ aesenc CONCAT(xdata,i), xkey6 ; key 6
+%assign i (i+1)
+%endrep
+
+ movdqa xkeyB, [p_keys + 8*16]
+%assign i 0
+%rep %%by
+ aesenc CONCAT(xdata,i), xkeyA ; key 7
+%assign i (i+1)
+%endrep
+
+%if (%%load_keys)
+ movdqa xkey9, [p_keys + 9*16]
+%endif
+%assign i 0
+%rep %%by
+ aesenc CONCAT(xdata,i), xkeyB ; key 8
+%assign i (i+1)
+%endrep
+
+ movdqa xkeyB, [p_keys + 10*16]
+%assign i 0
+%rep %%by
+ aesenc CONCAT(xdata,i), xkey9 ; key 9
+%assign i (i+1)
+%endrep
+
+%assign i 0
+%rep %%by
+ aesenclast CONCAT(xdata,i), xkeyB ; key 10
+%assign i (i+1)
+%endrep
+
+%assign i 0
+%rep (%%by / 2)
+%assign j (i+1)
+ MOVDQ xkeyA, [p_in + i*16 - 16*%%by]
+ MOVDQ xkeyB, [p_in + j*16 - 16*%%by]
+ pxor CONCAT(xdata,i), xkeyA
+ pxor CONCAT(xdata,j), xkeyB
+%assign i (i+2)
+%endrep
+%if (i < %%by)
+ MOVDQ xkeyA, [p_in + i*16 - 16*%%by]
+ pxor CONCAT(xdata,i), xkeyA
+%endif
+
+%ifidn %%cntr_type, CNTR_BIT
+ ;; check if this is the end of the message
+ mov tmp, num_bytes
+ and tmp, ~(%%by*16)
+ jnz %%skip_preserve
+ ;; Check if there is a partial byte
+ or r_bits, r_bits
+ jz %%skip_preserve
+
+%assign idx (%%by - 1)
+ ;; Load output to get last partial byte
+ movdqu xtmp, [p_out + idx * 16]
+
+ ;; Save RCX in temporary GP register
+ mov tmp, rcx
+ mov mask, 0xff
+ mov cl, BYTE(r_bits)
+ shr mask, cl ;; e.g. 3 remaining bits -> mask = 00011111
+ mov rcx, tmp
+
+ movq xtmp2, mask
+ pslldq xtmp2, 15
+ ;; At this point, xtmp2 contains a mask with all 0s, but with some ones
+ ;; in the partial byte
+
+ ;; Clear all the bits that do not need to be preserved from the output
+ pand xtmp, xtmp2
+
+ ;; Clear all bits from the input that are not to be ciphered
+ pandn xtmp2, CONCAT(xdata, idx)
+ por xtmp2, xtmp
+ movdqa CONCAT(xdata, idx), xtmp2
+
+%%skip_preserve:
+%endif
+
+%assign i 0
+%rep %%by
+ MOVDQ [p_out + i*16], CONCAT(xdata,i)
+%assign i (i+1)
+%endrep
+%endmacro
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+section .text
+
+;; Macro performing AES-CTR.
+;;
+%macro DO_CNTR 1
+%define %%CNTR_TYPE %1 ; [in] Type of CNTR operation to do (CNTR/CNTR_BIT/CCM)
+
+%ifidn %%CNTR_TYPE, CCM
+ mov p_in, [job + _src]
+ add p_in, [job + _cipher_start_src_offset_in_bytes]
+ mov p_ivlen, [job + _iv_len_in_bytes]
+ mov num_bytes, [job + _msg_len_to_cipher_in_bytes]
+ mov p_keys, [job + _aes_enc_key_expanded]
+ mov p_out, [job + _dst]
+
+ movdqa xbyteswap, [rel byteswap_const]
+ ;; Prepare IV ;;
+
+ ;; Byte 0: flags with L'
+ ;; Calculate L' = 15 - Nonce length - 1 = 14 - IV length
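+	;; (e.g. a 13-byte nonce gives L' = 1, i.e. a 2-byte length field;
+	;;  a 7-byte nonce gives L' = 7, i.e. an 8-byte length field)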
+ mov flags, 14
+ sub flags, p_ivlen
+ movd xcounter, DWORD(flags)
+ ;; Bytes 1 - 13: Nonce (7 - 13 bytes long)
+
+ ;; Bytes 1 - 7 are always copied (first 7 bytes)
+ mov p_IV, [job + _iv]
+ pinsrb xcounter, [p_IV], 1
+ pinsrw xcounter, [p_IV + 1], 1
+ pinsrd xcounter, [p_IV + 3], 1
+
+ cmp p_ivlen, 7
+ je _finish_nonce_move
+
+ cmp p_ivlen, 8
+ je _iv_length_8
+ cmp p_ivlen, 9
+ je _iv_length_9
+ cmp p_ivlen, 10
+ je _iv_length_10
+ cmp p_ivlen, 11
+ je _iv_length_11
+ cmp p_ivlen, 12
+ je _iv_length_12
+
+ ;; Bytes 8 - 13
+_iv_length_13:
+ pinsrb xcounter, [p_IV + 12], 13
+_iv_length_12:
+ pinsrb xcounter, [p_IV + 11], 12
+_iv_length_11:
+ pinsrd xcounter, [p_IV + 7], 2
+ jmp _finish_nonce_move
+_iv_length_10:
+ pinsrb xcounter, [p_IV + 9], 10
+_iv_length_9:
+ pinsrb xcounter, [p_IV + 8], 9
+_iv_length_8:
+ pinsrb xcounter, [p_IV + 7], 8
+
+_finish_nonce_move:
+ ; last byte = 1
+ por xcounter, [rel set_byte15]
+%else ;; CNTR/CNTR_BIT
+%ifndef LINUX
+ mov num_bytes, [rsp + 8*5] ; arg5
+%endif
+
+%ifidn %%CNTR_TYPE, CNTR_BIT
+ push r12
+ push r13
+ push r14
+%endif
+
+ movdqa xbyteswap, [rel byteswap_const]
+%ifidn %%CNTR_TYPE, CNTR
+ test p_ivlen, 16
+ jnz %%iv_is_16_bytes
+ ; Read 12 bytes: Nonce + ESP IV. Then pad with block counter 0x00000001
+ mov DWORD(tmp), 0x01000000
+ pinsrq xcounter, [p_IV], 0
+ pinsrd xcounter, [p_IV + 8], 2
+ pinsrd xcounter, DWORD(tmp), 3
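+	; stored as a little-endian dword, 0x01000000 yields the byte sequence
+	; 00 00 00 01, i.e. an initial big-endian block counter of 1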
+
+%else ;; CNTR_BIT
+ ; Read 16 byte IV: Nonce + 8-byte block counter (BE)
+ movdqu xcounter, [p_IV]
+%endif
+%endif ;; CNTR/CNTR_BIT/CCM
+%%bswap_iv:
+ pshufb xcounter, xbyteswap
+
+ ;; calculate len
+ ;; convert bits to bytes (message length in bits for CNTR_BIT)
+%ifidn %%CNTR_TYPE, CNTR_BIT
+ mov r_bits, num_bits
+ add num_bits, 7
+ shr num_bits, 3 ; "num_bits" and "num_bytes" registers are the same
+ and r_bits, 7 ; Check if there are remainder bits (0-7)
+%endif
+ mov tmp, num_bytes
+ and tmp, 3*16
+	jz	%%chk		; no 1 to 3 block remainder to handle up front
+
+	; remainder of 1 to 3 blocks (tmp = 16, 32 or 48 bytes)
+ cmp tmp, 2*16
+ jg %%eq3
+ je %%eq2
+%%eq1:
+ do_aes_load 1, %%CNTR_TYPE ; 1 block
+ add p_out, 1*16
+ jmp %%chk
+
+%%eq2:
+ do_aes_load 2, %%CNTR_TYPE ; 2 blocks
+ add p_out, 2*16
+ jmp %%chk
+
+%%eq3:
+ do_aes_load 3, %%CNTR_TYPE ; 3 blocks
+ add p_out, 3*16
+ ; fall through to chk
+%%chk:
+ and num_bytes, ~(3*16)
+ jz %%do_return2
+
+ cmp num_bytes, 16
+ jb %%last
+
+ ; process multiples of 4 blocks
+ movdqa xkey0, [p_keys + 0*16]
+ movdqa xkey3, [p_keys + 3*16]
+ movdqa xkey6, [p_keys + 6*16]
+ movdqa xkey9, [p_keys + 9*16]
+
+align 32
+%%main_loop2:
+ ; num_bytes is a multiple of 4 blocks + partial bytes
+ do_aes_noload 4, %%CNTR_TYPE
+ add p_out, 4*16
+ sub num_bytes, 4*16
+ cmp num_bytes, 4*16
+ jae %%main_loop2
+
+ ; Check if there is a partial block
+ or num_bytes, num_bytes
+ jnz %%last
+
+%%do_return2:
+%ifidn %%CNTR_TYPE, CCM
+ mov rax, job
+ or dword [rax + _status], STS_COMPLETED_AES
+%endif
+
+%ifidn %%CNTR_TYPE, CNTR_BIT
+ pop r14
+ pop r13
+ pop r12
+%endif
+
+ ret
+
+%%last:
+
+ ; load partial block into XMM register
+ simd_load_sse_15_1 xpart, p_in, num_bytes
+
+%%final_ctr_enc:
+ ; Encryption of a single partial block
+ pshufb xcounter, xbyteswap
+ movdqa xdata0, xcounter
+ pxor xdata0, [p_keys + 16*0]
+%assign i 1
+%rep 9
+ aesenc xdata0, [p_keys + 16*i]
+%assign i (i+1)
+%endrep
+	; keystream created in xdata0
+ aesenclast xdata0, [p_keys + 16*i]
+
+ ; xor keystream with the message (scratch)
+ pxor xdata0, xpart
+
+%ifidn %%CNTR_TYPE, CNTR_BIT
+ ;; Check if there is a partial byte
+ or r_bits, r_bits
+ jz %%store_output
+
+ ;; Load output to get last partial byte
+ simd_load_sse_15_1 xtmp, p_out, num_bytes
+
+ ;; Save RCX in temporary GP register
+ mov tmp, rcx
+ mov mask, 0xff
+%ifidn r_bits, rcx
+%error "r_bits cannot be mapped to rcx!"
+%endif
+ mov cl, BYTE(r_bits)
+ shr mask, cl ;; e.g. 3 remaining bits -> mask = 00011111
+ mov rcx, tmp
+
+ movq xtmp2, mask
+
+ ;; Get number of full bytes in last block of 16 bytes
+ mov tmp, num_bytes
+ dec tmp
+ XPSLLB xtmp2, tmp, xtmp3, tmp2
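+	;; (XPSLLB shifts xtmp2 left by 'tmp' = num_bytes - 1 bytes, using
+	;;  xtmp3/tmp2 as scratch, so the mask lands on the final partial byte)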
+ ;; At this point, xtmp2 contains a mask with all 0s, but with some ones
+ ;; in the partial byte
+
+ ;; Clear all the bits that do not need to be preserved from the output
+ pand xtmp, xtmp2
+
+ ;; Clear the bits from the input that are not to be ciphered
+ pandn xtmp2, xdata0
+ por xtmp2, xtmp
+ movdqa xdata0, xtmp2
+%endif
+
+%%store_output:
+ ; copy result into the output buffer
+ simd_store_sse_15 p_out, xdata0, num_bytes, tmp, rax
+
+ jmp %%do_return2
+
+%%iv_is_16_bytes:
+ ; Read 16 byte IV: Nonce + ESP IV + block counter (BE)
+ movdqu xcounter, [p_IV]
+ jmp %%bswap_iv
+%endmacro
+
+align 32
+%ifdef CNTR_CCM_SSE
+; JOB_AES_HMAC * aes_cntr_ccm_128_sse(JOB_AES_HMAC *job)
+; arg 1 : job
+MKGLOBAL(AES_CNTR_CCM_128,function,internal)
+AES_CNTR_CCM_128:
+ DO_CNTR CCM
+%else
+;; aes_cntr_128_sse(void *in, void *IV, void *keys, void *out, UINT64 num_bytes, UINT64 iv_len)
+MKGLOBAL(AES_CNTR_128,function,internal)
+AES_CNTR_128:
+ DO_CNTR CNTR
+
+;; aes_cntr_bit_128_sse(void *in, void *IV, void *keys, void *out, UINT64 num_bits, UINT64 iv_len)
+MKGLOBAL(AES_CNTR_BIT_128,function,internal)
+AES_CNTR_BIT_128:
+ DO_CNTR CNTR_BIT
+%endif ;; CNTR_CCM_SSE
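+
+; For the raw-pointer variants above, the caller passes the expanded
+; encryption key schedule and either a 12-byte IV (nonce + ESP IV, with an
+; implicit block counter of 1) or a full 16-byte counter block, selected
+; via the iv_len argument.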
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/sse/aes128_cntr_ccm_by4_sse.asm b/src/spdk/intel-ipsec-mb/sse/aes128_cntr_ccm_by4_sse.asm
new file mode 100644
index 000000000..8c54715ee
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/sse/aes128_cntr_ccm_by4_sse.asm
@@ -0,0 +1,32 @@
+;;
+;; Copyright (c) 2019, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%define CNTR_CCM_SSE
+%ifndef AES_CNTR_CCM_128
+%define AES_CNTR_CCM_128 aes_cntr_ccm_128_sse
+%endif
+%include "sse/aes128_cntr_by4_sse.asm"
diff --git a/src/spdk/intel-ipsec-mb/sse/aes192_cbc_dec_by4_sse.asm b/src/spdk/intel-ipsec-mb/sse/aes192_cbc_dec_by4_sse.asm
new file mode 100644
index 000000000..144de4f70
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/sse/aes192_cbc_dec_by4_sse.asm
@@ -0,0 +1,590 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+; routine to do AES cbc decrypt on 16n bytes doing AES by 4
+
+; XMM registers are clobbered. Saving/restoring must be done at a higher level
+
+; void aes_cbc_dec_192_sse(void *in,
+; UINT128 *IV,
+;                          UINT128 keys[13], // 13 round keys: 12 rounds + initial key add
+; void *out,
+; UINT64 len_bytes);
+;
+; arg 1: IN: pointer to input (cipher text)
+; arg 2: IV: pointer to IV
+; arg 3: KEYS: pointer to keys
+; arg 4: OUT: pointer to output (plain text)
+; arg 5: LEN: length in bytes (multiple of 16)
+;
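+; The flow mirrors aes128_cbc_dec_by4_sse.asm, extended to the 12 aesdec
+; rounds (13 round keys) of AES-192.
+;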
+%include "include/os.asm"
+
+
+%ifndef AES_CBC_DEC_192
+%define AES_CBC_DEC_192 aes_cbc_dec_192_sse
+%endif
+
+%define MOVDQ movdqu
+
+%ifdef LINUX
+%define IN rdi
+%define IV rsi
+%define KEYS rdx
+%define OUT rcx
+%define LEN r8
+%else
+%define IN rcx
+%define IV rdx
+%define KEYS r8
+%define OUT r9
+%define LEN r10
+%endif
+
+%define IDX rax
+%define TMP IDX
+%define XDATA0 xmm0
+%define XDATA1 xmm1
+%define XDATA2 xmm2
+%define XDATA3 xmm3
+%define XKEY0 xmm4
+%define XKEY2 xmm5
+%define XKEY4 xmm6
+%define XKEY6 xmm7
+%define XKEY10 xmm8
+%define XIV xmm9
+%define XSAVED0 xmm10
+%define XSAVED1 xmm11
+%define XSAVED2 xmm12
+%define XSAVED3 xmm13
+%define XKEY_A xmm14
+%define XKEY_B xmm15
+
+%define IV_TMP XSAVED3
+
+section .text
+
+MKGLOBAL(AES_CBC_DEC_192,function,internal)
+AES_CBC_DEC_192:
+%ifndef LINUX
+ mov LEN, [rsp + 8*5]
+%endif
+
+ mov TMP, LEN
+ and TMP, 3*16
+ jz initial_4
+ cmp TMP, 2*16
+ jb initial_1
+ ja initial_3
+
+initial_2:
+ ; load cipher text
+ movdqu XDATA0, [IN + 0*16]
+ movdqu XDATA1, [IN + 1*16]
+
+ movdqa XKEY0, [KEYS + 0*16]
+
+ ; save cipher text
+ movdqa XSAVED0, XDATA0
+ movdqa XIV, XDATA1
+
+ pxor XDATA0, XKEY0 ; 0. ARK
+ pxor XDATA1, XKEY0
+
+ movdqa XKEY2, [KEYS + 2*16]
+
+ aesdec XDATA0, [KEYS + 1*16] ; 1. DEC
+ aesdec XDATA1, [KEYS + 1*16]
+
+ mov IDX, 2*16
+
+ aesdec XDATA0, XKEY2 ; 2. DEC
+ aesdec XDATA1, XKEY2
+
+ movdqa XKEY4, [KEYS + 4*16]
+
+ aesdec XDATA0, [KEYS + 3*16] ; 3. DEC
+ aesdec XDATA1, [KEYS + 3*16]
+
+ movdqu IV_TMP, [IV]
+
+ aesdec XDATA0, XKEY4 ; 4. DEC
+ aesdec XDATA1, XKEY4
+
+ movdqa XKEY6, [KEYS + 6*16]
+
+ aesdec XDATA0, [KEYS + 5*16] ; 5. DEC
+ aesdec XDATA1, [KEYS + 5*16]
+
+ aesdec XDATA0, XKEY6 ; 6. DEC
+ aesdec XDATA1, XKEY6
+
+ movdqa XKEY_B, [KEYS + 8*16]
+
+ aesdec XDATA0, [KEYS + 7*16] ; 7. DEC
+ aesdec XDATA1, [KEYS + 7*16]
+
+ aesdec XDATA0, XKEY_B ; 8. DEC
+ aesdec XDATA1, XKEY_B
+
+ movdqa XKEY10, [KEYS + 10*16]
+
+ aesdec XDATA0, [KEYS + 9*16] ; 9. DEC
+ aesdec XDATA1, [KEYS + 9*16]
+
+ aesdec XDATA0, XKEY10 ; 10. DEC
+ aesdec XDATA1, XKEY10
+
+ aesdec XDATA0, [KEYS + 11*16] ; 11. DEC
+ aesdec XDATA1, [KEYS + 11*16]
+
+ aesdeclast XDATA0, [KEYS + 12*16] ; 12. DEC
+ aesdeclast XDATA1, [KEYS + 12*16]
+
+ pxor XDATA0, IV_TMP
+ pxor XDATA1, XSAVED0
+
+ movdqu [OUT + 0*16], XDATA0
+ movdqu [OUT + 1*16], XDATA1
+
+ cmp LEN, 2*16
+ je done
+ jmp main_loop
+
+
+ align 16
+initial_1:
+ ; load cipher text
+ movdqu XDATA0, [IN + 0*16]
+
+ movdqa XKEY0, [KEYS + 0*16]
+
+ ; save cipher text
+ movdqa XIV, XDATA0
+
+ pxor XDATA0, XKEY0 ; 0. ARK
+
+ movdqa XKEY2, [KEYS + 2*16]
+
+ aesdec XDATA0, [KEYS + 1*16] ; 1. DEC
+
+ mov IDX, 1*16
+
+ aesdec XDATA0, XKEY2 ; 2. DEC
+
+ movdqa XKEY4, [KEYS + 4*16]
+
+ aesdec XDATA0, [KEYS + 3*16] ; 3. DEC
+
+ movdqu IV_TMP, [IV]
+
+ aesdec XDATA0, XKEY4 ; 4. DEC
+
+ movdqa XKEY6, [KEYS + 6*16]
+
+ aesdec XDATA0, [KEYS + 5*16] ; 5. DEC
+
+ aesdec XDATA0, XKEY6 ; 6. DEC
+
+ movdqa XKEY_B, [KEYS + 8*16]
+
+ aesdec XDATA0, [KEYS + 7*16] ; 7. DEC
+
+ aesdec XDATA0, XKEY_B ; 8. DEC
+
+ movdqa XKEY10, [KEYS + 10*16]
+
+ aesdec XDATA0, [KEYS + 9*16] ; 9. DEC
+
+ aesdec XDATA0, XKEY10 ; 10. DEC
+
+ aesdec XDATA0, [KEYS + 11*16] ; 11. DEC
+
+ aesdeclast XDATA0, [KEYS + 12*16] ; 12. DEC
+
+ pxor XDATA0, IV_TMP
+
+ movdqu [OUT + 0*16], XDATA0
+
+ cmp LEN, 1*16
+ je done
+ jmp main_loop
+
+
+initial_3:
+ ; load cipher text
+ movdqu XDATA0, [IN + 0*16]
+ movdqu XDATA1, [IN + 1*16]
+ movdqu XDATA2, [IN + 2*16]
+
+ movdqa XKEY0, [KEYS + 0*16]
+
+ ; save cipher text
+ movdqa XSAVED0, XDATA0
+ movdqa XSAVED1, XDATA1
+ movdqa XIV, XDATA2
+
+ movdqa XKEY_A, [KEYS + 1*16]
+
+ pxor XDATA0, XKEY0 ; 0. ARK
+ pxor XDATA1, XKEY0
+ pxor XDATA2, XKEY0
+
+ movdqa XKEY2, [KEYS + 2*16]
+
+ aesdec XDATA0, XKEY_A ; 1. DEC
+ aesdec XDATA1, XKEY_A
+ aesdec XDATA2, XKEY_A
+
+ movdqa XKEY_A, [KEYS + 3*16]
+ mov IDX, 3*16
+
+ aesdec XDATA0, XKEY2 ; 2. DEC
+ aesdec XDATA1, XKEY2
+ aesdec XDATA2, XKEY2
+
+ movdqa XKEY4, [KEYS + 4*16]
+
+ aesdec XDATA0, XKEY_A ; 3. DEC
+ aesdec XDATA1, XKEY_A
+ aesdec XDATA2, XKEY_A
+
+ movdqa XKEY_A, [KEYS + 5*16]
+ movdqu IV_TMP, [IV]
+
+ aesdec XDATA0, XKEY4 ; 4. DEC
+ aesdec XDATA1, XKEY4
+ aesdec XDATA2, XKEY4
+
+ movdqa XKEY6, [KEYS + 6*16]
+
+ aesdec XDATA0, XKEY_A ; 5. DEC
+ aesdec XDATA1, XKEY_A
+ aesdec XDATA2, XKEY_A
+
+ movdqa XKEY_A, [KEYS + 7*16]
+
+ aesdec XDATA0, XKEY6 ; 6. DEC
+ aesdec XDATA1, XKEY6
+ aesdec XDATA2, XKEY6
+
+ movdqa XKEY_B, [KEYS + 8*16]
+
+ aesdec XDATA0, XKEY_A ; 7. DEC
+ aesdec XDATA1, XKEY_A
+ aesdec XDATA2, XKEY_A
+
+ movdqa XKEY_A, [KEYS + 9*16]
+
+ aesdec XDATA0, XKEY_B ; 8. DEC
+ aesdec XDATA1, XKEY_B
+ aesdec XDATA2, XKEY_B
+
+ movdqa XKEY10, [KEYS + 10*16]
+
+ aesdec XDATA0, XKEY_A ; 9. DEC
+ aesdec XDATA1, XKEY_A
+ aesdec XDATA2, XKEY_A
+
+ movdqa XKEY_A, [KEYS + 11*16]
+
+ aesdec XDATA0, XKEY10 ; 10. DEC
+ aesdec XDATA1, XKEY10
+ aesdec XDATA2, XKEY10
+
+ movdqa XKEY_B, [KEYS + 12*16]
+
+ aesdec XDATA0, XKEY_A ; 11. DEC
+ aesdec XDATA1, XKEY_A
+ aesdec XDATA2, XKEY_A
+
+ movdqa XKEY_A, [KEYS + 13*16]
+
+ aesdeclast XDATA0, XKEY_B ; 12. DEC
+ aesdeclast XDATA1, XKEY_B
+ aesdeclast XDATA2, XKEY_B
+
+
+
+ pxor XDATA0, IV_TMP
+ pxor XDATA1, XSAVED0
+ pxor XDATA2, XSAVED1
+
+ movdqu [OUT + 0*16], XDATA0
+ movdqu [OUT + 1*16], XDATA1
+ movdqu [OUT + 2*16], XDATA2
+
+ cmp LEN, 3*16
+ je done
+ jmp main_loop
+
+
+ align 16
+initial_4:
+ ; load cipher text
+ movdqu XDATA0, [IN + 0*16]
+ movdqu XDATA1, [IN + 1*16]
+ movdqu XDATA2, [IN + 2*16]
+ movdqu XDATA3, [IN + 3*16]
+
+ movdqa XKEY0, [KEYS + 0*16]
+
+ ; save cipher text
+ movdqa XSAVED0, XDATA0
+ movdqa XSAVED1, XDATA1
+ movdqa XSAVED2, XDATA2
+ movdqa XIV, XDATA3
+
+ movdqa XKEY_A, [KEYS + 1*16]
+
+ pxor XDATA0, XKEY0 ; 0. ARK
+ pxor XDATA1, XKEY0
+ pxor XDATA2, XKEY0
+ pxor XDATA3, XKEY0
+
+ movdqa XKEY2, [KEYS + 2*16]
+
+ aesdec XDATA0, XKEY_A ; 1. DEC
+ aesdec XDATA1, XKEY_A
+ aesdec XDATA2, XKEY_A
+ aesdec XDATA3, XKEY_A
+
+ movdqa XKEY_A, [KEYS + 3*16]
+
+ mov IDX, 4*16
+
+ aesdec XDATA0, XKEY2 ; 2. DEC
+ aesdec XDATA1, XKEY2
+ aesdec XDATA2, XKEY2
+ aesdec XDATA3, XKEY2
+
+ movdqa XKEY4, [KEYS + 4*16]
+
+ aesdec XDATA0, XKEY_A ; 3. DEC
+ aesdec XDATA1, XKEY_A
+ aesdec XDATA2, XKEY_A
+ aesdec XDATA3, XKEY_A
+
+ movdqa XKEY_A, [KEYS + 5*16]
+
+ movdqu IV_TMP, [IV]
+
+ aesdec XDATA0, XKEY4 ; 4. DEC
+ aesdec XDATA1, XKEY4
+ aesdec XDATA2, XKEY4
+ aesdec XDATA3, XKEY4
+
+ movdqa XKEY6, [KEYS + 6*16]
+
+ aesdec XDATA0, XKEY_A ; 5. DEC
+ aesdec XDATA1, XKEY_A
+ aesdec XDATA2, XKEY_A
+ aesdec XDATA3, XKEY_A
+
+ movdqa XKEY_A, [KEYS + 7*16]
+
+ aesdec XDATA0, XKEY6 ; 6. DEC
+ aesdec XDATA1, XKEY6
+ aesdec XDATA2, XKEY6
+ aesdec XDATA3, XKEY6
+
+ movdqa XKEY_B, [KEYS + 8*16]
+
+ aesdec XDATA0, XKEY_A ; 7. DEC
+ aesdec XDATA1, XKEY_A
+ aesdec XDATA2, XKEY_A
+ aesdec XDATA3, XKEY_A
+
+ movdqa XKEY_A, [KEYS + 9*16]
+
+ aesdec XDATA0, XKEY_B ; 8. DEC
+ aesdec XDATA1, XKEY_B
+ aesdec XDATA2, XKEY_B
+ aesdec XDATA3, XKEY_B
+
+ movdqa XKEY10, [KEYS + 10*16]
+
+ aesdec XDATA0, XKEY_A ; 9. DEC
+ aesdec XDATA1, XKEY_A
+ aesdec XDATA2, XKEY_A
+ aesdec XDATA3, XKEY_A
+
+ movdqa XKEY_A, [KEYS + 11*16]
+
+ aesdec XDATA0, XKEY10 ; 10. DEC
+ aesdec XDATA1, XKEY10
+ aesdec XDATA2, XKEY10
+ aesdec XDATA3, XKEY10
+
+ movdqa XKEY_B, [KEYS + 12*16]
+
+ aesdec XDATA0, XKEY_A ; 11. DEC
+ aesdec XDATA1, XKEY_A
+ aesdec XDATA2, XKEY_A
+ aesdec XDATA3, XKEY_A
+
+
+
+ aesdeclast XDATA0, XKEY_B ; 12. DEC
+ aesdeclast XDATA1, XKEY_B
+ aesdeclast XDATA2, XKEY_B
+ aesdeclast XDATA3, XKEY_B
+
+ pxor XDATA0, IV_TMP
+ pxor XDATA1, XSAVED0
+ pxor XDATA2, XSAVED1
+ pxor XDATA3, XSAVED2
+
+ movdqu [OUT + 0*16], XDATA0
+ movdqu [OUT + 1*16], XDATA1
+ movdqu [OUT + 2*16], XDATA2
+ movdqu [OUT + 3*16], XDATA3
+
+ cmp LEN, 4*16
+ jz done
+ jmp main_loop
+
+ align 16
+main_loop:
+ ; load cipher text
+ movdqu XDATA0, [IN + IDX + 0*16]
+ movdqu XDATA1, [IN + IDX + 1*16]
+ movdqu XDATA2, [IN + IDX + 2*16]
+ movdqu XDATA3, [IN + IDX + 3*16]
+
+ ; save cipher text
+ movdqa XSAVED0, XDATA0
+ movdqa XSAVED1, XDATA1
+ movdqa XSAVED2, XDATA2
+ movdqa XSAVED3, XDATA3
+
+ movdqa XKEY_A, [KEYS + 1*16]
+
+ pxor XDATA0, XKEY0 ; 0. ARK
+ pxor XDATA1, XKEY0
+ pxor XDATA2, XKEY0
+ pxor XDATA3, XKEY0
+
+ add IDX, 4*16
+
+ aesdec XDATA0, XKEY_A ; 1. DEC
+ aesdec XDATA1, XKEY_A
+ aesdec XDATA2, XKEY_A
+ aesdec XDATA3, XKEY_A
+
+ movdqa XKEY_A, [KEYS + 3*16]
+
+ aesdec XDATA0, XKEY2 ; 2. DEC
+ aesdec XDATA1, XKEY2
+ aesdec XDATA2, XKEY2
+ aesdec XDATA3, XKEY2
+
+ aesdec XDATA0, XKEY_A ; 3. DEC
+ aesdec XDATA1, XKEY_A
+ aesdec XDATA2, XKEY_A
+ aesdec XDATA3, XKEY_A
+
+ movdqa XKEY_A, [KEYS + 5*16]
+
+ aesdec XDATA0, XKEY4 ; 4. DEC
+ aesdec XDATA1, XKEY4
+ aesdec XDATA2, XKEY4
+ aesdec XDATA3, XKEY4
+
+ aesdec XDATA0, XKEY_A ; 5. DEC
+ aesdec XDATA1, XKEY_A
+ aesdec XDATA2, XKEY_A
+ aesdec XDATA3, XKEY_A
+
+ movdqa XKEY_A, [KEYS + 7*16]
+
+ aesdec XDATA0, XKEY6 ; 6. DEC
+ aesdec XDATA1, XKEY6
+ aesdec XDATA2, XKEY6
+ aesdec XDATA3, XKEY6
+
+ movdqa XKEY_B, [KEYS + 8*16]
+
+ aesdec XDATA0, XKEY_A ; 7. DEC
+ aesdec XDATA1, XKEY_A
+ aesdec XDATA2, XKEY_A
+ aesdec XDATA3, XKEY_A
+
+ movdqa XKEY_A, [KEYS + 9*16]
+
+ aesdec XDATA0, XKEY_B ; 8. DEC
+ aesdec XDATA1, XKEY_B
+ aesdec XDATA2, XKEY_B
+ aesdec XDATA3, XKEY_B
+
+ aesdec XDATA0, XKEY_A ; 9. DEC
+ aesdec XDATA1, XKEY_A
+ aesdec XDATA2, XKEY_A
+ aesdec XDATA3, XKEY_A
+
+ movdqa XKEY_A, [KEYS + 11*16]
+
+ aesdec XDATA0, XKEY10 ; 10. DEC
+ aesdec XDATA1, XKEY10
+ aesdec XDATA2, XKEY10
+ aesdec XDATA3, XKEY10
+
+ movdqa XKEY_B, [KEYS + 12*16]
+
+ aesdec XDATA0, XKEY_A ; 11. DEC
+ aesdec XDATA1, XKEY_A
+ aesdec XDATA2, XKEY_A
+ aesdec XDATA3, XKEY_A
+
+ aesdeclast XDATA0, XKEY_B ; 12. DECLAST
+ aesdeclast XDATA1, XKEY_B
+ aesdeclast XDATA2, XKEY_B
+ aesdeclast XDATA3, XKEY_B
+
+ pxor XDATA0, XIV
+ pxor XDATA1, XSAVED0
+ pxor XDATA2, XSAVED1
+ pxor XDATA3, XSAVED2
+
+ movdqu [OUT + IDX + 0*16 - 4*16], XDATA0
+ movdqu [OUT + IDX + 1*16 - 4*16], XDATA1
+ movdqu [OUT + IDX + 2*16 - 4*16], XDATA2
+ movdqu [OUT + IDX + 3*16 - 4*16], XDATA3
+
+ movdqa XIV, XSAVED3
+
+	cmp	IDX, LEN
+ jne main_loop
+
+done:
+; Don't write back IV
+; movdqu [IV], XIV
+
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/sse/aes192_cntr_by4_sse.asm b/src/spdk/intel-ipsec-mb/sse/aes192_cntr_by4_sse.asm
new file mode 100644
index 000000000..eaa89f21e
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/sse/aes192_cntr_by4_sse.asm
@@ -0,0 +1,470 @@
+;;
+;; Copyright (c) 2012-2019, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%include "include/os.asm"
+%include "include/memcpy.asm"
+%include "include/const.inc"
+%include "include/reg_sizes.asm"
+
+; routine to do AES192 CNTR enc/decrypt "by4"
+; XMM registers are clobbered. Saving/restoring must be done at a higher level
+
+%ifndef AES_CNTR_192
+%define AES_CNTR_192 aes_cntr_192_sse
+%define AES_CNTR_BIT_192 aes_cntr_bit_192_sse
+%endif
+
+extern byteswap_const, ddq_add_1, ddq_add_2, ddq_add_3, ddq_add_4
+
+%define CONCAT(a,b) a %+ b
+%define MOVDQ movdqu
+
+%define xdata0 xmm0
+%define xdata1 xmm1
+%define xpart xmm1
+%define xdata2 xmm2
+%define xdata3 xmm3
+%define xdata4 xmm4
+%define xdata5 xmm5
+%define xdata6 xmm6
+%define xdata7 xmm7
+%define xcounter xmm8
+%define xtmp xmm8
+%define xbyteswap xmm9
+%define xtmp2 xmm9
+%define xkey0 xmm10
+%define xtmp3 xmm10
+%define xkey4 xmm11
+%define xkey8 xmm12
+%define xkey12 xmm13
+%define xkeyA xmm14
+%define xkeyB xmm15
+
+%ifdef LINUX
+%define p_in rdi
+%define p_IV rsi
+%define p_keys rdx
+%define p_out rcx
+%define num_bytes r8
+%define num_bits r8
+%define p_ivlen r9
+%else
+%define p_in rcx
+%define p_IV rdx
+%define p_keys r8
+%define p_out r9
+%define num_bytes r10
+%define num_bits r10
+%define p_ivlen qword [rsp + 8*6]
+%endif
+
+%define tmp r11
+
+%define r_bits r12
+%define tmp2 r13
+%define mask r14
+
+%macro do_aes_load 2
+ do_aes %1, %2, 1
+%endmacro
+
+%macro do_aes_noload 2
+ do_aes %1, %2, 0
+%endmacro
+
+
+; do_aes num_in_par, cntr_type, load_keys
+; This increments p_in, but not p_out
+%macro do_aes 3
+%define %%by %1
+%define %%cntr_type %2
+%define %%load_keys %3
+
+%if (%%load_keys)
+ movdqa xkey0, [p_keys + 0*16]
+%endif
+
+ movdqa xdata0, xcounter
+ pshufb xdata0, xbyteswap
+%assign i 1
+%rep (%%by - 1)
+ movdqa CONCAT(xdata,i), xcounter
+ paddd CONCAT(xdata,i), [rel CONCAT(ddq_add_,i)]
+ pshufb CONCAT(xdata,i), xbyteswap
+%assign i (i + 1)
+%endrep
+
+ movdqa xkeyA, [p_keys + 1*16]
+
+ pxor xdata0, xkey0
+%ifidn %%cntr_type, CNTR_BIT
+ paddq xcounter, [rel CONCAT(ddq_add_,%%by)]
+%else
+ paddd xcounter, [rel CONCAT(ddq_add_,%%by)]
+%endif
+
+%assign i 1
+%rep (%%by - 1)
+ pxor CONCAT(xdata,i), xkey0
+%assign i (i + 1)
+%endrep
+
+ movdqa xkeyB, [p_keys + 2*16]
+%assign i 0
+%rep %%by
+ aesenc CONCAT(xdata,i), xkeyA ; key 1
+%assign i (i+1)
+%endrep
+
+ movdqa xkeyA, [p_keys + 3*16]
+%assign i 0
+%rep %%by
+ aesenc CONCAT(xdata,i), xkeyB ; key 2
+%assign i (i+1)
+%endrep
+
+ add p_in, 16*%%by
+
+%if (%%load_keys)
+ movdqa xkey4, [p_keys + 4*16]
+%endif
+%assign i 0
+%rep %%by
+ aesenc CONCAT(xdata,i), xkeyA ; key 3
+%assign i (i+1)
+%endrep
+
+ movdqa xkeyA, [p_keys + 5*16]
+%assign i 0
+%rep %%by
+ aesenc CONCAT(xdata,i), xkey4 ; key 4
+%assign i (i+1)
+%endrep
+
+ movdqa xkeyB, [p_keys + 6*16]
+%assign i 0
+%rep %%by
+ aesenc CONCAT(xdata,i), xkeyA ; key 5
+%assign i (i+1)
+%endrep
+
+ movdqa xkeyA, [p_keys + 7*16]
+%assign i 0
+%rep %%by
+ aesenc CONCAT(xdata,i), xkeyB ; key 6
+%assign i (i+1)
+%endrep
+
+%if (%%load_keys)
+ movdqa xkey8, [p_keys + 8*16]
+%endif
+%assign i 0
+%rep %%by
+ aesenc CONCAT(xdata,i), xkeyA ; key 7
+%assign i (i+1)
+%endrep
+
+ movdqa xkeyA, [p_keys + 9*16]
+%assign i 0
+%rep %%by
+ aesenc CONCAT(xdata,i), xkey8 ; key 8
+%assign i (i+1)
+%endrep
+
+ movdqa xkeyB, [p_keys + 10*16]
+%assign i 0
+%rep %%by
+ aesenc CONCAT(xdata,i), xkeyA ; key 9
+%assign i (i+1)
+%endrep
+
+ movdqa xkeyA, [p_keys + 11*16]
+%assign i 0
+%rep %%by
+ aesenc CONCAT(xdata,i), xkeyB ; key 10
+%assign i (i+1)
+%endrep
+
+%if (%%load_keys)
+ movdqa xkey12, [p_keys + 12*16]
+%endif
+%assign i 0
+%rep %%by
+ aesenc CONCAT(xdata,i), xkeyA ; key 11
+%assign i (i+1)
+%endrep
+
+%assign i 0
+%rep %%by
+ aesenclast CONCAT(xdata,i), xkey12 ; key 12
+%assign i (i+1)
+%endrep
+
+%assign i 0
+%rep (%%by / 2)
+%assign j (i+1)
+ MOVDQ xkeyA, [p_in + i*16 - 16*%%by]
+ MOVDQ xkeyB, [p_in + j*16 - 16*%%by]
+ pxor CONCAT(xdata,i), xkeyA
+ pxor CONCAT(xdata,j), xkeyB
+%assign i (i+2)
+%endrep
+%if (i < %%by)
+ MOVDQ xkeyA, [p_in + i*16 - 16*%%by]
+ pxor CONCAT(xdata,i), xkeyA
+%endif
+
+%ifidn %%cntr_type, CNTR_BIT
+ ;; check if this is the end of the message
+ mov tmp, num_bytes
+ and tmp, ~(%%by*16)
+ jnz %%skip_preserve
+ ;; Check if there is a partial byte
+ or r_bits, r_bits
+ jz %%skip_preserve
+
+%assign idx (%%by - 1)
+ ;; Load output to get last partial byte
+ movdqu xtmp, [p_out + idx * 16]
+
+ ;; Save RCX in temporary GP register
+ mov tmp, rcx
+ mov mask, 0xff
+ mov cl, BYTE(r_bits)
+ shr mask, cl ;; e.g. 3 remaining bits -> mask = 00011111
+ mov rcx, tmp
+
+ movq xtmp2, mask
+ pslldq xtmp2, 15
+	;; At this point, xtmp2 contains a mask that is all 0s except for the
+	;; bits of the last partial byte that must be preserved from the output
+
+ ;; Clear all the bits that do not need to be preserved from the output
+ pand xtmp, xtmp2
+
+ ;; Clear all bits from the input that are not to be ciphered
+ pandn xtmp2, CONCAT(xdata, idx)
+ por xtmp2, xtmp
+ movdqa CONCAT(xdata, idx), xtmp2
+
+%%skip_preserve:
+%endif
+
+%assign i 0
+%rep %%by
+ MOVDQ [p_out + i*16], CONCAT(xdata,i)
+%assign i (i+1)
+%endrep
+%endmacro
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+section .text
+
+;; Macro performing AES-CTR.
+;;
+%macro DO_CNTR 1
+%define %%CNTR_TYPE %1 ; [in] Type of CNTR operation to do (CNTR/CNTR_BIT)
+
+%ifndef LINUX
+ mov num_bytes, [rsp + 8*5]
+%endif
+
+%ifidn %%CNTR_TYPE, CNTR_BIT
+ push r12
+ push r13
+ push r14
+%endif
+
+ movdqa xbyteswap, [rel byteswap_const]
+%ifidn %%CNTR_TYPE, CNTR
+ test p_ivlen, 16
+ jnz %%iv_is_16_bytes
+ ; Read 12 bytes: Nonce + ESP IV. Then pad with block counter 0x00000001
+ mov DWORD(tmp), 0x01000000
+ pinsrq xcounter, [p_IV], 0
+ pinsrd xcounter, [p_IV + 8], 2
+ pinsrd xcounter, DWORD(tmp), 3
+
+%else ;; CNTR_BIT
+ ; Read 16 byte IV: Nonce + 8-byte block counter (BE)
+ movdqu xcounter, [p_IV]
+%endif
+
+%%bswap_iv:
+ pshufb xcounter, xbyteswap
+
+ ;; calculate len
+ ;; convert bits to bytes (message length in bits for CNTR_BIT)
+%ifidn %%CNTR_TYPE, CNTR_BIT
+ mov r_bits, num_bits
+ add num_bits, 7
+ shr num_bits, 3 ; "num_bits" and "num_bytes" registers are the same
+ and r_bits, 7 ; Check if there are remainder bits (0-7)
+%endif
+ mov tmp, num_bytes
+ and tmp, 3*16
+	jz	%%chk ; no 1-3 block remainder - process multiples of 4 blocks
+
+	; 1 to 3 blocks remain (tmp = 16, 32 or 48 bytes)
+ cmp tmp, 2*16
+ jg %%eq3
+ je %%eq2
+%%eq1:
+ do_aes_load 1, %%CNTR_TYPE
+ add p_out, 1*16
+ jmp %%chk
+
+%%eq2:
+ do_aes_load 2, %%CNTR_TYPE
+ add p_out, 2*16
+ jmp %%chk
+
+%%eq3:
+ do_aes_load 3, %%CNTR_TYPE
+ add p_out, 3*16
+ ; fall through to chk
+%%chk:
+ and num_bytes, ~(3*16)
+ jz %%do_return2
+
+ cmp num_bytes, 16
+ jb %%last
+
+ ; process multiples of 4 blocks
+ movdqa xkey0, [p_keys + 0*16]
+ movdqa xkey4, [p_keys + 4*16]
+ movdqa xkey8, [p_keys + 8*16]
+ movdqa xkey12, [p_keys + 12*16]
+
+align 32
+%%main_loop2:
+ ; num_bytes is a multiple of 4 blocks + partial bytes
+ do_aes_noload 4, %%CNTR_TYPE
+ add p_out, 4*16
+ sub num_bytes, 4*16
+ cmp num_bytes, 4*16
+ jae %%main_loop2
+
+ ; Check if there is a partial block
+ or num_bytes, num_bytes
+ jnz %%last
+
+%%do_return2:
+
+%ifidn %%CNTR_TYPE, CNTR_BIT
+ pop r14
+ pop r13
+ pop r12
+%endif
+
+ ret
+
+%%last:
+
+ ; load partial block into XMM register
+ simd_load_sse_15_1 xpart, p_in, num_bytes
+
+%%final_ctr_enc:
+ ; Encryption of a single partial block
+ pshufb xcounter, xbyteswap
+ movdqa xdata0, xcounter
+ pxor xdata0, [p_keys + 16*0]
+%assign i 1
+%rep 11
+ aesenc xdata0, [p_keys + 16*i]
+%assign i (i+1)
+%endrep
+ ; created keystream
+ aesenclast xdata0, [p_keys + 16*i]
+
+ ; xor keystream with the message (scratch)
+ pxor xdata0, xpart
+
+%ifidn %%CNTR_TYPE, CNTR_BIT
+ ;; Check if there is a partial byte
+ or r_bits, r_bits
+ jz %%store_output
+
+ ;; Load output to get last partial byte
+ simd_load_sse_15_1 xtmp, p_out, num_bytes
+
+ ;; Save RCX in temporary GP register
+ mov tmp, rcx
+ mov mask, 0xff
+%ifidn r_bits, rcx
+%error "r_bits cannot be mapped to rcx!"
+%endif
+ mov cl, BYTE(r_bits)
+ shr mask, cl ;; e.g. 3 remaining bits -> mask = 00011111
+ mov rcx, tmp
+
+ movq xtmp2, mask
+
+ ;; Get number of full bytes in last block of 16 bytes
+ mov tmp, num_bytes
+ dec tmp
+ XPSLLB xtmp2, tmp, xtmp3, tmp2
+	;; At this point, xtmp2 contains a mask that is all 0s except for the
+	;; bits of the last partial byte that must be preserved from the output
+
+ ;; Clear all the bits that do not need to be preserved from the output
+ pand xtmp, xtmp2
+
+ ;; Clear the bits from the input that are not to be ciphered
+ pandn xtmp2, xdata0
+ por xtmp2, xtmp
+ movdqa xdata0, xtmp2
+%endif
+
+%%store_output:
+ ; copy result into the output buffer
+ simd_store_sse_15 p_out, xdata0, num_bytes, tmp, rax
+
+ jmp %%do_return2
+
+%%iv_is_16_bytes:
+ ; Read 16 byte IV: Nonce + ESP IV + block counter (BE)
+ movdqu xcounter, [p_IV]
+ jmp %%bswap_iv
+%endmacro
+
+align 32
+;; aes_cntr_192_sse(void *in, void *IV, void *keys, void *out, UINT64 num_bytes, UINT64 iv_len)
+MKGLOBAL(AES_CNTR_192,function,internal)
+AES_CNTR_192:
+ DO_CNTR CNTR
+
+;; aes_cntr_bit_192_sse(void *in, void *IV, void *keys, void *out, UINT64 num_bits, UINT64 iv_len)
+MKGLOBAL(AES_CNTR_BIT_192,function,internal)
+AES_CNTR_BIT_192:
+ DO_CNTR CNTR_BIT
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
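The two entry points above are thin wrappers around DO_CNTR, and the C prototypes in the comments are the only caller-facing description in this file. Below is a hedged plain-C model of what one call computes for the common 12-byte IV case: the routine builds the counter block as nonce + ESP IV followed by a big-endian 32-bit block counter starting at 1, encrypts successive counter values with the 13 expanded AES-192 round keys, and XORs the keystream with the input. The aes192_encrypt_block helper is hypothetical and only stands in for the aesenc/aesenclast sequence; it is not a symbol of the library.

    #include <stdint.h>
    #include <string.h>

    /* Hypothetical single-block helper standing in for the ARK + 11 aesenc +
     * aesenclast sequence over the 13 expanded AES-192 round keys. */
    void aes192_encrypt_block(const void *round_keys, const uint8_t in[16],
                              uint8_t out[16]);

    /* Hedged model of aes_cntr_192_sse() for iv_len == 12 and whole bytes:
     * counter block = nonce + ESP IV (12 bytes) || 32-bit big-endian counter,
     * starting at 1 and incremented once per 16-byte block. */
    static void ctr192_model(const void *round_keys, const uint8_t iv12[12],
                             const uint8_t *in, uint8_t *out,
                             uint64_t num_bytes)
    {
        uint32_t ctr = 1;

        for (uint64_t off = 0; off < num_bytes; off += 16, ctr++) {
            uint8_t block[16], ks[16];
            uint64_t n = (num_bytes - off < 16) ? (num_bytes - off) : 16;

            memcpy(block, iv12, 12);
            block[12] = (uint8_t)(ctr >> 24);  /* big-endian block counter */
            block[13] = (uint8_t)(ctr >> 16);
            block[14] = (uint8_t)(ctr >> 8);
            block[15] = (uint8_t)ctr;

            aes192_encrypt_block(round_keys, block, ks);
            for (uint64_t b = 0; b < n; b++)
                out[off + b] = in[off + b] ^ ks[b];  /* keystream XOR data */
        }
    }

The assembly keeps the whole counter in an XMM register (paddd on the byte-swapped value) and processes up to four blocks per iteration of the do_aes macro, but for the 32-bit-counter case the result is the same as this scalar sketch.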
diff --git a/src/spdk/intel-ipsec-mb/sse/aes256_cbc_dec_by4_sse.asm b/src/spdk/intel-ipsec-mb/sse/aes256_cbc_dec_by4_sse.asm
new file mode 100644
index 000000000..c82a4f58a
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/sse/aes256_cbc_dec_by4_sse.asm
@@ -0,0 +1,634 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+; routine to do AES-256 CBC decrypt on 16n bytes, processing 4 blocks at a time ("by 4")
+
+; XMM registers are clobbered. Saving/restoring must be done at a higher level
+
+; void aes_cbc_dec_256_sse(void *in,
+; UINT128 *IV,
+; UINT128 keys[15],
+; void *out,
+; UINT64 len_bytes);
+;
+; arg 1: rcx: pointer to input (cipher text)
+; arg 2: rdx: pointer to IV
+; arg 3: r8: pointer to keys
+; arg 4: r9: pointer to output (plain text)
+; arg 5: sp: length in bytes (multiple of 16)
+;
+
+%include "include/os.asm"
+
+%ifndef AES_CBC_DEC_256
+%define AES_CBC_DEC_256 aes_cbc_dec_256_sse
+%endif
+
+%define MOVDQ movdqu
+
+%ifdef LINUX
+%define IN rdi
+%define IV rsi
+%define KEYS rdx
+%define OUT rcx
+%define LEN r8
+%else
+%define IN rcx
+%define IV rdx
+%define KEYS r8
+%define OUT r9
+%define LEN r10
+%endif
+
+%define IDX rax
+%define TMP IDX
+%define XDATA0 xmm0
+%define XDATA1 xmm1
+%define XDATA2 xmm2
+%define XDATA3 xmm3
+%define XKEY0 xmm4
+%define XKEY2 xmm5
+%define XKEY4 xmm6
+%define XKEY6 xmm7
+%define XKEY10 xmm8
+%define XIV xmm9
+%define XSAVED0 xmm10
+%define XSAVED1 xmm11
+%define XSAVED2 xmm12
+%define XSAVED3 xmm13
+%define XKEY_A xmm14
+%define XKEY_B xmm15
+
+%define IV_TMP XSAVED3
+
+section .text
+
+MKGLOBAL(AES_CBC_DEC_256,function,internal)
+AES_CBC_DEC_256:
+%ifndef LINUX
+ mov LEN, [rsp + 8*5]
+%endif
+
+ mov TMP, LEN
+ and TMP, 3*16
+ jz initial_4
+ cmp TMP, 2*16
+ jb initial_1
+ ja initial_3
+
+initial_2:
+ ; load cipher text
+ movdqu XDATA0, [IN + 0*16]
+ movdqu XDATA1, [IN + 1*16]
+
+ movdqa XKEY0, [KEYS + 0*16]
+
+ ; save cipher text
+ movdqa XSAVED0, XDATA0
+ movdqa XIV, XDATA1
+
+ pxor XDATA0, XKEY0 ; 0. ARK
+ pxor XDATA1, XKEY0
+
+ movdqa XKEY2, [KEYS + 2*16]
+
+ aesdec XDATA0, [KEYS + 1*16] ; 1. DEC
+ aesdec XDATA1, [KEYS + 1*16]
+
+ mov IDX, 2*16
+
+ aesdec XDATA0, XKEY2 ; 2. DEC
+ aesdec XDATA1, XKEY2
+
+ movdqa XKEY4, [KEYS + 4*16]
+
+ aesdec XDATA0, [KEYS + 3*16] ; 3. DEC
+ aesdec XDATA1, [KEYS + 3*16]
+
+ movdqu IV_TMP, [IV]
+
+ aesdec XDATA0, XKEY4 ; 4. DEC
+ aesdec XDATA1, XKEY4
+
+ movdqa XKEY6, [KEYS + 6*16]
+
+ aesdec XDATA0, [KEYS + 5*16] ; 5. DEC
+ aesdec XDATA1, [KEYS + 5*16]
+
+ aesdec XDATA0, XKEY6 ; 6. DEC
+ aesdec XDATA1, XKEY6
+
+ movdqa XKEY_B, [KEYS + 8*16]
+
+ aesdec XDATA0, [KEYS + 7*16] ; 7. DEC
+ aesdec XDATA1, [KEYS + 7*16]
+
+ aesdec XDATA0, XKEY_B ; 8. DEC
+ aesdec XDATA1, XKEY_B
+
+ movdqa XKEY10, [KEYS + 10*16]
+
+ aesdec XDATA0, [KEYS + 9*16] ; 9. DEC
+ aesdec XDATA1, [KEYS + 9*16]
+
+ aesdec XDATA0, XKEY10 ; 10. DEC
+ aesdec XDATA1, XKEY10
+
+ aesdec XDATA0, [KEYS + 11*16] ; 11. DEC
+ aesdec XDATA1, [KEYS + 11*16]
+
+ aesdec XDATA0, [KEYS + 12*16] ; 12. DEC
+ aesdec XDATA1, [KEYS + 12*16]
+
+ aesdec XDATA0, [KEYS + 13*16] ; 13. DEC
+ aesdec XDATA1, [KEYS + 13*16]
+
+ aesdeclast XDATA0, [KEYS + 14*16] ; 14. DEC
+ aesdeclast XDATA1, [KEYS + 14*16]
+
+ pxor XDATA0, IV_TMP
+ pxor XDATA1, XSAVED0
+
+ movdqu [OUT + 0*16], XDATA0
+ movdqu [OUT + 1*16], XDATA1
+
+ cmp LEN, 2*16
+ je done
+ jmp main_loop
+
+
+ align 16
+initial_1:
+ ; load cipher text
+ movdqu XDATA0, [IN + 0*16]
+
+ movdqa XKEY0, [KEYS + 0*16]
+
+ ; save cipher text
+ movdqa XIV, XDATA0
+
+ pxor XDATA0, XKEY0 ; 0. ARK
+
+ movdqa XKEY2, [KEYS + 2*16]
+
+ aesdec XDATA0, [KEYS + 1*16] ; 1. DEC
+
+ mov IDX, 1*16
+
+ aesdec XDATA0, XKEY2 ; 2. DEC
+
+ movdqa XKEY4, [KEYS + 4*16]
+
+ aesdec XDATA0, [KEYS + 3*16] ; 3. DEC
+
+ movdqu IV_TMP, [IV]
+
+ aesdec XDATA0, XKEY4 ; 4. DEC
+
+ movdqa XKEY6, [KEYS + 6*16]
+
+ aesdec XDATA0, [KEYS + 5*16] ; 5. DEC
+
+ aesdec XDATA0, XKEY6 ; 6. DEC
+
+ movdqa XKEY_B, [KEYS + 8*16]
+
+ aesdec XDATA0, [KEYS + 7*16] ; 7. DEC
+
+ aesdec XDATA0, XKEY_B ; 8. DEC
+
+ movdqa XKEY10, [KEYS + 10*16]
+
+ aesdec XDATA0, [KEYS + 9*16] ; 9. DEC
+
+ aesdec XDATA0, XKEY10 ; 10. DEC
+
+ aesdec XDATA0, [KEYS + 11*16] ; 11. DEC
+
+ aesdec XDATA0, [KEYS + 12*16] ; 12. DEC
+
+ aesdec XDATA0, [KEYS + 13*16] ; 13. DEC
+
+ aesdeclast XDATA0, [KEYS + 14*16] ; 14. DEC
+
+ pxor XDATA0, IV_TMP
+
+ movdqu [OUT + 0*16], XDATA0
+
+ cmp LEN, 1*16
+ je done
+ jmp main_loop
+
+
+initial_3:
+ ; load cipher text
+ movdqu XDATA0, [IN + 0*16]
+ movdqu XDATA1, [IN + 1*16]
+ movdqu XDATA2, [IN + 2*16]
+
+ movdqa XKEY0, [KEYS + 0*16]
+
+ ; save cipher text
+ movdqa XSAVED0, XDATA0
+ movdqa XSAVED1, XDATA1
+ movdqa XIV, XDATA2
+
+ movdqa XKEY_A, [KEYS + 1*16]
+
+ pxor XDATA0, XKEY0 ; 0. ARK
+ pxor XDATA1, XKEY0
+ pxor XDATA2, XKEY0
+
+ movdqa XKEY2, [KEYS + 2*16]
+
+ aesdec XDATA0, XKEY_A ; 1. DEC
+ aesdec XDATA1, XKEY_A
+ aesdec XDATA2, XKEY_A
+
+ movdqa XKEY_A, [KEYS + 3*16]
+ mov IDX, 3*16
+
+ aesdec XDATA0, XKEY2 ; 2. DEC
+ aesdec XDATA1, XKEY2
+ aesdec XDATA2, XKEY2
+
+ movdqa XKEY4, [KEYS + 4*16]
+
+ aesdec XDATA0, XKEY_A ; 3. DEC
+ aesdec XDATA1, XKEY_A
+ aesdec XDATA2, XKEY_A
+
+ movdqa XKEY_A, [KEYS + 5*16]
+ movdqu IV_TMP, [IV]
+
+ aesdec XDATA0, XKEY4 ; 4. DEC
+ aesdec XDATA1, XKEY4
+ aesdec XDATA2, XKEY4
+
+ movdqa XKEY6, [KEYS + 6*16]
+
+ aesdec XDATA0, XKEY_A ; 5. DEC
+ aesdec XDATA1, XKEY_A
+ aesdec XDATA2, XKEY_A
+
+ movdqa XKEY_A, [KEYS + 7*16]
+
+ aesdec XDATA0, XKEY6 ; 6. DEC
+ aesdec XDATA1, XKEY6
+ aesdec XDATA2, XKEY6
+
+ movdqa XKEY_B, [KEYS + 8*16]
+
+ aesdec XDATA0, XKEY_A ; 7. DEC
+ aesdec XDATA1, XKEY_A
+ aesdec XDATA2, XKEY_A
+
+ movdqa XKEY_A, [KEYS + 9*16]
+
+ aesdec XDATA0, XKEY_B ; 8. DEC
+ aesdec XDATA1, XKEY_B
+ aesdec XDATA2, XKEY_B
+
+ movdqa XKEY10, [KEYS + 10*16]
+
+ aesdec XDATA0, XKEY_A ; 9. DEC
+ aesdec XDATA1, XKEY_A
+ aesdec XDATA2, XKEY_A
+
+ movdqa XKEY_A, [KEYS + 11*16]
+
+ aesdec XDATA0, XKEY10 ; 10. DEC
+ aesdec XDATA1, XKEY10
+ aesdec XDATA2, XKEY10
+
+ movdqa XKEY_B, [KEYS + 12*16]
+
+ aesdec XDATA0, XKEY_A ; 11. DEC
+ aesdec XDATA1, XKEY_A
+ aesdec XDATA2, XKEY_A
+
+ movdqa XKEY_A, [KEYS + 13*16]
+
+ aesdec XDATA0, XKEY_B ; 12. DEC
+ aesdec XDATA1, XKEY_B
+ aesdec XDATA2, XKEY_B
+
+ movdqa XKEY_B, [KEYS + 14*16]
+
+ aesdec XDATA0, XKEY_A ; 13. DEC
+ aesdec XDATA1, XKEY_A
+ aesdec XDATA2, XKEY_A
+
+ aesdeclast XDATA0, XKEY_B ; 14. DEC
+ aesdeclast XDATA1, XKEY_B
+ aesdeclast XDATA2, XKEY_B
+
+ pxor XDATA0, IV_TMP
+ pxor XDATA1, XSAVED0
+ pxor XDATA2, XSAVED1
+
+ movdqu [OUT + 0*16], XDATA0
+ movdqu [OUT + 1*16], XDATA1
+ movdqu [OUT + 2*16], XDATA2
+
+ cmp LEN, 3*16
+ je done
+ jmp main_loop
+
+
+ align 16
+initial_4:
+ ; load cipher text
+ movdqu XDATA0, [IN + 0*16]
+ movdqu XDATA1, [IN + 1*16]
+ movdqu XDATA2, [IN + 2*16]
+ movdqu XDATA3, [IN + 3*16]
+
+ movdqa XKEY0, [KEYS + 0*16]
+
+ ; save cipher text
+ movdqa XSAVED0, XDATA0
+ movdqa XSAVED1, XDATA1
+ movdqa XSAVED2, XDATA2
+ movdqa XIV, XDATA3
+
+ movdqa XKEY_A, [KEYS + 1*16]
+
+ pxor XDATA0, XKEY0 ; 0. ARK
+ pxor XDATA1, XKEY0
+ pxor XDATA2, XKEY0
+ pxor XDATA3, XKEY0
+
+ movdqa XKEY2, [KEYS + 2*16]
+
+ aesdec XDATA0, XKEY_A ; 1. DEC
+ aesdec XDATA1, XKEY_A
+ aesdec XDATA2, XKEY_A
+ aesdec XDATA3, XKEY_A
+
+ movdqa XKEY_A, [KEYS + 3*16]
+
+ mov IDX, 4*16
+
+ aesdec XDATA0, XKEY2 ; 2. DEC
+ aesdec XDATA1, XKEY2
+ aesdec XDATA2, XKEY2
+ aesdec XDATA3, XKEY2
+
+ movdqa XKEY4, [KEYS + 4*16]
+
+ aesdec XDATA0, XKEY_A ; 3. DEC
+ aesdec XDATA1, XKEY_A
+ aesdec XDATA2, XKEY_A
+ aesdec XDATA3, XKEY_A
+
+ movdqa XKEY_A, [KEYS + 5*16]
+
+ movdqu IV_TMP, [IV]
+
+ aesdec XDATA0, XKEY4 ; 4. DEC
+ aesdec XDATA1, XKEY4
+ aesdec XDATA2, XKEY4
+ aesdec XDATA3, XKEY4
+
+ movdqa XKEY6, [KEYS + 6*16]
+
+ aesdec XDATA0, XKEY_A ; 5. DEC
+ aesdec XDATA1, XKEY_A
+ aesdec XDATA2, XKEY_A
+ aesdec XDATA3, XKEY_A
+
+ movdqa XKEY_A, [KEYS + 7*16]
+
+ aesdec XDATA0, XKEY6 ; 6. DEC
+ aesdec XDATA1, XKEY6
+ aesdec XDATA2, XKEY6
+ aesdec XDATA3, XKEY6
+
+ movdqa XKEY_B, [KEYS + 8*16]
+
+ aesdec XDATA0, XKEY_A ; 7. DEC
+ aesdec XDATA1, XKEY_A
+ aesdec XDATA2, XKEY_A
+ aesdec XDATA3, XKEY_A
+
+ movdqa XKEY_A, [KEYS + 9*16]
+
+ aesdec XDATA0, XKEY_B ; 8. DEC
+ aesdec XDATA1, XKEY_B
+ aesdec XDATA2, XKEY_B
+ aesdec XDATA3, XKEY_B
+
+ movdqa XKEY10, [KEYS + 10*16]
+
+ aesdec XDATA0, XKEY_A ; 9. DEC
+ aesdec XDATA1, XKEY_A
+ aesdec XDATA2, XKEY_A
+ aesdec XDATA3, XKEY_A
+
+ movdqa XKEY_A, [KEYS + 11*16]
+
+ aesdec XDATA0, XKEY10 ; 10. DEC
+ aesdec XDATA1, XKEY10
+ aesdec XDATA2, XKEY10
+ aesdec XDATA3, XKEY10
+
+ movdqa XKEY_B, [KEYS + 12*16]
+
+ aesdec XDATA0, XKEY_A ; 11. DEC
+ aesdec XDATA1, XKEY_A
+ aesdec XDATA2, XKEY_A
+ aesdec XDATA3, XKEY_A
+
+ movdqa XKEY_A, [KEYS + 13*16]
+
+ aesdec XDATA0, XKEY_B ; 12. DEC
+ aesdec XDATA1, XKEY_B
+ aesdec XDATA2, XKEY_B
+ aesdec XDATA3, XKEY_B
+
+ movdqa XKEY_B, [KEYS + 14*16]
+
+ aesdec XDATA0, XKEY_A ; 13. DEC
+ aesdec XDATA1, XKEY_A
+ aesdec XDATA2, XKEY_A
+ aesdec XDATA3, XKEY_A
+
+ aesdeclast XDATA0, XKEY_B ; 14. DEC
+ aesdeclast XDATA1, XKEY_B
+ aesdeclast XDATA2, XKEY_B
+ aesdeclast XDATA3, XKEY_B
+
+ pxor XDATA0, IV_TMP
+ pxor XDATA1, XSAVED0
+ pxor XDATA2, XSAVED1
+ pxor XDATA3, XSAVED2
+
+ movdqu [OUT + 0*16], XDATA0
+ movdqu [OUT + 1*16], XDATA1
+ movdqu [OUT + 2*16], XDATA2
+ movdqu [OUT + 3*16], XDATA3
+
+ cmp LEN, 4*16
+ jz done
+ jmp main_loop
+
+ align 16
+main_loop:
+ ; load cipher text
+ movdqu XDATA0, [IN + IDX + 0*16]
+ movdqu XDATA1, [IN + IDX + 1*16]
+ movdqu XDATA2, [IN + IDX + 2*16]
+ movdqu XDATA3, [IN + IDX + 3*16]
+
+ ; save cipher text
+ movdqa XSAVED0, XDATA0
+ movdqa XSAVED1, XDATA1
+ movdqa XSAVED2, XDATA2
+ movdqa XSAVED3, XDATA3
+
+ movdqa XKEY_A, [KEYS + 1*16]
+
+ pxor XDATA0, XKEY0 ; 0. ARK
+ pxor XDATA1, XKEY0
+ pxor XDATA2, XKEY0
+ pxor XDATA3, XKEY0
+
+ add IDX, 4*16
+
+ aesdec XDATA0, XKEY_A ; 1. DEC
+ aesdec XDATA1, XKEY_A
+ aesdec XDATA2, XKEY_A
+ aesdec XDATA3, XKEY_A
+
+ movdqa XKEY_A, [KEYS + 3*16]
+
+ aesdec XDATA0, XKEY2 ; 2. DEC
+ aesdec XDATA1, XKEY2
+ aesdec XDATA2, XKEY2
+ aesdec XDATA3, XKEY2
+
+ aesdec XDATA0, XKEY_A ; 3. DEC
+ aesdec XDATA1, XKEY_A
+ aesdec XDATA2, XKEY_A
+ aesdec XDATA3, XKEY_A
+
+ movdqa XKEY_A, [KEYS + 5*16]
+
+ aesdec XDATA0, XKEY4 ; 4. DEC
+ aesdec XDATA1, XKEY4
+ aesdec XDATA2, XKEY4
+ aesdec XDATA3, XKEY4
+
+ aesdec XDATA0, XKEY_A ; 5. DEC
+ aesdec XDATA1, XKEY_A
+ aesdec XDATA2, XKEY_A
+ aesdec XDATA3, XKEY_A
+
+ movdqa XKEY_A, [KEYS + 7*16]
+
+ aesdec XDATA0, XKEY6 ; 6. DEC
+ aesdec XDATA1, XKEY6
+ aesdec XDATA2, XKEY6
+ aesdec XDATA3, XKEY6
+
+ movdqa XKEY_B, [KEYS + 8*16]
+
+ aesdec XDATA0, XKEY_A ; 7. DEC
+ aesdec XDATA1, XKEY_A
+ aesdec XDATA2, XKEY_A
+ aesdec XDATA3, XKEY_A
+
+ movdqa XKEY_A, [KEYS + 9*16]
+
+ aesdec XDATA0, XKEY_B ; 8. DEC
+ aesdec XDATA1, XKEY_B
+ aesdec XDATA2, XKEY_B
+ aesdec XDATA3, XKEY_B
+
+ aesdec XDATA0, XKEY_A ; 9. DEC
+ aesdec XDATA1, XKEY_A
+ aesdec XDATA2, XKEY_A
+ aesdec XDATA3, XKEY_A
+
+ movdqa XKEY_A, [KEYS + 11*16]
+
+ aesdec XDATA0, XKEY10 ; 10. DEC
+ aesdec XDATA1, XKEY10
+ aesdec XDATA2, XKEY10
+ aesdec XDATA3, XKEY10
+
+ movdqa XKEY_B, [KEYS + 12*16]
+
+ aesdec XDATA0, XKEY_A ; 11. DEC
+ aesdec XDATA1, XKEY_A
+ aesdec XDATA2, XKEY_A
+ aesdec XDATA3, XKEY_A
+
+ movdqa XKEY_A, [KEYS + 13*16]
+
+ aesdec XDATA0, XKEY_B ; 12. DEC
+ aesdec XDATA1, XKEY_B
+ aesdec XDATA2, XKEY_B
+ aesdec XDATA3, XKEY_B
+
+ movdqa XKEY_B, [KEYS + 14*16]
+
+ aesdec XDATA0, XKEY_A ; 13. DEC
+ aesdec XDATA1, XKEY_A
+ aesdec XDATA2, XKEY_A
+ aesdec XDATA3, XKEY_A
+
+ aesdeclast XDATA0, XKEY_B ; 14. DEC
+ aesdeclast XDATA1, XKEY_B
+ aesdeclast XDATA2, XKEY_B
+ aesdeclast XDATA3, XKEY_B
+
+ pxor XDATA0, XIV
+ pxor XDATA1, XSAVED0
+ pxor XDATA2, XSAVED1
+ pxor XDATA3, XSAVED2
+
+ movdqu [OUT + IDX + 0*16 - 4*16], XDATA0
+ movdqu [OUT + IDX + 1*16 - 4*16], XDATA1
+ movdqu [OUT + IDX + 2*16 - 4*16], XDATA2
+ movdqu [OUT + IDX + 3*16 - 4*16], XDATA3
+
+ movdqa XIV, XSAVED3
+
+	cmp	IDX, LEN
+ jne main_loop
+
+done:
+; Don't write back IV
+; movdqu [IV], XIV
+
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
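A hedged usage sketch of the decrypt routine declared in the prototype comment at the top of this file. The symbol is internal to the library and is normally reached through the multi-buffer job API; dec_keys is assumed to already hold the 15 expanded AES-256 decryption round keys, and the UINT128 parameters of the original prototype are shown here as plain byte buffers.

    #include <stdint.h>

    /* Declaration adapted from the prototype comment above; UINT128
     * parameters are represented as void pointers in this sketch. */
    void aes_cbc_dec_256_sse(void *in, void *IV, void *keys, void *out,
                             uint64_t len_bytes);

    /* Hedged sketch: len must be a non-zero multiple of 16, as the routine
     * does no padding; the IV buffer is read but not updated (see "done:"
     * above, where the IV write-back is commented out). */
    static void cbc256_dec_example(const uint8_t *ct, uint8_t iv[16],
                                   void *dec_keys, uint8_t *pt, uint64_t len)
    {
        if (len != 0 && (len % 16) == 0)
            aes_cbc_dec_256_sse((void *)ct, iv, dec_keys, pt, len);
    }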
diff --git a/src/spdk/intel-ipsec-mb/sse/aes256_cntr_by4_sse.asm b/src/spdk/intel-ipsec-mb/sse/aes256_cntr_by4_sse.asm
new file mode 100644
index 000000000..6d8f211f7
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/sse/aes256_cntr_by4_sse.asm
@@ -0,0 +1,483 @@
+;;
+;; Copyright (c) 2012-2019, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%include "include/os.asm"
+%include "include/memcpy.asm"
+%include "include/const.inc"
+
+; routine to do AES256 CNTR enc/decrypt "by4"
+; XMM registers are clobbered. Saving/restoring must be done at a higher level
+
+%ifndef AES_CNTR_256
+%define AES_CNTR_256 aes_cntr_256_sse
+%define AES_CNTR_BIT_256 aes_cntr_bit_256_sse
+%endif
+
+extern byteswap_const, ddq_add_1, ddq_add_2, ddq_add_3, ddq_add_4
+
+%define CONCAT(a,b) a %+ b
+%define MOVDQ movdqu
+
+%define xdata0 xmm0
+%define xdata1 xmm1
+%define xpart xmm1
+%define xdata2 xmm2
+%define xdata3 xmm3
+%define xdata4 xmm4
+%define xdata5 xmm5
+%define xdata6 xmm6
+%define xdata7 xmm7
+%define xcounter xmm8
+%define xtmp xmm8
+%define xbyteswap xmm9
+%define xtmp2 xmm9
+%define xkey0 xmm10
+%define xtmp3 xmm10
+%define xkey4 xmm11
+%define xkey8 xmm12
+%define xkey12 xmm13
+%define xkeyA xmm14
+%define xkeyB xmm15
+
+%ifdef LINUX
+%define p_in rdi
+%define p_IV rsi
+%define p_keys rdx
+%define p_out rcx
+%define num_bytes r8
+%define num_bits r8
+%define p_ivlen r9
+%else
+%define p_in rcx
+%define p_IV rdx
+%define p_keys r8
+%define p_out r9
+%define num_bytes r10
+%define num_bits r10
+%define p_ivlen qword [rsp + 8*6]
+%endif
+
+%define tmp r11
+
+%define r_bits r12
+%define tmp2 r13
+%define mask r14
+
+%macro do_aes_load 2
+ do_aes %1, %2, 1
+%endmacro
+
+%macro do_aes_noload 2
+ do_aes %1, %2, 0
+%endmacro
+
+
+; do_aes num_in_par cntr_type load_keys
+; This increments p_in, but not p_out
+%macro do_aes 3
+%define %%by %1
+%define %%cntr_type %2
+%define %%load_keys %3
+
+%if (%%load_keys)
+ movdqa xkey0, [p_keys + 0*16]
+%endif
+
+ movdqa xdata0, xcounter
+ pshufb xdata0, xbyteswap
+%assign i 1
+%rep (%%by - 1)
+ movdqa CONCAT(xdata,i), xcounter
+ paddd CONCAT(xdata,i), [rel CONCAT(ddq_add_,i)]
+ pshufb CONCAT(xdata,i), xbyteswap
+%assign i (i + 1)
+%endrep
+
+ movdqa xkeyA, [p_keys + 1*16]
+
+ pxor xdata0, xkey0
+%ifidn %%cntr_type, CNTR_BIT
+ paddq xcounter, [rel CONCAT(ddq_add_,%%by)]
+%else
+ paddd xcounter, [rel CONCAT(ddq_add_,%%by)]
+%endif
+
+%assign i 1
+%rep (%%by - 1)
+ pxor CONCAT(xdata,i), xkey0
+%assign i (i + 1)
+%endrep
+
+ movdqa xkeyB, [p_keys + 2*16]
+%assign i 0
+%rep %%by
+ aesenc CONCAT(xdata,i), xkeyA ; key 1
+%assign i (i+1)
+%endrep
+
+ movdqa xkeyA, [p_keys + 3*16]
+%assign i 0
+%rep %%by
+ aesenc CONCAT(xdata,i), xkeyB ; key 2
+%assign i (i+1)
+%endrep
+
+ add p_in, 16*%%by
+
+%if (%%load_keys)
+ movdqa xkey4, [p_keys + 4*16]
+%endif
+%assign i 0
+%rep %%by
+ aesenc CONCAT(xdata,i), xkeyA ; key 3
+%assign i (i+1)
+%endrep
+
+ movdqa xkeyA, [p_keys + 5*16]
+%assign i 0
+%rep %%by
+ aesenc CONCAT(xdata,i), xkey4 ; key 4
+%assign i (i+1)
+%endrep
+
+ movdqa xkeyB, [p_keys + 6*16]
+%assign i 0
+%rep %%by
+ aesenc CONCAT(xdata,i), xkeyA ; key 5
+%assign i (i+1)
+%endrep
+
+ movdqa xkeyA, [p_keys + 7*16]
+%assign i 0
+%rep %%by
+ aesenc CONCAT(xdata,i), xkeyB ; key 6
+%assign i (i+1)
+%endrep
+
+%if (%%load_keys)
+ movdqa xkey8, [p_keys + 8*16]
+%endif
+%assign i 0
+%rep %%by
+ aesenc CONCAT(xdata,i), xkeyA ; key 7
+%assign i (i+1)
+%endrep
+
+ movdqa xkeyA, [p_keys + 9*16]
+%assign i 0
+%rep %%by
+ aesenc CONCAT(xdata,i), xkey8 ; key 8
+%assign i (i+1)
+%endrep
+
+ movdqa xkeyB, [p_keys + 10*16]
+%assign i 0
+%rep %%by
+ aesenc CONCAT(xdata,i), xkeyA ; key 9
+%assign i (i+1)
+%endrep
+
+ movdqa xkeyA, [p_keys + 11*16]
+%assign i 0
+%rep %%by
+ aesenc CONCAT(xdata,i), xkeyB ; key 10
+%assign i (i+1)
+%endrep
+
+%if (%%load_keys)
+ movdqa xkey12, [p_keys + 12*16]
+%endif
+%assign i 0
+%rep %%by
+ aesenc CONCAT(xdata,i), xkeyA ; key 11
+%assign i (i+1)
+%endrep
+
+ movdqa xkeyA, [p_keys + 13*16]
+%assign i 0
+%rep %%by
+ aesenc CONCAT(xdata,i), xkey12 ; key 12
+%assign i (i+1)
+%endrep
+
+ movdqa xkeyB, [p_keys + 14*16]
+%assign i 0
+%rep %%by
+ aesenc CONCAT(xdata,i), xkeyA ; key 13
+%assign i (i+1)
+%endrep
+
+%assign i 0
+%rep %%by
+ aesenclast CONCAT(xdata,i), xkeyB ; key 14
+%assign i (i+1)
+%endrep
+
+%assign i 0
+%rep (%%by / 2)
+%assign j (i+1)
+ MOVDQ xkeyA, [p_in + i*16 - 16*%%by]
+ MOVDQ xkeyB, [p_in + j*16 - 16*%%by]
+ pxor CONCAT(xdata,i), xkeyA
+ pxor CONCAT(xdata,j), xkeyB
+%assign i (i+2)
+%endrep
+%if (i < %%by)
+ MOVDQ xkeyA, [p_in + i*16 - 16*%%by]
+ pxor CONCAT(xdata,i), xkeyA
+%endif
+
+%ifidn %%cntr_type, CNTR_BIT
+ ;; check if this is the end of the message
+ mov tmp, num_bytes
+ and tmp, ~(%%by*16)
+ jnz %%skip_preserve
+ ;; Check if there is a partial byte
+ or r_bits, r_bits
+ jz %%skip_preserve
+
+%assign idx (%%by - 1)
+ ;; Load output to get last partial byte
+ movdqu xtmp, [p_out + idx * 16]
+
+ ;; Save RCX in temporary GP register
+ mov tmp, rcx
+ mov mask, 0xff
+ mov cl, BYTE(r_bits)
+ shr mask, cl ;; e.g. 3 remaining bits -> mask = 00011111
+ mov rcx, tmp
+
+ movq xtmp2, mask
+ pslldq xtmp2, 15
+	;; At this point, xtmp2 contains a mask that is all 0s except for the
+	;; bits of the last partial byte that must be preserved from the output
+
+ ;; Clear all the bits that do not need to be preserved from the output
+ pand xtmp, xtmp2
+
+ ;; Clear all bits from the input that are not to be ciphered
+ pandn xtmp2, CONCAT(xdata, idx)
+ por xtmp2, xtmp
+ movdqa CONCAT(xdata, idx), xtmp2
+
+%%skip_preserve:
+%endif
+
+%assign i 0
+%rep %%by
+ MOVDQ [p_out + i*16], CONCAT(xdata,i)
+%assign i (i+1)
+%endrep
+%endmacro
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+section .text
+
+;; Macro performing AES-CTR.
+;;
+%macro DO_CNTR 1
+%define %%CNTR_TYPE %1 ; [in] Type of CNTR operation to do (CNTR/CNTR_BIT)
+
+%ifndef LINUX
+ mov num_bytes, [rsp + 8*5]
+%endif
+
+%ifidn %%CNTR_TYPE, CNTR_BIT
+ push r12
+ push r13
+ push r14
+%endif
+
+ movdqa xbyteswap, [rel byteswap_const]
+%ifidn %%CNTR_TYPE, CNTR
+ test p_ivlen, 16
+ jnz %%iv_is_16_bytes
+ ; Read 12 bytes: Nonce + ESP IV. Then pad with block counter 0x00000001
+ mov DWORD(tmp), 0x01000000
+ pinsrq xcounter, [p_IV], 0
+ pinsrd xcounter, [p_IV + 8], 2
+ pinsrd xcounter, DWORD(tmp), 3
+
+%else ;; CNTR_BIT
+ ; Read 16 byte IV: Nonce + 8-byte block counter (BE)
+ movdqu xcounter, [p_IV]
+%endif
+
+%%bswap_iv:
+ pshufb xcounter, xbyteswap
+
+ ;; calculate len
+ ;; convert bits to bytes (message length in bits for CNTR_BIT)
+%ifidn %%CNTR_TYPE, CNTR_BIT
+ mov r_bits, num_bits
+ add num_bits, 7
+ shr num_bits, 3 ; "num_bits" and "num_bytes" registers are the same
+ and r_bits, 7 ; Check if there are remainder bits (0-7)
+%endif
+ mov tmp, num_bytes
+ and tmp, 3*16
+	jz	%%chk ; no 1-3 block remainder - process multiples of 4 blocks
+
+	; 1 to 3 blocks remain (tmp = 16, 32 or 48 bytes)
+ cmp tmp, 2*16
+ jg %%eq3
+ je %%eq2
+%%eq1:
+ do_aes_load 1, %%CNTR_TYPE
+ add p_out, 1*16
+ jmp %%chk
+
+%%eq2:
+ do_aes_load 2, %%CNTR_TYPE
+ add p_out, 2*16
+ jmp %%chk
+
+%%eq3:
+ do_aes_load 3, %%CNTR_TYPE
+ add p_out, 3*16
+ ; fall through to chk
+%%chk:
+ and num_bytes, ~(3*16)
+ jz %%do_return2
+
+ cmp num_bytes, 16
+ jb %%last
+
+ ; process multiples of 4 blocks
+ movdqa xkey0, [p_keys + 0*16]
+ movdqa xkey4, [p_keys + 4*16]
+ movdqa xkey8, [p_keys + 8*16]
+ movdqa xkey12, [p_keys + 12*16]
+
+align 32
+%%main_loop2:
+ ; num_bytes is a multiple of 4 blocks + partial bytes
+ do_aes_noload 4, %%CNTR_TYPE
+ add p_out, 4*16
+ sub num_bytes, 4*16
+ cmp num_bytes, 4*16
+ jae %%main_loop2
+
+ ; Check if there is a partial block
+ or num_bytes, num_bytes
+ jnz %%last
+
+%%do_return2:
+
+%ifidn %%CNTR_TYPE, CNTR_BIT
+ pop r14
+ pop r13
+ pop r12
+%endif
+
+ ret
+
+%%last:
+
+ ; load partial block into XMM register
+ simd_load_sse_15_1 xpart, p_in, num_bytes
+
+%%final_ctr_enc:
+ ; Encryption of a single partial block
+ pshufb xcounter, xbyteswap
+ movdqa xdata0, xcounter
+ pxor xdata0, [p_keys + 16*0]
+%assign i 1
+%rep 13
+ aesenc xdata0, [p_keys + 16*i]
+%assign i (i+1)
+%endrep
+ ; created keystream
+ aesenclast xdata0, [p_keys + 16*i]
+
+ ; xor keystream with the message (scratch)
+ pxor xdata0, xpart
+
+%ifidn %%CNTR_TYPE, CNTR_BIT
+ ;; Check if there is a partial byte
+ or r_bits, r_bits
+ jz %%store_output
+
+ ;; Load output to get last partial byte
+ simd_load_sse_15_1 xtmp, p_out, num_bytes
+
+ ;; Save RCX in temporary GP register
+ mov tmp, rcx
+ mov mask, 0xff
+%ifidn r_bits, rcx
+%error "r_bits cannot be mapped to rcx!"
+%endif
+ mov cl, BYTE(r_bits)
+ shr mask, cl ;; e.g. 3 remaining bits -> mask = 00011111
+ mov rcx, tmp
+
+ movq xtmp2, mask
+
+ ;; Get number of full bytes in last block of 16 bytes
+ mov tmp, num_bytes
+ dec tmp
+ XPSLLB xtmp2, tmp, xtmp3, tmp2
+	;; At this point, xtmp2 contains a mask that is all 0s except for the
+	;; bits of the last partial byte that must be preserved from the output
+
+ ;; Clear all the bits that do not need to be preserved from the output
+ pand xtmp, xtmp2
+
+ ;; Clear the bits from the input that are not to be ciphered
+ pandn xtmp2, xdata0
+ por xtmp2, xtmp
+ movdqa xdata0, xtmp2
+%endif
+
+%%store_output:
+ ; copy result into the output buffer
+ simd_store_sse_15 p_out, xdata0, num_bytes, tmp, rax
+
+ jmp %%do_return2
+
+%%iv_is_16_bytes:
+ ; Read 16 byte IV: Nonce + ESP IV + block counter (BE)
+ movdqu xcounter, [p_IV]
+ jmp %%bswap_iv
+%endmacro
+
+align 32
+;; aes_cntr_256_sse(void *in, void *IV, void *keys, void *out, UINT64 num_bytes, UINT64 iv_len)
+MKGLOBAL(AES_CNTR_256,function,internal)
+AES_CNTR_256:
+ DO_CNTR CNTR
+
+;; aes_cntr_bit_256_sse(void *in, void *IV, void *keys, void *out, UINT64 num_bits, UINT64 iv_len)
+MKGLOBAL(AES_CNTR_BIT_256,function,internal)
+AES_CNTR_BIT_256:
+ DO_CNTR CNTR_BIT
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
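For the CNTR_BIT entry point the message length is given in bits, and the code above preserves the unused low-order bits of the last byte already present in the output buffer (mask = 0xff >> r_bits, then pand / pandn / por). A small hedged C model of that last-byte merge:

    #include <stdint.h>

    /* Model of the CNTR_BIT last-byte handling: r_bits (1..7) message bits
     * sit in the most-significant positions of the final byte; the remaining
     * low bits are kept from whatever is already in the output buffer. */
    static uint8_t merge_partial_byte(uint8_t ciphered, uint8_t old_out,
                                      unsigned r_bits)
    {
        uint8_t keep = (uint8_t)(0xff >> r_bits); /* e.g. r_bits=3 -> 0x1f */

        return (uint8_t)((old_out & keep) | (ciphered & (uint8_t)~keep));
    }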
diff --git a/src/spdk/intel-ipsec-mb/sse/aes_cbc_enc_128_x4.asm b/src/spdk/intel-ipsec-mb/sse/aes_cbc_enc_128_x4.asm
new file mode 100644
index 000000000..4b07ecf90
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/sse/aes_cbc_enc_128_x4.asm
@@ -0,0 +1,380 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+;;; Routine to do a 128 bit CBC AES encryption / CBC-MAC digest computation
+;;; processes 4 buffers at a time, single data structure as input
+;;; Updates In and Out pointers at end
+
+%include "include/os.asm"
+%include "mb_mgr_datastruct.asm"
+
+%define MOVDQ movdqu ;; assume buffers not aligned
+%macro pxor2 2
+ MOVDQ XTMP, %2
+ pxor %1, XTMP
+%endm
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; struct AES_ARGS {
+;; void* in[8];
+;; void* out[8];
+;; UINT128* keys[8];
+;; UINT128 IV[8];
+;; }
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; void aes_cbc_enc_128_x4(AES_ARGS *args, UINT64 len);
+;; arg 1: ARG : addr of AES_ARGS structure
+;; arg 2: LEN : len (in units of bytes)
+
+struc STACK
+_gpr_save: resq 8
+endstruc
+
+%ifdef LINUX
+%define arg1 rdi
+%define arg2 rsi
+%define arg3 rdx
+%define arg4 rcx
+%else
+%define arg1 rcx
+%define arg2 rdx
+%define arg3 rdi ;r8
+%define arg4 rsi ;r9
+%endif
+
+%define ARG arg1
+%define LEN arg2
+
+%define IDX rax
+
+%define IN0 r8
+%define KEYS0 rbx
+
+%define IN1 r10
+%define KEYS1 arg3
+
+%define IN2 r12
+%define KEYS2 arg4
+
+%define IN3 r14
+%define KEYS3 rbp
+
+%ifndef CBC_MAC
+;; No cipher text write back for CBC-MAC
+%define OUT0 r9
+%define OUT1 r11
+%define OUT2 r13
+%define OUT3 r15
+%endif
+
+%define XDATA0 xmm0
+%define XDATA1 xmm1
+%define XDATA2 xmm2
+%define XDATA3 xmm3
+
+%define XKEY0_3 xmm4
+%define XKEY0_6 [KEYS0 + 16*6]
+%define XTMP xmm5
+%define XKEY0_9 xmm6
+
+%define XKEY1_3 xmm7
+%define XKEY1_6 xmm8
+%define XKEY1_9 xmm9
+
+%define XKEY2_3 xmm10
+%define XKEY2_6 xmm11
+%define XKEY2_9 xmm12
+
+%define XKEY3_3 xmm13
+%define XKEY3_6 xmm14
+%define XKEY3_9 xmm15
+
+section .text
+
+%ifndef AES_CBC_ENC_X4
+
+%ifdef CBC_MAC
+MKGLOBAL(aes128_cbc_mac_x4,function,internal)
+aes128_cbc_mac_x4:
+%else
+MKGLOBAL(aes_cbc_enc_128_x4,function,internal)
+aes_cbc_enc_128_x4:
+%endif
+
+%else ;; AES_CBC_ENC_X4 already defined
+
+%ifdef CBC_MAC
+MKGLOBAL(aes128_cbc_mac_x4_no_aesni,function,internal)
+aes128_cbc_mac_x4_no_aesni:
+%else
+MKGLOBAL(aes_cbc_enc_128_x4_no_aesni,function,internal)
+aes_cbc_enc_128_x4_no_aesni:
+%endif
+
+%endif
+ sub rsp, STACK_size
+ mov [rsp + _gpr_save + 8*0], rbp
+%ifdef CBC_MAC
+ mov [rsp + _gpr_save + 8*1], rbx
+ mov [rsp + _gpr_save + 8*2], r12
+ mov [rsp + _gpr_save + 8*3], r13
+ mov [rsp + _gpr_save + 8*4], r14
+ mov [rsp + _gpr_save + 8*5], r15
+%ifndef LINUX
+ mov [rsp + _gpr_save + 8*6], rsi
+ mov [rsp + _gpr_save + 8*7], rdi
+%endif
+%endif
+ mov IDX, 16
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+ mov IN0, [ARG + _aesarg_in + 8*0]
+ mov IN1, [ARG + _aesarg_in + 8*1]
+ mov IN2, [ARG + _aesarg_in + 8*2]
+ mov IN3, [ARG + _aesarg_in + 8*3]
+
+ MOVDQ XDATA0, [IN0] ; load first block of plain text
+ MOVDQ XDATA1, [IN1] ; load first block of plain text
+ MOVDQ XDATA2, [IN2] ; load first block of plain text
+ MOVDQ XDATA3, [IN3] ; load first block of plain text
+
+ mov KEYS0, [ARG + _aesarg_keys + 8*0]
+ mov KEYS1, [ARG + _aesarg_keys + 8*1]
+ mov KEYS2, [ARG + _aesarg_keys + 8*2]
+ mov KEYS3, [ARG + _aesarg_keys + 8*3]
+
+ pxor XDATA0, [ARG + _aesarg_IV + 16*0] ; plaintext XOR IV
+ pxor XDATA1, [ARG + _aesarg_IV + 16*1] ; plaintext XOR IV
+ pxor XDATA2, [ARG + _aesarg_IV + 16*2] ; plaintext XOR IV
+ pxor XDATA3, [ARG + _aesarg_IV + 16*3] ; plaintext XOR IV
+
+%ifndef CBC_MAC
+ mov OUT0, [ARG + _aesarg_out + 8*0]
+ mov OUT1, [ARG + _aesarg_out + 8*1]
+ mov OUT2, [ARG + _aesarg_out + 8*2]
+ mov OUT3, [ARG + _aesarg_out + 8*3]
+%endif
+
+ pxor XDATA0, [KEYS0 + 16*0] ; 0. ARK
+ pxor XDATA1, [KEYS1 + 16*0] ; 0. ARK
+ pxor XDATA2, [KEYS2 + 16*0] ; 0. ARK
+ pxor XDATA3, [KEYS3 + 16*0] ; 0. ARK
+
+ aesenc XDATA0, [KEYS0 + 16*1] ; 1. ENC
+ aesenc XDATA1, [KEYS1 + 16*1] ; 1. ENC
+ aesenc XDATA2, [KEYS2 + 16*1] ; 1. ENC
+ aesenc XDATA3, [KEYS3 + 16*1] ; 1. ENC
+
+ aesenc XDATA0, [KEYS0 + 16*2] ; 2. ENC
+ aesenc XDATA1, [KEYS1 + 16*2] ; 2. ENC
+ aesenc XDATA2, [KEYS2 + 16*2] ; 2. ENC
+ aesenc XDATA3, [KEYS3 + 16*2] ; 2. ENC
+
+ movdqa XKEY0_3, [KEYS0 + 16*3] ; load round 3 key
+ movdqa XKEY1_3, [KEYS1 + 16*3] ; load round 3 key
+ movdqa XKEY2_3, [KEYS2 + 16*3] ; load round 3 key
+ movdqa XKEY3_3, [KEYS3 + 16*3] ; load round 3 key
+
+ aesenc XDATA0, XKEY0_3 ; 3. ENC
+ aesenc XDATA1, XKEY1_3 ; 3. ENC
+ aesenc XDATA2, XKEY2_3 ; 3. ENC
+ aesenc XDATA3, XKEY3_3 ; 3. ENC
+
+ aesenc XDATA0, [KEYS0 + 16*4] ; 4. ENC
+ aesenc XDATA1, [KEYS1 + 16*4] ; 4. ENC
+ aesenc XDATA2, [KEYS2 + 16*4] ; 4. ENC
+ aesenc XDATA3, [KEYS3 + 16*4] ; 4. ENC
+
+ aesenc XDATA0, [KEYS0 + 16*5] ; 5. ENC
+ aesenc XDATA1, [KEYS1 + 16*5] ; 5. ENC
+ aesenc XDATA2, [KEYS2 + 16*5] ; 5. ENC
+ aesenc XDATA3, [KEYS3 + 16*5] ; 5. ENC
+
+ movdqa XKEY1_6, [KEYS1 + 16*6] ; load round 6 key
+ movdqa XKEY2_6, [KEYS2 + 16*6] ; load round 6 key
+ movdqa XKEY3_6, [KEYS3 + 16*6] ; load round 6 key
+
+ aesenc XDATA0, XKEY0_6 ; 6. ENC
+ aesenc XDATA1, XKEY1_6 ; 6. ENC
+ aesenc XDATA2, XKEY2_6 ; 6. ENC
+ aesenc XDATA3, XKEY3_6 ; 6. ENC
+
+ aesenc XDATA0, [KEYS0 + 16*7] ; 7. ENC
+ aesenc XDATA1, [KEYS1 + 16*7] ; 7. ENC
+ aesenc XDATA2, [KEYS2 + 16*7] ; 7. ENC
+ aesenc XDATA3, [KEYS3 + 16*7] ; 7. ENC
+
+ aesenc XDATA0, [KEYS0 + 16*8] ; 8. ENC
+ aesenc XDATA1, [KEYS1 + 16*8] ; 8. ENC
+ aesenc XDATA2, [KEYS2 + 16*8] ; 8. ENC
+ aesenc XDATA3, [KEYS3 + 16*8] ; 8. ENC
+
+ movdqa XKEY0_9, [KEYS0 + 16*9] ; load round 9 key
+ movdqa XKEY1_9, [KEYS1 + 16*9] ; load round 9 key
+ movdqa XKEY2_9, [KEYS2 + 16*9] ; load round 9 key
+ movdqa XKEY3_9, [KEYS3 + 16*9] ; load round 9 key
+
+ aesenc XDATA0, XKEY0_9 ; 9. ENC
+ aesenc XDATA1, XKEY1_9 ; 9. ENC
+ aesenc XDATA2, XKEY2_9 ; 9. ENC
+ aesenc XDATA3, XKEY3_9 ; 9. ENC
+
+ aesenclast XDATA0, [KEYS0 + 16*10] ; 10. ENC
+ aesenclast XDATA1, [KEYS1 + 16*10] ; 10. ENC
+ aesenclast XDATA2, [KEYS2 + 16*10] ; 10. ENC
+ aesenclast XDATA3, [KEYS3 + 16*10] ; 10. ENC
+
+%ifndef CBC_MAC
+ MOVDQ [OUT0], XDATA0 ; write back ciphertext
+ MOVDQ [OUT1], XDATA1 ; write back ciphertext
+ MOVDQ [OUT2], XDATA2 ; write back ciphertext
+ MOVDQ [OUT3], XDATA3 ; write back ciphertext
+%endif
+ cmp LEN, IDX
+ je done
+
+main_loop:
+ pxor2 XDATA0, [IN0 + IDX] ; plaintext XOR IV
+ pxor2 XDATA1, [IN1 + IDX] ; plaintext XOR IV
+ pxor2 XDATA2, [IN2 + IDX] ; plaintext XOR IV
+ pxor2 XDATA3, [IN3 + IDX] ; plaintext XOR IV
+
+ pxor XDATA0, [KEYS0 + 16*0] ; 0. ARK
+ pxor XDATA1, [KEYS1 + 16*0] ; 0. ARK
+ pxor XDATA2, [KEYS2 + 16*0] ; 0. ARK
+ pxor XDATA3, [KEYS3 + 16*0] ; 0. ARK
+
+ aesenc XDATA0, [KEYS0 + 16*1] ; 1. ENC
+ aesenc XDATA1, [KEYS1 + 16*1] ; 1. ENC
+ aesenc XDATA2, [KEYS2 + 16*1] ; 1. ENC
+ aesenc XDATA3, [KEYS3 + 16*1] ; 1. ENC
+
+ aesenc XDATA0, [KEYS0 + 16*2] ; 2. ENC
+ aesenc XDATA1, [KEYS1 + 16*2] ; 2. ENC
+ aesenc XDATA2, [KEYS2 + 16*2] ; 2. ENC
+ aesenc XDATA3, [KEYS3 + 16*2] ; 2. ENC
+
+ aesenc XDATA0, XKEY0_3 ; 3. ENC
+ aesenc XDATA1, XKEY1_3 ; 3. ENC
+ aesenc XDATA2, XKEY2_3 ; 3. ENC
+ aesenc XDATA3, XKEY3_3 ; 3. ENC
+
+ aesenc XDATA0, [KEYS0 + 16*4] ; 4. ENC
+ aesenc XDATA1, [KEYS1 + 16*4] ; 4. ENC
+ aesenc XDATA2, [KEYS2 + 16*4] ; 4. ENC
+ aesenc XDATA3, [KEYS3 + 16*4] ; 4. ENC
+
+ aesenc XDATA0, [KEYS0 + 16*5] ; 5. ENC
+ aesenc XDATA1, [KEYS1 + 16*5] ; 5. ENC
+ aesenc XDATA2, [KEYS2 + 16*5] ; 5. ENC
+ aesenc XDATA3, [KEYS3 + 16*5] ; 5. ENC
+
+ aesenc XDATA0, XKEY0_6 ; 6. ENC
+ aesenc XDATA1, XKEY1_6 ; 6. ENC
+ aesenc XDATA2, XKEY2_6 ; 6. ENC
+ aesenc XDATA3, XKEY3_6 ; 6. ENC
+
+ aesenc XDATA0, [KEYS0 + 16*7] ; 7. ENC
+ aesenc XDATA1, [KEYS1 + 16*7] ; 7. ENC
+ aesenc XDATA2, [KEYS2 + 16*7] ; 7. ENC
+ aesenc XDATA3, [KEYS3 + 16*7] ; 7. ENC
+
+ aesenc XDATA0, [KEYS0 + 16*8] ; 8. ENC
+ aesenc XDATA1, [KEYS1 + 16*8] ; 8. ENC
+ aesenc XDATA2, [KEYS2 + 16*8] ; 8. ENC
+ aesenc XDATA3, [KEYS3 + 16*8] ; 8. ENC
+
+ aesenc XDATA0, XKEY0_9 ; 9. ENC
+ aesenc XDATA1, XKEY1_9 ; 9. ENC
+ aesenc XDATA2, XKEY2_9 ; 9. ENC
+ aesenc XDATA3, XKEY3_9 ; 9. ENC
+
+ aesenclast XDATA0, [KEYS0 + 16*10] ; 10. ENC
+ aesenclast XDATA1, [KEYS1 + 16*10] ; 10. ENC
+ aesenclast XDATA2, [KEYS2 + 16*10] ; 10. ENC
+ aesenclast XDATA3, [KEYS3 + 16*10] ; 10. ENC
+
+%ifndef CBC_MAC
+ ;; No cipher text write back for CBC-MAC
+ MOVDQ [OUT0 + IDX], XDATA0 ; write back ciphertext
+ MOVDQ [OUT1 + IDX], XDATA1 ; write back ciphertext
+ MOVDQ [OUT2 + IDX], XDATA2 ; write back ciphertext
+ MOVDQ [OUT3 + IDX], XDATA3 ; write back ciphertext
+%endif
+
+ add IDX, 16
+ cmp LEN, IDX
+ jne main_loop
+
+done:
+ ;; update IV / store digest for CBC-MAC
+ movdqa [ARG + _aesarg_IV + 16*0], XDATA0
+ movdqa [ARG + _aesarg_IV + 16*1], XDATA1
+ movdqa [ARG + _aesarg_IV + 16*2], XDATA2
+ movdqa [ARG + _aesarg_IV + 16*3], XDATA3
+
+ ;; update IN and OUT
+ add IN0, LEN
+ mov [ARG + _aesarg_in + 8*0], IN0
+ add IN1, LEN
+ mov [ARG + _aesarg_in + 8*1], IN1
+ add IN2, LEN
+ mov [ARG + _aesarg_in + 8*2], IN2
+ add IN3, LEN
+ mov [ARG + _aesarg_in + 8*3], IN3
+
+%ifndef CBC_MAC
+ ;; No OUT pointer updates for CBC-MAC
+ add OUT0, LEN
+ mov [ARG + _aesarg_out + 8*0], OUT0
+ add OUT1, LEN
+ mov [ARG + _aesarg_out + 8*1], OUT1
+ add OUT2, LEN
+ mov [ARG + _aesarg_out + 8*2], OUT2
+ add OUT3, LEN
+ mov [ARG + _aesarg_out + 8*3], OUT3
+%endif
+
+%ifdef CBC_MAC
+ mov rbx, [rsp + _gpr_save + 8*1]
+ mov r12, [rsp + _gpr_save + 8*2]
+ mov r13, [rsp + _gpr_save + 8*3]
+ mov r14, [rsp + _gpr_save + 8*4]
+ mov r15, [rsp + _gpr_save + 8*5]
+%ifndef LINUX
+ mov rsi, [rsp + _gpr_save + 8*6]
+ mov rdi, [rsp + _gpr_save + 8*7]
+%endif
+%endif
+ mov rbp, [rsp + _gpr_save + 8*0]
+ add rsp, STACK_size
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
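The AES_ARGS layout referenced in the comment block near the top of this file is defined in mb_mgr_datastruct.asm; the structure below is only an illustrative C mirror of the fields this x4 routine touches (lanes 0-3 of each array), under the assumption that the real layout is 8 pointers per field followed by 8 inline 16-byte IVs. On return the in pointers (and, except for the CBC-MAC variant, the out pointers) have been advanced by len and the IV slots hold the last ciphertext block or the CBC-MAC digest.

    #include <stdint.h>

    /* Illustrative C mirror of the AES_ARGS fields used by the x4 routine;
     * the authoritative definition lives in mb_mgr_datastruct.asm and the
     * field order and sizes here are assumptions made for this sketch. */
    struct aes_args_sketch {
        const void *in[8];     /* per-lane input pointers                  */
        void       *out[8];    /* per-lane output pointers                 */
        const void *keys[8];   /* per-lane expanded AES-128 round keys     */
        uint8_t     iv[8][16]; /* per-lane IV / CBC-MAC state; 16-byte
                                  aligned in the real structure (movdqa)   */
    };

    /* Prototype from the comment above, with the sketch structure swapped
     * in for AES_ARGS; len is in bytes and must be a multiple of 16. */
    void aes_cbc_enc_128_x4(struct aes_args_sketch *args, uint64_t len);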
diff --git a/src/spdk/intel-ipsec-mb/sse/aes_cbc_enc_192_x4.asm b/src/spdk/intel-ipsec-mb/sse/aes_cbc_enc_192_x4.asm
new file mode 100644
index 000000000..c9f1cc3c5
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/sse/aes_cbc_enc_192_x4.asm
@@ -0,0 +1,349 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+;;; routine to do a 192 bit CBC AES encrypt
+;;; process 4 buffers at a time, single data structure as input
+;;; Updates In and Out pointers at end
+
+%include "include/os.asm"
+%include "mb_mgr_datastruct.asm"
+
+%define MOVDQ movdqu ;; assume buffers not aligned
+%macro pxor2 2
+ MOVDQ XTMP, %2
+ pxor %1, XTMP
+%endm
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; struct AES_ARGS {
+;; void* in[8];
+;; void* out[8];
+;; UINT128* keys[8];
+;; UINT128 IV[8];
+;; }
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; void aes_cbc_enc_192_x4(AES_ARGS *args, UINT64 len);
+;; arg 1: ARG : addr of AES_ARGS structure
+;; arg 2: LEN : len (in units of bytes)
+
+%ifdef LINUX
+%define ARG rdi
+%define LEN rsi
+%define REG3 rcx
+%define REG4 rdx
+%else
+%define ARG rcx
+%define LEN rdx
+%define REG3 rsi
+%define REG4 rdi
+%endif
+
+%define IDX rax
+
+%define IN0 r8
+%define KEYS0 rbx
+%define OUT0 r9
+
+%define IN1 r10
+%define KEYS1 REG3
+%define OUT1 r11
+
+%define IN2 r12
+%define KEYS2 REG4
+%define OUT2 r13
+
+%define IN3 r14
+%define KEYS3 rbp
+%define OUT3 r15
+
+
+%define XDATA0 xmm0
+%define XDATA1 xmm1
+%define XDATA2 xmm2
+%define XDATA3 xmm3
+
+%define XKEY0_3 xmm4
+%define XKEY0_6 [KEYS0 + 16*6]
+%define XTMP xmm5
+%define XKEY0_9 xmm6
+
+%define XKEY1_3 xmm7
+%define XKEY1_6 xmm8
+%define XKEY1_9 xmm9
+
+%define XKEY2_3 xmm10
+%define XKEY2_6 xmm11
+%define XKEY2_9 xmm12
+
+%define XKEY3_3 xmm13
+%define XKEY3_6 xmm14
+%define XKEY3_9 xmm15
+
+%ifndef AES_CBC_ENC_X4
+%define AES_CBC_ENC_X4 aes_cbc_enc_192_x4
+%endif
+
+section .text
+
+MKGLOBAL(AES_CBC_ENC_X4,function,internal)
+AES_CBC_ENC_X4:
+
+ push rbp
+
+ mov IDX, 16
+
+ mov IN0, [ARG + _aesarg_in + 8*0]
+ mov IN1, [ARG + _aesarg_in + 8*1]
+ mov IN2, [ARG + _aesarg_in + 8*2]
+ mov IN3, [ARG + _aesarg_in + 8*3]
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+ MOVDQ XDATA0, [IN0] ; load first block of plain text
+ MOVDQ XDATA1, [IN1] ; load first block of plain text
+ MOVDQ XDATA2, [IN2] ; load first block of plain text
+ MOVDQ XDATA3, [IN3] ; load first block of plain text
+
+ mov KEYS0, [ARG + _aesarg_keys + 8*0]
+ mov KEYS1, [ARG + _aesarg_keys + 8*1]
+ mov KEYS2, [ARG + _aesarg_keys + 8*2]
+ mov KEYS3, [ARG + _aesarg_keys + 8*3]
+
+ pxor XDATA0, [ARG + _aesarg_IV + 16*0] ; plaintext XOR IV
+ pxor XDATA1, [ARG + _aesarg_IV + 16*1] ; plaintext XOR IV
+ pxor XDATA2, [ARG + _aesarg_IV + 16*2] ; plaintext XOR IV
+ pxor XDATA3, [ARG + _aesarg_IV + 16*3] ; plaintext XOR IV
+
+ mov OUT0, [ARG + _aesarg_out + 8*0]
+ mov OUT1, [ARG + _aesarg_out + 8*1]
+ mov OUT2, [ARG + _aesarg_out + 8*2]
+ mov OUT3, [ARG + _aesarg_out + 8*3]
+
+ pxor XDATA0, [KEYS0 + 16*0] ; 0. ARK
+ pxor XDATA1, [KEYS1 + 16*0] ; 0. ARK
+ pxor XDATA2, [KEYS2 + 16*0] ; 0. ARK
+ pxor XDATA3, [KEYS3 + 16*0] ; 0. ARK
+
+ aesenc XDATA0, [KEYS0 + 16*1] ; 1. ENC
+ aesenc XDATA1, [KEYS1 + 16*1] ; 1. ENC
+ aesenc XDATA2, [KEYS2 + 16*1] ; 1. ENC
+ aesenc XDATA3, [KEYS3 + 16*1] ; 1. ENC
+
+ aesenc XDATA0, [KEYS0 + 16*2] ; 2. ENC
+ aesenc XDATA1, [KEYS1 + 16*2] ; 2. ENC
+ aesenc XDATA2, [KEYS2 + 16*2] ; 2. ENC
+ aesenc XDATA3, [KEYS3 + 16*2] ; 2. ENC
+
+ movdqa XKEY0_3, [KEYS0 + 16*3] ; load round 3 key
+ movdqa XKEY1_3, [KEYS1 + 16*3] ; load round 3 key
+ movdqa XKEY2_3, [KEYS2 + 16*3] ; load round 3 key
+ movdqa XKEY3_3, [KEYS3 + 16*3] ; load round 3 key
+
+ aesenc XDATA0, XKEY0_3 ; 3. ENC
+ aesenc XDATA1, XKEY1_3 ; 3. ENC
+ aesenc XDATA2, XKEY2_3 ; 3. ENC
+ aesenc XDATA3, XKEY3_3 ; 3. ENC
+
+ aesenc XDATA0, [KEYS0 + 16*4] ; 4. ENC
+ aesenc XDATA1, [KEYS1 + 16*4] ; 4. ENC
+ aesenc XDATA2, [KEYS2 + 16*4] ; 4. ENC
+ aesenc XDATA3, [KEYS3 + 16*4] ; 4. ENC
+
+ aesenc XDATA0, [KEYS0 + 16*5] ; 5. ENC
+ aesenc XDATA1, [KEYS1 + 16*5] ; 5. ENC
+ aesenc XDATA2, [KEYS2 + 16*5] ; 5. ENC
+ aesenc XDATA3, [KEYS3 + 16*5] ; 5. ENC
+
+ movdqa XKEY1_6, [KEYS1 + 16*6] ; load round 6 key
+ movdqa XKEY2_6, [KEYS2 + 16*6] ; load round 6 key
+ movdqa XKEY3_6, [KEYS3 + 16*6] ; load round 6 key
+
+ aesenc XDATA0, XKEY0_6 ; 6. ENC
+ aesenc XDATA1, XKEY1_6 ; 6. ENC
+ aesenc XDATA2, XKEY2_6 ; 6. ENC
+ aesenc XDATA3, XKEY3_6 ; 6. ENC
+
+ aesenc XDATA0, [KEYS0 + 16*7] ; 7. ENC
+ aesenc XDATA1, [KEYS1 + 16*7] ; 7. ENC
+ aesenc XDATA2, [KEYS2 + 16*7] ; 7. ENC
+ aesenc XDATA3, [KEYS3 + 16*7] ; 7. ENC
+
+ aesenc XDATA0, [KEYS0 + 16*8] ; 8. ENC
+ aesenc XDATA1, [KEYS1 + 16*8] ; 8. ENC
+ aesenc XDATA2, [KEYS2 + 16*8] ; 8. ENC
+ aesenc XDATA3, [KEYS3 + 16*8] ; 8. ENC
+
+ movdqa XKEY0_9, [KEYS0 + 16*9] ; load round 9 key
+ movdqa XKEY1_9, [KEYS1 + 16*9] ; load round 9 key
+ movdqa XKEY2_9, [KEYS2 + 16*9] ; load round 9 key
+ movdqa XKEY3_9, [KEYS3 + 16*9] ; load round 9 key
+
+ aesenc XDATA0, XKEY0_9 ; 9. ENC
+ aesenc XDATA1, XKEY1_9 ; 9. ENC
+ aesenc XDATA2, XKEY2_9 ; 9. ENC
+ aesenc XDATA3, XKEY3_9 ; 9. ENC
+
+ aesenc XDATA0, [KEYS0 + 16*10] ; 10. ENC
+ aesenc XDATA1, [KEYS1 + 16*10] ; 10. ENC
+ aesenc XDATA2, [KEYS2 + 16*10] ; 10. ENC
+ aesenc XDATA3, [KEYS3 + 16*10] ; 10. ENC
+
+ aesenc XDATA0, [KEYS0 + 16*11] ; 11. ENC
+ aesenc XDATA1, [KEYS1 + 16*11] ; 11. ENC
+ aesenc XDATA2, [KEYS2 + 16*11] ; 11. ENC
+ aesenc XDATA3, [KEYS3 + 16*11] ; 11. ENC
+
+ aesenclast XDATA0, [KEYS0 + 16*12] ; 12. ENC
+ aesenclast XDATA1, [KEYS1 + 16*12] ; 12. ENC
+ aesenclast XDATA2, [KEYS2 + 16*12] ; 12. ENC
+ aesenclast XDATA3, [KEYS3 + 16*12] ; 12. ENC
+
+ MOVDQ [OUT0], XDATA0 ; write back ciphertext
+ MOVDQ [OUT1], XDATA1 ; write back ciphertext
+ MOVDQ [OUT2], XDATA2 ; write back ciphertext
+ MOVDQ [OUT3], XDATA3 ; write back ciphertext
+
+ cmp LEN, IDX
+ je done
+
+main_loop:
+ pxor2 XDATA0, [IN0 + IDX] ; plaintext XOR IV
+ pxor2 XDATA1, [IN1 + IDX] ; plaintext XOR IV
+ pxor2 XDATA2, [IN2 + IDX] ; plaintext XOR IV
+ pxor2 XDATA3, [IN3 + IDX] ; plaintext XOR IV
+
+
+ pxor XDATA0, [KEYS0 + 16*0] ; 0. ARK
+ pxor XDATA1, [KEYS1 + 16*0] ; 0. ARK
+ pxor XDATA2, [KEYS2 + 16*0] ; 0. ARK
+ pxor XDATA3, [KEYS3 + 16*0] ; 0. ARK
+
+ aesenc XDATA0, [KEYS0 + 16*1] ; 1. ENC
+ aesenc XDATA1, [KEYS1 + 16*1] ; 1. ENC
+ aesenc XDATA2, [KEYS2 + 16*1] ; 1. ENC
+ aesenc XDATA3, [KEYS3 + 16*1] ; 1. ENC
+
+ aesenc XDATA0, [KEYS0 + 16*2] ; 2. ENC
+ aesenc XDATA1, [KEYS1 + 16*2] ; 2. ENC
+ aesenc XDATA2, [KEYS2 + 16*2] ; 2. ENC
+ aesenc XDATA3, [KEYS3 + 16*2] ; 2. ENC
+
+ aesenc XDATA0, XKEY0_3 ; 3. ENC
+ aesenc XDATA1, XKEY1_3 ; 3. ENC
+ aesenc XDATA2, XKEY2_3 ; 3. ENC
+ aesenc XDATA3, XKEY3_3 ; 3. ENC
+
+ aesenc XDATA0, [KEYS0 + 16*4] ; 4. ENC
+ aesenc XDATA1, [KEYS1 + 16*4] ; 4. ENC
+ aesenc XDATA2, [KEYS2 + 16*4] ; 4. ENC
+ aesenc XDATA3, [KEYS3 + 16*4] ; 4. ENC
+
+ aesenc XDATA0, [KEYS0 + 16*5] ; 5. ENC
+ aesenc XDATA1, [KEYS1 + 16*5] ; 5. ENC
+ aesenc XDATA2, [KEYS2 + 16*5] ; 5. ENC
+ aesenc XDATA3, [KEYS3 + 16*5] ; 5. ENC
+
+ aesenc XDATA0, XKEY0_6 ; 6. ENC
+ aesenc XDATA1, XKEY1_6 ; 6. ENC
+ aesenc XDATA2, XKEY2_6 ; 6. ENC
+ aesenc XDATA3, XKEY3_6 ; 6. ENC
+
+ aesenc XDATA0, [KEYS0 + 16*7] ; 7. ENC
+ aesenc XDATA1, [KEYS1 + 16*7] ; 7. ENC
+ aesenc XDATA2, [KEYS2 + 16*7] ; 7. ENC
+ aesenc XDATA3, [KEYS3 + 16*7] ; 7. ENC
+
+ aesenc XDATA0, [KEYS0 + 16*8] ; 8. ENC
+ aesenc XDATA1, [KEYS1 + 16*8] ; 8. ENC
+ aesenc XDATA2, [KEYS2 + 16*8] ; 8. ENC
+ aesenc XDATA3, [KEYS3 + 16*8] ; 8. ENC
+
+ aesenc XDATA0, XKEY0_9 ; 9. ENC
+ aesenc XDATA1, XKEY1_9 ; 9. ENC
+ aesenc XDATA2, XKEY2_9 ; 9. ENC
+ aesenc XDATA3, XKEY3_9 ; 9. ENC
+
+ aesenc XDATA0, [KEYS0 + 16*10] ; 10. ENC
+ aesenc XDATA1, [KEYS1 + 16*10] ; 10. ENC
+ aesenc XDATA2, [KEYS2 + 16*10] ; 10. ENC
+ aesenc XDATA3, [KEYS3 + 16*10] ; 10. ENC
+
+ aesenc XDATA0, [KEYS0 + 16*11] ; 11. ENC
+ aesenc XDATA1, [KEYS1 + 16*11] ; 11. ENC
+ aesenc XDATA2, [KEYS2 + 16*11] ; 11. ENC
+ aesenc XDATA3, [KEYS3 + 16*11] ; 11. ENC
+
+ aesenclast XDATA0, [KEYS0 + 16*12] ; 12. ENC
+ aesenclast XDATA1, [KEYS1 + 16*12] ; 12. ENC
+ aesenclast XDATA2, [KEYS2 + 16*12] ; 12. ENC
+ aesenclast XDATA3, [KEYS3 + 16*12] ; 12. ENC
+
+
+
+ MOVDQ [OUT0 + IDX], XDATA0 ; write back ciphertext
+	MOVDQ	[OUT1 + IDX], XDATA1	; write back ciphertext
+	MOVDQ	[OUT2 + IDX], XDATA2	; write back ciphertext
+	MOVDQ	[OUT3 + IDX], XDATA3	; write back ciphertext
+
+
+ add IDX, 16
+ cmp LEN, IDX
+ jne main_loop
+
+done:
+ ;; update IV
+ movdqa [ARG + _aesarg_IV + 16*0], XDATA0
+ movdqa [ARG + _aesarg_IV + 16*1], XDATA1
+ movdqa [ARG + _aesarg_IV + 16*2], XDATA2
+ movdqa [ARG + _aesarg_IV + 16*3], XDATA3
+
+ ;; update IN and OUT
+ add IN0, LEN
+ mov [ARG + _aesarg_in + 8*0], IN0
+ add IN1, LEN
+ mov [ARG + _aesarg_in + 8*1], IN1
+ add IN2, LEN
+ mov [ARG + _aesarg_in + 8*2], IN2
+ add IN3, LEN
+ mov [ARG + _aesarg_in + 8*3], IN3
+
+ add OUT0, LEN
+ mov [ARG + _aesarg_out + 8*0], OUT0
+ add OUT1, LEN
+ mov [ARG + _aesarg_out + 8*1], OUT1
+ add OUT2, LEN
+ mov [ARG + _aesarg_out + 8*2], OUT2
+ add OUT3, LEN
+ mov [ARG + _aesarg_out + 8*3], OUT3
+
+ pop rbp
+
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
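AES-192 runs 12 rounds, which is why this file applies aesenc with keys 1 through 11 and aesenclast with key 12, where the 128-bit variant stops at key 10 and the 256-bit variant continues to key 14. Each of the four lanes implements the usual CBC chain; below is a hedged single-lane C reference (aes192_encrypt_block is a hypothetical helper, not a library symbol) of the computation that the x4 code runs four buffers wide.

    #include <stdint.h>
    #include <string.h>

    /* Hypothetical single-block primitive covering rounds 0..12
     * (ARK + 11 aesenc + aesenclast) of AES-192. */
    void aes192_encrypt_block(const void *round_keys, const uint8_t in[16],
                              uint8_t out[16]);

    /* Single-lane CBC reference: C[i] = E_K(P[i] XOR C[i-1]), C[-1] = IV.
     * len is in bytes and must be a multiple of 16. */
    static void cbc_enc_one_lane(const void *round_keys, uint8_t iv[16],
                                 const uint8_t *pt, uint8_t *ct, uint64_t len)
    {
        uint8_t chain[16];

        memcpy(chain, iv, 16);
        for (uint64_t off = 0; off < len; off += 16) {
            uint8_t block[16];

            for (int b = 0; b < 16; b++)
                block[b] = pt[off + b] ^ chain[b]; /* plaintext XOR IV/chain */
            aes192_encrypt_block(round_keys, block, chain);
            memcpy(&ct[off], chain, 16);
        }
        memcpy(iv, chain, 16); /* the routine writes the updated IV back */
    }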
diff --git a/src/spdk/intel-ipsec-mb/sse/aes_cbc_enc_256_x4.asm b/src/spdk/intel-ipsec-mb/sse/aes_cbc_enc_256_x4.asm
new file mode 100644
index 000000000..e51f4caac
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/sse/aes_cbc_enc_256_x4.asm
@@ -0,0 +1,368 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+;;; routine to do a 256 bit CBC AES encrypt
+;;; process 4 buffers at a time, single data structure as input
+;;; Updates In and Out pointers at end
+
+%include "include/os.asm"
+%include "mb_mgr_datastruct.asm"
+
+%define MOVDQ movdqu ;; assume buffers not aligned
+%macro pxor2 2
+ MOVDQ XTMP, %2
+ pxor %1, XTMP
+%endm
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; struct AES_ARGS {
+;; void* in[8];
+;; void* out[8];
+;; UINT128* keys[8];
+;; UINT128 IV[8];
+;; }
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; void aes_cbc_enc_256_x4(AES_ARGS *args, UINT64 len);
+;; arg 1: ARG : addr of AES_ARGS structure
+;; arg 2: LEN : len (in units of bytes)
+
+%ifdef LINUX
+%define ARG rdi
+%define LEN rsi
+%define REG3 rcx
+%define REG4 rdx
+%else
+%define ARG rcx
+%define LEN rdx
+%define REG3 rsi
+%define REG4 rdi
+%endif
+
+%define IDX rax
+
+%define IN0 r8
+%define KEYS0 rbx
+%define OUT0 r9
+
+%define IN1 r10
+%define KEYS1 REG3
+%define OUT1 r11
+
+%define IN2 r12
+%define KEYS2 REG4
+%define OUT2 r13
+
+%define IN3 r14
+%define KEYS3 rbp
+%define OUT3 r15
+
+
+%define XDATA0 xmm0
+%define XDATA1 xmm1
+%define XDATA2 xmm2
+%define XDATA3 xmm3
+
+%define XKEY0_3 xmm4
+%define XKEY0_6 [KEYS0 + 16*6]
+%define XTMP xmm5
+%define XKEY0_9 xmm6
+
+%define XKEY1_3 xmm7
+%define XKEY1_6 xmm8
+%define XKEY1_9 xmm9
+
+%define XKEY2_3 xmm10
+%define XKEY2_6 xmm11
+%define XKEY2_9 xmm12
+
+%define XKEY3_3 xmm13
+%define XKEY3_6 xmm14
+%define XKEY3_9 xmm15
+
+%ifndef AES_CBC_ENC_X4
+%define AES_CBC_ENC_X4 aes_cbc_enc_256_x4
+%endif
+
+section .text
+
+MKGLOBAL(AES_CBC_ENC_X4,function,internal)
+AES_CBC_ENC_X4:
+
+ push rbp
+
+ mov IDX, 16
+
+ mov IN0, [ARG + _aesarg_in + 8*0]
+ mov IN1, [ARG + _aesarg_in + 8*1]
+ mov IN2, [ARG + _aesarg_in + 8*2]
+ mov IN3, [ARG + _aesarg_in + 8*3]
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+ MOVDQ XDATA0, [IN0] ; load first block of plain text
+ MOVDQ XDATA1, [IN1] ; load first block of plain text
+ MOVDQ XDATA2, [IN2] ; load first block of plain text
+ MOVDQ XDATA3, [IN3] ; load first block of plain text
+
+ mov KEYS0, [ARG + _aesarg_keys + 8*0]
+ mov KEYS1, [ARG + _aesarg_keys + 8*1]
+ mov KEYS2, [ARG + _aesarg_keys + 8*2]
+ mov KEYS3, [ARG + _aesarg_keys + 8*3]
+
+ pxor XDATA0, [ARG + _aesarg_IV + 16*0] ; plaintext XOR IV
+ pxor XDATA1, [ARG + _aesarg_IV + 16*1] ; plaintext XOR IV
+ pxor XDATA2, [ARG + _aesarg_IV + 16*2] ; plaintext XOR IV
+ pxor XDATA3, [ARG + _aesarg_IV + 16*3] ; plaintext XOR IV
+
+ mov OUT0, [ARG + _aesarg_out + 8*0]
+ mov OUT1, [ARG + _aesarg_out + 8*1]
+ mov OUT2, [ARG + _aesarg_out + 8*2]
+ mov OUT3, [ARG + _aesarg_out + 8*3]
+
+ pxor XDATA0, [KEYS0 + 16*0] ; 0. ARK
+ pxor XDATA1, [KEYS1 + 16*0] ; 0. ARK
+ pxor XDATA2, [KEYS2 + 16*0] ; 0. ARK
+ pxor XDATA3, [KEYS3 + 16*0] ; 0. ARK
+
+ aesenc XDATA0, [KEYS0 + 16*1] ; 1. ENC
+ aesenc XDATA1, [KEYS1 + 16*1] ; 1. ENC
+ aesenc XDATA2, [KEYS2 + 16*1] ; 1. ENC
+ aesenc XDATA3, [KEYS3 + 16*1] ; 1. ENC
+
+ aesenc XDATA0, [KEYS0 + 16*2] ; 2. ENC
+ aesenc XDATA1, [KEYS1 + 16*2] ; 2. ENC
+ aesenc XDATA2, [KEYS2 + 16*2] ; 2. ENC
+ aesenc XDATA3, [KEYS3 + 16*2] ; 2. ENC
+
+ movdqa XKEY0_3, [KEYS0 + 16*3] ; load round 3 key
+ movdqa XKEY1_3, [KEYS1 + 16*3] ; load round 3 key
+ movdqa XKEY2_3, [KEYS2 + 16*3] ; load round 3 key
+ movdqa XKEY3_3, [KEYS3 + 16*3] ; load round 3 key
+
+ aesenc XDATA0, XKEY0_3 ; 3. ENC
+ aesenc XDATA1, XKEY1_3 ; 3. ENC
+ aesenc XDATA2, XKEY2_3 ; 3. ENC
+ aesenc XDATA3, XKEY3_3 ; 3. ENC
+
+ aesenc XDATA0, [KEYS0 + 16*4] ; 4. ENC
+ aesenc XDATA1, [KEYS1 + 16*4] ; 4. ENC
+ aesenc XDATA2, [KEYS2 + 16*4] ; 4. ENC
+ aesenc XDATA3, [KEYS3 + 16*4] ; 4. ENC
+
+ aesenc XDATA0, [KEYS0 + 16*5] ; 5. ENC
+ aesenc XDATA1, [KEYS1 + 16*5] ; 5. ENC
+ aesenc XDATA2, [KEYS2 + 16*5] ; 5. ENC
+ aesenc XDATA3, [KEYS3 + 16*5] ; 5. ENC
+
+ movdqa XKEY1_6, [KEYS1 + 16*6] ; load round 6 key
+ movdqa XKEY2_6, [KEYS2 + 16*6] ; load round 6 key
+ movdqa XKEY3_6, [KEYS3 + 16*6] ; load round 6 key
+
+ aesenc XDATA0, XKEY0_6 ; 6. ENC
+ aesenc XDATA1, XKEY1_6 ; 6. ENC
+ aesenc XDATA2, XKEY2_6 ; 6. ENC
+ aesenc XDATA3, XKEY3_6 ; 6. ENC
+
+ aesenc XDATA0, [KEYS0 + 16*7] ; 7. ENC
+ aesenc XDATA1, [KEYS1 + 16*7] ; 7. ENC
+ aesenc XDATA2, [KEYS2 + 16*7] ; 7. ENC
+ aesenc XDATA3, [KEYS3 + 16*7] ; 7. ENC
+
+ aesenc XDATA0, [KEYS0 + 16*8] ; 8. ENC
+ aesenc XDATA1, [KEYS1 + 16*8] ; 8. ENC
+ aesenc XDATA2, [KEYS2 + 16*8] ; 8. ENC
+ aesenc XDATA3, [KEYS3 + 16*8] ; 8. ENC
+
+ movdqa XKEY0_9, [KEYS0 + 16*9] ; load round 9 key
+ movdqa XKEY1_9, [KEYS1 + 16*9] ; load round 9 key
+ movdqa XKEY2_9, [KEYS2 + 16*9] ; load round 9 key
+ movdqa XKEY3_9, [KEYS3 + 16*9] ; load round 9 key
+
+ aesenc XDATA0, XKEY0_9 ; 9. ENC
+ aesenc XDATA1, XKEY1_9 ; 9. ENC
+ aesenc XDATA2, XKEY2_9 ; 9. ENC
+ aesenc XDATA3, XKEY3_9 ; 9. ENC
+
+ aesenc XDATA0, [KEYS0 + 16*10] ; 10. ENC
+ aesenc XDATA1, [KEYS1 + 16*10] ; 10. ENC
+ aesenc XDATA2, [KEYS2 + 16*10] ; 10. ENC
+ aesenc XDATA3, [KEYS3 + 16*10] ; 10. ENC
+
+ aesenc XDATA0, [KEYS0 + 16*11] ; 11. ENC
+ aesenc XDATA1, [KEYS1 + 16*11] ; 11. ENC
+ aesenc XDATA2, [KEYS2 + 16*11] ; 11. ENC
+ aesenc XDATA3, [KEYS3 + 16*11] ; 11. ENC
+
+ aesenc XDATA0, [KEYS0 + 16*12] ; 12. ENC
+ aesenc XDATA1, [KEYS1 + 16*12] ; 12. ENC
+ aesenc XDATA2, [KEYS2 + 16*12] ; 12. ENC
+ aesenc XDATA3, [KEYS3 + 16*12] ; 12. ENC
+
+ aesenc XDATA0, [KEYS0 + 16*13] ; 13. ENC
+ aesenc XDATA1, [KEYS1 + 16*13] ; 13. ENC
+ aesenc XDATA2, [KEYS2 + 16*13] ; 13. ENC
+ aesenc XDATA3, [KEYS3 + 16*13] ; 13. ENC
+
+ aesenclast XDATA0, [KEYS0 + 16*14] ; 14. ENC
+ aesenclast XDATA1, [KEYS1 + 16*14] ; 14. ENC
+ aesenclast XDATA2, [KEYS2 + 16*14] ; 14. ENC
+ aesenclast XDATA3, [KEYS3 + 16*14] ; 14. ENC
+
+ MOVDQ [OUT0], XDATA0 ; write back ciphertext
+ MOVDQ [OUT1], XDATA1 ; write back ciphertext
+ MOVDQ [OUT2], XDATA2 ; write back ciphertext
+ MOVDQ [OUT3], XDATA3 ; write back ciphertext
+
+ cmp LEN, IDX
+ je done
+
+main_loop:
+ pxor2 XDATA0, [IN0 + IDX] ; plaintext XOR IV
+ pxor2 XDATA1, [IN1 + IDX] ; plaintext XOR IV
+ pxor2 XDATA2, [IN2 + IDX] ; plaintext XOR IV
+ pxor2 XDATA3, [IN3 + IDX] ; plaintext XOR IV
+
+
+ pxor XDATA0, [KEYS0 + 16*0] ; 0. ARK
+ pxor XDATA1, [KEYS1 + 16*0] ; 0. ARK
+ pxor XDATA2, [KEYS2 + 16*0] ; 0. ARK
+ pxor XDATA3, [KEYS3 + 16*0] ; 0. ARK
+
+ aesenc XDATA0, [KEYS0 + 16*1] ; 1. ENC
+ aesenc XDATA1, [KEYS1 + 16*1] ; 1. ENC
+ aesenc XDATA2, [KEYS2 + 16*1] ; 1. ENC
+ aesenc XDATA3, [KEYS3 + 16*1] ; 1. ENC
+
+ aesenc XDATA0, [KEYS0 + 16*2] ; 2. ENC
+ aesenc XDATA1, [KEYS1 + 16*2] ; 2. ENC
+ aesenc XDATA2, [KEYS2 + 16*2] ; 2. ENC
+ aesenc XDATA3, [KEYS3 + 16*2] ; 2. ENC
+
+ aesenc XDATA0, XKEY0_3 ; 3. ENC
+ aesenc XDATA1, XKEY1_3 ; 3. ENC
+ aesenc XDATA2, XKEY2_3 ; 3. ENC
+ aesenc XDATA3, XKEY3_3 ; 3. ENC
+
+ aesenc XDATA0, [KEYS0 + 16*4] ; 4. ENC
+ aesenc XDATA1, [KEYS1 + 16*4] ; 4. ENC
+ aesenc XDATA2, [KEYS2 + 16*4] ; 4. ENC
+ aesenc XDATA3, [KEYS3 + 16*4] ; 4. ENC
+
+ aesenc XDATA0, [KEYS0 + 16*5] ; 5. ENC
+ aesenc XDATA1, [KEYS1 + 16*5] ; 5. ENC
+ aesenc XDATA2, [KEYS2 + 16*5] ; 5. ENC
+ aesenc XDATA3, [KEYS3 + 16*5] ; 5. ENC
+
+ aesenc XDATA0, XKEY0_6 ; 6. ENC
+ aesenc XDATA1, XKEY1_6 ; 6. ENC
+ aesenc XDATA2, XKEY2_6 ; 6. ENC
+ aesenc XDATA3, XKEY3_6 ; 6. ENC
+
+ aesenc XDATA0, [KEYS0 + 16*7] ; 7. ENC
+ aesenc XDATA1, [KEYS1 + 16*7] ; 7. ENC
+ aesenc XDATA2, [KEYS2 + 16*7] ; 7. ENC
+ aesenc XDATA3, [KEYS3 + 16*7] ; 7. ENC
+
+ aesenc XDATA0, [KEYS0 + 16*8] ; 8. ENC
+ aesenc XDATA1, [KEYS1 + 16*8] ; 8. ENC
+ aesenc XDATA2, [KEYS2 + 16*8] ; 8. ENC
+ aesenc XDATA3, [KEYS3 + 16*8] ; 8. ENC
+
+ aesenc XDATA0, XKEY0_9 ; 9. ENC
+ aesenc XDATA1, XKEY1_9 ; 9. ENC
+ aesenc XDATA2, XKEY2_9 ; 9. ENC
+ aesenc XDATA3, XKEY3_9 ; 9. ENC
+
+ aesenc XDATA0, [KEYS0 + 16*10] ; 10. ENC
+ aesenc XDATA1, [KEYS1 + 16*10] ; 10. ENC
+ aesenc XDATA2, [KEYS2 + 16*10] ; 10. ENC
+ aesenc XDATA3, [KEYS3 + 16*10] ; 10. ENC
+
+ aesenc XDATA0, [KEYS0 + 16*11] ; 11. ENC
+ aesenc XDATA1, [KEYS1 + 16*11] ; 11. ENC
+ aesenc XDATA2, [KEYS2 + 16*11] ; 11. ENC
+ aesenc XDATA3, [KEYS3 + 16*11] ; 11. ENC
+
+ aesenc XDATA0, [KEYS0 + 16*12] ; 12. ENC
+ aesenc XDATA1, [KEYS1 + 16*12] ; 12. ENC
+ aesenc XDATA2, [KEYS2 + 16*12] ; 12. ENC
+ aesenc XDATA3, [KEYS3 + 16*12] ; 12. ENC
+
+ aesenc XDATA0, [KEYS0 + 16*13] ; 13. ENC
+ aesenc XDATA1, [KEYS1 + 16*13] ; 13. ENC
+ aesenc XDATA2, [KEYS2 + 16*13] ; 13. ENC
+ aesenc XDATA3, [KEYS3 + 16*13] ; 13. ENC
+
+ aesenclast XDATA0, [KEYS0 + 16*14] ; 14. ENC
+ aesenclast XDATA1, [KEYS1 + 16*14] ; 14. ENC
+ aesenclast XDATA2, [KEYS2 + 16*14] ; 14. ENC
+ aesenclast XDATA3, [KEYS3 + 16*14] ; 14. ENC
+
+
+ MOVDQ [OUT0 + IDX], XDATA0 ; write back ciphertext
+	MOVDQ	[OUT1 + IDX], XDATA1	; write back ciphertext
+	MOVDQ	[OUT2 + IDX], XDATA2	; write back ciphertext
+	MOVDQ	[OUT3 + IDX], XDATA3	; write back ciphertext
+
+
+ add IDX, 16
+ cmp LEN, IDX
+ jne main_loop
+
+done:
+ ;; update IV
+ movdqa [ARG + _aesarg_IV + 16*0], XDATA0
+ movdqa [ARG + _aesarg_IV + 16*1], XDATA1
+ movdqa [ARG + _aesarg_IV + 16*2], XDATA2
+ movdqa [ARG + _aesarg_IV + 16*3], XDATA3
+
+ ;; update IN and OUT
+ add IN0, LEN
+ mov [ARG + _aesarg_in + 8*0], IN0
+ add IN1, LEN
+ mov [ARG + _aesarg_in + 8*1], IN1
+ add IN2, LEN
+ mov [ARG + _aesarg_in + 8*2], IN2
+ add IN3, LEN
+ mov [ARG + _aesarg_in + 8*3], IN3
+
+ add OUT0, LEN
+ mov [ARG + _aesarg_out + 8*0], OUT0
+ add OUT1, LEN
+ mov [ARG + _aesarg_out + 8*1], OUT1
+ add OUT2, LEN
+ mov [ARG + _aesarg_out + 8*2], OUT2
+ add OUT3, LEN
+ mov [ARG + _aesarg_out + 8*3], OUT3
+
+ pop rbp
+
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/sse/aes_cfb_128_sse.asm b/src/spdk/intel-ipsec-mb/sse/aes_cfb_128_sse.asm
new file mode 100644
index 000000000..1ee400bb4
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/sse/aes_cfb_128_sse.asm
@@ -0,0 +1,167 @@
+;;
+;; Copyright (c) 2017-2019, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%include "include/os.asm"
+%include "include/memcpy.asm"
+%include "include/clear_regs.asm"
+
+;;; Routine to do 128 bit CFB AES encrypt/decrypt operations on one block only.
+;;; It processes only one buffer at a time.
+;;; It is designed to manage partial blocks of DOCSIS 3.1 SEC BPI
+
+;; In System V AMD64 ABI
+;; callee saves: RBX, RBP, R12-R15
+;; Windows x64 ABI
+;; callee saves: RBX, RBP, RDI, RSI, RSP, R12-R15
+;;
+;; Registers: RAX RBX RCX RDX RBP RSI RDI R8 R9 R10 R11 R12 R13 R14 R15
+;; -----------------------------------------------------------
+;; Windows clobbers: RAX R9 R10 R11
+;; Windows preserves: RBX RCX RDX RBP RSI RDI R8 R12 R13 R14 R15
+;; -----------------------------------------------------------
+;; Linux clobbers: RAX R9 R10
+;; Linux preserves: RBX RCX RDX RBP RSI RDI R8 R11 R12 R13 R14 R15
+;; -----------------------------------------------------------
+;;
+;; Linux/Windows clobbers: xmm0, xmm1
+;;
+
+%ifndef AES_CFB_128_ONE
+%define AES_CFB_128_ONE aes_cfb_128_one_sse
+%endif
+
+%ifdef LINUX
+%define arg1 rdi
+%define arg2 rsi
+%define arg3 rdx
+%define arg4 rcx
+%define arg5 r8
+%else
+%define arg1 rcx
+%define arg2 rdx
+%define arg3 r8
+%define arg4 r9
+%define arg5 [rsp + 5*8]
+%endif
+
+%define OUT arg1
+%define IN arg2
+%define IV arg3
+%define KEYS arg4
+%ifdef LINUX
+%define LEN arg5
+%else
+%define LEN2 arg5
+%define LEN r11
+%endif
+
+%define TMP0 rax
+%define TMP1 r10
+
+%define XDATA xmm0
+%define XIN xmm1
+
+section .text
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; void aes_cfb_128_one(void *out, void *in, void *iv, void *keys, uint64_t len)
+;; arg 1: OUT : addr to put clear/cipher text out
+;; arg 2: IN : addr to take cipher/clear text from
+;; arg 3: IV : initialization vector
+;; arg 4: KEYS: pointer to expanded keys structure (16 byte aligned)
+;; arg 5: LEN: length of the text to encrypt/decrypt (valid range is 0 to 16)
+;;
+;; AES CFB128 one block encrypt/decrypt implementation.
+;; The function doesn't update IV. The result of operation can be found in OUT.
+;;
+;; It is primarily designed to process a partial block of
+;; DOCSIS 3.1 AES Packet PDU Encryption (I.10)
+;;
+;; It processes up to one block only (up to 16 bytes).
+;;
+;; It makes sure not to read more than LEN bytes from IN and
+;; not to store more than LEN bytes to OUT.
+
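As a rough illustration, a caller could drive this helper for the trailing
partial block of a DOCSIS 3.1 BPI packet as sketched below; the wrapper name
and its parameters are placeholders, only aes_cfb_128_one_sse comes from the
assembly that follows:

    #include <stdint.h>
    #include <stddef.h>

    /* exported by the assembly below (or its no-AESNI twin) */
    extern void aes_cfb_128_one_sse(void *out, const void *in, const void *iv,
                                    const void *keys, uint64_t len);

    /* Encrypt the final 1..15 bytes of a packet; for DOCSIS BPI the previous
     * ciphertext block is typically passed as the IV for this CFB step. */
    static void docsis_partial_block_enc(uint8_t *dst, const uint8_t *src,
                                         size_t len,
                                         const uint8_t prev_cipher_block[16],
                                         const void *expanded_keys)
    {
            aes_cfb_128_one_sse(dst, src, prev_cipher_block, expanded_keys,
                                (uint64_t)len);
    }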
+MKGLOBAL(AES_CFB_128_ONE,function,)
+align 32
+AES_CFB_128_ONE:
+%ifndef LINUX
+ mov LEN, LEN2
+%endif
+%ifdef SAFE_PARAM
+ cmp IV, 0
+ jz exit_cfb
+
+ cmp KEYS, 0
+ jz exit_cfb
+
+ cmp LEN, 0
+ jz skip_in_out_check
+
+ cmp OUT, 0
+ jz exit_cfb
+
+ cmp IN, 0
+ jz exit_cfb
+
+skip_in_out_check:
+%endif
+
+ simd_load_sse_16 XIN, IN, LEN
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+ movdqu XDATA, [IV] ; IV (or next to last block)
+ pxor XDATA, [KEYS + 16*0] ; 0. ARK
+ aesenc XDATA, [KEYS + 16*1] ; 1. ENC
+ aesenc XDATA, [KEYS + 16*2] ; 2. ENC
+ aesenc XDATA, [KEYS + 16*3] ; 3. ENC
+ aesenc XDATA, [KEYS + 16*4] ; 4. ENC
+ aesenc XDATA, [KEYS + 16*5] ; 5. ENC
+ aesenc XDATA, [KEYS + 16*6] ; 6. ENC
+ aesenc XDATA, [KEYS + 16*7] ; 7. ENC
+ aesenc XDATA, [KEYS + 16*8] ; 8. ENC
+ aesenc XDATA, [KEYS + 16*9] ; 9. ENC
+ aesenclast XDATA, [KEYS + 16*10] ; 10. ENC
+
+ pxor XDATA, XIN ; plaintext/ciphertext XOR block cipher encryption
+
+ simd_store_sse OUT, XDATA, LEN, TMP0, TMP1
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+%ifdef SAFE_DATA
+ ;; XDATA and XIN are the only scratch SIMD registers used
+ clear_xmms_sse XDATA, XIN
+ clear_scratch_gps_asm
+%endif
+exit_cfb:
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/sse/aes_ecb_by4_sse.asm b/src/spdk/intel-ipsec-mb/sse/aes_ecb_by4_sse.asm
new file mode 100644
index 000000000..c4b767932
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/sse/aes_ecb_by4_sse.asm
@@ -0,0 +1,654 @@
+;;
+;; Copyright (c) 2019, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+; routine to do AES ECB encrypt/decrypt on 16n bytes doing AES by 4
+
+; XMM registers are clobbered. Saving/restoring must be done at a higher level
+
+; void aes_ecb_x_y_sse(void *in,
+; UINT128 keys[],
+; void *out,
+; UINT64 len_bytes);
+;
+; x = direction (enc/dec)
+; y = key size (128/192/256)
+; arg 1: IN:   pointer to input (plain text for encrypt, cipher text for decrypt)
+; arg 2: KEYS: pointer to keys
+; arg 3: OUT:  pointer to output (cipher text for encrypt, plain text for decrypt)
+; arg 4: LEN:  length in bytes (multiple of 16)
+;
+
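A hedged usage sketch for the routines this file defines (the helper and buffer
names are illustrative; only the aes_ecb_*_sse symbols come from the assembly
below):

    #include <stdint.h>

    extern void aes_ecb_enc_128_sse(const void *in, const void *keys,
                                    void *out, uint64_t len_bytes);

    /* len must be a multiple of 16; the routine accepts any 16n length by
     * dispatching 1..3 leading blocks and then looping 4 blocks at a time */
    static void ecb128_encrypt(const uint8_t *pt, uint8_t *ct, uint64_t len,
                               const void *expanded_round_keys)
    {
            aes_ecb_enc_128_sse(pt, expanded_round_keys, ct, len);
    }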
+%include "include/os.asm"
+
+%ifndef AES_ECB_ENC_128
+%define AES_ECB_ENC_128 aes_ecb_enc_128_sse
+%define AES_ECB_ENC_192 aes_ecb_enc_192_sse
+%define AES_ECB_ENC_256 aes_ecb_enc_256_sse
+%define AES_ECB_DEC_128 aes_ecb_dec_128_sse
+%define AES_ECB_DEC_192 aes_ecb_dec_192_sse
+%define AES_ECB_DEC_256 aes_ecb_dec_256_sse
+%endif
+
+%ifdef LINUX
+%define IN rdi
+%define KEYS rsi
+%define OUT rdx
+%define LEN rcx
+%else
+%define IN rcx
+%define KEYS rdx
+%define OUT r8
+%define LEN r9
+%endif
+
+%define IDX rax
+%define TMP IDX
+%define XDATA0 xmm0
+%define XDATA1 xmm1
+%define XDATA2 xmm2
+%define XDATA3 xmm3
+%define XKEY0 xmm4
+%define XKEY2 xmm5
+%define XKEY4 xmm6
+%define XKEY6 xmm7
+%define XKEY10 xmm8
+%define XKEY_A xmm14
+%define XKEY_B xmm15
+
+section .text
+
+%macro AES_ECB 2
+%define %%NROUNDS %1 ; [in] Number of AES rounds, numerical value
+%define %%DIR %2 ; [in] Direction (encrypt/decrypt)
+
+%ifidn %%DIR, ENC
+%define AES aesenc
+%define AES_LAST aesenclast
+%else ; DIR = DEC
+%define AES aesdec
+%define AES_LAST aesdeclast
+%endif
+ mov TMP, LEN
+ and TMP, 3*16
+ jz %%initial_4
+ cmp TMP, 2*16
+ jb %%initial_1
+ ja %%initial_3
+
+%%initial_2:
+ ; load plain/cipher text
+ movdqu XDATA0, [IN + 0*16]
+ movdqu XDATA1, [IN + 1*16]
+
+ movdqa XKEY0, [KEYS + 0*16]
+
+ pxor XDATA0, XKEY0 ; 0. ARK
+ pxor XDATA1, XKEY0
+
+ movdqa XKEY2, [KEYS + 2*16]
+
+ AES XDATA0, [KEYS + 1*16] ; 1. ENC
+ AES XDATA1, [KEYS + 1*16]
+
+ mov IDX, 2*16
+
+ AES XDATA0, XKEY2 ; 2. ENC
+ AES XDATA1, XKEY2
+
+ movdqa XKEY4, [KEYS + 4*16]
+
+ AES XDATA0, [KEYS + 3*16] ; 3. ENC
+ AES XDATA1, [KEYS + 3*16]
+
+ AES XDATA0, XKEY4 ; 4. ENC
+ AES XDATA1, XKEY4
+
+ movdqa XKEY6, [KEYS + 6*16]
+
+ AES XDATA0, [KEYS + 5*16] ; 5. ENC
+ AES XDATA1, [KEYS + 5*16]
+
+ AES XDATA0, XKEY6 ; 6. ENC
+ AES XDATA1, XKEY6
+
+ movdqa XKEY_B, [KEYS + 8*16]
+
+ AES XDATA0, [KEYS + 7*16] ; 7. ENC
+ AES XDATA1, [KEYS + 7*16]
+
+ AES XDATA0, XKEY_B ; 8. ENC
+ AES XDATA1, XKEY_B
+
+ movdqa XKEY10, [KEYS + 10*16]
+
+ AES XDATA0, [KEYS + 9*16] ; 9. ENC
+ AES XDATA1, [KEYS + 9*16]
+
+%if %%NROUNDS >= 12
+ AES XDATA0, XKEY10 ; 10. ENC
+ AES XDATA1, XKEY10
+
+ AES XDATA0, [KEYS + 11*16] ; 11. ENC
+ AES XDATA1, [KEYS + 11*16]
+%endif
+
+%if %%NROUNDS == 14
+ AES XDATA0, [KEYS + 12*16] ; 12. ENC
+ AES XDATA1, [KEYS + 12*16]
+
+ AES XDATA0, [KEYS + 13*16] ; 13. ENC
+ AES XDATA1, [KEYS + 13*16]
+%endif
+
+%if %%NROUNDS == 10
+ AES_LAST XDATA0, XKEY10 ; 10. ENC
+ AES_LAST XDATA1, XKEY10
+%elif %%NROUNDS == 12
+ AES_LAST XDATA0, [KEYS + 12*16] ; 12. ENC
+ AES_LAST XDATA1, [KEYS + 12*16]
+%else
+ AES_LAST XDATA0, [KEYS + 14*16] ; 14. ENC
+ AES_LAST XDATA1, [KEYS + 14*16]
+%endif
+ movdqu [OUT + 0*16], XDATA0
+ movdqu [OUT + 1*16], XDATA1
+
+ cmp LEN, 2*16
+ je %%done
+ jmp %%main_loop
+
+
+ align 16
+%%initial_1:
+ ; load plain/cipher text
+ movdqu XDATA0, [IN + 0*16]
+
+ movdqa XKEY0, [KEYS + 0*16]
+
+ pxor XDATA0, XKEY0 ; 0. ARK
+
+ movdqa XKEY2, [KEYS + 2*16]
+
+ AES XDATA0, [KEYS + 1*16] ; 1. ENC
+
+ mov IDX, 1*16
+
+ AES XDATA0, XKEY2 ; 2. ENC
+
+ movdqa XKEY4, [KEYS + 4*16]
+
+ AES XDATA0, [KEYS + 3*16] ; 3. ENC
+
+ AES XDATA0, XKEY4 ; 4. ENC
+
+ movdqa XKEY6, [KEYS + 6*16]
+
+ AES XDATA0, [KEYS + 5*16] ; 5. ENC
+
+ AES XDATA0, XKEY6 ; 6. ENC
+
+ movdqa XKEY_B, [KEYS + 8*16]
+
+ AES XDATA0, [KEYS + 7*16] ; 7. ENC
+
+ AES XDATA0, XKEY_B ; 8. ENC
+
+ movdqa XKEY10, [KEYS + 10*16]
+
+ AES XDATA0, [KEYS + 9*16] ; 9. ENC
+
+%if %%NROUNDS >= 12
+ AES XDATA0, XKEY10 ; 10. ENC
+
+ AES XDATA0, [KEYS + 11*16] ; 11. ENC
+%endif
+
+%if %%NROUNDS == 14
+ AES XDATA0, [KEYS + 12*16] ; 12. ENC
+
+ AES XDATA0, [KEYS + 13*16] ; 13. ENC
+%endif
+
+%if %%NROUNDS == 10
+
+ AES_LAST XDATA0, XKEY10 ; 10. ENC
+%elif %%NROUNDS == 12
+ AES_LAST XDATA0, [KEYS + 12*16] ; 12. ENC
+%else
+ AES_LAST XDATA0, [KEYS + 14*16] ; 14. ENC
+%endif
+
+ movdqu [OUT + 0*16], XDATA0
+
+ cmp LEN, 1*16
+ je %%done
+ jmp %%main_loop
+
+
+%%initial_3:
+ ; load plain/cipher text
+ movdqu XDATA0, [IN + 0*16]
+ movdqu XDATA1, [IN + 1*16]
+ movdqu XDATA2, [IN + 2*16]
+
+ movdqa XKEY0, [KEYS + 0*16]
+
+ movdqa XKEY_A, [KEYS + 1*16]
+
+ pxor XDATA0, XKEY0 ; 0. ARK
+ pxor XDATA1, XKEY0
+ pxor XDATA2, XKEY0
+
+ movdqa XKEY2, [KEYS + 2*16]
+
+ AES XDATA0, XKEY_A ; 1. ENC
+ AES XDATA1, XKEY_A
+ AES XDATA2, XKEY_A
+
+ movdqa XKEY_A, [KEYS + 3*16]
+ mov IDX, 3*16
+
+ AES XDATA0, XKEY2 ; 2. ENC
+ AES XDATA1, XKEY2
+ AES XDATA2, XKEY2
+
+ movdqa XKEY4, [KEYS + 4*16]
+
+ AES XDATA0, XKEY_A ; 3. ENC
+ AES XDATA1, XKEY_A
+ AES XDATA2, XKEY_A
+
+ movdqa XKEY_A, [KEYS + 5*16]
+
+ AES XDATA0, XKEY4 ; 4. ENC
+ AES XDATA1, XKEY4
+ AES XDATA2, XKEY4
+
+ movdqa XKEY6, [KEYS + 6*16]
+
+ AES XDATA0, XKEY_A ; 5. ENC
+ AES XDATA1, XKEY_A
+ AES XDATA2, XKEY_A
+
+ movdqa XKEY_A, [KEYS + 7*16]
+
+ AES XDATA0, XKEY6 ; 6. ENC
+ AES XDATA1, XKEY6
+ AES XDATA2, XKEY6
+
+ movdqa XKEY_B, [KEYS + 8*16]
+
+ AES XDATA0, XKEY_A ; 7. ENC
+ AES XDATA1, XKEY_A
+ AES XDATA2, XKEY_A
+
+ movdqa XKEY_A, [KEYS + 9*16]
+
+ AES XDATA0, XKEY_B ; 8. ENC
+ AES XDATA1, XKEY_B
+ AES XDATA2, XKEY_B
+
+ movdqa XKEY_B, [KEYS + 10*16]
+
+ AES XDATA0, XKEY_A ; 9. ENC
+ AES XDATA1, XKEY_A
+ AES XDATA2, XKEY_A
+
+%if %%NROUNDS >= 12
+ movdqa XKEY_A, [KEYS + 11*16]
+
+ AES XDATA0, XKEY_B ; 10. ENC
+ AES XDATA1, XKEY_B
+ AES XDATA2, XKEY_B
+
+ movdqa XKEY_B, [KEYS + 12*16]
+
+ AES XDATA0, XKEY_A ; 11. ENC
+ AES XDATA1, XKEY_A
+ AES XDATA2, XKEY_A
+
+%endif
+
+%if %%NROUNDS == 14
+ movdqa XKEY_A, [KEYS + 13*16]
+
+ AES XDATA0, XKEY_B ; 12. ENC
+ AES XDATA1, XKEY_B
+ AES XDATA2, XKEY_B
+
+ movdqa XKEY_B, [KEYS + 14*16]
+
+ AES XDATA0, XKEY_A ; 13. ENC
+ AES XDATA1, XKEY_A
+ AES XDATA2, XKEY_A
+%endif
+
+ AES_LAST XDATA0, XKEY_B ; 10/12/14. ENC (depending on key size)
+ AES_LAST XDATA1, XKEY_B
+ AES_LAST XDATA2, XKEY_B
+
+ movdqu [OUT + 0*16], XDATA0
+ movdqu [OUT + 1*16], XDATA1
+ movdqu [OUT + 2*16], XDATA2
+
+ cmp LEN, 3*16
+ je %%done
+ jmp %%main_loop
+
+
+ align 16
+%%initial_4:
+ ; load plain/cipher text
+ movdqu XDATA0, [IN + 0*16]
+ movdqu XDATA1, [IN + 1*16]
+ movdqu XDATA2, [IN + 2*16]
+ movdqu XDATA3, [IN + 3*16]
+
+ movdqa XKEY0, [KEYS + 0*16]
+
+ movdqa XKEY_A, [KEYS + 1*16]
+
+ pxor XDATA0, XKEY0 ; 0. ARK
+ pxor XDATA1, XKEY0
+ pxor XDATA2, XKEY0
+ pxor XDATA3, XKEY0
+
+ movdqa XKEY2, [KEYS + 2*16]
+
+ AES XDATA0, XKEY_A ; 1. ENC
+ AES XDATA1, XKEY_A
+ AES XDATA2, XKEY_A
+ AES XDATA3, XKEY_A
+
+ movdqa XKEY_A, [KEYS + 3*16]
+
+ mov IDX, 4*16
+
+ AES XDATA0, XKEY2 ; 2. ENC
+ AES XDATA1, XKEY2
+ AES XDATA2, XKEY2
+ AES XDATA3, XKEY2
+
+ movdqa XKEY4, [KEYS + 4*16]
+
+ AES XDATA0, XKEY_A ; 3. ENC
+ AES XDATA1, XKEY_A
+ AES XDATA2, XKEY_A
+ AES XDATA3, XKEY_A
+
+ movdqa XKEY_A, [KEYS + 5*16]
+
+ AES XDATA0, XKEY4 ; 4. ENC
+ AES XDATA1, XKEY4
+ AES XDATA2, XKEY4
+ AES XDATA3, XKEY4
+
+ movdqa XKEY6, [KEYS + 6*16]
+
+ AES XDATA0, XKEY_A ; 5. ENC
+ AES XDATA1, XKEY_A
+ AES XDATA2, XKEY_A
+ AES XDATA3, XKEY_A
+
+ movdqa XKEY_A, [KEYS + 7*16]
+
+ AES XDATA0, XKEY6 ; 6. ENC
+ AES XDATA1, XKEY6
+ AES XDATA2, XKEY6
+ AES XDATA3, XKEY6
+
+ movdqa XKEY_B, [KEYS + 8*16]
+
+ AES XDATA0, XKEY_A ; 7. ENC
+ AES XDATA1, XKEY_A
+ AES XDATA2, XKEY_A
+ AES XDATA3, XKEY_A
+
+ movdqa XKEY_A, [KEYS + 9*16]
+
+ AES XDATA0, XKEY_B ; 8. ENC
+ AES XDATA1, XKEY_B
+ AES XDATA2, XKEY_B
+ AES XDATA3, XKEY_B
+
+ movdqa XKEY_B, [KEYS + 10*16]
+
+ AES XDATA0, XKEY_A ; 9. ENC
+ AES XDATA1, XKEY_A
+ AES XDATA2, XKEY_A
+ AES XDATA3, XKEY_A
+
+%if %%NROUNDS >= 12
+ movdqa XKEY_A, [KEYS + 11*16]
+
+ AES XDATA0, XKEY_B ; 10. ENC
+ AES XDATA1, XKEY_B
+ AES XDATA2, XKEY_B
+ AES XDATA3, XKEY_B
+
+ movdqa XKEY_B, [KEYS + 12*16]
+
+ AES XDATA0, XKEY_A ; 11. ENC
+ AES XDATA1, XKEY_A
+ AES XDATA2, XKEY_A
+ AES XDATA3, XKEY_A
+%endif
+
+%if %%NROUNDS == 14
+ movdqa XKEY_A, [KEYS + 13*16]
+
+ AES XDATA0, XKEY_B ; 12. ENC
+ AES XDATA1, XKEY_B
+ AES XDATA2, XKEY_B
+ AES XDATA3, XKEY_B
+
+ movdqa XKEY_B, [KEYS + 14*16]
+
+ AES XDATA0, XKEY_A ; 13. ENC
+ AES XDATA1, XKEY_A
+ AES XDATA2, XKEY_A
+ AES XDATA3, XKEY_A
+%endif
+
+ AES_LAST XDATA0, XKEY_B ; 10/12/14. ENC (depending on key size)
+ AES_LAST XDATA1, XKEY_B
+ AES_LAST XDATA2, XKEY_B
+ AES_LAST XDATA3, XKEY_B
+
+ movdqu [OUT + 0*16], XDATA0
+ movdqu [OUT + 1*16], XDATA1
+ movdqu [OUT + 2*16], XDATA2
+ movdqu [OUT + 3*16], XDATA3
+
+ cmp LEN, 4*16
+ jz %%done
+ jmp %%main_loop
+
+ align 16
+%%main_loop:
+ ; load plain/cipher text
+ movdqu XDATA0, [IN + IDX + 0*16]
+ movdqu XDATA1, [IN + IDX + 1*16]
+ movdqu XDATA2, [IN + IDX + 2*16]
+ movdqu XDATA3, [IN + IDX + 3*16]
+
+ movdqa XKEY_A, [KEYS + 1*16]
+
+ pxor XDATA0, XKEY0 ; 0. ARK
+ pxor XDATA1, XKEY0
+ pxor XDATA2, XKEY0
+ pxor XDATA3, XKEY0
+
+ add IDX, 4*16
+
+ AES XDATA0, XKEY_A ; 1. ENC
+ AES XDATA1, XKEY_A
+ AES XDATA2, XKEY_A
+ AES XDATA3, XKEY_A
+
+ movdqa XKEY_A, [KEYS + 3*16]
+
+ AES XDATA0, XKEY2 ; 2. ENC
+ AES XDATA1, XKEY2
+ AES XDATA2, XKEY2
+ AES XDATA3, XKEY2
+
+ AES XDATA0, XKEY_A ; 3. ENC
+ AES XDATA1, XKEY_A
+ AES XDATA2, XKEY_A
+ AES XDATA3, XKEY_A
+
+ movdqa XKEY_A, [KEYS + 5*16]
+
+ AES XDATA0, XKEY4 ; 4. ENC
+ AES XDATA1, XKEY4
+ AES XDATA2, XKEY4
+ AES XDATA3, XKEY4
+
+ AES XDATA0, XKEY_A ; 5. ENC
+ AES XDATA1, XKEY_A
+ AES XDATA2, XKEY_A
+ AES XDATA3, XKEY_A
+
+ movdqa XKEY_A, [KEYS + 7*16]
+
+ AES XDATA0, XKEY6 ; 6. ENC
+ AES XDATA1, XKEY6
+ AES XDATA2, XKEY6
+ AES XDATA3, XKEY6
+
+ movdqa XKEY_B, [KEYS + 8*16]
+
+ AES XDATA0, XKEY_A ; 7. ENC
+ AES XDATA1, XKEY_A
+ AES XDATA2, XKEY_A
+ AES XDATA3, XKEY_A
+
+ movdqa XKEY_A, [KEYS + 9*16]
+
+ AES XDATA0, XKEY_B ; 8. ENC
+ AES XDATA1, XKEY_B
+ AES XDATA2, XKEY_B
+ AES XDATA3, XKEY_B
+
+ movdqa XKEY_B, [KEYS + 10*16]
+
+ AES XDATA0, XKEY_A ; 9. ENC
+ AES XDATA1, XKEY_A
+ AES XDATA2, XKEY_A
+ AES XDATA3, XKEY_A
+
+%if %%NROUNDS >= 12
+ movdqa XKEY_A, [KEYS + 11*16]
+
+ AES XDATA0, XKEY_B ; 10. ENC
+ AES XDATA1, XKEY_B
+ AES XDATA2, XKEY_B
+ AES XDATA3, XKEY_B
+
+ movdqa XKEY_B, [KEYS + 12*16]
+
+ AES XDATA0, XKEY_A ; 11. ENC
+ AES XDATA1, XKEY_A
+ AES XDATA2, XKEY_A
+ AES XDATA3, XKEY_A
+%endif
+
+%if %%NROUNDS == 14
+ movdqa XKEY_A, [KEYS + 13*16]
+
+ AES XDATA0, XKEY_B ; 12. ENC
+ AES XDATA1, XKEY_B
+ AES XDATA2, XKEY_B
+ AES XDATA3, XKEY_B
+
+ movdqa XKEY_B, [KEYS + 14*16]
+
+ AES XDATA0, XKEY_A ; 13. ENC
+ AES XDATA1, XKEY_A
+ AES XDATA2, XKEY_A
+ AES XDATA3, XKEY_A
+%endif
+
+ AES_LAST XDATA0, XKEY_B ; 10/12/14. ENC (depending on key size)
+ AES_LAST XDATA1, XKEY_B
+ AES_LAST XDATA2, XKEY_B
+ AES_LAST XDATA3, XKEY_B
+
+ movdqu [OUT + IDX + 0*16 - 4*16], XDATA0
+ movdqu [OUT + IDX + 1*16 - 4*16], XDATA1
+ movdqu [OUT + IDX + 2*16 - 4*16], XDATA2
+ movdqu [OUT + IDX + 3*16 - 4*16], XDATA3
+
+ cmp IDX, LEN
+ jne %%main_loop
+
+%%done:
+
+ ret
+
+%endmacro
+
+align 16
+MKGLOBAL(AES_ECB_ENC_128,function,internal)
+AES_ECB_ENC_128:
+
+ AES_ECB 10, ENC
+
+align 16
+MKGLOBAL(AES_ECB_ENC_192,function,internal)
+AES_ECB_ENC_192:
+
+ AES_ECB 12, ENC
+
+align 16
+MKGLOBAL(AES_ECB_ENC_256,function,internal)
+AES_ECB_ENC_256:
+
+ AES_ECB 14, ENC
+
+align 16
+MKGLOBAL(AES_ECB_DEC_128,function,internal)
+AES_ECB_DEC_128:
+
+ AES_ECB 10, DEC
+
+align 16
+MKGLOBAL(AES_ECB_DEC_192,function,internal)
+AES_ECB_DEC_192:
+
+ AES_ECB 12, DEC
+
+align 16
+MKGLOBAL(AES_ECB_DEC_256,function,internal)
+AES_ECB_DEC_256:
+
+ AES_ECB 14, DEC
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/sse/aes_xcbc_mac_128_x4.asm b/src/spdk/intel-ipsec-mb/sse/aes_xcbc_mac_128_x4.asm
new file mode 100644
index 000000000..afbb38512
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/sse/aes_xcbc_mac_128_x4.asm
@@ -0,0 +1,303 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+;;; routine to do 128 bit AES XCBC
+;;; process 4 buffers at a time, single data structure as input
+;;; Updates In pointer at end
+
+;; clobbers all registers except for ARG1 and rbp
+
+%include "include/os.asm"
+%include "mb_mgr_datastruct.asm"
+
+
+%ifndef AES_XCBC_X4
+%define AES_XCBC_X4 aes_xcbc_mac_128_x4
+%endif
+
+%define MOVDQ movdqu ;; assume buffers not aligned
+%macro pxor2 2
+ MOVDQ XTMP, %2
+ pxor %1, XTMP
+%endm
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; struct AES_XCBC_ARGS_x8 {
+;; void* in[8];
+;; UINT128* keys[8];
+;; UINT128 ICV[8];
+;; }
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; void aes_xcbc_mac_128_x4(AES_XCBC_ARGS_x8 *args, UINT64 len);
+;; arg 1: ARG : addr of AES_XCBC_ARGS_x8 structure
+;; arg 2: LEN : len (in units of bytes)
+
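Per lane, the computation below reduces to plain CBC-MAC chaining over full
16-byte blocks, as in this reference sketch (aes128_encrypt_block is a
placeholder for any single-block AES-128 encrypt, not a library function; the
RFC 3566 final-block treatment with K2/K3 is not part of this routine):

    #include <stdint.h>

    /* placeholder, not a library function */
    void aes128_encrypt_block(uint8_t out[16], const uint8_t in[16],
                              const void *expanded_keys);

    static void xcbc_mac_lane(uint8_t icv[16], const uint8_t *msg,
                              uint64_t len, const void *k1_expanded)
    {
            uint8_t blk[16];

            for (uint64_t off = 0; off < len; off += 16) {
                    for (int i = 0; i < 16; i++)
                            blk[i] = icv[i] ^ msg[off + i];  /* ICV XOR M_i */
                    aes128_encrypt_block(icv, blk, k1_expanded); /* new ICV */
            }
    }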
+%ifdef LINUX
+%define ARG rdi
+%define LEN rsi
+%define REG3 rcx
+%define REG4 rdx
+%else
+%define ARG rcx
+%define LEN rdx
+%define REG3 rsi
+%define REG4 rdi
+%endif
+
+%define IDX rax
+
+%define IN0 r8
+%define KEYS0 rbx
+%define OUT0 r9
+
+%define IN1 r10
+%define KEYS1 REG3
+%define OUT1 r11
+
+%define IN2 r12
+%define KEYS2 REG4
+%define OUT2 r13
+
+%define IN3 r14
+%define KEYS3 rbp
+%define OUT3 r15
+
+
+%define XDATA0 xmm0
+%define XDATA1 xmm1
+%define XDATA2 xmm2
+%define XDATA3 xmm3
+
+%define XKEY0_3 xmm4
+%define XKEY0_6 [KEYS0 + 16*6]
+%define XTMP xmm5
+%define XKEY0_9 xmm6
+
+%define XKEY1_3 xmm7
+%define XKEY1_6 xmm8
+%define XKEY1_9 xmm9
+
+%define XKEY2_3 xmm10
+%define XKEY2_6 xmm11
+%define XKEY2_9 xmm12
+
+%define XKEY3_3 xmm13
+%define XKEY3_6 xmm14
+%define XKEY3_9 xmm15
+
+section .text
+
+MKGLOBAL(AES_XCBC_X4,function,internal)
+AES_XCBC_X4:
+
+ push rbp
+
+ mov IDX, 16
+
+ mov IN0, [ARG + _aesxcbcarg_in + 8*0]
+ mov IN1, [ARG + _aesxcbcarg_in + 8*1]
+ mov IN2, [ARG + _aesxcbcarg_in + 8*2]
+ mov IN3, [ARG + _aesxcbcarg_in + 8*3]
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+ MOVDQ XDATA0, [IN0] ; load first block of plain text
+ MOVDQ XDATA1, [IN1] ; load first block of plain text
+ MOVDQ XDATA2, [IN2] ; load first block of plain text
+ MOVDQ XDATA3, [IN3] ; load first block of plain text
+
+ mov KEYS0, [ARG + _aesxcbcarg_keys + 8*0]
+ mov KEYS1, [ARG + _aesxcbcarg_keys + 8*1]
+ mov KEYS2, [ARG + _aesxcbcarg_keys + 8*2]
+ mov KEYS3, [ARG + _aesxcbcarg_keys + 8*3]
+
+ pxor XDATA0, [ARG + _aesxcbcarg_ICV + 16*0] ; plaintext XOR ICV
+ pxor XDATA1, [ARG + _aesxcbcarg_ICV + 16*1] ; plaintext XOR ICV
+ pxor XDATA2, [ARG + _aesxcbcarg_ICV + 16*2] ; plaintext XOR ICV
+ pxor XDATA3, [ARG + _aesxcbcarg_ICV + 16*3] ; plaintext XOR ICV
+
+ pxor XDATA0, [KEYS0 + 16*0] ; 0. ARK
+ pxor XDATA1, [KEYS1 + 16*0] ; 0. ARK
+ pxor XDATA2, [KEYS2 + 16*0] ; 0. ARK
+ pxor XDATA3, [KEYS3 + 16*0] ; 0. ARK
+
+ aesenc XDATA0, [KEYS0 + 16*1] ; 1. ENC
+ aesenc XDATA1, [KEYS1 + 16*1] ; 1. ENC
+ aesenc XDATA2, [KEYS2 + 16*1] ; 1. ENC
+ aesenc XDATA3, [KEYS3 + 16*1] ; 1. ENC
+
+ aesenc XDATA0, [KEYS0 + 16*2] ; 2. ENC
+ aesenc XDATA1, [KEYS1 + 16*2] ; 2. ENC
+ aesenc XDATA2, [KEYS2 + 16*2] ; 2. ENC
+ aesenc XDATA3, [KEYS3 + 16*2] ; 2. ENC
+
+ movdqa XKEY0_3, [KEYS0 + 16*3] ; load round 3 key
+ movdqa XKEY1_3, [KEYS1 + 16*3] ; load round 3 key
+ movdqa XKEY2_3, [KEYS2 + 16*3] ; load round 3 key
+ movdqa XKEY3_3, [KEYS3 + 16*3] ; load round 3 key
+
+ aesenc XDATA0, XKEY0_3 ; 3. ENC
+ aesenc XDATA1, XKEY1_3 ; 3. ENC
+ aesenc XDATA2, XKEY2_3 ; 3. ENC
+ aesenc XDATA3, XKEY3_3 ; 3. ENC
+
+ aesenc XDATA0, [KEYS0 + 16*4] ; 4. ENC
+ aesenc XDATA1, [KEYS1 + 16*4] ; 4. ENC
+ aesenc XDATA2, [KEYS2 + 16*4] ; 4. ENC
+ aesenc XDATA3, [KEYS3 + 16*4] ; 4. ENC
+
+ aesenc XDATA0, [KEYS0 + 16*5] ; 5. ENC
+ aesenc XDATA1, [KEYS1 + 16*5] ; 5. ENC
+ aesenc XDATA2, [KEYS2 + 16*5] ; 5. ENC
+ aesenc XDATA3, [KEYS3 + 16*5] ; 5. ENC
+
+ movdqa XKEY1_6, [KEYS1 + 16*6] ; load round 6 key
+ movdqa XKEY2_6, [KEYS2 + 16*6] ; load round 6 key
+ movdqa XKEY3_6, [KEYS3 + 16*6] ; load round 6 key
+
+ aesenc XDATA0, XKEY0_6 ; 6. ENC
+ aesenc XDATA1, XKEY1_6 ; 6. ENC
+ aesenc XDATA2, XKEY2_6 ; 6. ENC
+ aesenc XDATA3, XKEY3_6 ; 6. ENC
+
+ aesenc XDATA0, [KEYS0 + 16*7] ; 7. ENC
+ aesenc XDATA1, [KEYS1 + 16*7] ; 7. ENC
+ aesenc XDATA2, [KEYS2 + 16*7] ; 7. ENC
+ aesenc XDATA3, [KEYS3 + 16*7] ; 7. ENC
+
+ aesenc XDATA0, [KEYS0 + 16*8] ; 8. ENC
+ aesenc XDATA1, [KEYS1 + 16*8] ; 8. ENC
+ aesenc XDATA2, [KEYS2 + 16*8] ; 8. ENC
+ aesenc XDATA3, [KEYS3 + 16*8] ; 8. ENC
+
+ movdqa XKEY0_9, [KEYS0 + 16*9] ; load round 9 key
+ movdqa XKEY1_9, [KEYS1 + 16*9] ; load round 9 key
+ movdqa XKEY2_9, [KEYS2 + 16*9] ; load round 9 key
+ movdqa XKEY3_9, [KEYS3 + 16*9] ; load round 9 key
+
+ aesenc XDATA0, XKEY0_9 ; 9. ENC
+ aesenc XDATA1, XKEY1_9 ; 9. ENC
+ aesenc XDATA2, XKEY2_9 ; 9. ENC
+ aesenc XDATA3, XKEY3_9 ; 9. ENC
+
+ aesenclast XDATA0, [KEYS0 + 16*10] ; 10. ENC
+ aesenclast XDATA1, [KEYS1 + 16*10] ; 10. ENC
+ aesenclast XDATA2, [KEYS2 + 16*10] ; 10. ENC
+ aesenclast XDATA3, [KEYS3 + 16*10] ; 10. ENC
+
+ cmp LEN, IDX
+ je done
+
+main_loop:
+ pxor2 XDATA0, [IN0 + IDX] ; plaintext XOR ICV
+ pxor2 XDATA1, [IN1 + IDX] ; plaintext XOR ICV
+ pxor2 XDATA2, [IN2 + IDX] ; plaintext XOR ICV
+ pxor2 XDATA3, [IN3 + IDX] ; plaintext XOR ICV
+
+ pxor XDATA0, [KEYS0 + 16*0] ; 0. ARK
+ pxor XDATA1, [KEYS1 + 16*0] ; 0. ARK
+ pxor XDATA2, [KEYS2 + 16*0] ; 0. ARK
+ pxor XDATA3, [KEYS3 + 16*0] ; 0. ARK
+
+ aesenc XDATA0, [KEYS0 + 16*1] ; 1. ENC
+ aesenc XDATA1, [KEYS1 + 16*1] ; 1. ENC
+ aesenc XDATA2, [KEYS2 + 16*1] ; 1. ENC
+ aesenc XDATA3, [KEYS3 + 16*1] ; 1. ENC
+
+ aesenc XDATA0, [KEYS0 + 16*2] ; 2. ENC
+ aesenc XDATA1, [KEYS1 + 16*2] ; 2. ENC
+ aesenc XDATA2, [KEYS2 + 16*2] ; 2. ENC
+ aesenc XDATA3, [KEYS3 + 16*2] ; 2. ENC
+
+ aesenc XDATA0, XKEY0_3 ; 3. ENC
+ aesenc XDATA1, XKEY1_3 ; 3. ENC
+ aesenc XDATA2, XKEY2_3 ; 3. ENC
+ aesenc XDATA3, XKEY3_3 ; 3. ENC
+
+ aesenc XDATA0, [KEYS0 + 16*4] ; 4. ENC
+ aesenc XDATA1, [KEYS1 + 16*4] ; 4. ENC
+ aesenc XDATA2, [KEYS2 + 16*4] ; 4. ENC
+ aesenc XDATA3, [KEYS3 + 16*4] ; 4. ENC
+
+ aesenc XDATA0, [KEYS0 + 16*5] ; 5. ENC
+ aesenc XDATA1, [KEYS1 + 16*5] ; 5. ENC
+ aesenc XDATA2, [KEYS2 + 16*5] ; 5. ENC
+ aesenc XDATA3, [KEYS3 + 16*5] ; 5. ENC
+
+ aesenc XDATA0, XKEY0_6 ; 6. ENC
+ aesenc XDATA1, XKEY1_6 ; 6. ENC
+ aesenc XDATA2, XKEY2_6 ; 6. ENC
+ aesenc XDATA3, XKEY3_6 ; 6. ENC
+
+ aesenc XDATA0, [KEYS0 + 16*7] ; 7. ENC
+ aesenc XDATA1, [KEYS1 + 16*7] ; 7. ENC
+ aesenc XDATA2, [KEYS2 + 16*7] ; 7. ENC
+ aesenc XDATA3, [KEYS3 + 16*7] ; 7. ENC
+
+ aesenc XDATA0, [KEYS0 + 16*8] ; 8. ENC
+ aesenc XDATA1, [KEYS1 + 16*8] ; 8. ENC
+ aesenc XDATA2, [KEYS2 + 16*8] ; 8. ENC
+ aesenc XDATA3, [KEYS3 + 16*8] ; 8. ENC
+
+ aesenc XDATA0, XKEY0_9 ; 9. ENC
+ aesenc XDATA1, XKEY1_9 ; 9. ENC
+ aesenc XDATA2, XKEY2_9 ; 9. ENC
+ aesenc XDATA3, XKEY3_9 ; 9. ENC
+
+ aesenclast XDATA0, [KEYS0 + 16*10] ; 10. ENC
+ aesenclast XDATA1, [KEYS1 + 16*10] ; 10. ENC
+ aesenclast XDATA2, [KEYS2 + 16*10] ; 10. ENC
+ aesenclast XDATA3, [KEYS3 + 16*10] ; 10. ENC
+
+ add IDX, 16
+ cmp LEN, IDX
+ jne main_loop
+
+done:
+ ;; update ICV
+ movdqa [ARG + _aesxcbcarg_ICV + 16*0], XDATA0
+ movdqa [ARG + _aesxcbcarg_ICV + 16*1], XDATA1
+ movdqa [ARG + _aesxcbcarg_ICV + 16*2], XDATA2
+ movdqa [ARG + _aesxcbcarg_ICV + 16*3], XDATA3
+
+ ;; update IN
+ add IN0, LEN
+ mov [ARG + _aesxcbcarg_in + 8*0], IN0
+ add IN1, LEN
+ mov [ARG + _aesxcbcarg_in + 8*1], IN1
+ add IN2, LEN
+ mov [ARG + _aesxcbcarg_in + 8*2], IN2
+ add IN3, LEN
+ mov [ARG + _aesxcbcarg_in + 8*3], IN3
+
+ pop rbp
+
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/sse/gcm128_sse.asm b/src/spdk/intel-ipsec-mb/sse/gcm128_sse.asm
new file mode 100644
index 000000000..b8d3ea963
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/sse/gcm128_sse.asm
@@ -0,0 +1,30 @@
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; Copyright(c) 2011-2018 Intel Corporation All rights reserved.
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions
+; are met:
+; * Redistributions of source code must retain the above copyright
+; notice, this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright
+; notice, this list of conditions and the following disclaimer in
+; the documentation and/or other materials provided with the
+; distribution.
+; * Neither the name of Intel Corporation nor the names of its
+; contributors may be used to endorse or promote products derived
+; from this software without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+%define GCM128_MODE 1
+%include "sse/gcm_sse.asm"
diff --git a/src/spdk/intel-ipsec-mb/sse/gcm192_sse.asm b/src/spdk/intel-ipsec-mb/sse/gcm192_sse.asm
new file mode 100644
index 000000000..68e995a06
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/sse/gcm192_sse.asm
@@ -0,0 +1,31 @@
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; Copyright(c) 2017-2018, Intel Corporation All rights reserved.
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions
+; are met:
+; * Redistributions of source code must retain the above copyright
+; notice, this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright
+; notice, this list of conditions and the following disclaimer in
+; the documentation and/or other materials provided with the
+; distribution.
+; * Neither the name of Intel Corporation nor the names of its
+; contributors may be used to endorse or promote products derived
+; from this software without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+%define GCM192_MODE 1
+%include "sse/gcm_sse.asm"
diff --git a/src/spdk/intel-ipsec-mb/sse/gcm256_sse.asm b/src/spdk/intel-ipsec-mb/sse/gcm256_sse.asm
new file mode 100644
index 000000000..3898411a1
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/sse/gcm256_sse.asm
@@ -0,0 +1,31 @@
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; Copyright(c) 2011-2018 Intel Corporation All rights reserved.
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions
+; are met:
+; * Redistributions of source code must retain the above copyright
+; notice, this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright
+; notice, this list of conditions and the following disclaimer in
+; the documentation and/or other materials provided with the
+; distribution.
+; * Neither the name of Intel Corporation nor the names of its
+; contributors may be used to endorse or promote products derived
+; from this software without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+%define GCM256_MODE 1
+%include "sse/gcm_sse.asm"
diff --git a/src/spdk/intel-ipsec-mb/sse/gcm_sse.asm b/src/spdk/intel-ipsec-mb/sse/gcm_sse.asm
new file mode 100644
index 000000000..d053da51f
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/sse/gcm_sse.asm
@@ -0,0 +1,2586 @@
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; Copyright(c) 2011-2019 Intel Corporation All rights reserved.
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions
+; are met:
+; * Redistributions of source code must retain the above copyright
+; notice, this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright
+; notice, this list of conditions and the following disclaimer in
+; the documentation and/or other materials provided with the
+; distribution.
+; * Neither the name of Intel Corporation nor the names of its
+; contributors may be used to endorse or promote products derived
+; from this software without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;
+; Authors:
+; Erdinc Ozturk
+; Vinodh Gopal
+; James Guilford
+;
+;
+; References:
+; This code was derived and highly optimized from the code described in paper:
+; Vinodh Gopal et. al. Optimized Galois-Counter-Mode Implementation on Intel Architecture Processors. August, 2010
+;
+; For the shift-based reductions used in this code, we used the method described in paper:
+; Shay Gueron, Michael E. Kounavis. Intel Carry-Less Multiplication Instruction and its Usage for Computing the GCM Mode. January, 2010.
+;
+;
+;
+;
+; Assumptions:
+;
+;
+;
+; iv:
+; 0 1 2 3
+; 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+; +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+; | Salt (From the SA) |
+; +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+; | Initialization Vector |
+; | (This is the sequence number from IPSec header) |
+; +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+; | 0x1 |
+; +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+;
+;
+;
+; AAD:
+; AAD will be padded with 0 to the next 16-byte multiple
+; for example, assume AAD is a u32 vector
+;
+; if AAD is 8 bytes:
+; AAD[3] = {A0, A1};
+; padded AAD in xmm register = {A1 A0 0 0}
+;
+; 0 1 2 3
+; 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+; +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+; | SPI (A1) |
+; +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+; | 32-bit Sequence Number (A0) |
+; +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+; | 0x0 |
+; +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+;
+; AAD Format with 32-bit Sequence Number
+;
+; if AAD is 12 bytes:
+; AAD[3] = {A0, A1, A2};
+; padded AAD in xmm register = {A2 A1 A0 0}
+;
+; 0 1 2 3
+; 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+; +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+; | SPI (A2) |
+; +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+; | 64-bit Extended Sequence Number {A1,A0} |
+; | |
+; +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+; | 0x0 |
+; +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+;
+; AAD Format with 64-bit Extended Sequence Number
+;
+;
+; aadLen:
+; Must be a multiple of 4 bytes, as per the definition in the spec.
+; The code additionally supports aadLen of any length.
+;
+; TLen:
+; from the definition of the spec, TLen can only be 8, 12 or 16 bytes.
+;
+; poly = x^128 + x^127 + x^126 + x^121 + 1
+; Throughout the code, one-tab and two-tab indentations are used: one tab is for the GHASH part, two tabs are for the AES part.
+;
+
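For illustration, the 16-byte pre-counter block laid out in the iv diagram
above can be packed by a C caller roughly as follows (the salt and IV sources
are assumptions about the caller's SA, not library API):

    #include <stdint.h>
    #include <string.h>

    static void build_gcm_j0(uint8_t j0[16], const uint8_t salt[4],
                             const uint8_t iv8[8])
    {
            memcpy(j0, salt, 4);       /* Salt (from the SA)              */
            memcpy(j0 + 4, iv8, 8);    /* per-packet IV / sequence number */
            j0[12] = 0x00;             /* trailing 32-bit counter = 0x1   */
            j0[13] = 0x00;
            j0[14] = 0x00;
            j0[15] = 0x01;
    }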
+%include "include/os.asm"
+%include "include/reg_sizes.asm"
+%include "include/clear_regs.asm"
+%include "include/gcm_defines.asm"
+%include "include/gcm_keys_sse_avx.asm"
+%include "include/memcpy.asm"
+
+%ifndef GCM128_MODE
+%ifndef GCM192_MODE
+%ifndef GCM256_MODE
+%error "No GCM mode selected for gcm_sse.asm!"
+%endif
+%endif
+%endif
+
+%ifdef NO_AESNI
+%define SSE sse_no_aesni
+%else
+%define SSE sse
+%endif
+
+%ifdef GCM128_MODE
+%define FN_NAME(x,y) aes_gcm_ %+ x %+ _128 %+ y %+ SSE
+%define NROUNDS 9
+%endif
+
+%ifdef GCM192_MODE
+%define FN_NAME(x,y) aes_gcm_ %+ x %+ _192 %+ y %+ SSE
+%define NROUNDS 11
+%endif
+
+%ifdef GCM256_MODE
+%define FN_NAME(x,y) aes_gcm_ %+ x %+ _256 %+ y %+ SSE
+%define NROUNDS 13
+%endif
+
+default rel
+; 4 registers are pushed onto the stack before local storage is allocated
+%define STACK_OFFSET 8*4
+
+%define TMP2 16*0 ; Temporary storage for AES State 2 (State 1 is stored in an XMM register)
+%define TMP3 16*1 ; Temporary storage for AES State 3
+%define TMP4 16*2 ; Temporary storage for AES State 4
+%define TMP5 16*3 ; Temporary storage for AES State 5
+%define TMP6 16*4 ; Temporary storage for AES State 6
+%define TMP7 16*5 ; Temporary storage for AES State 7
+%define TMP8 16*6 ; Temporary storage for AES State 8
+
+%define LOCAL_STORAGE 16*7
+
+%ifidn __OUTPUT_FORMAT__, win64
+ %define XMM_STORAGE 16*10
+%else
+ %define XMM_STORAGE 0
+%endif
+
+%define VARIABLE_OFFSET LOCAL_STORAGE + XMM_STORAGE
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; Utility Macros
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; GHASH_MUL MACRO to implement: Data*HashKey mod (128,127,126,121,0)
+; Input: A and B (128-bits each, bit-reflected)
+; Output: C = A*B*x mod poly, (i.e. >>1 )
+; To compute GH = GH*HashKey mod poly, give HK = HashKey<<1 mod poly as input
+; GH = GH * HK * x mod poly which is equivalent to GH*HashKey mod poly.
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+%macro GHASH_MUL 7
+%define %%GH %1 ; 16 Bytes
+%define %%HK %2 ; 16 Bytes
+%define %%T1 %3
+%define %%T2 %4
+%define %%T3 %5
+%define %%T4 %6
+%define %%T5 %7
+ ; %%GH, %%HK hold the values for the two operands which are carry-less multiplied
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ; Karatsuba Method
+ movdqa %%T1, %%GH
+ pshufd %%T2, %%GH, 01001110b
+ pshufd %%T3, %%HK, 01001110b
+ pxor %%T2, %%GH ; %%T2 = (a1+a0)
+ pxor %%T3, %%HK ; %%T3 = (b1+b0)
+
+ pclmulqdq %%T1, %%HK, 0x11 ; %%T1 = a1*b1
+ pclmulqdq %%GH, %%HK, 0x00 ; %%GH = a0*b0
+ pclmulqdq %%T2, %%T3, 0x00 ; %%T2 = (a1+a0)*(b1+b0)
+ pxor %%T2, %%GH
+ pxor %%T2, %%T1 ; %%T2 = a0*b1+a1*b0
+
+ movdqa %%T3, %%T2
+ pslldq %%T3, 8 ; shift-L %%T3 2 DWs
+ psrldq %%T2, 8 ; shift-R %%T2 2 DWs
+ pxor %%GH, %%T3
+ pxor %%T1, %%T2 ; <%%T1:%%GH> holds the result of the carry-less multiplication of %%GH by %%HK
+
+
+ ;first phase of the reduction
+ movdqa %%T2, %%GH
+ movdqa %%T3, %%GH
+ movdqa %%T4, %%GH ; move %%GH into %%T2, %%T3, %%T4 in order to perform the three shifts independently
+
+	pslld	%%T2, 31		; packed left shift << 31
+	pslld	%%T3, 30		; packed left shift << 30
+	pslld	%%T4, 25		; packed left shift << 25
+ pxor %%T2, %%T3 ; xor the shifted versions
+ pxor %%T2, %%T4
+
+ movdqa %%T5, %%T2
+ psrldq %%T5, 4 ; shift-R %%T5 1 DW
+
+ pslldq %%T2, 12 ; shift-L %%T2 3 DWs
+ pxor %%GH, %%T2 ; first phase of the reduction complete
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+ ;second phase of the reduction
+	movdqa	%%T2,%%GH		; make 3 copies of %%GH (in %%T2, %%T3, %%T4) for doing three shift operations
+ movdqa %%T3,%%GH
+ movdqa %%T4,%%GH
+
+	psrld	%%T2,1			; packed right shift >> 1
+	psrld	%%T3,2			; packed right shift >> 2
+	psrld	%%T4,7			; packed right shift >> 7
+ pxor %%T2,%%T3 ; xor the shifted versions
+ pxor %%T2,%%T4
+
+ pxor %%T2, %%T5
+ pxor %%GH, %%T2
+	pxor	%%GH, %%T1		; the result is in %%GH
+
+
+%endmacro
+
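The same Karatsuba carry-less multiply can be expressed with PCLMULQDQ
intrinsics; the sketch below covers only the 128x128-bit product GHASH_MUL
forms before its shift-based reduction (illustrative, compile with PCLMUL
support, e.g. -mpclmul):

    #include <immintrin.h>

    static void clmul_karatsuba(__m128i a, __m128i b, __m128i *hi, __m128i *lo)
    {
            /* (a1^a0) and (b1^b0) via a swap of the 64-bit halves */
            __m128i a_sum = _mm_xor_si128(a, _mm_shuffle_epi32(a, 0x4e));
            __m128i b_sum = _mm_xor_si128(b, _mm_shuffle_epi32(b, 0x4e));

            __m128i t1  = _mm_clmulepi64_si128(a, b, 0x11);     /* a1*b1 */
            __m128i t0  = _mm_clmulepi64_si128(a, b, 0x00);     /* a0*b0 */
            __m128i mid = _mm_clmulepi64_si128(a_sum, b_sum, 0x00);

            mid = _mm_xor_si128(mid, _mm_xor_si128(t0, t1));   /* a0*b1 ^ a1*b0 */
            *lo = _mm_xor_si128(t0, _mm_slli_si128(mid, 8));   /* low 128 bits  */
            *hi = _mm_xor_si128(t1, _mm_srli_si128(mid, 8));   /* high 128 bits */
    }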
+
+%macro PRECOMPUTE 8
+%define %%GDATA %1
+%define %%HK %2
+%define %%T1 %3
+%define %%T2 %4
+%define %%T3 %5
+%define %%T4 %6
+%define %%T5 %7
+%define %%T6 %8
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; HashKey_i_k holds the XORed values of the low and high parts of HashKey_i
+ movdqa %%T4, %%HK
+ pshufd %%T1, %%HK, 01001110b
+ pxor %%T1, %%HK
+ movdqu [%%GDATA + HashKey_k], %%T1
+
+
+ GHASH_MUL %%T4, %%HK, %%T1, %%T2, %%T3, %%T5, %%T6 ; %%T4 = HashKey^2<<1 mod poly
+ movdqu [%%GDATA + HashKey_2], %%T4 ; [HashKey_2] = HashKey^2<<1 mod poly
+ pshufd %%T1, %%T4, 01001110b
+ pxor %%T1, %%T4
+ movdqu [%%GDATA + HashKey_2_k], %%T1
+
+ GHASH_MUL %%T4, %%HK, %%T1, %%T2, %%T3, %%T5, %%T6 ; %%T4 = HashKey^3<<1 mod poly
+ movdqu [%%GDATA + HashKey_3], %%T4
+ pshufd %%T1, %%T4, 01001110b
+ pxor %%T1, %%T4
+ movdqu [%%GDATA + HashKey_3_k], %%T1
+
+
+ GHASH_MUL %%T4, %%HK, %%T1, %%T2, %%T3, %%T5, %%T6 ; %%T4 = HashKey^4<<1 mod poly
+ movdqu [%%GDATA + HashKey_4], %%T4
+ pshufd %%T1, %%T4, 01001110b
+ pxor %%T1, %%T4
+ movdqu [%%GDATA + HashKey_4_k], %%T1
+
+ GHASH_MUL %%T4, %%HK, %%T1, %%T2, %%T3, %%T5, %%T6 ; %%T4 = HashKey^5<<1 mod poly
+ movdqu [%%GDATA + HashKey_5], %%T4
+ pshufd %%T1, %%T4, 01001110b
+ pxor %%T1, %%T4
+ movdqu [%%GDATA + HashKey_5_k], %%T1
+
+
+ GHASH_MUL %%T4, %%HK, %%T1, %%T2, %%T3, %%T5, %%T6 ; %%T4 = HashKey^6<<1 mod poly
+ movdqu [%%GDATA + HashKey_6], %%T4
+ pshufd %%T1, %%T4, 01001110b
+ pxor %%T1, %%T4
+ movdqu [%%GDATA + HashKey_6_k], %%T1
+
+ GHASH_MUL %%T4, %%HK, %%T1, %%T2, %%T3, %%T5, %%T6 ; %%T4 = HashKey^7<<1 mod poly
+ movdqu [%%GDATA + HashKey_7], %%T4
+ pshufd %%T1, %%T4, 01001110b
+ pxor %%T1, %%T4
+ movdqu [%%GDATA + HashKey_7_k], %%T1
+
+ GHASH_MUL %%T4, %%HK, %%T1, %%T2, %%T3, %%T5, %%T6 ; %%T4 = HashKey^8<<1 mod poly
+ movdqu [%%GDATA + HashKey_8], %%T4
+ pshufd %%T1, %%T4, 01001110b
+ pxor %%T1, %%T4
+ movdqu [%%GDATA + HashKey_8_k], %%T1
+
+
+%endmacro
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; READ_SMALL_DATA_INPUT: Packs xmm register with data when data input is less than 16 bytes.
+; Returns 0 if data has length 0.
+; Input: The input data (INPUT), that data's length (LENGTH).
+; Output: The packed xmm register (OUTPUT).
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+%macro READ_SMALL_DATA_INPUT 6
+%define %%OUTPUT %1 ; %%OUTPUT is an xmm register
+%define %%INPUT %2
+%define %%LENGTH %3
+%define %%END_READ_LOCATION %4 ; All this and the lower inputs are temp registers
+%define %%COUNTER %5
+%define %%TMP1 %6
+
+ pxor %%OUTPUT, %%OUTPUT
+ mov %%COUNTER, %%LENGTH
+ mov %%END_READ_LOCATION, %%INPUT
+ add %%END_READ_LOCATION, %%LENGTH
+ xor %%TMP1, %%TMP1
+
+
+ cmp %%COUNTER, 8
+ jl %%_byte_loop_2
+	pinsrq	%%OUTPUT, [%%INPUT], 0	; Read in 8 bytes if they exist
+ je %%_done
+
+ sub %%COUNTER, 8
+
+%%_byte_loop_1: ;Read in data 1 byte at a time while data is left
+ shl %%TMP1, 8 ;This loop handles when 8 bytes were already read in
+ dec %%END_READ_LOCATION
+ mov BYTE(%%TMP1), BYTE [%%END_READ_LOCATION]
+ dec %%COUNTER
+ jg %%_byte_loop_1
+ pinsrq %%OUTPUT, %%TMP1, 1
+ jmp %%_done
+
+%%_byte_loop_2: ;Read in data 1 byte at a time while data is left
+ cmp %%COUNTER, 0
+ je %%_done
+ shl %%TMP1, 8 ;This loop handles when no bytes were already read in
+ dec %%END_READ_LOCATION
+ mov BYTE(%%TMP1), BYTE [%%END_READ_LOCATION]
+ dec %%COUNTER
+ jg %%_byte_loop_2
+ pinsrq %%OUTPUT, %%TMP1, 0
+%%_done:
+
+%endmacro ; READ_SMALL_DATA_INPUT
+
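A portable C equivalent of what the macro above achieves is a bounded copy into
a zeroed 16-byte buffer followed by an unaligned load (sketch only, not part of
the library):

    #include <emmintrin.h>
    #include <stdint.h>
    #include <string.h>

    static __m128i load_partial_block(const void *src, size_t len /* < 16 */)
    {
            uint8_t tmp[16] = {0};

            memcpy(tmp, src, len);  /* never reads past the end of src */
            return _mm_loadu_si128((const __m128i *)tmp);
    }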
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; CALC_AAD_HASH: Calculates the hash of the data which will not be encrypted.
+; Input: The input data (A_IN), that data's length (A_LEN), and the hash key (HASH_KEY).
+; Output: The hash of the data (AAD_HASH).
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+%macro CALC_AAD_HASH 15
+%define %%A_IN %1
+%define %%A_LEN %2
+%define %%AAD_HASH %3
+%define %%GDATA_KEY %4
+%define	%%XTMP0	%5	; xmm temp reg 0
+%define	%%XTMP1	%6	; xmm temp reg 1
+%define	%%XTMP2	%7	; xmm temp reg 2
+%define	%%XTMP3	%8	; xmm temp reg 3
+%define	%%XTMP4	%9	; xmm temp reg 4
+%define %%XTMP5 %10 ; xmm temp reg 5
+%define %%T1 %11 ; temp reg 1
+%define %%T2 %12
+%define %%T3 %13
+%define %%T4 %14
+%define %%T5 %15 ; temp reg 5
+
+
+ mov %%T1, %%A_IN ; T1 = AAD
+ mov %%T2, %%A_LEN ; T2 = aadLen
+ pxor %%AAD_HASH, %%AAD_HASH
+
+%%_get_AAD_loop128:
+ cmp %%T2, 128
+ jl %%_exit_AAD_loop128
+
+ movdqu %%XTMP0, [%%T1 + 16*0]
+ pshufb %%XTMP0, [rel SHUF_MASK]
+
+ pxor %%XTMP0, %%AAD_HASH
+
+ movdqu %%XTMP5, [%%GDATA_KEY + HashKey_8]
+ movdqa %%XTMP1, %%XTMP0
+ movdqa %%XTMP2, %%XTMP0
+ movdqa %%XTMP3, %%XTMP0
+ movdqa %%XTMP4, %%XTMP0
+ pclmulqdq %%XTMP1, %%XTMP5, 0x11 ; %%T1 = a1*b1
+ pclmulqdq %%XTMP2, %%XTMP5, 0x00 ; %%T2 = a0*b0
+ pclmulqdq %%XTMP3, %%XTMP5, 0x01 ; %%T3 = a1*b0
+ pclmulqdq %%XTMP4, %%XTMP5, 0x10 ; %%T4 = a0*b1
+ pxor %%XTMP3, %%XTMP4 ; %%T3 = a1*b0 + a0*b1
+
+%assign i 1
+%assign j 7
+%rep 7
+ movdqu %%XTMP0, [%%T1 + 16*i]
+ pshufb %%XTMP0, [rel SHUF_MASK]
+
+ movdqu %%XTMP5, [%%GDATA_KEY + HashKey_ %+ j]
+ movdqa %%XTMP4, %%XTMP0
+ pclmulqdq %%XTMP4, %%XTMP5, 0x11 ; %%T1 = T1 + a1*b1
+ pxor %%XTMP1, %%XTMP4
+
+ movdqa %%XTMP4, %%XTMP0
+ pclmulqdq %%XTMP4, %%XTMP5, 0x00 ; %%T2 = T2 + a0*b0
+ pxor %%XTMP2, %%XTMP4
+
+ movdqa %%XTMP4, %%XTMP0
+ pclmulqdq %%XTMP4, %%XTMP5, 0x01 ; %%T3 = T3 + a1*b0 + a0*b1
+ pxor %%XTMP3, %%XTMP4
+ movdqa %%XTMP4, %%XTMP0
+ pclmulqdq %%XTMP4, %%XTMP5, 0x10
+ pxor %%XTMP3, %%XTMP4
+%assign i (i + 1)
+%assign j (j - 1)
+%endrep
+
+ movdqa %%XTMP4, %%XTMP3
+ pslldq %%XTMP4, 8 ; shift-L 2 DWs
+ psrldq %%XTMP3, 8 ; shift-R 2 DWs
+ pxor %%XTMP2, %%XTMP4
+ pxor %%XTMP1, %%XTMP3 ; accumulate the results in %%T1(M):%%T2(L)
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;first phase of the reduction
+ movdqa %%XTMP5, [rel POLY2]
+ movdqa %%XTMP0, %%XTMP5
+ pclmulqdq %%XTMP0, %%XTMP2, 0x01
+ pslldq %%XTMP0, 8 ; shift-L xmm2 2 DWs
+ pxor %%XTMP2, %%XTMP0 ; first phase of the reduction complete
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;second phase of the reduction
+ movdqa %%XTMP3, %%XTMP5
+ pclmulqdq %%XTMP3, %%XTMP2, 0x00
+ psrldq %%XTMP3, 4 ; shift-R 1 DW (Shift-R only 1-DW to obtain 2-DWs shift-R)
+
+ movdqa %%XTMP4, %%XTMP5
+ pclmulqdq %%XTMP4, %%XTMP2, 0x10
+ pslldq %%XTMP4, 4 ; shift-L 1 DW (Shift-L 1-DW to obtain result with no shifts)
+
+ pxor %%XTMP4, %%XTMP3 ; second phase of the reduction complete
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ movdqa %%AAD_HASH, %%XTMP1
+ pxor %%AAD_HASH, %%XTMP4 ; the result is in %%T1
+
+ sub %%T2, 128
+ je %%_CALC_AAD_done
+
+ add %%T1, 128
+ jmp %%_get_AAD_loop128
+
+%%_exit_AAD_loop128:
+ cmp %%T2, 16
+ jl %%_get_small_AAD_block
+
+ ;; calculate hash_key position to start with
+ mov %%T3, %%T2
+ and %%T3, -16 ; 1 to 7 blocks possible here
+ neg %%T3
+ add %%T3, HashKey_1 + 16
+ lea %%T3, [%%GDATA_KEY + %%T3]
+
+ movdqu %%XTMP0, [%%T1]
+ pshufb %%XTMP0, [rel SHUF_MASK]
+
+ pxor %%XTMP0, %%AAD_HASH
+
+ movdqu %%XTMP5, [%%T3]
+ movdqa %%XTMP1, %%XTMP0
+ movdqa %%XTMP2, %%XTMP0
+ movdqa %%XTMP3, %%XTMP0
+ movdqa %%XTMP4, %%XTMP0
+ pclmulqdq %%XTMP1, %%XTMP5, 0x11 ; %%T1 = a1*b1
+ pclmulqdq %%XTMP2, %%XTMP5, 0x00 ; %%T2 = a0*b0
+ pclmulqdq %%XTMP3, %%XTMP5, 0x01 ; %%T3 = a1*b0
+ pclmulqdq %%XTMP4, %%XTMP5, 0x10 ; %%T4 = a0*b1
+ pxor %%XTMP3, %%XTMP4 ; %%T3 = a1*b0 + a0*b1
+
+ add %%T3, 16 ; move to next hashkey
+ add %%T1, 16 ; move to next data block
+ sub %%T2, 16
+ cmp %%T2, 16
+ jl %%_AAD_reduce
+
+%%_AAD_blocks:
+ movdqu %%XTMP0, [%%T1]
+ pshufb %%XTMP0, [rel SHUF_MASK]
+
+ movdqu %%XTMP5, [%%T3]
+ movdqa %%XTMP4, %%XTMP0
+ pclmulqdq %%XTMP4, %%XTMP5, 0x11 ; %%T1 = T1 + a1*b1
+ pxor %%XTMP1, %%XTMP4
+
+ movdqa %%XTMP4, %%XTMP0
+ pclmulqdq %%XTMP4, %%XTMP5, 0x00 ; %%T2 = T2 + a0*b0
+ pxor %%XTMP2, %%XTMP4
+
+ movdqa %%XTMP4, %%XTMP0
+ pclmulqdq %%XTMP4, %%XTMP5, 0x01 ; %%T3 = T3 + a1*b0 + a0*b1
+ pxor %%XTMP3, %%XTMP4
+ movdqa %%XTMP4, %%XTMP0
+ pclmulqdq %%XTMP4, %%XTMP5, 0x10
+ pxor %%XTMP3, %%XTMP4
+
+ add %%T3, 16 ; move to next hashkey
+ add %%T1, 16
+ sub %%T2, 16
+ cmp %%T2, 16
+ jl %%_AAD_reduce
+ jmp %%_AAD_blocks
+
+%%_AAD_reduce:
+ movdqa %%XTMP4, %%XTMP3
+ pslldq %%XTMP4, 8 ; shift-L 2 DWs
+ psrldq %%XTMP3, 8 ; shift-R 2 DWs
+ pxor %%XTMP2, %%XTMP4
+ pxor %%XTMP1, %%XTMP3 ; accumulate the results in %%T1(M):%%T2(L)
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;first phase of the reduction
+ movdqa %%XTMP5, [rel POLY2]
+ movdqa %%XTMP0, %%XTMP5
+ pclmulqdq %%XTMP0, %%XTMP2, 0x01
+ pslldq %%XTMP0, 8 ; shift-L xmm2 2 DWs
+ pxor %%XTMP2, %%XTMP0 ; first phase of the reduction complete
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;second phase of the reduction
+ movdqa %%XTMP3, %%XTMP5
+ pclmulqdq %%XTMP3, %%XTMP2, 0x00
+ psrldq %%XTMP3, 4 ; shift-R 1 DW (Shift-R only 1-DW to obtain 2-DWs shift-R)
+
+ movdqa %%XTMP4, %%XTMP5
+ pclmulqdq %%XTMP4, %%XTMP2, 0x10
+ pslldq %%XTMP4, 4 ; shift-L 1 DW (Shift-L 1-DW to obtain result with no shifts)
+
+ pxor %%XTMP4, %%XTMP3 ; second phase of the reduction complete
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ movdqa %%AAD_HASH, %%XTMP1
+ pxor %%AAD_HASH, %%XTMP4 ; the result is in %%T1
+
+ or %%T2, %%T2
+ je %%_CALC_AAD_done
+
+%%_get_small_AAD_block:
+ movdqu %%XTMP0, [%%GDATA_KEY + HashKey]
+ READ_SMALL_DATA_INPUT %%XTMP1, %%T1, %%T2, %%T3, %%T4, %%T5
+ ;byte-reflect the AAD data
+ pshufb %%XTMP1, [rel SHUF_MASK]
+ pxor %%AAD_HASH, %%XTMP1
+ GHASH_MUL %%AAD_HASH, %%XTMP0, %%XTMP1, %%XTMP2, %%XTMP3, %%XTMP4, %%XTMP5
+
+%%_CALC_AAD_done:
+
+%endmacro ; CALC_AAD_HASH
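+
+;; The loop above evaluates GHASH over the AAD as a polynomial in the hash key H,
+;; with all arithmetic in GF(2^128):
+;;
+;;     X = A1*H^m  xor  A2*H^(m-1)  xor ... xor  Am*H
+;;
+;; Because the powers H^1..H^8 are precomputed in the key structure, eight AAD
+;; blocks per iteration are multiplied by independent powers and folded with a
+;; single reduction, instead of running the serial ((X xor Ai)*H) recurrence.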
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; PARTIAL_BLOCK: Handles encryption/decryption and the tag partial blocks between update calls.
+; Requires the input data to be at least 1 byte long.
+; Input: gcm_key_data (GDATA_KEY), gcm_context_data (GDATA_CTX), input text (PLAIN_CYPH_IN),
+; input text length (PLAIN_CYPH_LEN), the current data offset (DATA_OFFSET),
+; and whether encoding or decoding (ENC_DEC).
+; Output: A cypher of the first partial block (CYPH_PLAIN_OUT), and updated GDATA_CTX
+; Clobbers rax, r10, r12, r13, r15, xmm0, xmm1, xmm2, xmm3, xmm5, xmm6, xmm9, xmm10, xmm11, xmm13
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+%macro PARTIAL_BLOCK 8
+%define %%GDATA_KEY %1
+%define %%GDATA_CTX %2
+%define %%CYPH_PLAIN_OUT %3
+%define %%PLAIN_CYPH_IN %4
+%define %%PLAIN_CYPH_LEN %5
+%define %%DATA_OFFSET %6
+%define %%AAD_HASH %7
+%define %%ENC_DEC %8
+ mov r13, [%%GDATA_CTX + PBlockLen]
+ cmp r13, 0
+ je %%_partial_block_done ;Leave Macro if no partial blocks
+
+ cmp %%PLAIN_CYPH_LEN, 16 ;Read in input data without over reading
+ jl %%_fewer_than_16_bytes
+ XLDR xmm1, [%%PLAIN_CYPH_IN] ;If more than 16 bytes of data, just fill the xmm register
+ jmp %%_data_read
+
+%%_fewer_than_16_bytes:
+ lea r10, [%%PLAIN_CYPH_IN + %%DATA_OFFSET]
+ READ_SMALL_DATA_INPUT xmm1, r10, %%PLAIN_CYPH_LEN, rax, r12, r15
+ mov r13, [%%GDATA_CTX + PBlockLen]
+
+%%_data_read: ;Finished reading in data
+
+
+ movdqu xmm9, [%%GDATA_CTX + PBlockEncKey] ;xmm9 = ctx_data.partial_block_enc_key
+ movdqu xmm13, [%%GDATA_KEY + HashKey]
+
+ lea r12, [SHIFT_MASK]
+
+ add r12, r13 ; adjust the shuffle mask pointer to be able to shift r13 bytes (16-r13 is the number of bytes in plaintext mod 16)
+ movdqu xmm2, [r12] ; get the appropriate shuffle mask
+ pshufb xmm9, xmm2 ;shift right r13 bytes
+
+%ifidn %%ENC_DEC, DEC
+ movdqa xmm3, xmm1
+ pxor xmm9, xmm1 ; Cyphertext XOR E(K, Yn)
+
+ mov r15, %%PLAIN_CYPH_LEN
+ add r15, r13
+ sub r15, 16 ;Set r15 to be the amount of data left in CYPH_PLAIN_IN after filling the block
+	jge	%%_no_extra_mask_1	;Determine if partial block is not being filled and shift mask accordingly
+ sub r12, r15
+%%_no_extra_mask_1:
+
+ movdqu xmm1, [r12 + ALL_F-SHIFT_MASK] ; get the appropriate mask to mask out bottom r13 bytes of xmm9
+ pand xmm9, xmm1 ; mask out bottom r13 bytes of xmm9
+
+ pand xmm3, xmm1
+ pshufb xmm3, [SHUF_MASK]
+ pshufb xmm3, xmm2
+ pxor %%AAD_HASH, xmm3
+
+
+ cmp r15,0
+ jl %%_partial_incomplete_1
+
+ GHASH_MUL %%AAD_HASH, xmm13, xmm0, xmm10, xmm11, xmm5, xmm6 ;GHASH computation for the last <16 Byte block
+ xor rax,rax
+ mov [%%GDATA_CTX + PBlockLen], rax
+ jmp %%_dec_done
+%%_partial_incomplete_1:
+%ifidn __OUTPUT_FORMAT__, win64
+ mov rax, %%PLAIN_CYPH_LEN
+ add [%%GDATA_CTX + PBlockLen], rax
+%else
+ add [%%GDATA_CTX + PBlockLen], %%PLAIN_CYPH_LEN
+%endif
+%%_dec_done:
+ movdqu [%%GDATA_CTX + AadHash], %%AAD_HASH
+
+%else
+ pxor xmm9, xmm1 ; Plaintext XOR E(K, Yn)
+
+ mov r15, %%PLAIN_CYPH_LEN
+ add r15, r13
+ sub r15, 16 ;Set r15 to be the amount of data left in CYPH_PLAIN_IN after filling the block
+	jge	%%_no_extra_mask_2	;Determine if partial block is not being filled and shift mask accordingly
+ sub r12, r15
+%%_no_extra_mask_2:
+
+ movdqu xmm1, [r12 + ALL_F-SHIFT_MASK] ; get the appropriate mask to mask out bottom r13 bytes of xmm9
+ pand xmm9, xmm1 ; mask out bottom r13 bytes of xmm9
+
+ pshufb xmm9, [SHUF_MASK]
+ pshufb xmm9, xmm2
+ pxor %%AAD_HASH, xmm9
+
+ cmp r15,0
+ jl %%_partial_incomplete_2
+
+ GHASH_MUL %%AAD_HASH, xmm13, xmm0, xmm10, xmm11, xmm5, xmm6 ;GHASH computation for the last <16 Byte block
+ xor rax,rax
+ mov [%%GDATA_CTX + PBlockLen], rax
+ jmp %%_encode_done
+%%_partial_incomplete_2:
+%ifidn __OUTPUT_FORMAT__, win64
+ mov rax, %%PLAIN_CYPH_LEN
+ add [%%GDATA_CTX + PBlockLen], rax
+%else
+ add [%%GDATA_CTX + PBlockLen], %%PLAIN_CYPH_LEN
+%endif
+%%_encode_done:
+ movdqu [%%GDATA_CTX + AadHash], %%AAD_HASH
+
+ pshufb xmm9, [SHUF_MASK] ; shuffle xmm9 back to output as ciphertext
+ pshufb xmm9, xmm2
+%endif
+
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ; output encrypted Bytes
+ cmp r15,0
+ jl %%_partial_fill
+ mov r12, r13
+ mov r13, 16
+ sub r13, r12 ; Set r13 to be the number of bytes to write out
+ jmp %%_count_set
+%%_partial_fill:
+ mov r13, %%PLAIN_CYPH_LEN
+%%_count_set:
+ movq rax, xmm9
+ cmp r13, 8
+ jle %%_less_than_8_bytes_left
+
+ mov [%%CYPH_PLAIN_OUT+ %%DATA_OFFSET], rax
+ add %%DATA_OFFSET, 8
+ psrldq xmm9, 8
+ movq rax, xmm9
+ sub r13, 8
+%%_less_than_8_bytes_left:
+ mov BYTE [%%CYPH_PLAIN_OUT + %%DATA_OFFSET], al
+ add %%DATA_OFFSET, 1
+ shr rax, 8
+ sub r13, 1
+ jne %%_less_than_8_bytes_left
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+%%_partial_block_done:
+%endmacro ; PARTIAL_BLOCK
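+
+;; In C-like terms the partial-block bookkeeping above is roughly (a sketch using
+;; the context field names referenced in the comments, not library code):
+;;
+;;     n = min(16 - ctx->partial_block_length, text_len);
+;;     /* XOR those n bytes against the saved keystream E(K, Yn) and fold the
+;;        resulting ciphertext bytes into the running GHASH value */
+;;     ctx->partial_block_length = (ctx->partial_block_length + n) % 16;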
+
+
+; if a = number of total plaintext bytes
+; b = floor(a/16)
+; %%num_initial_blocks = b mod 8;
+; encrypt the initial %%num_initial_blocks blocks and apply ghash on the ciphertext
+; %%GDATA_KEY, %%GDATA_CTX, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, r14 are used as pointers only, not modified
+; Updated AAD_HASH is returned in %%T3
+
+%macro INITIAL_BLOCKS 24
+%define %%GDATA_KEY %1
+%define %%GDATA_CTX %2
+%define %%CYPH_PLAIN_OUT %3
+%define %%PLAIN_CYPH_IN %4
+%define %%LENGTH %5
+%define %%DATA_OFFSET %6
+%define %%num_initial_blocks %7 ; can be 0, 1, 2, 3, 4, 5, 6 or 7
+%define %%T1 %8
+%define %%HASH_KEY %9
+%define %%T3 %10
+%define %%T4 %11
+%define %%T5 %12
+%define %%CTR %13
+%define %%XMM1 %14
+%define %%XMM2 %15
+%define %%XMM3 %16
+%define %%XMM4 %17
+%define %%XMM5 %18
+%define %%XMM6 %19
+%define %%XMM7 %20
+%define %%XMM8 %21
+%define %%T6 %22
+%define %%T_key %23
+%define %%ENC_DEC %24
+
+%assign i (8-%%num_initial_blocks)
+ movdqu reg(i), %%XMM8 ; move AAD_HASH to temp reg
+
+ ; start AES for %%num_initial_blocks blocks
+ movdqu %%CTR, [%%GDATA_CTX + CurCount] ; %%CTR = Y0
+
+
+%assign i (9-%%num_initial_blocks)
+%rep %%num_initial_blocks
+ paddd %%CTR, [ONE] ; INCR Y0
+ movdqa reg(i), %%CTR
+ pshufb reg(i), [SHUF_MASK] ; perform a 16Byte swap
+%assign i (i+1)
+%endrep
+
+movdqu %%T_key, [%%GDATA_KEY+16*0]
+%assign i (9-%%num_initial_blocks)
+%rep %%num_initial_blocks
+ pxor reg(i),%%T_key
+%assign i (i+1)
+%endrep
+
+%assign j 1
+%rep NROUNDS ; encrypt N blocks with 13 key rounds (11 for GCM192)
+movdqu %%T_key, [%%GDATA_KEY+16*j]
+%assign i (9-%%num_initial_blocks)
+%rep %%num_initial_blocks
+ aesenc reg(i),%%T_key
+%assign i (i+1)
+%endrep
+
+%assign j (j+1)
+%endrep
+
+
+movdqu %%T_key, [%%GDATA_KEY+16*j] ; encrypt with last (14th) key round (12 for GCM192)
+%assign i (9-%%num_initial_blocks)
+%rep %%num_initial_blocks
+ aesenclast reg(i),%%T_key
+%assign i (i+1)
+%endrep
+
+%assign i (9-%%num_initial_blocks)
+%rep %%num_initial_blocks
+ XLDR %%T1, [%%PLAIN_CYPH_IN + %%DATA_OFFSET]
+ pxor reg(i), %%T1
+ XSTR [%%CYPH_PLAIN_OUT + %%DATA_OFFSET], reg(i) ; write back ciphertext for %%num_initial_blocks blocks
+ add %%DATA_OFFSET, 16
+ %ifidn %%ENC_DEC, DEC
+ movdqa reg(i), %%T1
+ %endif
+ pshufb reg(i), [SHUF_MASK] ; prepare ciphertext for GHASH computations
+%assign i (i+1)
+%endrep
+
+
+%assign i (8-%%num_initial_blocks)
+%assign j (9-%%num_initial_blocks)
+
+%rep %%num_initial_blocks
+ pxor reg(j), reg(i)
+ GHASH_MUL reg(j), %%HASH_KEY, %%T1, %%T3, %%T4, %%T5, %%T6 ; apply GHASH on %%num_initial_blocks blocks
+%assign i (i+1)
+%assign j (j+1)
+%endrep
+ ; %%XMM8 has the current Hash Value
+ movdqa %%T3, %%XMM8
+
+ cmp %%LENGTH, 128
+ jl %%_initial_blocks_done
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; Prepare 8 counter blocks and perform rounds of AES cipher on them, load plain/cipher text and store cipher/plain text.
+; Keep 8 cipher text blocks for further GHASH computations (XMM1 - XMM8)
+; - combine current GHASH value into block 0 (XMM1)
+
+ paddd %%CTR, [ONE] ; INCR Y0
+ movdqa %%XMM1, %%CTR
+ pshufb %%XMM1, [SHUF_MASK] ; perform a 16Byte swap
+
+ paddd %%CTR, [ONE] ; INCR Y0
+ movdqa %%XMM2, %%CTR
+ pshufb %%XMM2, [SHUF_MASK] ; perform a 16Byte swap
+
+ paddd %%CTR, [ONE] ; INCR Y0
+ movdqa %%XMM3, %%CTR
+ pshufb %%XMM3, [SHUF_MASK] ; perform a 16Byte swap
+
+ paddd %%CTR, [ONE] ; INCR Y0
+ movdqa %%XMM4, %%CTR
+ pshufb %%XMM4, [SHUF_MASK] ; perform a 16Byte swap
+
+ paddd %%CTR, [ONE] ; INCR Y0
+ movdqa %%XMM5, %%CTR
+ pshufb %%XMM5, [SHUF_MASK] ; perform a 16Byte swap
+
+ paddd %%CTR, [ONE] ; INCR Y0
+ movdqa %%XMM6, %%CTR
+ pshufb %%XMM6, [SHUF_MASK] ; perform a 16Byte swap
+
+ paddd %%CTR, [ONE] ; INCR Y0
+ movdqa %%XMM7, %%CTR
+ pshufb %%XMM7, [SHUF_MASK] ; perform a 16Byte swap
+
+ paddd %%CTR, [ONE] ; INCR Y0
+ movdqa %%XMM8, %%CTR
+ pshufb %%XMM8, [SHUF_MASK] ; perform a 16Byte swap
+
+ movdqu %%T_key, [%%GDATA_KEY+16*0]
+ pxor %%XMM1, %%T_key
+ pxor %%XMM2, %%T_key
+ pxor %%XMM3, %%T_key
+ pxor %%XMM4, %%T_key
+ pxor %%XMM5, %%T_key
+ pxor %%XMM6, %%T_key
+ pxor %%XMM7, %%T_key
+ pxor %%XMM8, %%T_key
+
+
+%assign i 1
+%rep NROUNDS ; do early (13) rounds (11 for GCM192)
+ movdqu %%T_key, [%%GDATA_KEY+16*i]
+ aesenc %%XMM1, %%T_key
+ aesenc %%XMM2, %%T_key
+ aesenc %%XMM3, %%T_key
+ aesenc %%XMM4, %%T_key
+ aesenc %%XMM5, %%T_key
+ aesenc %%XMM6, %%T_key
+ aesenc %%XMM7, %%T_key
+ aesenc %%XMM8, %%T_key
+%assign i (i+1)
+%endrep
+
+
+ movdqu %%T_key, [%%GDATA_KEY+16*i] ; do final key round
+ aesenclast %%XMM1, %%T_key
+ aesenclast %%XMM2, %%T_key
+ aesenclast %%XMM3, %%T_key
+ aesenclast %%XMM4, %%T_key
+ aesenclast %%XMM5, %%T_key
+ aesenclast %%XMM6, %%T_key
+ aesenclast %%XMM7, %%T_key
+ aesenclast %%XMM8, %%T_key
+
+ XLDR %%T1, [%%PLAIN_CYPH_IN + %%DATA_OFFSET + 16*0]
+ pxor %%XMM1, %%T1
+ XSTR [%%CYPH_PLAIN_OUT + %%DATA_OFFSET + 16*0], %%XMM1
+ %ifidn %%ENC_DEC, DEC
+ movdqa %%XMM1, %%T1
+ %endif
+
+ XLDR %%T1, [%%PLAIN_CYPH_IN + %%DATA_OFFSET + 16*1]
+ pxor %%XMM2, %%T1
+ XSTR [%%CYPH_PLAIN_OUT + %%DATA_OFFSET + 16*1], %%XMM2
+ %ifidn %%ENC_DEC, DEC
+ movdqa %%XMM2, %%T1
+ %endif
+
+ XLDR %%T1, [%%PLAIN_CYPH_IN + %%DATA_OFFSET + 16*2]
+ pxor %%XMM3, %%T1
+ XSTR [%%CYPH_PLAIN_OUT + %%DATA_OFFSET + 16*2], %%XMM3
+ %ifidn %%ENC_DEC, DEC
+ movdqa %%XMM3, %%T1
+ %endif
+
+ XLDR %%T1, [%%PLAIN_CYPH_IN + %%DATA_OFFSET + 16*3]
+ pxor %%XMM4, %%T1
+ XSTR [%%CYPH_PLAIN_OUT + %%DATA_OFFSET + 16*3], %%XMM4
+ %ifidn %%ENC_DEC, DEC
+ movdqa %%XMM4, %%T1
+ %endif
+
+ XLDR %%T1, [%%PLAIN_CYPH_IN + %%DATA_OFFSET + 16*4]
+ pxor %%XMM5, %%T1
+ XSTR [%%CYPH_PLAIN_OUT + %%DATA_OFFSET + 16*4], %%XMM5
+ %ifidn %%ENC_DEC, DEC
+ movdqa %%XMM5, %%T1
+ %endif
+
+ XLDR %%T1, [%%PLAIN_CYPH_IN + %%DATA_OFFSET + 16*5]
+ pxor %%XMM6, %%T1
+ XSTR [%%CYPH_PLAIN_OUT + %%DATA_OFFSET + 16*5], %%XMM6
+ %ifidn %%ENC_DEC, DEC
+ movdqa %%XMM6, %%T1
+ %endif
+
+ XLDR %%T1, [%%PLAIN_CYPH_IN + %%DATA_OFFSET + 16*6]
+ pxor %%XMM7, %%T1
+ XSTR [%%CYPH_PLAIN_OUT + %%DATA_OFFSET + 16*6], %%XMM7
+ %ifidn %%ENC_DEC, DEC
+ movdqa %%XMM7, %%T1
+ %endif
+
+ XLDR %%T1, [%%PLAIN_CYPH_IN + %%DATA_OFFSET + 16*7]
+ pxor %%XMM8, %%T1
+ XSTR [%%CYPH_PLAIN_OUT + %%DATA_OFFSET + 16*7], %%XMM8
+ %ifidn %%ENC_DEC, DEC
+ movdqa %%XMM8, %%T1
+ %endif
+
+ add %%DATA_OFFSET, 128
+
+ pshufb %%XMM1, [SHUF_MASK] ; perform a 16Byte swap
+ pxor %%XMM1, %%T3 ; combine GHASHed value with the corresponding ciphertext
+ pshufb %%XMM2, [SHUF_MASK] ; perform a 16Byte swap
+ pshufb %%XMM3, [SHUF_MASK] ; perform a 16Byte swap
+ pshufb %%XMM4, [SHUF_MASK] ; perform a 16Byte swap
+ pshufb %%XMM5, [SHUF_MASK] ; perform a 16Byte swap
+ pshufb %%XMM6, [SHUF_MASK] ; perform a 16Byte swap
+ pshufb %%XMM7, [SHUF_MASK] ; perform a 16Byte swap
+ pshufb %%XMM8, [SHUF_MASK] ; perform a 16Byte swap
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+%%_initial_blocks_done:
+
+
+%endmacro
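+
+;; The blocks above follow the standard GCM counter-mode relation, with only the
+;; low 32 bits of the counter incremented per block:
+;;
+;;     Yi = inc32(Yi-1),    Ci = Pi xor AES-K(Yi)
+;;
+;; The pshufb with SHUF_MASK converts between the byte-reflected form in which
+;; the paddd counter arithmetic is done and the byte order the AES rounds use.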
+
+
+
+; encrypt 8 blocks at a time
+; ghash the 8 previously encrypted ciphertext blocks
+; %%GDATA (KEY), %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN are used as pointers only, not modified
+; %%DATA_OFFSET is the data offset value
+%macro GHASH_8_ENCRYPT_8_PARALLEL 22
+%define %%GDATA %1
+%define %%CYPH_PLAIN_OUT %2
+%define %%PLAIN_CYPH_IN %3
+%define %%DATA_OFFSET %4
+%define %%T1 %5
+%define %%T2 %6
+%define %%T3 %7
+%define %%T4 %8
+%define %%T5 %9
+%define %%T6 %10
+%define %%CTR %11
+%define %%XMM1 %12
+%define %%XMM2 %13
+%define %%XMM3 %14
+%define %%XMM4 %15
+%define %%XMM5 %16
+%define %%XMM6 %17
+%define %%XMM7 %18
+%define %%XMM8 %19
+%define %%T7 %20
+%define %%loop_idx %21
+%define %%ENC_DEC %22
+
+ movdqa %%T7, %%XMM1
+ movdqu [rsp + TMP2], %%XMM2
+ movdqu [rsp + TMP3], %%XMM3
+ movdqu [rsp + TMP4], %%XMM4
+ movdqu [rsp + TMP5], %%XMM5
+ movdqu [rsp + TMP6], %%XMM6
+ movdqu [rsp + TMP7], %%XMM7
+ movdqu [rsp + TMP8], %%XMM8
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;; Karatsuba Method
+
+ movdqa %%T4, %%T7
+ pshufd %%T6, %%T7, 01001110b
+ pxor %%T6, %%T7
+ %ifidn %%loop_idx, in_order
+ paddd %%CTR, [ONE] ; INCR CNT
+ %else
+ paddd %%CTR, [ONEf] ; INCR CNT
+ %endif
+ movdqu %%T5, [%%GDATA + HashKey_8]
+ pclmulqdq %%T4, %%T5, 0x11 ; %%T1 = a1*b1
+ pclmulqdq %%T7, %%T5, 0x00 ; %%T7 = a0*b0
+ movdqu %%T5, [%%GDATA + HashKey_8_k]
+ pclmulqdq %%T6, %%T5, 0x00 ; %%T2 = (a1+a0)*(b1+b0)
+ movdqa %%XMM1, %%CTR
+
+ %ifidn %%loop_idx, in_order
+ paddd %%CTR, [ONE] ; INCR CNT
+ movdqa %%XMM2, %%CTR
+
+ paddd %%CTR, [ONE] ; INCR CNT
+ movdqa %%XMM3, %%CTR
+
+ paddd %%CTR, [ONE] ; INCR CNT
+ movdqa %%XMM4, %%CTR
+
+ paddd %%CTR, [ONE] ; INCR CNT
+ movdqa %%XMM5, %%CTR
+
+ paddd %%CTR, [ONE] ; INCR CNT
+ movdqa %%XMM6, %%CTR
+
+ paddd %%CTR, [ONE] ; INCR CNT
+ movdqa %%XMM7, %%CTR
+
+ paddd %%CTR, [ONE] ; INCR CNT
+ movdqa %%XMM8, %%CTR
+
+ pshufb %%XMM1, [SHUF_MASK] ; perform a 16Byte swap
+ pshufb %%XMM2, [SHUF_MASK] ; perform a 16Byte swap
+ pshufb %%XMM3, [SHUF_MASK] ; perform a 16Byte swap
+ pshufb %%XMM4, [SHUF_MASK] ; perform a 16Byte swap
+ pshufb %%XMM5, [SHUF_MASK] ; perform a 16Byte swap
+ pshufb %%XMM6, [SHUF_MASK] ; perform a 16Byte swap
+ pshufb %%XMM7, [SHUF_MASK] ; perform a 16Byte swap
+ pshufb %%XMM8, [SHUF_MASK] ; perform a 16Byte swap
+ %else
+ paddd %%CTR, [ONEf] ; INCR CNT
+ movdqa %%XMM2, %%CTR
+
+ paddd %%CTR, [ONEf] ; INCR CNT
+ movdqa %%XMM3, %%CTR
+
+ paddd %%CTR, [ONEf] ; INCR CNT
+ movdqa %%XMM4, %%CTR
+
+ paddd %%CTR, [ONEf] ; INCR CNT
+ movdqa %%XMM5, %%CTR
+
+ paddd %%CTR, [ONEf] ; INCR CNT
+ movdqa %%XMM6, %%CTR
+
+ paddd %%CTR, [ONEf] ; INCR CNT
+ movdqa %%XMM7, %%CTR
+
+ paddd %%CTR, [ONEf] ; INCR CNT
+ movdqa %%XMM8, %%CTR
+ %endif
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+ movdqu %%T1, [%%GDATA + 16*0]
+ pxor %%XMM1, %%T1
+ pxor %%XMM2, %%T1
+ pxor %%XMM3, %%T1
+ pxor %%XMM4, %%T1
+ pxor %%XMM5, %%T1
+ pxor %%XMM6, %%T1
+ pxor %%XMM7, %%T1
+ pxor %%XMM8, %%T1
+
+ ;; %%XMM6, %%T5 hold the values for the two operands which are carry-less multiplied
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;; Karatsuba Method
+ movdqu %%T1, [rsp + TMP2]
+ movdqa %%T3, %%T1
+
+ pshufd %%T2, %%T3, 01001110b
+ pxor %%T2, %%T3
+ movdqu %%T5, [%%GDATA + HashKey_7]
+ pclmulqdq %%T1, %%T5, 0x11 ; %%T1 = a1*b1
+ pclmulqdq %%T3, %%T5, 0x00 ; %%T3 = a0*b0
+ movdqu %%T5, [%%GDATA + HashKey_7_k]
+ pclmulqdq %%T2, %%T5, 0x00 ; %%T2 = (a1+a0)*(b1+b0)
+ pxor %%T4, %%T1 ; accumulate the results in %%T4:%%T7, %%T6 holds the middle part
+ pxor %%T7, %%T3
+ pxor %%T6, %%T2
+
+ movdqu %%T1, [%%GDATA + 16*1]
+ aesenc %%XMM1, %%T1
+ aesenc %%XMM2, %%T1
+ aesenc %%XMM3, %%T1
+ aesenc %%XMM4, %%T1
+ aesenc %%XMM5, %%T1
+ aesenc %%XMM6, %%T1
+ aesenc %%XMM7, %%T1
+ aesenc %%XMM8, %%T1
+
+
+ movdqu %%T1, [%%GDATA + 16*2]
+ aesenc %%XMM1, %%T1
+ aesenc %%XMM2, %%T1
+ aesenc %%XMM3, %%T1
+ aesenc %%XMM4, %%T1
+ aesenc %%XMM5, %%T1
+ aesenc %%XMM6, %%T1
+ aesenc %%XMM7, %%T1
+ aesenc %%XMM8, %%T1
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ; Karatsuba Method
+ movdqu %%T1, [rsp + TMP3]
+ movdqa %%T3, %%T1
+ pshufd %%T2, %%T3, 01001110b
+ pxor %%T2, %%T3
+ movdqu %%T5, [%%GDATA + HashKey_6]
+ pclmulqdq %%T1, %%T5, 0x11 ; %%T1 = a1*b1
+ pclmulqdq %%T3, %%T5, 0x00 ; %%T3 = a0*b0
+ movdqu %%T5, [%%GDATA + HashKey_6_k]
+ pclmulqdq %%T2, %%T5, 0x00 ; %%T2 = (a1+a0)*(b1+b0)
+ pxor %%T4, %%T1 ; accumulate the results in %%T4:%%T7, %%T6 holds the middle part
+ pxor %%T7, %%T3
+ pxor %%T6, %%T2
+
+ movdqu %%T1, [%%GDATA + 16*3]
+ aesenc %%XMM1, %%T1
+ aesenc %%XMM2, %%T1
+ aesenc %%XMM3, %%T1
+ aesenc %%XMM4, %%T1
+ aesenc %%XMM5, %%T1
+ aesenc %%XMM6, %%T1
+ aesenc %%XMM7, %%T1
+ aesenc %%XMM8, %%T1
+
+ movdqu %%T1, [rsp + TMP4]
+ movdqa %%T3, %%T1
+ pshufd %%T2, %%T3, 01001110b
+ pxor %%T2, %%T3
+ movdqu %%T5, [%%GDATA + HashKey_5]
+ pclmulqdq %%T1, %%T5, 0x11 ; %%T1 = a1*b1
+ pclmulqdq %%T3, %%T5, 0x00 ; %%T3 = a0*b0
+ movdqu %%T5, [%%GDATA + HashKey_5_k]
+ pclmulqdq %%T2, %%T5, 0x00 ; %%T2 = (a1+a0)*(b1+b0)
+ pxor %%T4, %%T1 ; accumulate the results in %%T4:%%T7, %%T6 holds the middle part
+ pxor %%T7, %%T3
+ pxor %%T6, %%T2
+
+ movdqu %%T1, [%%GDATA + 16*4]
+ aesenc %%XMM1, %%T1
+ aesenc %%XMM2, %%T1
+ aesenc %%XMM3, %%T1
+ aesenc %%XMM4, %%T1
+ aesenc %%XMM5, %%T1
+ aesenc %%XMM6, %%T1
+ aesenc %%XMM7, %%T1
+ aesenc %%XMM8, %%T1
+
+ movdqu %%T1, [%%GDATA + 16*5]
+ aesenc %%XMM1, %%T1
+ aesenc %%XMM2, %%T1
+ aesenc %%XMM3, %%T1
+ aesenc %%XMM4, %%T1
+ aesenc %%XMM5, %%T1
+ aesenc %%XMM6, %%T1
+ aesenc %%XMM7, %%T1
+ aesenc %%XMM8, %%T1
+
+ movdqu %%T1, [rsp + TMP5]
+ movdqa %%T3, %%T1
+ pshufd %%T2, %%T3, 01001110b
+ pxor %%T2, %%T3
+ movdqu %%T5, [%%GDATA + HashKey_4]
+ pclmulqdq %%T1, %%T5, 0x11 ; %%T1 = a1*b1
+ pclmulqdq %%T3, %%T5, 0x00 ; %%T3 = a0*b0
+ movdqu %%T5, [%%GDATA + HashKey_4_k]
+ pclmulqdq %%T2, %%T5, 0x00 ; %%T2 = (a1+a0)*(b1+b0)
+ pxor %%T4, %%T1 ; accumulate the results in %%T4:%%T7, %%T6 holds the middle part
+ pxor %%T7, %%T3
+ pxor %%T6, %%T2
+
+
+ movdqu %%T1, [%%GDATA + 16*6]
+ aesenc %%XMM1, %%T1
+ aesenc %%XMM2, %%T1
+ aesenc %%XMM3, %%T1
+ aesenc %%XMM4, %%T1
+ aesenc %%XMM5, %%T1
+ aesenc %%XMM6, %%T1
+ aesenc %%XMM7, %%T1
+ aesenc %%XMM8, %%T1
+ movdqu %%T1, [rsp + TMP6]
+ movdqa %%T3, %%T1
+ pshufd %%T2, %%T3, 01001110b
+ pxor %%T2, %%T3
+ movdqu %%T5, [%%GDATA + HashKey_3]
+ pclmulqdq %%T1, %%T5, 0x11 ; %%T1 = a1*b1
+ pclmulqdq %%T3, %%T5, 0x00 ; %%T3 = a0*b0
+ movdqu %%T5, [%%GDATA + HashKey_3_k]
+ pclmulqdq %%T2, %%T5, 0x00 ; %%T2 = (a1+a0)*(b1+b0)
+ pxor %%T4, %%T1 ; accumulate the results in %%T4:%%T7, %%T6 holds the middle part
+ pxor %%T7, %%T3
+ pxor %%T6, %%T2
+
+ movdqu %%T1, [%%GDATA + 16*7]
+ aesenc %%XMM1, %%T1
+ aesenc %%XMM2, %%T1
+ aesenc %%XMM3, %%T1
+ aesenc %%XMM4, %%T1
+ aesenc %%XMM5, %%T1
+ aesenc %%XMM6, %%T1
+ aesenc %%XMM7, %%T1
+ aesenc %%XMM8, %%T1
+
+ movdqu %%T1, [rsp + TMP7]
+ movdqa %%T3, %%T1
+ pshufd %%T2, %%T3, 01001110b
+ pxor %%T2, %%T3
+ movdqu %%T5, [%%GDATA + HashKey_2]
+ pclmulqdq %%T1, %%T5, 0x11 ; %%T1 = a1*b1
+ pclmulqdq %%T3, %%T5, 0x00 ; %%T3 = a0*b0
+ movdqu %%T5, [%%GDATA + HashKey_2_k]
+ pclmulqdq %%T2, %%T5, 0x00 ; %%T2 = (a1+a0)*(b1+b0)
+ pxor %%T4, %%T1 ; accumulate the results in %%T4:%%T7, %%T6 holds the middle part
+ pxor %%T7, %%T3
+ pxor %%T6, %%T2
+
+ movdqu %%T1, [%%GDATA + 16*8]
+ aesenc %%XMM1, %%T1
+ aesenc %%XMM2, %%T1
+ aesenc %%XMM3, %%T1
+ aesenc %%XMM4, %%T1
+ aesenc %%XMM5, %%T1
+ aesenc %%XMM6, %%T1
+ aesenc %%XMM7, %%T1
+ aesenc %%XMM8, %%T1
+
+
+ ;; %%XMM8, %%T5 hold the values for the two operands which are carry-less multiplied
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;; Karatsuba Method
+ movdqu %%T1, [rsp + TMP8]
+ movdqa %%T3, %%T1
+
+ pshufd %%T2, %%T3, 01001110b
+ pxor %%T2, %%T3
+ movdqu %%T5, [%%GDATA + HashKey]
+ pclmulqdq %%T1, %%T5, 0x11 ; %%T1 = a1*b1
+ pclmulqdq %%T3, %%T5, 0x00 ; %%T3 = a0*b0
+ movdqu %%T5, [%%GDATA + HashKey_k]
+ pclmulqdq %%T2, %%T5, 0x00 ; %%T2 = (a1+a0)*(b1+b0)
+ pxor %%T7, %%T3
+ pxor %%T4, %%T1
+
+ movdqu %%T1, [%%GDATA + 16*9]
+ aesenc %%XMM1, %%T1
+ aesenc %%XMM2, %%T1
+ aesenc %%XMM3, %%T1
+ aesenc %%XMM4, %%T1
+ aesenc %%XMM5, %%T1
+ aesenc %%XMM6, %%T1
+ aesenc %%XMM7, %%T1
+ aesenc %%XMM8, %%T1
+
+
+%ifdef GCM128_MODE
+ movdqu %%T5, [%%GDATA + 16*10]
+%endif
+%ifdef GCM192_MODE
+ movdqu %%T1, [%%GDATA + 16*10]
+ aesenc %%XMM1, %%T1
+ aesenc %%XMM2, %%T1
+ aesenc %%XMM3, %%T1
+ aesenc %%XMM4, %%T1
+ aesenc %%XMM5, %%T1
+ aesenc %%XMM6, %%T1
+ aesenc %%XMM7, %%T1
+ aesenc %%XMM8, %%T1
+
+ movdqu %%T1, [%%GDATA + 16*11]
+ aesenc %%XMM1, %%T1
+ aesenc %%XMM2, %%T1
+ aesenc %%XMM3, %%T1
+ aesenc %%XMM4, %%T1
+ aesenc %%XMM5, %%T1
+ aesenc %%XMM6, %%T1
+ aesenc %%XMM7, %%T1
+ aesenc %%XMM8, %%T1
+
+ movdqu %%T5, [%%GDATA + 16*12] ; finish last key round
+%endif
+%ifdef GCM256_MODE
+ movdqu %%T1, [%%GDATA + 16*10]
+ aesenc %%XMM1, %%T1
+ aesenc %%XMM2, %%T1
+ aesenc %%XMM3, %%T1
+ aesenc %%XMM4, %%T1
+ aesenc %%XMM5, %%T1
+ aesenc %%XMM6, %%T1
+ aesenc %%XMM7, %%T1
+ aesenc %%XMM8, %%T1
+
+ movdqu %%T1, [%%GDATA + 16*11]
+ aesenc %%XMM1, %%T1
+ aesenc %%XMM2, %%T1
+ aesenc %%XMM3, %%T1
+ aesenc %%XMM4, %%T1
+ aesenc %%XMM5, %%T1
+ aesenc %%XMM6, %%T1
+ aesenc %%XMM7, %%T1
+ aesenc %%XMM8, %%T1
+
+ movdqu %%T1, [%%GDATA + 16*12]
+ aesenc %%XMM1, %%T1
+ aesenc %%XMM2, %%T1
+ aesenc %%XMM3, %%T1
+ aesenc %%XMM4, %%T1
+ aesenc %%XMM5, %%T1
+ aesenc %%XMM6, %%T1
+ aesenc %%XMM7, %%T1
+ aesenc %%XMM8, %%T1
+
+ movdqu %%T1, [%%GDATA + 16*13]
+ aesenc %%XMM1, %%T1
+ aesenc %%XMM2, %%T1
+ aesenc %%XMM3, %%T1
+ aesenc %%XMM4, %%T1
+ aesenc %%XMM5, %%T1
+ aesenc %%XMM6, %%T1
+ aesenc %%XMM7, %%T1
+ aesenc %%XMM8, %%T1
+
+ movdqu %%T5, [%%GDATA + 16*14] ; finish last key round
+%endif
+
+%assign i 0
+%assign j 1
+%rep 8
+ XLDR %%T1, [%%PLAIN_CYPH_IN+%%DATA_OFFSET+16*i]
+
+%ifidn %%ENC_DEC, DEC
+ movdqa %%T3, %%T1
+%endif
+
+ pxor %%T1, %%T5
+ aesenclast reg(j), %%T1 ; XMM1:XMM8
+ XSTR [%%CYPH_PLAIN_OUT+%%DATA_OFFSET+16*i], reg(j) ; Write to the Output buffer
+
+%ifidn %%ENC_DEC, DEC
+ movdqa reg(j), %%T3
+%endif
+%assign i (i+1)
+%assign j (j+1)
+%endrep
+
+
+
+
+ pxor %%T2, %%T6
+ pxor %%T2, %%T4
+ pxor %%T2, %%T7
+
+
+ movdqa %%T3, %%T2
+ pslldq %%T3, 8 ; shift-L %%T3 2 DWs
+ psrldq %%T2, 8 ; shift-R %%T2 2 DWs
+ pxor %%T7, %%T3
+ pxor %%T4, %%T2 ; accumulate the results in %%T4:%%T7
+
+
+
+ ;first phase of the reduction
+ movdqa %%T2, %%T7
+ movdqa %%T3, %%T7
+ movdqa %%T1, %%T7 ; move %%T7 into %%T2, %%T3, %%T1 in order to perform the three shifts independently
+
+	pslld	%%T2, 31		; packed left shift << 31
+	pslld	%%T3, 30		; packed left shift << 30
+	pslld	%%T1, 25		; packed left shift << 25
+ pxor %%T2, %%T3 ; xor the shifted versions
+ pxor %%T2, %%T1
+
+ movdqa %%T5, %%T2
+ psrldq %%T5, 4 ; shift-R %%T5 1 DW
+
+ pslldq %%T2, 12 ; shift-L %%T2 3 DWs
+ pxor %%T7, %%T2 ; first phase of the reduction complete
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+ pshufb %%XMM1, [SHUF_MASK] ; perform a 16Byte swap
+ pshufb %%XMM2, [SHUF_MASK] ; perform a 16Byte swap
+ pshufb %%XMM3, [SHUF_MASK] ; perform a 16Byte swap
+ pshufb %%XMM4, [SHUF_MASK] ; perform a 16Byte swap
+ pshufb %%XMM5, [SHUF_MASK] ; perform a 16Byte swap
+ pshufb %%XMM6, [SHUF_MASK] ; perform a 16Byte swap
+ pshufb %%XMM7, [SHUF_MASK] ; perform a 16Byte swap
+ pshufb %%XMM8, [SHUF_MASK] ; perform a 16Byte swap
+
+ ;second phase of the reduction
+	movdqa	%%T2,%%T7		; make 3 copies of %%T7 (in %%T2, %%T3, %%T1) for doing three shift operations
+ movdqa %%T3,%%T7
+ movdqa %%T1,%%T7
+
+	psrld	%%T2,1			; packed right shift >> 1
+	psrld	%%T3,2			; packed right shift >> 2
+	psrld	%%T1,7			; packed right shift >> 7
+ pxor %%T2,%%T3 ; xor the shifted versions
+ pxor %%T2,%%T1
+
+ pxor %%T2, %%T5
+ pxor %%T7, %%T2
+	pxor	%%T7, %%T4		; the result is in %%T7
+
+
+ pxor %%XMM1, %%T7
+
+%endmacro
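+
+;; The Karatsuba steps above split each 128-bit operand into 64-bit halves,
+;; a = a1:a0 and b = b1:b0, so the carry-less product is
+;;
+;;     a*b = a1*b1*x^128  xor  (a1*b0 xor a0*b1)*x^64  xor  a0*b0
+;;
+;; and the middle term is recovered as (a1 xor a0)*(b1 xor b0) xor a1*b1 xor a0*b0,
+;; i.e. three PCLMULQDQ operations per block instead of four. The HashKey_n_k
+;; entries hold the precomputed (b1 xor b0) value for each hash key power.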
+
+
+; GHASH the last 8 ciphertext blocks.
+%macro GHASH_LAST_8 16
+%define %%GDATA %1
+%define %%T1 %2
+%define %%T2 %3
+%define %%T3 %4
+%define %%T4 %5
+%define %%T5 %6
+%define %%T6 %7
+%define %%T7 %8
+%define %%XMM1 %9
+%define %%XMM2 %10
+%define %%XMM3 %11
+%define %%XMM4 %12
+%define %%XMM5 %13
+%define %%XMM6 %14
+%define %%XMM7 %15
+%define %%XMM8 %16
+
+ ; Karatsuba Method
+ movdqa %%T6, %%XMM1
+ pshufd %%T2, %%XMM1, 01001110b
+ pxor %%T2, %%XMM1
+ movdqu %%T5, [%%GDATA + HashKey_8]
+ pclmulqdq %%T6, %%T5, 0x11 ; %%T6 = a1*b1
+
+ pclmulqdq %%XMM1, %%T5, 0x00 ; %%XMM1 = a0*b0
+ movdqu %%T4, [%%GDATA + HashKey_8_k]
+ pclmulqdq %%T2, %%T4, 0x00 ; %%T2 = (a1+a0)*(b1+b0)
+
+ movdqa %%T7, %%XMM1
+ movdqa %%XMM1, %%T2 ; result in %%T6, %%T7, %%XMM1
+
+
+ ; Karatsuba Method
+ movdqa %%T1, %%XMM2
+ pshufd %%T2, %%XMM2, 01001110b
+ pxor %%T2, %%XMM2
+ movdqu %%T5, [%%GDATA + HashKey_7]
+ pclmulqdq %%T1, %%T5, 0x11 ; %%T1 = a1*b1
+
+ pclmulqdq %%XMM2, %%T5, 0x00 ; %%XMM2 = a0*b0
+ movdqu %%T4, [%%GDATA + HashKey_7_k]
+ pclmulqdq %%T2, %%T4, 0x00 ; %%T2 = (a1+a0)*(b1+b0)
+
+ pxor %%T6, %%T1
+ pxor %%T7, %%XMM2
+ pxor %%XMM1, %%T2 ; results accumulated in %%T6, %%T7, %%XMM1
+
+
+ ; Karatsuba Method
+ movdqa %%T1, %%XMM3
+ pshufd %%T2, %%XMM3, 01001110b
+ pxor %%T2, %%XMM3
+ movdqu %%T5, [%%GDATA + HashKey_6]
+ pclmulqdq %%T1, %%T5, 0x11 ; %%T1 = a1*b1
+
+ pclmulqdq %%XMM3, %%T5, 0x00 ; %%XMM3 = a0*b0
+ movdqu %%T4, [%%GDATA + HashKey_6_k]
+ pclmulqdq %%T2, %%T4, 0x00 ; %%T2 = (a1+a0)*(b1+b0)
+
+ pxor %%T6, %%T1
+ pxor %%T7, %%XMM3
+ pxor %%XMM1, %%T2 ; results accumulated in %%T6, %%T7, %%XMM1
+
+ ; Karatsuba Method
+ movdqa %%T1, %%XMM4
+ pshufd %%T2, %%XMM4, 01001110b
+ pxor %%T2, %%XMM4
+ movdqu %%T5, [%%GDATA + HashKey_5]
+ pclmulqdq %%T1, %%T5, 0x11 ; %%T1 = a1*b1
+
+	pclmulqdq	%%XMM4, %%T5, 0x00	; %%XMM4 = a0*b0
+ movdqu %%T4, [%%GDATA + HashKey_5_k]
+ pclmulqdq %%T2, %%T4, 0x00 ; %%T2 = (a1+a0)*(b1+b0)
+
+ pxor %%T6, %%T1
+ pxor %%T7, %%XMM4
+ pxor %%XMM1, %%T2 ; results accumulated in %%T6, %%T7, %%XMM1
+
+ ; Karatsuba Method
+ movdqa %%T1, %%XMM5
+ pshufd %%T2, %%XMM5, 01001110b
+ pxor %%T2, %%XMM5
+ movdqu %%T5, [%%GDATA + HashKey_4]
+ pclmulqdq %%T1, %%T5, 0x11 ; %%T1 = a1*b1
+
+	pclmulqdq	%%XMM5, %%T5, 0x00	; %%XMM5 = a0*b0
+ movdqu %%T4, [%%GDATA + HashKey_4_k]
+ pclmulqdq %%T2, %%T4, 0x00 ; %%T2 = (a1+a0)*(b1+b0)
+
+ pxor %%T6, %%T1
+ pxor %%T7, %%XMM5
+ pxor %%XMM1, %%T2 ; results accumulated in %%T6, %%T7, %%XMM1
+
+ ; Karatsuba Method
+ movdqa %%T1, %%XMM6
+ pshufd %%T2, %%XMM6, 01001110b
+ pxor %%T2, %%XMM6
+ movdqu %%T5, [%%GDATA + HashKey_3]
+ pclmulqdq %%T1, %%T5, 0x11 ; %%T1 = a1*b1
+
+	pclmulqdq	%%XMM6, %%T5, 0x00	; %%XMM6 = a0*b0
+ movdqu %%T4, [%%GDATA + HashKey_3_k]
+ pclmulqdq %%T2, %%T4, 0x00 ; %%T2 = (a1+a0)*(b1+b0)
+
+ pxor %%T6, %%T1
+ pxor %%T7, %%XMM6
+ pxor %%XMM1, %%T2 ; results accumulated in %%T6, %%T7, %%XMM1
+
+ ; Karatsuba Method
+ movdqa %%T1, %%XMM7
+ pshufd %%T2, %%XMM7, 01001110b
+ pxor %%T2, %%XMM7
+ movdqu %%T5, [%%GDATA + HashKey_2]
+ pclmulqdq %%T1, %%T5, 0x11 ; %%T1 = a1*b1
+
+	pclmulqdq	%%XMM7, %%T5, 0x00	; %%XMM7 = a0*b0
+ movdqu %%T4, [%%GDATA + HashKey_2_k]
+ pclmulqdq %%T2, %%T4, 0x00 ; %%T2 = (a1+a0)*(b1+b0)
+
+ pxor %%T6, %%T1
+ pxor %%T7, %%XMM7
+ pxor %%XMM1, %%T2 ; results accumulated in %%T6, %%T7, %%XMM1
+
+
+ ; Karatsuba Method
+ movdqa %%T1, %%XMM8
+ pshufd %%T2, %%XMM8, 01001110b
+ pxor %%T2, %%XMM8
+ movdqu %%T5, [%%GDATA + HashKey]
+ pclmulqdq %%T1, %%T5, 0x11 ; %%T1 = a1*b1
+
+	pclmulqdq	%%XMM8, %%T5, 0x00	; %%XMM8 = a0*b0
+ movdqu %%T4, [%%GDATA + HashKey_k]
+ pclmulqdq %%T2, %%T4, 0x00 ; %%T2 = (a1+a0)*(b1+b0)
+
+ pxor %%T6, %%T1
+ pxor %%T7, %%XMM8
+ pxor %%T2, %%XMM1
+ pxor %%T2, %%T6
+ pxor %%T2, %%T7 ; middle section of the temp results combined as in Karatsuba algorithm
+
+
+ movdqa %%T4, %%T2
+ pslldq %%T4, 8 ; shift-L %%T4 2 DWs
+ psrldq %%T2, 8 ; shift-R %%T2 2 DWs
+ pxor %%T7, %%T4
+ pxor %%T6, %%T2 ; <%%T6:%%T7> holds the result of the accumulated carry-less multiplications
+
+
+ ;first phase of the reduction
+ movdqa %%T2, %%T7
+ movdqa %%T3, %%T7
+ movdqa %%T4, %%T7 ; move %%T7 into %%T2, %%T3, %%T4 in order to perform the three shifts independently
+
+	pslld	%%T2, 31		; packed left shift << 31
+	pslld	%%T3, 30		; packed left shift << 30
+	pslld	%%T4, 25		; packed left shift << 25
+ pxor %%T2, %%T3 ; xor the shifted versions
+ pxor %%T2, %%T4
+
+ movdqa %%T1, %%T2
+ psrldq %%T1, 4 ; shift-R %%T1 1 DW
+
+ pslldq %%T2, 12 ; shift-L %%T2 3 DWs
+ pxor %%T7, %%T2 ; first phase of the reduction complete
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+ ;second phase of the reduction
+	movdqa	%%T2,%%T7		; make 3 copies of %%T7 (in %%T2, %%T3, %%T4) for doing three shift operations
+ movdqa %%T3,%%T7
+ movdqa %%T4,%%T7
+
+	psrld	%%T2,1			; packed right shift >> 1
+	psrld	%%T3,2			; packed right shift >> 2
+	psrld	%%T4,7			; packed right shift >> 7
+ pxor %%T2,%%T3 ; xor the shifted versions
+ pxor %%T2,%%T4
+
+ pxor %%T2, %%T1
+ pxor %%T7, %%T2
+ pxor %%T6, %%T7 ; the result is in %%T6
+
+%endmacro
+
+; Encryption of a single block
+%macro ENCRYPT_SINGLE_BLOCK 3
+%define %%GDATA %1
+%define %%ST %2
+%define %%T1 %3
+ movdqu %%T1, [%%GDATA+16*0]
+ pxor %%ST, %%T1
+%assign i 1
+%rep NROUNDS
+ movdqu %%T1, [%%GDATA+16*i]
+ aesenc %%ST, %%T1
+%assign i (i+1)
+%endrep
+ movdqu %%T1, [%%GDATA+16*i]
+ aesenclast %%ST, %%T1
+%endmacro
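+
+;; A minimal C intrinsics sketch of the same single-block flow, assuming an
+;; already expanded key schedule ks[0..nrounds] (illustrative only):
+;;
+;;     #include <wmmintrin.h>
+;;
+;;     static __m128i aes_encrypt_block(__m128i b, const __m128i *ks, int nrounds)
+;;     {
+;;             b = _mm_xor_si128(b, ks[0]);                 /* initial whitening */
+;;             for (int i = 1; i < nrounds; i++)
+;;                     b = _mm_aesenc_si128(b, ks[i]);      /* middle rounds */
+;;             return _mm_aesenclast_si128(b, ks[nrounds]); /* final round */
+;;     }
+;;
+;; With nrounds = 10/12/14 this corresponds to AES-128/192/256; the macro above
+;; drives the same sequence from this file's NROUNDS definition.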
+
+
+;; Start of Stack Setup
+
+%macro FUNC_SAVE 0
+ ;; Required for Update/GCM_ENC
+ ;the number of pushes must equal STACK_OFFSET
+ push r12
+ push r13
+ push r14
+ push r15
+ mov r14, rsp
+
+ sub rsp, VARIABLE_OFFSET
+ and rsp, ~63
+
+%ifidn __OUTPUT_FORMAT__, win64
+ ; xmm6:xmm15 need to be maintained for Windows
+ movdqu [rsp + LOCAL_STORAGE + 0*16],xmm6
+ movdqu [rsp + LOCAL_STORAGE + 1*16],xmm7
+ movdqu [rsp + LOCAL_STORAGE + 2*16],xmm8
+ movdqu [rsp + LOCAL_STORAGE + 3*16],xmm9
+ movdqu [rsp + LOCAL_STORAGE + 4*16],xmm10
+ movdqu [rsp + LOCAL_STORAGE + 5*16],xmm11
+ movdqu [rsp + LOCAL_STORAGE + 6*16],xmm12
+ movdqu [rsp + LOCAL_STORAGE + 7*16],xmm13
+ movdqu [rsp + LOCAL_STORAGE + 8*16],xmm14
+ movdqu [rsp + LOCAL_STORAGE + 9*16],xmm15
+%endif
+%endmacro
+
+
+%macro FUNC_RESTORE 0
+
+%ifdef SAFE_DATA
+ clear_scratch_gps_asm
+ clear_scratch_xmms_sse_asm
+%endif
+%ifidn __OUTPUT_FORMAT__, win64
+ movdqu xmm15 , [rsp + LOCAL_STORAGE + 9*16]
+ movdqu xmm14 , [rsp + LOCAL_STORAGE + 8*16]
+ movdqu xmm13 , [rsp + LOCAL_STORAGE + 7*16]
+ movdqu xmm12 , [rsp + LOCAL_STORAGE + 6*16]
+ movdqu xmm11 , [rsp + LOCAL_STORAGE + 5*16]
+ movdqu xmm10 , [rsp + LOCAL_STORAGE + 4*16]
+ movdqu xmm9 , [rsp + LOCAL_STORAGE + 3*16]
+ movdqu xmm8 , [rsp + LOCAL_STORAGE + 2*16]
+ movdqu xmm7 , [rsp + LOCAL_STORAGE + 1*16]
+ movdqu xmm6 , [rsp + LOCAL_STORAGE + 0*16]
+%endif
+
+;; Required for Update/GCM_ENC
+ mov rsp, r14
+ pop r15
+ pop r14
+ pop r13
+ pop r12
+%endmacro
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; GCM_INIT initializes a gcm_context_data struct to prepare for encoding/decoding.
+; Input: gcm_key_data * (GDATA_KEY), gcm_context_data *(GDATA_CTX), IV,
+; Additional Authentication data (A_IN), Additional Data length (A_LEN).
+; Output: Updated GDATA_CTX with the hash of A_IN (AadHash) and the other parts of GDATA_CTX initialized.
+; Clobbers rax, r10-r13 and xmm0-xmm6
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+%macro GCM_INIT 5
+%define %%GDATA_KEY %1
+%define %%GDATA_CTX %2
+%define %%IV %3
+%define %%A_IN %4
+%define %%A_LEN %5
+%define %%AAD_HASH xmm0
+
+ CALC_AAD_HASH %%A_IN, %%A_LEN, %%AAD_HASH, %%GDATA_KEY, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, r10, r11, r12, r13, rax
+ pxor xmm2, xmm3
+ mov r10, %%A_LEN
+
+ movdqu [%%GDATA_CTX + AadHash], %%AAD_HASH ; ctx_data.aad hash = aad_hash
+ mov [%%GDATA_CTX + AadLen], r10 ; ctx_data.aad_length = aad_length
+ xor r10, r10
+ mov [%%GDATA_CTX + InLen], r10 ; ctx_data.in_length = 0
+ mov [%%GDATA_CTX + PBlockLen], r10 ; ctx_data.partial_block_length = 0
+ movdqu [%%GDATA_CTX + PBlockEncKey], xmm2 ; ctx_data.partial_block_enc_key = 0
+ mov r10, %%IV
+ movdqa xmm2, [rel ONEf] ; read 12 IV bytes and pad with 0x00000001
+ pinsrq xmm2, [r10], 0
+ pinsrd xmm2, [r10+8], 2
+ movdqu [%%GDATA_CTX + OrigIV], xmm2 ; ctx_data.orig_IV = iv
+
+ pshufb xmm2, [SHUF_MASK]
+
+ movdqu [%%GDATA_CTX + CurCount], xmm2 ; ctx_data.current_counter = iv
+%endmacro
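+
+;; For the 12-byte (96-bit) IVs handled here the pre-counter block is simply
+;;
+;;     J0 = IV || 0x00000001
+;;
+;; which is what the ONEf load plus the two pinsr instructions build: the IV
+;; fills bytes 0-11 and the constant supplies the trailing 32-bit counter of 1.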
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; GCM_ENC_DEC Encodes/Decodes given data. Assumes that the passed gcm_context_data
+; struct has been initialized by GCM_INIT.
+; Requires the input data to be at least 1 byte long because of READ_SMALL_DATA_INPUT.
+; Input: gcm_key_data * (GDATA_KEY), gcm_context_data (GDATA_CTX), input text (PLAIN_CYPH_IN),
+; input text length (PLAIN_CYPH_LEN) and whether encoding or decoding (ENC_DEC)
+; Output: A cypher of the given plain text (CYPH_PLAIN_OUT), and updated GDATA_CTX
+; Clobbers rax, r10-r15, and xmm0-xmm15
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+%macro GCM_ENC_DEC 6
+%define %%GDATA_KEY %1
+%define %%GDATA_CTX %2
+%define %%CYPH_PLAIN_OUT %3
+%define %%PLAIN_CYPH_IN %4
+%define %%PLAIN_CYPH_LEN %5
+%define %%ENC_DEC %6
+%define %%DATA_OFFSET r11
+
+; Macro flow:
+; calculate the number of 16byte blocks in the message
+; process (number of 16byte blocks) mod 8 '%%_initial_num_blocks_is_# .. %%_initial_blocks_encrypted'
+; process 8 16 byte blocks at a time until all are done '%%_encrypt_by_8_new .. %%_eight_cipher_left'
+; if there is a block of less than 16 bytes, process it '%%_zero_cipher_left .. %%_multiple_of_16_bytes'
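+; For example, a hypothetical 300 byte message holds 18 complete 16 byte blocks
+; plus a 12 byte tail: 18 mod 8 = 2 blocks go through the initial-blocks path,
+; the remaining 16 through the 8-blocks-at-a-time loop, and the 12 byte tail
+; through the final partial-block path.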
+
+ cmp %%PLAIN_CYPH_LEN, 0
+ je %%_multiple_of_16_bytes
+
+ xor %%DATA_OFFSET, %%DATA_OFFSET
+%ifidn __OUTPUT_FORMAT__, win64
+ mov r12, %%PLAIN_CYPH_LEN
+ add [%%GDATA_CTX + InLen], r12 ;Update length of data processed
+%else
+ add [%%GDATA_CTX + InLen], %%PLAIN_CYPH_LEN ;Update length of data processed
+%endif
+ movdqu xmm13, [%%GDATA_KEY + HashKey] ; xmm13 = HashKey
+ movdqu xmm8, [%%GDATA_CTX + AadHash]
+
+
+ PARTIAL_BLOCK %%GDATA_KEY, %%GDATA_CTX, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, %%PLAIN_CYPH_LEN, %%DATA_OFFSET, xmm8, %%ENC_DEC
+
+ mov r13, %%PLAIN_CYPH_LEN ; save the number of bytes of plaintext/ciphertext
+ sub r13, %%DATA_OFFSET
+ mov r10, r13 ;save the amount of data left to process in r10
+ and r13, -16 ; r13 = r13 - (r13 mod 16)
+
+ mov r12, r13
+ shr r12, 4
+ and r12, 7
+ jz %%_initial_num_blocks_is_0
+
+ cmp r12, 7
+ je %%_initial_num_blocks_is_7
+ cmp r12, 6
+ je %%_initial_num_blocks_is_6
+ cmp r12, 5
+ je %%_initial_num_blocks_is_5
+ cmp r12, 4
+ je %%_initial_num_blocks_is_4
+ cmp r12, 3
+ je %%_initial_num_blocks_is_3
+ cmp r12, 2
+ je %%_initial_num_blocks_is_2
+
+ jmp %%_initial_num_blocks_is_1
+
+%%_initial_num_blocks_is_7:
+ INITIAL_BLOCKS %%GDATA_KEY, %%GDATA_CTX, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, r13, %%DATA_OFFSET, 7, xmm12, xmm13, xmm14, xmm15, xmm11, xmm9, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm10, xmm0, %%ENC_DEC
+ sub r13, 16*7
+ jmp %%_initial_blocks_encrypted
+
+%%_initial_num_blocks_is_6:
+ INITIAL_BLOCKS %%GDATA_KEY, %%GDATA_CTX, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, r13, %%DATA_OFFSET, 6, xmm12, xmm13, xmm14, xmm15, xmm11, xmm9, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm10, xmm0, %%ENC_DEC
+ sub r13, 16*6
+ jmp %%_initial_blocks_encrypted
+
+%%_initial_num_blocks_is_5:
+ INITIAL_BLOCKS %%GDATA_KEY, %%GDATA_CTX, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, r13, %%DATA_OFFSET, 5, xmm12, xmm13, xmm14, xmm15, xmm11, xmm9, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm10, xmm0, %%ENC_DEC
+ sub r13, 16*5
+ jmp %%_initial_blocks_encrypted
+
+%%_initial_num_blocks_is_4:
+ INITIAL_BLOCKS %%GDATA_KEY, %%GDATA_CTX, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, r13, %%DATA_OFFSET, 4, xmm12, xmm13, xmm14, xmm15, xmm11, xmm9, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm10, xmm0, %%ENC_DEC
+ sub r13, 16*4
+ jmp %%_initial_blocks_encrypted
+
+
+%%_initial_num_blocks_is_3:
+ INITIAL_BLOCKS %%GDATA_KEY, %%GDATA_CTX, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, r13, %%DATA_OFFSET, 3, xmm12, xmm13, xmm14, xmm15, xmm11, xmm9, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm10, xmm0, %%ENC_DEC
+ sub r13, 16*3
+ jmp %%_initial_blocks_encrypted
+%%_initial_num_blocks_is_2:
+ INITIAL_BLOCKS %%GDATA_KEY, %%GDATA_CTX, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, r13, %%DATA_OFFSET, 2, xmm12, xmm13, xmm14, xmm15, xmm11, xmm9, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm10, xmm0, %%ENC_DEC
+ sub r13, 16*2
+ jmp %%_initial_blocks_encrypted
+
+%%_initial_num_blocks_is_1:
+ INITIAL_BLOCKS %%GDATA_KEY, %%GDATA_CTX, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, r13, %%DATA_OFFSET, 1, xmm12, xmm13, xmm14, xmm15, xmm11, xmm9, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm10, xmm0, %%ENC_DEC
+ sub r13, 16
+ jmp %%_initial_blocks_encrypted
+
+%%_initial_num_blocks_is_0:
+ INITIAL_BLOCKS %%GDATA_KEY, %%GDATA_CTX, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, r13, %%DATA_OFFSET, 0, xmm12, xmm13, xmm14, xmm15, xmm11, xmm9, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm10, xmm0, %%ENC_DEC
+
+
+%%_initial_blocks_encrypted:
+ cmp r13, 0
+ je %%_zero_cipher_left
+
+ sub r13, 128
+ je %%_eight_cipher_left
+
+
+
+
+ movd r15d, xmm9
+ and r15d, 255
+ pshufb xmm9, [SHUF_MASK]
+
+
+%%_encrypt_by_8_new:
+ cmp r15d, 255-8
+ jg %%_encrypt_by_8
+
+
+
+ add r15b, 8
+ GHASH_8_ENCRYPT_8_PARALLEL %%GDATA_KEY, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, %%DATA_OFFSET, xmm0, xmm10, xmm11, xmm12, xmm13, xmm14, xmm9, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm15, out_order, %%ENC_DEC
+ add %%DATA_OFFSET, 128
+ sub r13, 128
+ jne %%_encrypt_by_8_new
+
+ pshufb xmm9, [SHUF_MASK]
+ jmp %%_eight_cipher_left
+
+%%_encrypt_by_8:
+ pshufb xmm9, [SHUF_MASK]
+ add r15b, 8
+ GHASH_8_ENCRYPT_8_PARALLEL %%GDATA_KEY, %%CYPH_PLAIN_OUT, %%PLAIN_CYPH_IN, %%DATA_OFFSET, xmm0, xmm10, xmm11, xmm12, xmm13, xmm14, xmm9, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm15, in_order, %%ENC_DEC
+ pshufb xmm9, [SHUF_MASK]
+ add %%DATA_OFFSET, 128
+ sub r13, 128
+ jne %%_encrypt_by_8_new
+
+ pshufb xmm9, [SHUF_MASK]
+
+
+
+
+%%_eight_cipher_left:
+ GHASH_LAST_8 %%GDATA_KEY, xmm0, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8
+
+
+%%_zero_cipher_left:
+ movdqu [%%GDATA_CTX + AadHash], xmm14
+ movdqu [%%GDATA_CTX + CurCount], xmm9
+
+ mov r13, r10
+ and r13, 15 ; r13 = (%%PLAIN_CYPH_LEN mod 16)
+
+ je %%_multiple_of_16_bytes
+
+	mov	[%%GDATA_CTX + PBlockLen], r13	; my_ctx.data.partial_block_length = r13
+	; handle the last <16 Byte block separately
+
+ paddd xmm9, [ONE] ; INCR CNT to get Yn
+ movdqu [%%GDATA_CTX + CurCount], xmm9 ; my_ctx.data.current_counter = xmm9
+ pshufb xmm9, [SHUF_MASK]
+ ENCRYPT_SINGLE_BLOCK %%GDATA_KEY, xmm9, xmm2 ; E(K, Yn)
+ movdqu [%%GDATA_CTX + PBlockEncKey], xmm9 ; my_ctx_data.partial_block_enc_key = xmm9
+
+ cmp %%PLAIN_CYPH_LEN, 16
+ jge %%_large_enough_update
+
+ lea r10, [%%PLAIN_CYPH_IN + %%DATA_OFFSET]
+ READ_SMALL_DATA_INPUT xmm1, r10, r13, r12, r15, rax
+ lea r12, [SHIFT_MASK + 16]
+ sub r12, r13
+ jmp %%_data_read
+
+%%_large_enough_update:
+ sub %%DATA_OFFSET, 16
+ add %%DATA_OFFSET, r13
+
+ movdqu xmm1, [%%PLAIN_CYPH_IN+%%DATA_OFFSET] ; receive the last <16 Byte block
+
+ sub %%DATA_OFFSET, r13
+ add %%DATA_OFFSET, 16
+
+ lea r12, [SHIFT_MASK + 16]
+ sub r12, r13 ; adjust the shuffle mask pointer to be able to shift 16-r13 bytes (r13 is the number of bytes in plaintext mod 16)
+ movdqu xmm2, [r12] ; get the appropriate shuffle mask
+ pshufb xmm1, xmm2 ; shift right 16-r13 bytes
+%%_data_read:
+ %ifidn %%ENC_DEC, DEC
+ movdqa xmm2, xmm1
+ pxor xmm9, xmm1 ; Plaintext XOR E(K, Yn)
+ movdqu xmm1, [r12 + ALL_F - SHIFT_MASK] ; get the appropriate mask to mask out top 16-r13 bytes of xmm9
+ pand xmm9, xmm1 ; mask out top 16-r13 bytes of xmm9
+ pand xmm2, xmm1
+ pshufb xmm2, [SHUF_MASK]
+ pxor xmm14, xmm2
+ movdqu [%%GDATA_CTX + AadHash], xmm14
+
+ %else
+ pxor xmm9, xmm1 ; Plaintext XOR E(K, Yn)
+ movdqu xmm1, [r12 + ALL_F - SHIFT_MASK] ; get the appropriate mask to mask out top 16-r13 bytes of xmm9
+ pand xmm9, xmm1 ; mask out top 16-r13 bytes of xmm9
+ pshufb xmm9, [SHUF_MASK]
+ pxor xmm14, xmm9
+ movdqu [%%GDATA_CTX + AadHash], xmm14
+
+ pshufb xmm9, [SHUF_MASK] ; shuffle xmm9 back to output as ciphertext
+ %endif
+
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ; output r13 Bytes
+ movq rax, xmm9
+ cmp r13, 8
+ jle %%_less_than_8_bytes_left
+
+ mov [%%CYPH_PLAIN_OUT + %%DATA_OFFSET], rax
+ add %%DATA_OFFSET, 8
+ psrldq xmm9, 8
+ movq rax, xmm9
+ sub r13, 8
+
+%%_less_than_8_bytes_left:
+ mov BYTE [%%CYPH_PLAIN_OUT + %%DATA_OFFSET], al
+ add %%DATA_OFFSET, 1
+ shr rax, 8
+ sub r13, 1
+ jne %%_less_than_8_bytes_left
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+%%_multiple_of_16_bytes:
+
+%endmacro
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; GCM_COMPLETE Finishes Encryption/Decryption of last partial block after GCM_UPDATE finishes.
+; Input: A gcm_key_data * (GDATA_KEY), gcm_context_data * (GDATA_CTX) and
+; whether encoding or decoding (ENC_DEC).
+; Output: Authentication Tag (AUTH_TAG) of the given Authentication Tag length (AUTH_TAG_LEN)
+; Clobbers rax, r10-r12, and xmm0, xmm1, xmm5, xmm6, xmm9, xmm11, xmm14, xmm15
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+%macro GCM_COMPLETE 5
+%define %%GDATA_KEY %1
+%define %%GDATA_CTX %2
+%define %%AUTH_TAG %3
+%define %%AUTH_TAG_LEN %4
+%define %%ENC_DEC %5
+%define %%PLAIN_CYPH_LEN rax
+
+	mov	r12, [%%GDATA_CTX + PBlockLen]	; r12 = PBlockLen (number of bytes in partial block)
+ movdqu xmm14, [%%GDATA_CTX + AadHash]
+ movdqu xmm13, [%%GDATA_KEY + HashKey]
+
+ cmp r12, 0
+
+ je %%_partial_done
+
+ GHASH_MUL xmm14, xmm13, xmm0, xmm10, xmm11, xmm5, xmm6 ;GHASH computation for the last <16 Byte block
+ movdqu [%%GDATA_CTX + AadHash], xmm14
+
+%%_partial_done:
+
+ mov r12, [%%GDATA_CTX + AadLen] ; r12 = aadLen (number of bytes)
+ mov %%PLAIN_CYPH_LEN, [%%GDATA_CTX + InLen]
+
+ shl r12, 3 ; convert into number of bits
+ movd xmm15, r12d ; len(A) in xmm15
+
+	shl	%%PLAIN_CYPH_LEN, 3		; len(C) in bits (*8)
+ movq xmm1, %%PLAIN_CYPH_LEN
+ pslldq xmm15, 8 ; xmm15 = len(A)|| 0x0000000000000000
+ pxor xmm15, xmm1 ; xmm15 = len(A)||len(C)
+
+ pxor xmm14, xmm15
+ GHASH_MUL xmm14, xmm13, xmm0, xmm10, xmm11, xmm5, xmm6 ; final GHASH computation
+ pshufb xmm14, [SHUF_MASK] ; perform a 16Byte swap
+
+ movdqu xmm9, [%%GDATA_CTX + OrigIV] ; xmm9 = Y0
+
+ ENCRYPT_SINGLE_BLOCK %%GDATA_KEY, xmm9, xmm2 ; E(K, Y0)
+
+ pxor xmm9, xmm14
+
+
+
+%%_return_T:
+ mov r10, %%AUTH_TAG ; r10 = authTag
+ mov r11, %%AUTH_TAG_LEN ; r11 = auth_tag_len
+
+ cmp r11, 16
+ je %%_T_16
+
+ cmp r11, 12
+ je %%_T_12
+
+ cmp r11, 8
+ je %%_T_8
+
+ simd_store_sse r10, xmm9, r11, r12, rax
+ jmp %%_return_T_done
+%%_T_8:
+ movq rax, xmm9
+ mov [r10], rax
+ jmp %%_return_T_done
+%%_T_12:
+ movq rax, xmm9
+ mov [r10], rax
+ psrldq xmm9, 8
+ movd eax, xmm9
+ mov [r10 + 8], eax
+ jmp %%_return_T_done
+%%_T_16:
+ movdqu [r10], xmm9
+
+%%_return_T_done:
+
+%ifdef SAFE_DATA
+ ;; Clear sensitive data from context structure
+ pxor xmm0, xmm0
+ movdqu [%%GDATA_CTX + AadHash], xmm0
+ movdqu [%%GDATA_CTX + PBlockEncKey], xmm0
+%endif
+
+%endmacro ;GCM_COMPLETE
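+
+;; The tag computed above follows the GCM definition: the running GHASH value is
+;; extended with the length block and masked with the encrypted first counter block,
+;;
+;;     S = GHASH(H, A, C) finished with the block len(A)_64 || len(C)_64 (bit lengths)
+;;     T = MSB_tag_len( E(K, J0) xor S )
+;;
+;; where J0 is the OrigIV counter block saved by GCM_INIT.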
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;void aes_gcm_precomp_128_sse / aes_gcm_precomp_192_sse / aes_gcm_precomp_256_sse
+; (struct gcm_key_data *key_data);
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+MKGLOBAL(FN_NAME(precomp,_),function,)
+FN_NAME(precomp,_):
+
+%ifdef SAFE_PARAM
+ ;; Check key_data != NULL
+ cmp arg1, 0
+ jz exit_precomp
+%endif
+
+ push r12
+ push r13
+ push r14
+ push r15
+
+ mov r14, rsp
+
+
+
+ sub rsp, VARIABLE_OFFSET
+ and rsp, ~63 ; align rsp to 64 bytes
+
+%ifidn __OUTPUT_FORMAT__, win64
+ ; only xmm6 needs to be maintained
+ movdqu [rsp + LOCAL_STORAGE + 0*16],xmm6
+%endif
+
+ pxor xmm6, xmm6
+ ENCRYPT_SINGLE_BLOCK arg1, xmm6, xmm2 ; xmm6 = HashKey
+
+ pshufb xmm6, [SHUF_MASK]
+ ;;;;;;;;;;;;;;; PRECOMPUTATION of HashKey<<1 mod poly from the HashKey;;;;;;;;;;;;;;;
+ movdqa xmm2, xmm6
+ psllq xmm6, 1
+ psrlq xmm2, 63
+ movdqa xmm1, xmm2
+ pslldq xmm2, 8
+ psrldq xmm1, 8
+ por xmm6, xmm2
+ ;reduction
+ pshufd xmm2, xmm1, 00100100b
+ pcmpeqd xmm2, [TWOONE]
+ pand xmm2, [POLY]
+ pxor xmm6, xmm2 ; xmm6 holds the HashKey<<1 mod poly
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ movdqu [arg1 + HashKey], xmm6 ; store HashKey<<1 mod poly
+
+
+ PRECOMPUTE arg1, xmm6, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5
+
+%ifdef SAFE_DATA
+ clear_scratch_gps_asm
+ clear_scratch_xmms_sse_asm
+%endif
+%ifidn __OUTPUT_FORMAT__, win64
+ movdqu xmm6, [rsp + LOCAL_STORAGE + 0*16]
+%endif
+ mov rsp, r14
+
+ pop r15
+ pop r14
+ pop r13
+ pop r12
+
+exit_precomp:
+
+ ret
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;void aes_gcm_init_128_sse / aes_gcm_init_192_sse / aes_gcm_init_256_sse (
+; const struct gcm_key_data *key_data,
+; struct gcm_context_data *context_data,
+; u8 *iv,
+; const u8 *aad,
+; u64 aad_len);
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+MKGLOBAL(FN_NAME(init,_),function,)
+FN_NAME(init,_):
+ push r12
+ push r13
+%ifidn __OUTPUT_FORMAT__, win64
+ push r14
+ push r15
+ mov r14, rsp
+ ; xmm6:xmm15 need to be maintained for Windows
+ sub rsp, 1*16
+ movdqu [rsp + 0*16], xmm6
+%endif
+
+%ifdef SAFE_PARAM
+ ;; Check key_data != NULL
+ cmp arg1, 0
+ jz exit_init
+
+ ;; Check context_data != NULL
+ cmp arg2, 0
+ jz exit_init
+
+ ;; Check IV != NULL
+ cmp arg3, 0
+ jz exit_init
+
+ ;; Check if aad_len == 0
+ cmp arg5, 0
+ jz skip_aad_check_init
+
+ ;; Check aad != NULL (aad_len != 0)
+ cmp arg4, 0
+ jz exit_init
+
+skip_aad_check_init:
+%endif
+ GCM_INIT arg1, arg2, arg3, arg4, arg5
+
+%ifdef SAFE_DATA
+ clear_scratch_gps_asm
+ clear_scratch_xmms_sse_asm
+%endif
+exit_init:
+
+%ifidn __OUTPUT_FORMAT__, win64
+ movdqu xmm6 , [rsp + 0*16]
+ mov rsp, r14
+ pop r15
+ pop r14
+%endif
+ pop r13
+ pop r12
+ ret
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;void aes_gcm_enc_128_update_sse / aes_gcm_enc_192_update_sse / aes_gcm_enc_256_update_sse (
+; const struct gcm_key_data *key_data,
+; struct gcm_context_data *context_data,
+; u8 *out,
+; const u8 *in,
+; u64 plaintext_len);
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+MKGLOBAL(FN_NAME(enc,_update_),function,)
+FN_NAME(enc,_update_):
+
+ FUNC_SAVE
+
+%ifdef SAFE_PARAM
+ ;; Check key_data != NULL
+ cmp arg1, 0
+ jz exit_update_enc
+
+ ;; Check context_data != NULL
+ cmp arg2, 0
+ jz exit_update_enc
+
+ ;; Check if plaintext_len == 0
+ cmp arg5, 0
+ jz skip_in_out_check_update_enc
+
+ ;; Check out != NULL (plaintext_len != 0)
+ cmp arg3, 0
+ jz exit_update_enc
+
+ ;; Check in != NULL (plaintext_len != 0)
+ cmp arg4, 0
+ jz exit_update_enc
+
+skip_in_out_check_update_enc:
+%endif
+ GCM_ENC_DEC arg1, arg2, arg3, arg4, arg5, ENC
+
+exit_update_enc:
+ FUNC_RESTORE
+
+ ret
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;void aes_gcm_dec_128_update_sse / aes_gcm_dec_192_update_sse / aes_gcm_dec_256_update_sse (
+; const struct gcm_key_data *key_data,
+; struct gcm_context_data *context_data,
+; u8 *out,
+; const u8 *in,
+; u64 plaintext_len);
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+MKGLOBAL(FN_NAME(dec,_update_),function,)
+FN_NAME(dec,_update_):
+
+ FUNC_SAVE
+
+%ifdef SAFE_PARAM
+ ;; Check key_data != NULL
+ cmp arg1, 0
+ jz exit_update_dec
+
+ ;; Check context_data != NULL
+ cmp arg2, 0
+ jz exit_update_dec
+
+ ;; Check if plaintext_len == 0
+ cmp arg5, 0
+ jz skip_in_out_check_update_dec
+
+ ;; Check out != NULL (plaintext_len != 0)
+ cmp arg3, 0
+ jz exit_update_dec
+
+ ;; Check in != NULL (plaintext_len != 0)
+ cmp arg4, 0
+ jz exit_update_dec
+
+skip_in_out_check_update_dec:
+%endif
+
+ GCM_ENC_DEC arg1, arg2, arg3, arg4, arg5, DEC
+
+exit_update_dec:
+ FUNC_RESTORE
+
+ ret
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;void aes_gcm_enc_128_finalize_sse / aes_gcm_enc_192_finalize_sse / aes_gcm_enc_256_finalize_sse (
+; const struct gcm_key_data *key_data,
+; struct gcm_context_data *context_data,
+; u8 *auth_tag,
+; u64 auth_tag_len);
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+MKGLOBAL(FN_NAME(enc,_finalize_),function,)
+FN_NAME(enc,_finalize_):
+
+%ifdef SAFE_PARAM
+ ;; Check key_data != NULL
+ cmp arg1, 0
+ jz exit_enc_fin
+
+ ;; Check context_data != NULL
+ cmp arg2, 0
+ jz exit_enc_fin
+
+ ;; Check auth_tag != NULL
+ cmp arg3, 0
+ jz exit_enc_fin
+
+ ;; Check auth_tag_len == 0 or > 16
+ cmp arg4, 0
+ jz exit_enc_fin
+
+ cmp arg4, 16
+ ja exit_enc_fin
+%endif
+ push r12
+
+%ifidn __OUTPUT_FORMAT__, win64
+ ; xmm6:xmm15 need to be maintained for Windows
+ sub rsp, 5*16
+ movdqu [rsp + 0*16],xmm6
+ movdqu [rsp + 1*16],xmm9
+ movdqu [rsp + 2*16],xmm11
+ movdqu [rsp + 3*16],xmm14
+ movdqu [rsp + 4*16],xmm15
+%endif
+
+ GCM_COMPLETE arg1, arg2, arg3, arg4, ENC
+
+%ifdef SAFE_DATA
+ clear_scratch_gps_asm
+ clear_scratch_xmms_sse_asm
+%endif
+%ifidn __OUTPUT_FORMAT__, win64
+ movdqu xmm15 , [rsp + 4*16]
+ movdqu xmm14 , [rsp+ 3*16]
+ movdqu xmm11 , [rsp + 2*16]
+ movdqu xmm9 , [rsp + 1*16]
+ movdqu xmm6 , [rsp + 0*16]
+ add rsp, 5*16
+%endif
+
+ pop r12
+
+exit_enc_fin:
+ ret
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;void aes_gcm_dec_128_finalize_sse / aes_gcm_dec_192_finalize_sse / aes_gcm_dec_256_finalize_sse (
+; const struct gcm_key_data *key_data,
+; struct gcm_context_data *context_data,
+; u8 *auth_tag,
+; u64 auth_tag_len);
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+MKGLOBAL(FN_NAME(dec,_finalize_),function,)
+FN_NAME(dec,_finalize_):
+
+%ifdef SAFE_PARAM
+ ;; Check key_data != NULL
+ cmp arg1, 0
+ jz exit_dec_fin
+
+ ;; Check context_data != NULL
+ cmp arg2, 0
+ jz exit_dec_fin
+
+ ;; Check auth_tag != NULL
+ cmp arg3, 0
+ jz exit_dec_fin
+
+ ;; Check auth_tag_len == 0 or > 16
+ cmp arg4, 0
+ jz exit_dec_fin
+
+ cmp arg4, 16
+ ja exit_dec_fin
+%endif
+
+ push r12
+
+%ifidn __OUTPUT_FORMAT__, win64
+ ; xmm6:xmm15 need to be maintained for Windows
+ sub rsp, 5*16
+ movdqu [rsp + 0*16],xmm6
+ movdqu [rsp + 1*16],xmm9
+ movdqu [rsp + 2*16],xmm11
+ movdqu [rsp + 3*16],xmm14
+ movdqu [rsp + 4*16],xmm15
+%endif
+ GCM_COMPLETE arg1, arg2, arg3, arg4, DEC
+
+%ifdef SAFE_DATA
+ clear_scratch_gps_asm
+ clear_scratch_xmms_sse_asm
+%endif
+%ifidn __OUTPUT_FORMAT__, win64
+ movdqu xmm15 , [rsp + 4*16]
+ movdqu xmm14 , [rsp+ 3*16]
+ movdqu xmm11 , [rsp + 2*16]
+ movdqu xmm9 , [rsp + 1*16]
+ movdqu xmm6 , [rsp + 0*16]
+ add rsp, 5*16
+%endif
+
+ pop r12
+
+exit_dec_fin:
+ ret
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;void aes_gcm_enc_128_sse / aes_gcm_enc_192_sse / aes_gcm_enc_256_sse (
+; const struct gcm_key_data *key_data,
+; struct gcm_context_data *context_data,
+; u8 *out,
+; const u8 *in,
+; u64 plaintext_len,
+; u8 *iv,
+; const u8 *aad,
+; u64 aad_len,
+; u8 *auth_tag,
+; u64 auth_tag_len);
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+MKGLOBAL(FN_NAME(enc,_),function,)
+FN_NAME(enc,_):
+
+ FUNC_SAVE
+
+%ifdef SAFE_PARAM
+ ;; Check key_data != NULL
+ cmp arg1, 0
+ jz exit_enc
+
+ ;; Check context_data != NULL
+ cmp arg2, 0
+ jz exit_enc
+
+ ;; Check IV != NULL
+ cmp arg6, 0
+ jz exit_enc
+
+ ;; Check auth_tag != NULL
+ cmp arg9, 0
+ jz exit_enc
+
+ ;; Check auth_tag_len == 0 or > 16
+ cmp arg10, 0
+ jz exit_enc
+
+ cmp arg10, 16
+ ja exit_enc
+
+ ;; Check if plaintext_len == 0
+ cmp arg5, 0
+ jz skip_in_out_check_enc
+
+ ;; Check out != NULL (plaintext_len != 0)
+ cmp arg3, 0
+ jz exit_enc
+
+ ;; Check in != NULL (plaintext_len != 0)
+ cmp arg4, 0
+ jz exit_enc
+
+skip_in_out_check_enc:
+ ;; Check if aad_len == 0
+ cmp arg8, 0
+ jz skip_aad_check_enc
+
+ ;; Check aad != NULL (aad_len != 0)
+ cmp arg7, 0
+ jz exit_enc
+
+skip_aad_check_enc:
+%endif
+ GCM_INIT arg1, arg2, arg6, arg7, arg8
+
+ GCM_ENC_DEC arg1, arg2, arg3, arg4, arg5, ENC
+
+ GCM_COMPLETE arg1, arg2, arg9, arg10, ENC
+
+exit_enc:
+ FUNC_RESTORE
+
+ ret
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;void aes_gcm_dec_128_sse / aes_gcm_dec_192_sse / aes_gcm_dec_256_sse
+; const struct gcm_key_data *key_data,
+; struct gcm_context_data *context_data,
+; u8 *out,
+; const u8 *in,
+; u64 plaintext_len,
+; u8 *iv,
+; const u8 *aad,
+; u64 aad_len,
+; u8 *auth_tag,
+; u64 auth_tag_len);
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+MKGLOBAL(FN_NAME(dec,_),function,)
+FN_NAME(dec,_):
+
+ FUNC_SAVE
+
+%ifdef SAFE_PARAM
+ ;; Check key_data != NULL
+ cmp arg1, 0
+ jz exit_dec
+
+ ;; Check context_data != NULL
+ cmp arg2, 0
+ jz exit_dec
+
+ ;; Check IV != NULL
+ cmp arg6, 0
+ jz exit_dec
+
+ ;; Check auth_tag != NULL
+ cmp arg9, 0
+ jz exit_dec
+
+ ;; Check auth_tag_len == 0 or > 16
+ cmp arg10, 0
+ jz exit_dec
+
+ cmp arg10, 16
+ ja exit_dec
+
+ ;; Check if plaintext_len == 0
+ cmp arg5, 0
+ jz skip_in_out_check_dec
+
+ ;; Check out != NULL (plaintext_len != 0)
+ cmp arg3, 0
+ jz exit_dec
+
+ ;; Check in != NULL (plaintext_len != 0)
+ cmp arg4, 0
+ jz exit_dec
+
+skip_in_out_check_dec:
+ ;; Check if aad_len == 0
+ cmp arg8, 0
+ jz skip_aad_check_dec
+
+ ;; Check aad != NULL (aad_len != 0)
+ cmp arg7, 0
+ jz exit_dec
+
+skip_aad_check_dec:
+%endif
+
+ GCM_INIT arg1, arg2, arg6, arg7, arg8
+
+ GCM_ENC_DEC arg1, arg2, arg3, arg4, arg5, DEC
+
+ GCM_COMPLETE arg1, arg2, arg9, arg10, DEC
+
+exit_dec:
+ FUNC_RESTORE
+
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/sse/kasumi_sse.c b/src/spdk/intel-ipsec-mb/sse/kasumi_sse.c
new file mode 100644
index 000000000..b1ef71a8a
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/sse/kasumi_sse.c
@@ -0,0 +1,385 @@
+/*******************************************************************************
+ Copyright (c) 2009-2019, Intel Corporation
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*******************************************************************************/
+
+#include <limits.h>
+
+#define CLEAR_SCRATCH_SIMD_REGS clear_scratch_xmms_sse
+
+#include "include/kasumi_internal.h"
+#include "include/save_xmms.h"
+#include "include/clear_regs_mem.h"
+
+#define SAVE_XMMS save_xmms
+#define RESTORE_XMMS restore_xmms
+
+void
+kasumi_f8_1_buffer_sse(const kasumi_key_sched_t *pCtx, const uint64_t IV,
+ const void *pBufferIn, void *pBufferOut,
+ const uint32_t cipherLengthInBytes)
+{
+#ifndef LINUX
+ DECLARE_ALIGNED(uint128_t xmm_save[10], 16);
+
+ SAVE_XMMS(xmm_save);
+#endif
+#ifdef SAFE_PARAM
+ /* Check for NULL pointers */
+ if (pCtx == NULL || pBufferIn == NULL || pBufferOut == NULL)
+ return;
+
+ /* Check input data is in range of supported length */
+ if (cipherLengthInBytes == 0 ||
+ cipherLengthInBytes > (KASUMI_MAX_LEN / CHAR_BIT))
+ return;
+#endif
+ kasumi_f8_1_buffer(pCtx, IV, pBufferIn, pBufferOut,
+ cipherLengthInBytes);
+#ifdef SAFE_DATA
+ /* Clear sensitive data in registers */
+ CLEAR_SCRATCH_GPS();
+ CLEAR_SCRATCH_SIMD_REGS();
+#endif
+#ifndef LINUX
+ RESTORE_XMMS(xmm_save);
+#endif
+}
+
+void
+kasumi_f8_1_buffer_bit_sse(const kasumi_key_sched_t *pCtx,
+ const uint64_t IV,
+ const void *pBufferIn, void *pBufferOut,
+ const uint32_t cipherLengthInBits,
+ const uint32_t offsetInBits)
+{
+#ifndef LINUX
+ DECLARE_ALIGNED(uint128_t xmm_save[10], 16);
+
+ SAVE_XMMS(xmm_save);
+#endif
+#ifdef SAFE_PARAM
+ /* Check for NULL pointers */
+ if (pCtx == NULL || pBufferIn == NULL || pBufferOut == NULL)
+ return;
+
+ /* Check input data is in range of supported length */
+ if (cipherLengthInBits == 0 ||
+ cipherLengthInBits > KASUMI_MAX_LEN)
+ return;
+#endif
+ kasumi_f8_1_buffer_bit(pCtx, IV, pBufferIn, pBufferOut,
+ cipherLengthInBits, offsetInBits);
+#ifdef SAFE_DATA
+ /* Clear sensitive data in registers */
+ CLEAR_SCRATCH_GPS();
+ CLEAR_SCRATCH_SIMD_REGS();
+#endif
+#ifndef LINUX
+ RESTORE_XMMS(xmm_save);
+#endif
+}
+
+void
+kasumi_f8_2_buffer_sse(const kasumi_key_sched_t *pCtx, const uint64_t IV1,
+ const uint64_t IV2, const void *pBufferIn1,
+ void *pBufferOut1, const uint32_t lengthInBytes1,
+ const void *pBufferIn2, void *pBufferOut2,
+ const uint32_t lengthInBytes2)
+{
+#ifndef LINUX
+ DECLARE_ALIGNED(uint128_t xmm_save[10], 16);
+
+ SAVE_XMMS(xmm_save);
+#endif
+#ifdef SAFE_PARAM
+ /* Check for NULL pointers */
+ if (pCtx == NULL)
+ return;
+
+ if (pBufferIn1 == NULL || pBufferOut1 == NULL)
+ return;
+
+ if (pBufferIn2 == NULL || pBufferOut2 == NULL)
+ return;
+
+ /* Check input data is in range of supported length */
+ if (lengthInBytes1 == 0 || lengthInBytes1 > (KASUMI_MAX_LEN / CHAR_BIT))
+ return;
+
+ if (lengthInBytes2 == 0 || lengthInBytes2 > (KASUMI_MAX_LEN / CHAR_BIT))
+ return;
+#endif
+ kasumi_f8_2_buffer(pCtx, IV1, IV2,
+ pBufferIn1, pBufferOut1, lengthInBytes1,
+ pBufferIn2, pBufferOut2, lengthInBytes2);
+#ifdef SAFE_DATA
+ /* Clear sensitive data in registers */
+ CLEAR_SCRATCH_GPS();
+ CLEAR_SCRATCH_SIMD_REGS();
+#endif
+#ifndef LINUX
+ RESTORE_XMMS(xmm_save);
+#endif
+}
+
+void
+kasumi_f8_3_buffer_sse(const kasumi_key_sched_t *pCtx, const uint64_t IV1,
+ const uint64_t IV2, const uint64_t IV3,
+ const void *pBufferIn1, void *pBufferOut1,
+ const void *pBufferIn2, void *pBufferOut2,
+ const void *pBufferIn3, void *pBufferOut3,
+ const uint32_t lengthInBytes)
+{
+#ifndef LINUX
+ DECLARE_ALIGNED(uint128_t xmm_save[10], 16);
+
+ SAVE_XMMS(xmm_save);
+#endif
+#ifdef SAFE_PARAM
+ /* Check for NULL pointers */
+ if (pCtx == NULL)
+ return;
+
+ if (pBufferIn1 == NULL || pBufferOut1 == NULL)
+ return;
+
+ if (pBufferIn2 == NULL || pBufferOut2 == NULL)
+ return;
+
+ if (pBufferIn3 == NULL || pBufferOut3 == NULL)
+ return;
+
+ /* Check input data is in range of supported length */
+ if (lengthInBytes == 0 || lengthInBytes > (KASUMI_MAX_LEN / CHAR_BIT))
+ return;
+#endif
+ kasumi_f8_3_buffer(pCtx, IV1, IV2, IV3,
+ pBufferIn1, pBufferOut1,
+ pBufferIn2, pBufferOut2,
+ pBufferIn3, pBufferOut3, lengthInBytes);
+#ifdef SAFE_DATA
+ /* Clear sensitive data in registers */
+ CLEAR_SCRATCH_GPS();
+ CLEAR_SCRATCH_SIMD_REGS();
+#endif
+#ifndef LINUX
+ RESTORE_XMMS(xmm_save);
+#endif
+}
+
+void
+kasumi_f8_4_buffer_sse(const kasumi_key_sched_t *pCtx,
+ const uint64_t IV1, const uint64_t IV2,
+ const uint64_t IV3, const uint64_t IV4,
+ const void *pBufferIn1, void *pBufferOut1,
+ const void *pBufferIn2, void *pBufferOut2,
+ const void *pBufferIn3, void *pBufferOut3,
+ const void *pBufferIn4, void *pBufferOut4,
+ const uint32_t lengthInBytes)
+{
+#ifndef LINUX
+ DECLARE_ALIGNED(uint128_t xmm_save[10], 16);
+
+ SAVE_XMMS(xmm_save);
+#endif
+#ifdef SAFE_PARAM
+ /* Check for NULL pointers */
+ if (pCtx == NULL)
+ return;
+
+ if (pBufferIn1 == NULL || pBufferOut1 == NULL)
+ return;
+
+ if (pBufferIn2 == NULL || pBufferOut2 == NULL)
+ return;
+
+ if (pBufferIn3 == NULL || pBufferOut3 == NULL)
+ return;
+
+ if (pBufferIn4 == NULL || pBufferOut4 == NULL)
+ return;
+
+ /* Check input data is in range of supported length */
+ if (lengthInBytes == 0 || lengthInBytes > (KASUMI_MAX_LEN / CHAR_BIT))
+ return;
+#endif
+ kasumi_f8_4_buffer(pCtx, IV1, IV2, IV3, IV4,
+ pBufferIn1, pBufferOut1,
+ pBufferIn2, pBufferOut2,
+ pBufferIn3, pBufferOut3,
+ pBufferIn4, pBufferOut4,
+ lengthInBytes);
+#ifdef SAFE_DATA
+ /* Clear sensitive data in registers */
+ CLEAR_SCRATCH_GPS();
+ CLEAR_SCRATCH_SIMD_REGS();
+#endif
+#ifndef LINUX
+ RESTORE_XMMS(xmm_save);
+#endif
+}
+
+void
+kasumi_f8_n_buffer_sse(const kasumi_key_sched_t *pKeySchedule,
+ const uint64_t IV[],
+ const void * const pDataIn[], void *pDataOut[],
+ const uint32_t dataLen[], const uint32_t dataCount)
+{
+#ifndef LINUX
+ DECLARE_ALIGNED(uint128_t xmm_save[10], 16);
+
+ SAVE_XMMS(xmm_save);
+#endif
+ uint32_t numLeft = dataCount;
+ const uint64_t *IVPtr;
+ const void * const *pDataInPtr;
+ void **pDataOutPtr;
+ const uint32_t *dataLenPtr;
+ uint32_t i = 0;
+ uint32_t numBuffs;
+
+#ifdef SAFE_PARAM
+ /* Check for NULL pointers */
+ if (pKeySchedule == NULL || pDataIn == NULL || pDataOut == NULL ||
+ dataLen == NULL || IV == NULL)
+ return;
+
+ for (i = 0; i < dataCount; i++) {
+ /* Check for NULL pointers */
+ if (pDataIn[i] == NULL || pDataOut[i] == NULL)
+ return;
+
+ /* Check input data is in range of supported length */
+ if (dataLen[i] == 0 || dataLen[i] > (KASUMI_MAX_LEN / CHAR_BIT))
+ return;
+ }
+#endif
+
+ i = 0;
+
+ /* KASUMI F8 n buffer function can handle up to 16 buffers */
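+        /*
+         * For illustration: dataCount = 20 results in one call with
+         * 16 buffers followed by a second call with the remaining 4.
+         */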
+ while (numLeft > 0) {
+ IVPtr = &IV[i];
+ pDataInPtr = &pDataIn[i];
+ pDataOutPtr = &pDataOut[i];
+ dataLenPtr = &dataLen[i];
+ numBuffs = (numLeft > 16) ? 16 : numLeft;
+
+ kasumi_f8_n_buffer(pKeySchedule, IVPtr, pDataInPtr, pDataOutPtr,
+ dataLenPtr, numBuffs);
+ i += numBuffs;
+ numLeft -= numBuffs;
+ }
+#ifdef SAFE_DATA
+ /* Clear sensitive data in registers */
+ CLEAR_SCRATCH_GPS();
+ CLEAR_SCRATCH_SIMD_REGS();
+#endif
+#ifndef LINUX
+ RESTORE_XMMS(xmm_save);
+#endif
+}
+
+
+void
+kasumi_f9_1_buffer_sse(const kasumi_key_sched_t *pCtx, const void *pBufferIn,
+ const uint32_t lengthInBytes, void *pDigest)
+{
+#ifndef LINUX
+ DECLARE_ALIGNED(uint128_t xmm_save[10], 16);
+
+ SAVE_XMMS(xmm_save);
+#endif
+#ifdef SAFE_PARAM
+ /* Check for NULL pointers */
+ if (pCtx == NULL || pBufferIn == NULL || pDigest == NULL)
+ return;
+
+ /* Check input data is in range of supported length */
+ if (lengthInBytes == 0 || lengthInBytes > (KASUMI_MAX_LEN / CHAR_BIT))
+ return;
+#endif
+ kasumi_f9_1_buffer(pCtx, pBufferIn, lengthInBytes, pDigest);
+#ifdef SAFE_DATA
+ /* Clear sensitive data in registers */
+ CLEAR_SCRATCH_GPS();
+ CLEAR_SCRATCH_SIMD_REGS();
+#endif
+#ifndef LINUX
+ RESTORE_XMMS(xmm_save);
+#endif
+}
+
+void
+kasumi_f9_1_buffer_user_sse(const kasumi_key_sched_t *pCtx, const uint64_t IV,
+ const void *pBufferIn, const uint32_t lengthInBits,
+ void *pDigest, const uint32_t direction)
+{
+#ifndef LINUX
+ DECLARE_ALIGNED(uint128_t xmm_save[10], 16);
+
+ SAVE_XMMS(xmm_save);
+#endif
+#ifdef SAFE_PARAM
+ /* Check for NULL pointers */
+ if (pCtx == NULL || pBufferIn == NULL || pDigest == NULL)
+ return;
+
+ /* Check input data is in range of supported length */
+ if (lengthInBits == 0 || lengthInBits > KASUMI_MAX_LEN)
+ return;
+#endif
+ kasumi_f9_1_buffer_user(pCtx, IV, pBufferIn, lengthInBits,
+ pDigest, direction);
+#ifdef SAFE_DATA
+ /* Clear sensitive data in registers */
+ CLEAR_SCRATCH_GPS();
+ CLEAR_SCRATCH_SIMD_REGS();
+#endif
+#ifndef LINUX
+ RESTORE_XMMS(xmm_save);
+#endif
+}
+
+int
+kasumi_init_f8_key_sched_sse(const void *const pKey,
+ kasumi_key_sched_t *pCtx)
+{
+ return kasumi_init_f8_key_sched(pKey, pCtx);
+}
+
+int
+kasumi_init_f9_key_sched_sse(const void *const pKey,
+ kasumi_key_sched_t *pCtx)
+{
+ return kasumi_init_f9_key_sched(pKey, pCtx);
+}
+
+size_t
+kasumi_key_sched_size_sse(void)
+{
+ return kasumi_key_sched_size();
+}
diff --git a/src/spdk/intel-ipsec-mb/sse/mb_mgr_aes192_flush_sse.asm b/src/spdk/intel-ipsec-mb/sse/mb_mgr_aes192_flush_sse.asm
new file mode 100644
index 000000000..305c80342
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/sse/mb_mgr_aes192_flush_sse.asm
@@ -0,0 +1,30 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%define AES_CBC_ENC_X4 aes_cbc_enc_192_x4
+%define FLUSH_JOB_AES_ENC flush_job_aes192_enc_sse
+%include "sse/mb_mgr_aes_flush_sse.asm"
diff --git a/src/spdk/intel-ipsec-mb/sse/mb_mgr_aes192_submit_sse.asm b/src/spdk/intel-ipsec-mb/sse/mb_mgr_aes192_submit_sse.asm
new file mode 100644
index 000000000..c9129e758
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/sse/mb_mgr_aes192_submit_sse.asm
@@ -0,0 +1,30 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%define AES_CBC_ENC_X4 aes_cbc_enc_192_x4
+%define SUBMIT_JOB_AES_ENC submit_job_aes192_enc_sse
+%include "sse/mb_mgr_aes_submit_sse.asm"
diff --git a/src/spdk/intel-ipsec-mb/sse/mb_mgr_aes256_flush_sse.asm b/src/spdk/intel-ipsec-mb/sse/mb_mgr_aes256_flush_sse.asm
new file mode 100644
index 000000000..2c8afece9
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/sse/mb_mgr_aes256_flush_sse.asm
@@ -0,0 +1,30 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%define AES_CBC_ENC_X4 aes_cbc_enc_256_x4
+%define FLUSH_JOB_AES_ENC flush_job_aes256_enc_sse
+%include "sse/mb_mgr_aes_flush_sse.asm"
diff --git a/src/spdk/intel-ipsec-mb/sse/mb_mgr_aes256_submit_sse.asm b/src/spdk/intel-ipsec-mb/sse/mb_mgr_aes256_submit_sse.asm
new file mode 100644
index 000000000..55f7767f4
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/sse/mb_mgr_aes256_submit_sse.asm
@@ -0,0 +1,30 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%define AES_CBC_ENC_X4 aes_cbc_enc_256_x4
+%define SUBMIT_JOB_AES_ENC submit_job_aes256_enc_sse
+%include "sse/mb_mgr_aes_submit_sse.asm"
diff --git a/src/spdk/intel-ipsec-mb/sse/mb_mgr_aes_ccm_auth_submit_flush_sse.asm b/src/spdk/intel-ipsec-mb/sse/mb_mgr_aes_ccm_auth_submit_flush_sse.asm
new file mode 100644
index 000000000..7aca39f25
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/sse/mb_mgr_aes_ccm_auth_submit_flush_sse.asm
@@ -0,0 +1,518 @@
+;;
+;; Copyright (c) 2019, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+
+%include "include/os.asm"
+%include "job_aes_hmac.asm"
+%include "mb_mgr_datastruct.asm"
+
+%include "include/reg_sizes.asm"
+%include "include/const.inc"
+%include "include/memcpy.asm"
+
+%ifndef AES128_CBC_MAC
+
+%define AES128_CBC_MAC aes128_cbc_mac_x4
+%define SUBMIT_JOB_AES_CCM_AUTH submit_job_aes_ccm_auth_sse
+%define FLUSH_JOB_AES_CCM_AUTH flush_job_aes_ccm_auth_sse
+
+%endif
+
+extern AES128_CBC_MAC
+
+section .data
+default rel
+
+align 16
+len_masks:
+ dq 0x000000000000FFFF, 0x0000000000000000
+ dq 0x00000000FFFF0000, 0x0000000000000000
+ dq 0x0000FFFF00000000, 0x0000000000000000
+ dq 0xFFFF000000000000, 0x0000000000000000
+counter_mask:
+ dq 0xFFFFFFFFFFFFFF07, 0x0000FFFFFFFFFFFF
+one: dq 1
+two: dq 2
+three: dq 3
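+;; (one/two/three live in memory because cmovne takes no immediate operand)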
+
+section .text
+
+%define APPEND(a,b) a %+ b
+
+%define NROUNDS 9 ; AES-CCM-128 (9 full rounds + aesenclast = 10 rounds of AES-128)
+%ifdef LINUX
+%define arg1 rdi
+%define arg2 rsi
+%else
+%define arg1 rcx
+%define arg2 rdx
+%endif
+
+%define state arg1
+%define job arg2
+%define len2 arg2
+
+%define job_rax rax
+%define tmp4 rax
+%define auth_len_aad rax
+
+%define min_idx rbp
+%define flags rbp
+
+%define lane r8
+
+%define iv_len r9
+%define auth_len r9
+
+%define aad_len r10
+%define init_block_addr r11
+
+%define unused_lanes rbx
+%define r rbx
+
+%define tmp r12
+%define tmp2 r13
+%define tmp3 r14
+
+%define good_lane r15
+%define min_job r15
+
+%define init_block0 xmm0
+%define ccm_lens xmm1
+%define min_len_idx xmm2
+%define xtmp0 xmm3
+%define xtmp1 xmm4
+%define xtmp2 xmm5
+%define xtmp3 xmm6
+
+; STACK_SPACE needs to be an odd multiple of 8
+; This routine and its callee clobber all GPRs
+struc STACK
+_gpr_save: resq 8
+_rsp_save: resq 1
+endstruc
+
+;;; ===========================================================================
+;;; ===========================================================================
+;;; MACROS
+;;; ===========================================================================
+;;; ===========================================================================
+
+%macro ENCRYPT_SINGLE_BLOCK 2
+%define %%GDATA %1
+%define %%XMM0 %2
+
+ pxor %%XMM0, [%%GDATA+16*0]
+%assign i 1
+%rep NROUNDS
+ aesenc %%XMM0, [%%GDATA+16*i]
+%assign i (i+1)
+%endrep
+ aesenclast %%XMM0, [%%GDATA+16*i]
+%endmacro
+
+;;; ===========================================================================
+;;; AES CCM auth job submit & flush
+;;; ===========================================================================
+;;; SUBMIT_FLUSH [in] - SUBMIT, FLUSH job selection
+%macro GENERIC_SUBMIT_FLUSH_JOB_AES_CCM_AUTH_SSE 1
+%define %%SUBMIT_FLUSH %1
+
+ mov rax, rsp
+ sub rsp, STACK_size
+ and rsp, -16
+
+ mov [rsp + _gpr_save + 8*0], rbx
+ mov [rsp + _gpr_save + 8*1], rbp
+ mov [rsp + _gpr_save + 8*2], r12
+ mov [rsp + _gpr_save + 8*3], r13
+ mov [rsp + _gpr_save + 8*4], r14
+ mov [rsp + _gpr_save + 8*5], r15
+%ifndef LINUX
+ mov [rsp + _gpr_save + 8*6], rsi
+ mov [rsp + _gpr_save + 8*7], rdi
+%endif
+ mov [rsp + _rsp_save], rax ; original SP
+
+ ;; Find free lane
+ mov unused_lanes, [state + _aes_ccm_unused_lanes]
+
+%ifidn %%SUBMIT_FLUSH, SUBMIT
+
+ mov lane, unused_lanes
+ and lane, 15
+ shr unused_lanes, 4
+ mov [state + _aes_ccm_unused_lanes], unused_lanes
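+        ;; (unused_lanes is a stack of 4-bit lane ids, e.g. 0xF3210 when all
+        ;; four lanes are free; the low nibble just popped is the new lane)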
+
+ ;; Copy job info into lane
+ mov [state + _aes_ccm_job_in_lane + lane*8], job
+ ;; Copy keys into lane args
+ mov tmp, [job + _aes_enc_key_expanded]
+ mov [state + _aes_ccm_args_keys + lane*8], tmp
+ ;; init_done = 0
+ mov word [state + _aes_ccm_init_done + lane*2], 0
+ lea tmp, [lane * 8]
+
+ pxor init_block0, init_block0
+ movdqa [state + _aes_ccm_args_IV + tmp*2], init_block0
+
+ ;; Prepare initial Block 0 for CBC-MAC-128
+
+ ;; Byte 0: flags with L' and M' (AAD later)
+ ;; Calculate L' = 15 - IV length - 1 = 14 - IV length
+ mov flags, 14
+ mov iv_len, [job + _iv_len_in_bytes]
+ sub flags, iv_len
+ ;; Calculate M' = (Digest length - 2) / 2
+ mov tmp, [job + _auth_tag_output_len_in_bytes]
+ sub tmp, 2
+
+        shl tmp, 2 ; M' << 3: fold the >>1 (halving) and <<3 into a single <<2
+ or flags, tmp
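+        ;; Example (for illustration): IV length 12, tag length 16
+        ;; => L' = 14 - 12 = 2, M' = (16 - 2)/2 = 7, flags = (7 << 3) | 2 = 0x3a
+        ;; (the Adata bit is OR-ed in later if AAD is present)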
+
+ ;; Bytes 1 - 13: Nonce (7 - 13 bytes long)
+
+ ;; Bytes 1 - 7 are always copied (first 7 bytes)
+ mov tmp, [job + _iv]
+ pinsrb init_block0, [tmp], 1
+ pinsrw init_block0, [tmp + 1], 1
+ pinsrd init_block0, [tmp + 3], 1
+
+ cmp iv_len, 7
+ je %%_finish_nonce_move
+
+ cmp iv_len, 8
+ je %%_iv_length_8
+ cmp iv_len, 9
+ je %%_iv_length_9
+ cmp iv_len, 10
+ je %%_iv_length_10
+ cmp iv_len, 11
+ je %%_iv_length_11
+ cmp iv_len, 12
+ je %%_iv_length_12
+
+ ;; Bytes 8 - 13
+%%_iv_length_13:
+ pinsrb init_block0, [tmp + 12], 13
+%%_iv_length_12:
+ pinsrb init_block0, [tmp + 11], 12
+%%_iv_length_11:
+ pinsrd init_block0, [tmp + 7], 2
+ jmp %%_finish_nonce_move
+%%_iv_length_10:
+ pinsrb init_block0, [tmp + 9], 10
+%%_iv_length_9:
+ pinsrb init_block0, [tmp + 8], 9
+%%_iv_length_8:
+ pinsrb init_block0, [tmp + 7], 8
+
+%%_finish_nonce_move:
+
+ ;; Bytes 14 & 15 (message length), in Big Endian
+ mov ax, [job + _msg_len_to_hash_in_bytes]
+ xchg al, ah
+ pinsrw init_block0, ax, 7
+
+ mov aad_len, [job + _cbcmac_aad_len]
+ ;; Initial length to authenticate (Block 0)
+ mov auth_len, 16
+        ;; Length to authenticate (Block 0 + 2-byte AAD length + AAD,
+        ;; padded so the total is a multiple of 16B)
+ lea auth_len_aad, [aad_len + (2 + 15) + 16]
+ and auth_len_aad, -16
+
+ or aad_len, aad_len
+ cmovne auth_len, auth_len_aad
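+        ;; Example (for illustration): aad_len = 20
+        ;; => auth_len = 16 (block 0) + roundup16(2 + 20) = 16 + 32 = 48 bytes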
+ ;; Update lengths to authenticate and find min length
+ movdqa ccm_lens, [state + _aes_ccm_lens]
+ XPINSRW ccm_lens, xtmp0, tmp2, lane, auth_len, scale_x16
+ movdqa [state + _aes_ccm_lens], ccm_lens
+ phminposuw min_len_idx, ccm_lens
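+        ;; phminposuw places the minimum unsigned word (the shortest lane
+        ;; length) in bits 15:0 of min_len_idx and its lane index in bits 18:16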
+
+ mov tmp, lane
+ shl tmp, 6
+ lea init_block_addr, [state + _aes_ccm_init_blocks + tmp]
+ or aad_len, aad_len
+ je %%_aad_complete
+
+ or flags, (1 << 6) ; Set Adata bit in flags
+
+ ;; Copy AAD
+ ;; Set all 0s in last block (padding)
+ lea tmp, [init_block_addr + auth_len]
+ sub tmp, 16
+ pxor xtmp0, xtmp0
+ movdqa [tmp], xtmp0
+
+ ;; Start copying from second block
+ lea tmp, [init_block_addr+16]
+ mov rax, aad_len
+ xchg al, ah
+ mov [tmp], ax
+ add tmp, 2
+ mov tmp2, [job + _cbcmac_aad]
+ memcpy_sse_64_1 tmp, tmp2, aad_len, tmp3, tmp4, xtmp0, xtmp1, xtmp2, xtmp3
+
+%%_aad_complete:
+
+ ;; Finish Block 0 with Byte 0
+ pinsrb init_block0, BYTE(flags), 0
+ movdqa [init_block_addr], init_block0
+
+ ;; args.in[lane] = &initial_block
+ mov [state + _aes_ccm_args_in + lane * 8], init_block_addr
+
+ cmp byte [state + _aes_ccm_unused_lanes], 0xf
+ jne %%_return_null
+
+%else ; end SUBMIT
+
+ ;; Check at least one job
+ bt unused_lanes, 19
+ jc %%_return_null
+
+ ;; Find a lane with a non-null job
+ xor good_lane, good_lane
+ cmp qword [state + _aes_ccm_job_in_lane + 1*8], 0
+ cmovne good_lane, [rel one]
+ cmp qword [state + _aes_ccm_job_in_lane + 2*8], 0
+ cmovne good_lane, [rel two]
+ cmp qword [state + _aes_ccm_job_in_lane + 3*8], 0
+ cmovne good_lane, [rel three]
+
+ ; Copy good_lane to empty lanes
+ movzx tmp, word [state + _aes_ccm_init_done + good_lane*2]
+ mov tmp2, [state + _aes_ccm_args_in + good_lane*8]
+ mov tmp3, [state + _aes_ccm_args_keys + good_lane*8]
+ shl good_lane, 4 ; multiply by 16
+ movdqa xtmp0, [state + _aes_ccm_args_IV + good_lane]
+ movdqa ccm_lens, [state + _aes_ccm_lens]
+
+%assign I 0
+%rep 4
+ cmp qword [state + _aes_ccm_job_in_lane + I*8], 0
+ jne APPEND(skip_,I)
+ por ccm_lens, [rel len_masks + 16*I]
+ mov [state + _aes_ccm_init_done + I*2], WORD(tmp)
+ mov [state + _aes_ccm_args_in + I*8], tmp2
+ mov [state + _aes_ccm_args_keys + I*8], tmp3
+ movdqa [state + _aes_ccm_args_IV + I*16], xtmp0
+APPEND(skip_,I):
+%assign I (I+1)
+%endrep
+ movdqa [state + _aes_ccm_lens], ccm_lens
+ ;; Find min length
+ phminposuw min_len_idx, ccm_lens
+
+%endif ; end FLUSH
+
+%%_ccm_round:
+ pextrw len2, min_len_idx, 0 ; min value
+ pextrw min_idx, min_len_idx, 1 ; min index (0...3)
+
+ mov min_job, [state + _aes_ccm_job_in_lane + min_idx*8]
+
+ or len2, len2
+ je %%_len_is_0
+ ;; subtract min length from all lengths
+ pshuflw min_len_idx, min_len_idx, 0 ; broadcast min length
+ psubw ccm_lens, min_len_idx
+ movdqa [state + _aes_ccm_lens], ccm_lens
+
+ ; "state" and "args" are the same address, arg1
+ ; len2 is arg2
+ call AES128_CBC_MAC
+ ; state and min_idx are intact
+
+%%_len_is_0:
+
+ movzx tmp, WORD [state + _aes_ccm_init_done + min_idx*2]
+ cmp WORD(tmp), 0
+ je %%_prepare_full_blocks_to_auth
+ cmp WORD(tmp), 1
+ je %%_prepare_partial_block_to_auth
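+        ;; init_done per lane: 0 = only block 0/AAD hashed (hash full payload
+        ;; blocks next), 1 = full blocks hashed (hash any partial block next),
+        ;; 2 = all data hashed (fall through and encrypt the digest)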
+
+%%_encrypt_digest:
+
+ ;; Set counter block 0 (reusing previous initial block 0)
+ mov tmp, min_idx
+ shl tmp, 3
+ movdqa init_block0, [state + _aes_ccm_init_blocks + tmp * 8]
+
+ pand init_block0, [rel counter_mask]
+
+ mov tmp2, [state + _aes_ccm_args_keys + tmp]
+ ENCRYPT_SINGLE_BLOCK tmp2, init_block0
+ pxor init_block0, [state + _aes_ccm_args_IV + tmp * 2]
+
+ ;; Copy Mlen bytes into auth_tag_output (Mlen = 4,6,8,10,12,14,16)
+ mov min_job, [state + _aes_ccm_job_in_lane + tmp]
+ mov tmp3, [min_job + _auth_tag_output_len_in_bytes]
+ mov tmp2, [min_job + _auth_tag_output]
+
+ simd_store_sse tmp2, init_block0, tmp3, tmp, rax
+
+%%_update_lanes:
+ ; Update unused lanes
+ mov unused_lanes, [state + _aes_ccm_unused_lanes]
+ shl unused_lanes, 4
+ or unused_lanes, min_idx
+ mov [state + _aes_ccm_unused_lanes], unused_lanes
+
+ ; Set return job
+ mov job_rax, min_job
+
+ mov qword [state + _aes_ccm_job_in_lane + min_idx*8], 0
+ or dword [job_rax + _status], STS_COMPLETED_HMAC
+
+%ifdef SAFE_DATA
+ pxor xtmp0, xtmp0
+%ifidn %%SUBMIT_FLUSH, SUBMIT
+ shl min_idx, 3
+ ;; Clear digest (in memory for CBC IV), counter block 0 and AAD of returned job
+ movdqa [state + _aes_ccm_args_IV + min_idx * 2], xtmp0
+ movdqa [state + _aes_ccm_init_blocks + min_idx * 8], xtmp0
+ movdqa [state + _aes_ccm_init_blocks + min_idx * 8 + 16], xtmp0
+ movdqa [state + _aes_ccm_init_blocks + min_idx * 8 + 32], xtmp0
+ movdqa [state + _aes_ccm_init_blocks + min_idx * 8 + 48], xtmp0
+ mov qword [state + _aes_ccm_args_keys + min_idx], 0
+%else
+ ;; Clear digest (in memory for CBC IV), counter block 0 and AAD
+ ;; of returned job and "NULL lanes"
+%assign I 0
+%rep 4
+ cmp qword [state + _aes_ccm_job_in_lane + I*8], 0
+ jne APPEND(skip_clear_,I)
+ movdqa [state + _aes_ccm_args_IV + I*16], xtmp0
+ movdqa [state + _aes_ccm_init_blocks + I*64], xtmp0
+ movdqa [state + _aes_ccm_init_blocks + I*64 + 16], xtmp0
+ movdqa [state + _aes_ccm_init_blocks + I*64 + 32], xtmp0
+ movdqa [state + _aes_ccm_init_blocks + I*64 + 48], xtmp0
+ mov qword [state + _aes_ccm_args_keys + I*8], 0
+APPEND(skip_clear_,I):
+%assign I (I+1)
+%endrep
+
+%endif ;; SUBMIT
+%endif ;; SAFE_DATA
+
+%%_return:
+ mov rbx, [rsp + _gpr_save + 8*0]
+ mov rbp, [rsp + _gpr_save + 8*1]
+ mov r12, [rsp + _gpr_save + 8*2]
+ mov r13, [rsp + _gpr_save + 8*3]
+ mov r14, [rsp + _gpr_save + 8*4]
+ mov r15, [rsp + _gpr_save + 8*5]
+%ifndef LINUX
+ mov rsi, [rsp + _gpr_save + 8*6]
+ mov rdi, [rsp + _gpr_save + 8*7]
+%endif
+ mov rsp, [rsp + _rsp_save] ; original SP
+ ret
+
+%%_return_null:
+ xor job_rax, job_rax
+ jmp %%_return
+
+%%_prepare_full_blocks_to_auth:
+
+ cmp dword [min_job + _cipher_direction], 2 ; DECRYPT
+ je %%_decrypt
+
+%%_encrypt:
+ mov tmp, [min_job + _src]
+ add tmp, [min_job + _hash_start_src_offset_in_bytes]
+ jmp %%_set_init_done_1
+
+%%_decrypt:
+ mov tmp, [min_job + _dst]
+
+%%_set_init_done_1:
+ mov [state + _aes_ccm_args_in + min_idx*8], tmp
+ mov word [state + _aes_ccm_init_done + min_idx*2], 1
+
+ ; Check if there are full blocks to hash
+ mov tmp, [min_job + _msg_len_to_hash_in_bytes]
+ and tmp, -16
+ je %%_prepare_partial_block_to_auth
+
+ ;; Update lengths to authenticate and find min length
+ movdqa ccm_lens, [state + _aes_ccm_lens]
+ XPINSRW ccm_lens, xtmp0, tmp2, min_idx, tmp, scale_x16
+ phminposuw min_len_idx, ccm_lens
+ movdqa [state + _aes_ccm_lens], ccm_lens
+
+ jmp %%_ccm_round
+
+%%_prepare_partial_block_to_auth:
+ ; Check if partial block needs to be hashed
+ mov auth_len, [min_job + _msg_len_to_hash_in_bytes]
+ and auth_len, 15
+ je %%_encrypt_digest
+
+ mov word [state + _aes_ccm_init_done + min_idx * 2], 2
+ ;; Update lengths to authenticate and find min length
+ movdqa ccm_lens, [state + _aes_ccm_lens]
+ XPINSRW ccm_lens, xtmp0, tmp2, min_idx, 16, scale_x16
+ phminposuw min_len_idx, ccm_lens
+ movdqa [state + _aes_ccm_lens], ccm_lens
+
+ mov tmp2, min_idx
+ shl tmp2, 6
+ add tmp2, 16 ; pb[AES_BLOCK_SIZE]
+ lea init_block_addr, [state + _aes_ccm_init_blocks + tmp2]
+ mov tmp2, [state + _aes_ccm_args_in + min_idx * 8]
+
+ simd_load_sse_15_1 xtmp0, tmp2, auth_len
+
+%%_finish_partial_block_copy:
+ movdqa [init_block_addr], xtmp0
+ mov [state + _aes_ccm_args_in + min_idx * 8], init_block_addr
+
+ jmp %%_ccm_round
+%endmacro
+
+
+align 64
+; JOB_AES_HMAC * submit_job_aes_ccm_auth_sse(MB_MGR_CCM_OOO *state, JOB_AES_HMAC *job)
+; arg 1 : state
+; arg 2 : job
+MKGLOBAL(SUBMIT_JOB_AES_CCM_AUTH,function,internal)
+SUBMIT_JOB_AES_CCM_AUTH:
+ GENERIC_SUBMIT_FLUSH_JOB_AES_CCM_AUTH_SSE SUBMIT
+
+; JOB_AES_HMAC * flush_job_aes_ccm_auth_sse(MB_MGR_CCM_OOO *state)
+; arg 1 : state
+MKGLOBAL(FLUSH_JOB_AES_CCM_AUTH,function,internal)
+FLUSH_JOB_AES_CCM_AUTH:
+ GENERIC_SUBMIT_FLUSH_JOB_AES_CCM_AUTH_SSE FLUSH
+
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/sse/mb_mgr_aes_cmac_submit_flush_sse.asm b/src/spdk/intel-ipsec-mb/sse/mb_mgr_aes_cmac_submit_flush_sse.asm
new file mode 100644
index 000000000..01c6315bd
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/sse/mb_mgr_aes_cmac_submit_flush_sse.asm
@@ -0,0 +1,502 @@
+;;
+;; Copyright (c) 2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+
+%include "include/os.asm"
+%include "job_aes_hmac.asm"
+%include "mb_mgr_datastruct.asm"
+
+%include "include/reg_sizes.asm"
+%include "include/memcpy.asm"
+%include "include/const.inc"
+;%define DO_DBGPRINT
+%include "include/dbgprint.asm"
+
+%ifndef AES128_CBC_MAC
+
+%define AES128_CBC_MAC aes128_cbc_mac_x4
+%define SUBMIT_JOB_AES_CMAC_AUTH submit_job_aes_cmac_auth_sse
+%define FLUSH_JOB_AES_CMAC_AUTH flush_job_aes_cmac_auth_sse
+
+%endif
+
+extern AES128_CBC_MAC
+
+section .data
+default rel
+
+align 16
+len_masks:
+ ;ddq 0x0000000000000000000000000000FFFF
+ dq 0x000000000000FFFF, 0x0000000000000000
+ ;ddq 0x000000000000000000000000FFFF0000
+ dq 0x00000000FFFF0000, 0x0000000000000000
+ ;ddq 0x00000000000000000000FFFF00000000
+ dq 0x0000FFFF00000000, 0x0000000000000000
+ ;ddq 0x0000000000000000FFFF000000000000
+ dq 0xFFFF000000000000, 0x0000000000000000
+one: dq 1
+two: dq 2
+three: dq 3
+
+section .text
+
+%define APPEND(a,b) a %+ b
+
+%ifdef LINUX
+%define arg1 rdi
+%define arg2 rsi
+%else
+%define arg1 rcx
+%define arg2 rdx
+%endif
+
+%define state arg1
+%define job arg2
+%define len2 arg2
+
+%define job_rax rax
+
+; idx needs to be in rbp
+%define len rbp
+%define idx rbp
+%define tmp rbp
+
+%define lane r8
+
+%define iv r9
+%define m_last r10
+%define n r11
+
+%define unused_lanes rbx
+%define r rbx
+
+%define tmp3 r12
+%define tmp4 r13
+%define tmp2 r14
+
+%define good_lane r15
+%define rbits r15
+
+; STACK_SPACE needs to be an odd multiple of 8
+; This routine and its callee clobber all GPRs
+struc STACK
+_gpr_save: resq 8
+_rsp_save: resq 1
+endstruc
+
+;;; ===========================================================================
+;;; ===========================================================================
+;;; MACROS
+;;; ===========================================================================
+;;; ===========================================================================
+
+;;; ===========================================================================
+;;; AES CMAC job submit & flush
+;;; ===========================================================================
+;;; SUBMIT_FLUSH [in] - SUBMIT, FLUSH job selection
+%macro GENERIC_SUBMIT_FLUSH_JOB_AES_CMAC_SSE 1
+%define %%SUBMIT_FLUSH %1
+
+ mov rax, rsp
+ sub rsp, STACK_size
+ and rsp, -16
+
+ mov [rsp + _gpr_save + 8*0], rbx
+ mov [rsp + _gpr_save + 8*1], rbp
+ mov [rsp + _gpr_save + 8*2], r12
+ mov [rsp + _gpr_save + 8*3], r13
+ mov [rsp + _gpr_save + 8*4], r14
+ mov [rsp + _gpr_save + 8*5], r15
+%ifndef LINUX
+ mov [rsp + _gpr_save + 8*6], rsi
+ mov [rsp + _gpr_save + 8*7], rdi
+%endif
+ mov [rsp + _rsp_save], rax ; original SP
+
+ ;; Find free lane
+ mov unused_lanes, [state + _aes_cmac_unused_lanes]
+
+%ifidn %%SUBMIT_FLUSH, SUBMIT
+
+ mov lane, unused_lanes
+ and lane, 0xF
+ shr unused_lanes, 4
+ mov [state + _aes_cmac_unused_lanes], unused_lanes
+
+ ;; Copy job info into lane
+ mov [state + _aes_cmac_job_in_lane + lane*8], job
+ ;; Copy keys into lane args
+ mov tmp, [job + _key_expanded]
+ mov [state + _aes_cmac_args_keys + lane*8], tmp
+ mov tmp, lane
+ shl tmp, 4 ; lane*16
+
+ ;; Zero IV to store digest
+ pxor xmm0, xmm0
+ movdqa [state + _aes_cmac_args_IV + tmp], xmm0
+
+ lea m_last, [state + _aes_cmac_scratch + tmp]
+
+ ;; calculate len
+ ;; convert bits to bytes (message length in bits for CMAC)
+ mov len, [job + _msg_len_to_hash_in_bits]
+ mov rbits, len
+ add len, 7 ; inc len if there are remainder bits
+ shr len, 3
+ and rbits, 7
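+        ;; Example (for illustration): a 65-bit message gives len = 9 bytes
+        ;; and rbits = 1 remainder bit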
+
+        ;; Compute number of blocks n = ceil(len / 16)
+ mov n, len
+ add n, 0xf
+ shr n, 4
+
+ ;; Check for partial block
+ mov r, len
+ and r, 0xf
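+        ;; (continuing the example: len = 9 => n = 1 block, r = 9 partial bytes)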
+
+        or n, n ; any blocks at all? (n == 0 only for an empty message)
+ jz %%_lt_one_block
+
+ ;; One or more blocks, potentially partial
+ mov word [state + _aes_cmac_init_done + lane*2], 0
+
+ mov tmp2, [job + _src]
+ add tmp2, [job + _hash_start_src_offset_in_bytes]
+ mov [state + _aes_cmac_args_in + lane*8], tmp2
+
+ ;; len = (n-1)*16
+ lea tmp2, [n - 1]
+ shl tmp2, 4
+ movdqa xmm0, [state + _aes_cmac_lens]
+ XPINSRW xmm0, xmm1, tmp, lane, tmp2, scale_x16
+ movdqa [state + _aes_cmac_lens], xmm0
+
+ ;; check remainder bits
+ or rbits, rbits
+ jnz %%_not_complete_block_3gpp
+
+ ;; check if complete block
+ or r, r
+ jz %%_complete_block
+
+%%_not_complete_block:
+ ;; M_last = padding(M_n) XOR K2
+ lea tmp, [rel padding_0x80_tab16 + 16]
+ sub tmp, r
+ movdqu xmm0, [tmp]
+ movdqa [m_last], xmm0
+
+ mov tmp, [job + _src]
+ add tmp, [job + _hash_start_src_offset_in_bytes]
+ lea tmp3, [n - 1]
+ shl tmp3, 4
+ add tmp, tmp3
+
+ memcpy_sse_16 m_last, tmp, r, tmp4, iv
+
+ ;; src + n + r
+ mov tmp3, [job + _skey2]
+ movdqa xmm1, [m_last]
+ movdqu xmm0, [tmp3]
+ pxor xmm0, xmm1
+ movdqa [m_last], xmm0
+
+%%_step_5:
+ ;; Find min length
+ movdqa xmm0, [state + _aes_cmac_lens]
+ phminposuw xmm1, xmm0
+
+ cmp byte [state + _aes_cmac_unused_lanes], 0xf
+ jne %%_return_null
+
+%else ; end SUBMIT
+
+ ;; Check at least one job
+ bt unused_lanes, 19
+ jc %%_return_null
+
+ ;; Find a lane with a non-null job
+ xor good_lane, good_lane
+ cmp qword [state + _aes_cmac_job_in_lane + 1*8], 0
+ cmovne good_lane, [rel one]
+ cmp qword [state + _aes_cmac_job_in_lane + 2*8], 0
+ cmovne good_lane, [rel two]
+ cmp qword [state + _aes_cmac_job_in_lane + 3*8], 0
+ cmovne good_lane, [rel three]
+
+ ; Copy good_lane to empty lanes
+ mov tmp2, [state + _aes_cmac_args_in + good_lane*8]
+ mov tmp3, [state + _aes_cmac_args_keys + good_lane*8]
+ shl good_lane, 4 ; multiply by 16
+ movdqa xmm2, [state + _aes_cmac_args_IV + good_lane]
+ movdqa xmm0, [state + _aes_cmac_lens]
+
+%assign I 0
+%rep 4
+ cmp qword [state + _aes_cmac_job_in_lane + I*8], 0
+ jne APPEND(skip_,I)
+ mov [state + _aes_cmac_args_in + I*8], tmp2
+ mov [state + _aes_cmac_args_keys + I*8], tmp3
+ movdqa [state + _aes_cmac_args_IV + I*16], xmm2
+ por xmm0, [rel len_masks + 16*I]
+APPEND(skip_,I):
+%assign I (I+1)
+%endrep
+ ;; Find min length
+ phminposuw xmm1, xmm0
+
+%endif ; end FLUSH
+
+%%_cmac_round:
+ pextrw len2, xmm1, 0 ; min value
+ pextrw idx, xmm1, 1 ; min index (0...3)
+ cmp len2, 0
+ je %%_len_is_0
+ pshuflw xmm1, xmm1, 0
+ psubw xmm0, xmm1
+ movdqa [state + _aes_cmac_lens], xmm0
+
+ ; "state" and "args" are the same address, arg1
+ ; len2 is arg2
+ call AES128_CBC_MAC
+ ; state and idx are intact
+
+ movdqa xmm0, [state + _aes_cmac_lens] ; preload lens
+%%_len_is_0:
+ ; Check if job complete
+ test word [state + _aes_cmac_init_done + idx*2], 0xffff
+ jnz %%_copy_complete_digest
+
+ ; Finish step 6
+ mov word [state + _aes_cmac_init_done + idx*2], 1
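+        ;; (first pass hashed blocks 1..n-1; this second pass hashes M_last)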
+
+ XPINSRW xmm0, xmm1, tmp3, idx, 16, scale_x16
+ movdqa [state + _aes_cmac_lens], xmm0
+
+ phminposuw xmm1, xmm0 ; find min length
+
+ mov tmp3, idx
+ shl tmp3, 4 ; idx*16
+ lea m_last, [state + _aes_cmac_scratch + tmp3]
+ mov [state + _aes_cmac_args_in + idx*8], m_last
+
+ jmp %%_cmac_round
+
+%%_copy_complete_digest:
+ ; Job complete, copy digest to AT output
+ mov job_rax, [state + _aes_cmac_job_in_lane + idx*8]
+
+ mov tmp4, idx
+ shl tmp4, 4
+ lea tmp3, [state + _aes_cmac_args_IV + tmp4]
+ mov tmp4, [job_rax + _auth_tag_output_len_in_bytes]
+ mov tmp2, [job_rax + _auth_tag_output]
+
+ cmp tmp4, 16
+ jne %%_ne_16_copy
+
+ ;; 16 byte AT copy
+ movdqu xmm0, [tmp3]
+ movdqu [tmp2], xmm0
+ jmp %%_update_lanes
+
+%%_ne_16_copy:
+ memcpy_sse_16 tmp2, tmp3, tmp4, lane, iv
+
+%%_update_lanes:
+ ; Update unused lanes
+ mov unused_lanes, [state + _aes_cmac_unused_lanes]
+ shl unused_lanes, 4
+ or unused_lanes, idx
+ mov [state + _aes_cmac_unused_lanes], unused_lanes
+
+ ; Set return job
+ mov job_rax, [state + _aes_cmac_job_in_lane + idx*8]
+
+ mov qword [state + _aes_cmac_job_in_lane + idx*8], 0
+ or dword [job_rax + _status], STS_COMPLETED_HMAC
+
+%ifdef SAFE_DATA
+ pxor xmm0, xmm0
+%ifidn %%SUBMIT_FLUSH, SUBMIT
+ ;; Clear digest (in memory for IV) and scratch memory of returned job
+ movdqa [tmp3], xmm0
+
+ shl idx, 4
+ movdqa [state + _aes_cmac_scratch + idx], xmm0
+
+%else
+ ;; Clear digest and scratch memory of returned job and "NULL lanes"
+%assign I 0
+%rep 4
+ cmp qword [state + _aes_cmac_job_in_lane + I*8], 0
+ jne APPEND(skip_clear_,I)
+ movdqa [state + _aes_cmac_args_IV + I*16], xmm0
+ movdqa [state + _aes_cmac_scratch + I*16], xmm0
+APPEND(skip_clear_,I):
+%assign I (I+1)
+%endrep
+%endif ;; SUBMIT
+
+%endif ;; SAFE_DATA
+
+%%_return:
+ mov rbx, [rsp + _gpr_save + 8*0]
+ mov rbp, [rsp + _gpr_save + 8*1]
+ mov r12, [rsp + _gpr_save + 8*2]
+ mov r13, [rsp + _gpr_save + 8*3]
+ mov r14, [rsp + _gpr_save + 8*4]
+ mov r15, [rsp + _gpr_save + 8*5]
+%ifndef LINUX
+ mov rsi, [rsp + _gpr_save + 8*6]
+ mov rdi, [rsp + _gpr_save + 8*7]
+%endif
+ mov rsp, [rsp + _rsp_save] ; original SP
+ ret
+
+%%_return_null:
+ xor job_rax, job_rax
+ jmp %%_return
+
+%ifidn %%SUBMIT_FLUSH, SUBMIT
+%%_complete_block:
+
+ ;; Block size aligned
+ mov tmp2, [job + _src]
+ add tmp2, [job + _hash_start_src_offset_in_bytes]
+ lea tmp3, [n - 1]
+ shl tmp3, 4
+ add tmp2, tmp3
+
+ ;; M_last = M_n XOR K1
+ mov tmp3, [job + _skey1]
+ movdqu xmm0, [tmp3]
+ movdqu xmm1, [tmp2]
+ pxor xmm0, xmm1
+ movdqa [m_last], xmm0
+
+ jmp %%_step_5
+
+%%_lt_one_block:
+ ;; Single partial block
+ mov word [state + _aes_cmac_init_done + lane*2], 1
+ mov [state + _aes_cmac_args_in + lane*8], m_last
+
+ movdqa xmm0, [state + _aes_cmac_lens]
+ XPINSRW xmm0, xmm1, tmp2, lane, 16, scale_x16
+ movdqa [state + _aes_cmac_lens], xmm0
+
+ mov n, 1
+ jmp %%_not_complete_block
+
+%%_not_complete_block_3gpp:
+ ;; bit pad last block
+ ;; xor with skey2
+ ;; copy to m_last
+
+ ;; load pointer to src
+ mov tmp, [job + _src]
+ add tmp, [job + _hash_start_src_offset_in_bytes]
+ lea tmp3, [n - 1]
+ shl tmp3, 4
+ add tmp, tmp3
+
+ ;; check if partial block
+ or r, r
+ jz %%_load_full_block_3gpp
+
+ simd_load_sse_15_1 xmm0, tmp, r
+ dec r
+
+%%_update_mlast_3gpp:
+ ;; set last byte padding mask
+ ;; shift into correct xmm idx
+
+ ;; save and restore rcx on windows
+%ifndef LINUX
+ mov tmp, rcx
+%endif
+ mov rcx, rbits
+ mov tmp3, 0xff
+ shr tmp3, cl
+ movq xmm2, tmp3
+ XPSLLB xmm2, r, xmm1, tmp2
+
+ ;; pad final byte
+ pandn xmm2, xmm0
+%ifndef LINUX
+ mov rcx, tmp
+%endif
+ ;; set OR mask to pad final bit
+ mov tmp2, tmp3
+ shr tmp2, 1
+ xor tmp2, tmp3 ; XOR to get OR mask
+ movq xmm3, tmp2
+ ;; xmm1 contains shift table from previous shift
+ pshufb xmm3, xmm1
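+        ;; Example (for illustration): rbits = 3 => keep mask 0xff >> 3 = 0x1f
+        ;; cleared the 5 unused low bits of the last byte (pandn above) and
+        ;; (0x1f >> 1) ^ 0x1f = 0x10 is the single '1' padding bit placed just
+        ;; below the 3 valid data bits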
+
+ ;; load skey2 address
+ mov tmp3, [job + _skey2]
+ movdqu xmm1, [tmp3]
+
+ ;; set final padding bit
+ por xmm2, xmm3
+
+ ;; XOR last partial block with skey2
+ ;; update mlast
+ pxor xmm2, xmm1
+ movdqa [m_last], xmm2
+
+ jmp %%_step_5
+
+%%_load_full_block_3gpp:
+ movdqu xmm0, [tmp]
+ mov r, 0xf
+ jmp %%_update_mlast_3gpp
+%endif
+%endmacro
+
+
+align 64
+; JOB_AES_HMAC * submit_job_aes_cmac_auth_sse(MB_MGR_CMAC_OOO *state, JOB_AES_HMAC *job)
+; arg 1 : state
+; arg 2 : job
+MKGLOBAL(SUBMIT_JOB_AES_CMAC_AUTH,function,internal)
+SUBMIT_JOB_AES_CMAC_AUTH:
+ GENERIC_SUBMIT_FLUSH_JOB_AES_CMAC_SSE SUBMIT
+
+; JOB_AES_HMAC * flush_job_aes_cmac_auth_sse(MB_MGR_CMAC_OOO *state)
+; arg 1 : state
+MKGLOBAL(FLUSH_JOB_AES_CMAC_AUTH,function,internal)
+FLUSH_JOB_AES_CMAC_AUTH:
+ GENERIC_SUBMIT_FLUSH_JOB_AES_CMAC_SSE FLUSH
+
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/sse/mb_mgr_aes_flush_sse.asm b/src/spdk/intel-ipsec-mb/sse/mb_mgr_aes_flush_sse.asm
new file mode 100644
index 000000000..0066aff9f
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/sse/mb_mgr_aes_flush_sse.asm
@@ -0,0 +1,217 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%include "include/os.asm"
+%include "job_aes_hmac.asm"
+%include "mb_mgr_datastruct.asm"
+
+%include "include/reg_sizes.asm"
+
+%ifndef AES_CBC_ENC_X4
+%define AES_CBC_ENC_X4 aes_cbc_enc_128_x4
+%define FLUSH_JOB_AES_ENC flush_job_aes128_enc_sse
+%endif
+
+; void AES_CBC_ENC_X4(AES_ARGS *args, UINT64 len_in_bytes);
+extern AES_CBC_ENC_X4
+
+section .data
+default rel
+
+align 16
+len_masks:
+ ;ddq 0x0000000000000000000000000000FFFF
+ dq 0x000000000000FFFF, 0x0000000000000000
+ ;ddq 0x000000000000000000000000FFFF0000
+ dq 0x00000000FFFF0000, 0x0000000000000000
+ ;ddq 0x00000000000000000000FFFF00000000
+ dq 0x0000FFFF00000000, 0x0000000000000000
+ ;ddq 0x0000000000000000FFFF000000000000
+ dq 0xFFFF000000000000, 0x0000000000000000
+one: dq 1
+two: dq 2
+three: dq 3
+
+section .text
+
+%define APPEND(a,b) a %+ b
+
+%ifdef LINUX
+%define arg1 rdi
+%define arg2 rsi
+%else
+%define arg1 rcx
+%define arg2 rdx
+%endif
+
+%define state arg1
+%define job arg2
+%define len2 arg2
+
+%define job_rax rax
+
+%if 1
+%define unused_lanes rbx
+%define tmp1 rbx
+
+%define good_lane rdx
+%define iv rdx
+
+%define tmp2 rax
+
+; idx needs to be in rbp
+%define tmp rbp
+%define idx rbp
+
+%define tmp3 r8
+%endif
+
+; STACK_SPACE needs to be an odd multiple of 8
+; This routine and its callee clobber all GPRs
+struc STACK
+_gpr_save: resq 8
+_rsp_save: resq 1
+endstruc
+
+; JOB* FLUSH_JOB_AES_ENC(MB_MGR_AES_OOO *state, JOB_AES_HMAC *job)
+; arg 1 : state
+; arg 2 : job
+MKGLOBAL(FLUSH_JOB_AES_ENC,function,internal)
+FLUSH_JOB_AES_ENC:
+
+ mov rax, rsp
+ sub rsp, STACK_size
+ and rsp, -16
+
+ mov [rsp + _gpr_save + 8*0], rbx
+ mov [rsp + _gpr_save + 8*1], rbp
+ mov [rsp + _gpr_save + 8*2], r12
+ mov [rsp + _gpr_save + 8*3], r13
+ mov [rsp + _gpr_save + 8*4], r14
+ mov [rsp + _gpr_save + 8*5], r15
+%ifndef LINUX
+ mov [rsp + _gpr_save + 8*6], rsi
+ mov [rsp + _gpr_save + 8*7], rdi
+%endif
+ mov [rsp + _rsp_save], rax ; original SP
+
+ ; check for empty
+ mov unused_lanes, [state + _aes_unused_lanes]
+ bt unused_lanes, 32+7
+ jc return_null
+
+ ; find a lane with a non-null job
+ xor good_lane, good_lane
+ cmp qword [state + _aes_job_in_lane + 1*8], 0
+ cmovne good_lane, [rel one]
+ cmp qword [state + _aes_job_in_lane + 2*8], 0
+ cmovne good_lane, [rel two]
+ cmp qword [state + _aes_job_in_lane + 3*8], 0
+ cmovne good_lane, [rel three]
+
+ ; copy good_lane to empty lanes
+ mov tmp1, [state + _aes_args_in + good_lane*8]
+ mov tmp2, [state + _aes_args_out + good_lane*8]
+ mov tmp3, [state + _aes_args_keys + good_lane*8]
+ shl good_lane, 4 ; multiply by 16
+ movdqa xmm2, [state + _aes_args_IV + good_lane]
+ movdqa xmm0, [state + _aes_lens]
+
+%assign I 0
+%rep 4
+ cmp qword [state + _aes_job_in_lane + I*8], 0
+ jne APPEND(skip_,I)
+ mov [state + _aes_args_in + I*8], tmp1
+ mov [state + _aes_args_out + I*8], tmp2
+ mov [state + _aes_args_keys + I*8], tmp3
+ movdqa [state + _aes_args_IV + I*16], xmm2
+ por xmm0, [rel len_masks + 16*I]
+APPEND(skip_,I):
+%assign I (I+1)
+%endrep
+
+ ; Find min length
+ phminposuw xmm1, xmm0
+ pextrw len2, xmm1, 0 ; min value
+ pextrw idx, xmm1, 1 ; min index (0...3)
+ cmp len2, 0
+ je len_is_0
+
+ pshuflw xmm1, xmm1, 0
+ psubw xmm0, xmm1
+ movdqa [state + _aes_lens], xmm0
+
+ ; "state" and "args" are the same address, arg1
+        ; len2 is arg2
+ call AES_CBC_ENC_X4
+ ; state and idx are intact
+
+len_is_0:
+ ; process completed job "idx"
+ mov job_rax, [state + _aes_job_in_lane + idx*8]
+ mov unused_lanes, [state + _aes_unused_lanes]
+ mov qword [state + _aes_job_in_lane + idx*8], 0
+ or dword [job_rax + _status], STS_COMPLETED_AES
+ shl unused_lanes, 8
+ or unused_lanes, idx
+ mov [state + _aes_unused_lanes], unused_lanes
+%ifdef SAFE_DATA
+ ;; Clear IVs of returned job and "NULL lanes"
+ pxor xmm0, xmm0
+%assign I 0
+%rep 4
+ cmp qword [state + _aes_job_in_lane + I*8], 0
+ jne APPEND(skip_clear_,I)
+ movdqa [state + _aes_args_IV + I*16], xmm0
+APPEND(skip_clear_,I):
+%assign I (I+1)
+%endrep
+%endif
+
+return:
+
+ mov rbx, [rsp + _gpr_save + 8*0]
+ mov rbp, [rsp + _gpr_save + 8*1]
+ mov r12, [rsp + _gpr_save + 8*2]
+ mov r13, [rsp + _gpr_save + 8*3]
+ mov r14, [rsp + _gpr_save + 8*4]
+ mov r15, [rsp + _gpr_save + 8*5]
+%ifndef LINUX
+ mov rsi, [rsp + _gpr_save + 8*6]
+ mov rdi, [rsp + _gpr_save + 8*7]
+%endif
+ mov rsp, [rsp + _rsp_save] ; original SP
+
+ ret
+
+return_null:
+ xor job_rax, job_rax
+ jmp return
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/sse/mb_mgr_aes_submit_sse.asm b/src/spdk/intel-ipsec-mb/sse/mb_mgr_aes_submit_sse.asm
new file mode 100644
index 000000000..702fb91a4
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/sse/mb_mgr_aes_submit_sse.asm
@@ -0,0 +1,187 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%include "include/os.asm"
+%include "job_aes_hmac.asm"
+%include "mb_mgr_datastruct.asm"
+
+%include "include/reg_sizes.asm"
+%include "include/const.inc"
+
+%ifndef AES_CBC_ENC_X4
+
+%define AES_CBC_ENC_X4 aes_cbc_enc_128_x4
+%define SUBMIT_JOB_AES_ENC submit_job_aes128_enc_sse
+
+%endif
+
+; void AES_CBC_ENC_X4(AES_ARGS *args, UINT64 len_in_bytes);
+extern AES_CBC_ENC_X4
+
+%ifdef LINUX
+%define arg1 rdi
+%define arg2 rsi
+%else
+%define arg1 rcx
+%define arg2 rdx
+%endif
+
+%define state arg1
+%define job arg2
+%define len2 arg2
+
+%define job_rax rax
+
+%if 1
+; idx needs to be in rbp
+%define len rbp
+%define idx rbp
+%define tmp rbp
+
+%define lane r8
+
+%define iv r9
+
+%define unused_lanes rbx
+%endif
+
+; STACK_SPACE needs to be an odd multiple of 8
+; This routine and its callee clobber all GPRs
+struc STACK
+_gpr_save: resq 8
+_rsp_save: resq 1
+endstruc
+
+section .text
+
+; JOB* SUBMIT_JOB_AES_ENC(MB_MGR_AES_OOO *state, JOB_AES_HMAC *job)
+; arg 1 : state
+; arg 2 : job
+MKGLOBAL(SUBMIT_JOB_AES_ENC,function,internal)
+SUBMIT_JOB_AES_ENC:
+
+ mov rax, rsp
+ sub rsp, STACK_size
+ and rsp, -16
+
+ mov [rsp + _gpr_save + 8*0], rbx
+ mov [rsp + _gpr_save + 8*1], rbp
+ mov [rsp + _gpr_save + 8*2], r12
+ mov [rsp + _gpr_save + 8*3], r13
+ mov [rsp + _gpr_save + 8*4], r14
+ mov [rsp + _gpr_save + 8*5], r15
+%ifndef LINUX
+ mov [rsp + _gpr_save + 8*6], rsi
+ mov [rsp + _gpr_save + 8*7], rdi
+%endif
+ mov [rsp + _rsp_save], rax ; original SP
+
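+        ;; _aes_unused_lanes is a byte-packed stack of free lane ids;
+        ;; pop the next free lane from the low byte (it is pushed back when a job completes)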
+ mov unused_lanes, [state + _aes_unused_lanes]
+ movzx lane, BYTE(unused_lanes)
+ shr unused_lanes, 8
+ mov iv, [job + _iv]
+ mov [state + _aes_unused_lanes], unused_lanes
+
+ mov [state + _aes_job_in_lane + lane*8], job
+ mov tmp, [job + _src]
+ add tmp, [job + _cipher_start_src_offset_in_bytes]
+ movdqu xmm0, [iv]
+ mov [state + _aes_args_in + lane*8], tmp
+ mov tmp, [job + _aes_enc_key_expanded]
+ mov [state + _aes_args_keys + lane*8], tmp
+ mov tmp, [job + _dst]
+ mov [state + _aes_args_out + lane*8], tmp
+ shl lane, 4 ; multiply by 16
+ movdqa [state + _aes_args_IV + lane], xmm0
+
+ ;; insert len into proper lane
+ mov len, [job + _msg_len_to_cipher_in_bytes]
+        and     len, -16        ; round down - DOCSIS may pass a size that is not block-size aligned
+
+ movdqa xmm0, [state + _aes_lens]
+ XPINSRW xmm0, xmm1, tmp, lane, len, no_scale
+ movdqa [state + _aes_lens], xmm0
+
+ cmp unused_lanes, 0xff
+ jne return_null
+
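+        ;; Multi-buffer scheduling: pick the lane with the smallest remaining length,
+        ;; cipher that many bytes on all four lanes and subtract it from every lane,
+        ;; so the shortest job reaches zero and is completed below.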
+ ; Find min length
+ phminposuw xmm1, xmm0
+ pextrw len2, xmm1, 0 ; min value
+ pextrw idx, xmm1, 1 ; min index (0...3)
+ cmp len2, 0
+ je len_is_0
+
+ pshuflw xmm1, xmm1, 0
+ psubw xmm0, xmm1
+ movdqa [state + _aes_lens], xmm0
+
+ ; "state" and "args" are the same address, arg1
+ ; len is arg2
+ call AES_CBC_ENC_X4
+ ; state and idx are intact
+
+len_is_0:
+ ; process completed job "idx"
+ mov job_rax, [state + _aes_job_in_lane + idx*8]
+ mov unused_lanes, [state + _aes_unused_lanes]
+ mov qword [state + _aes_job_in_lane + idx*8], 0
+ or dword [job_rax + _status], STS_COMPLETED_AES
+ shl unused_lanes, 8
+ or unused_lanes, idx
+ mov [state + _aes_unused_lanes], unused_lanes
+%ifdef SAFE_DATA
+        ;; Clear IV and expanded-key pointer of the returned job
+        pxor    xmm0, xmm0
+        shl     idx, 3 ; multiply by 8
+        movdqa  [state + _aes_args_IV + idx*2], xmm0    ; idx*2 = lane*16 (IV stride)
+        mov     qword [state + _aes_args_keys + idx], 0 ; idx = lane*8 (key pointer stride)
+%endif
+
+return:
+
+ mov rbx, [rsp + _gpr_save + 8*0]
+ mov rbp, [rsp + _gpr_save + 8*1]
+ mov r12, [rsp + _gpr_save + 8*2]
+ mov r13, [rsp + _gpr_save + 8*3]
+ mov r14, [rsp + _gpr_save + 8*4]
+ mov r15, [rsp + _gpr_save + 8*5]
+%ifndef LINUX
+ mov rsi, [rsp + _gpr_save + 8*6]
+ mov rdi, [rsp + _gpr_save + 8*7]
+%endif
+ mov rsp, [rsp + _rsp_save] ; original SP
+
+ ret
+
+return_null:
+ xor job_rax, job_rax
+ jmp return
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/sse/mb_mgr_aes_xcbc_flush_sse.asm b/src/spdk/intel-ipsec-mb/sse/mb_mgr_aes_xcbc_flush_sse.asm
new file mode 100644
index 000000000..6069ce17a
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/sse/mb_mgr_aes_xcbc_flush_sse.asm
@@ -0,0 +1,242 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%include "include/os.asm"
+%include "job_aes_hmac.asm"
+%include "mb_mgr_datastruct.asm"
+
+%include "include/reg_sizes.asm"
+
+%ifndef AES_XCBC_X4
+%define AES_XCBC_X4 aes_xcbc_mac_128_x4
+%define FLUSH_JOB_AES_XCBC flush_job_aes_xcbc_sse
+%endif
+
+; void AES_XCBC_X4(AES_XCBC_ARGS_x8 *args, UINT64 len_in_bytes);
+extern AES_XCBC_X4
+
+section .data
+default rel
+
+align 16
+len_masks:
+ ;ddq 0x0000000000000000000000000000FFFF
+ dq 0x000000000000FFFF, 0x0000000000000000
+ ;ddq 0x000000000000000000000000FFFF0000
+ dq 0x00000000FFFF0000, 0x0000000000000000
+ ;ddq 0x00000000000000000000FFFF00000000
+ dq 0x0000FFFF00000000, 0x0000000000000000
+ ;ddq 0x0000000000000000FFFF000000000000
+ dq 0xFFFF000000000000, 0x0000000000000000
+one: dq 1
+two: dq 2
+three: dq 3
+
+section .text
+
+%define APPEND(a,b) a %+ b
+
+%ifdef LINUX
+%define arg1 rdi
+%define arg2 rsi
+%else
+%define arg1 rcx
+%define arg2 rdx
+%endif
+
+%define state arg1
+%define job arg2
+%define len2 arg2
+
+%define job_rax rax
+
+%if 1
+%define unused_lanes rbx
+%define tmp1 rbx
+
+%define icv rdx
+
+%define tmp2 rax
+
+; idx needs to be in rbp
+%define tmp r10
+%define idx rbp
+
+%define tmp3 r8
+%define lane_data r9
+%endif
+
+; STACK_SPACE needs to be an odd multiple of 8
+; This routine and its callee clobber all GPRs
+struc STACK
+_gpr_save: resq 8
+_rsp_save: resq 1
+endstruc
+
+; JOB* FLUSH_JOB_AES_XCBC(MB_MGR_AES_XCBC_OOO *state, JOB_AES_HMAC *job)
+; arg 1 : state
+; arg 2 : job
+MKGLOBAL(FLUSH_JOB_AES_XCBC,function,internal)
+FLUSH_JOB_AES_XCBC:
+
+ mov rax, rsp
+ sub rsp, STACK_size
+ and rsp, -16
+
+ mov [rsp + _gpr_save + 8*0], rbx
+ mov [rsp + _gpr_save + 8*1], rbp
+ mov [rsp + _gpr_save + 8*2], r12
+ mov [rsp + _gpr_save + 8*3], r13
+ mov [rsp + _gpr_save + 8*4], r14
+ mov [rsp + _gpr_save + 8*5], r15
+%ifndef LINUX
+ mov [rsp + _gpr_save + 8*6], rsi
+ mov [rsp + _gpr_save + 8*7], rdi
+%endif
+ mov [rsp + _rsp_save], rax ; original SP
+
+ ; check for empty
+ mov unused_lanes, [state + _aes_xcbc_unused_lanes]
+ bt unused_lanes, 32+7
+ jc return_null
+
+ ; find a lane with a non-null job
+ xor idx, idx
+ cmp qword [state + _aes_xcbc_ldata + 1 * _XCBC_LANE_DATA_size + _xcbc_job_in_lane], 0
+ cmovne idx, [rel one]
+ cmp qword [state + _aes_xcbc_ldata + 2 * _XCBC_LANE_DATA_size + _xcbc_job_in_lane], 0
+ cmovne idx, [rel two]
+ cmp qword [state + _aes_xcbc_ldata + 3 * _XCBC_LANE_DATA_size + _xcbc_job_in_lane], 0
+ cmovne idx, [rel three]
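+        ;; (branchless search: idx ends up as the highest-numbered lane holding a job)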
+
+copy_lane_data:
+ ; copy idx to empty lanes
+ mov tmp1, [state + _aes_xcbc_args_in + idx*8]
+ mov tmp3, [state + _aes_xcbc_args_keys + idx*8]
+ shl idx, 4 ; multiply by 16
+ movdqa xmm2, [state + _aes_xcbc_args_ICV + idx]
+ movdqa xmm0, [state + _aes_xcbc_lens]
+
+%assign I 0
+%rep 4
+ cmp qword [state + _aes_xcbc_ldata + I * _XCBC_LANE_DATA_size + _xcbc_job_in_lane], 0
+ jne APPEND(skip_,I)
+ mov [state + _aes_xcbc_args_in + I*8], tmp1
+ mov [state + _aes_xcbc_args_keys + I*8], tmp3
+ movdqa [state + _aes_xcbc_args_ICV + I*16], xmm2
+ por xmm0, [rel len_masks + 16*I]
+APPEND(skip_,I):
+%assign I (I+1)
+%endrep
+
+ movdqa [state + _aes_xcbc_lens], xmm0
+
+ ; Find min length
+ phminposuw xmm1, xmm0
+ pextrw len2, xmm1, 0 ; min value
+ pextrw idx, xmm1, 1 ; min index (0...3)
+ cmp len2, 0
+ je len_is_0
+
+ pshuflw xmm1, xmm1, 0
+ psubw xmm0, xmm1
+ movdqa [state + _aes_xcbc_lens], xmm0
+
+ ; "state" and "args" are the same address, arg1
+ ; len is arg2
+ call AES_XCBC_X4
+ ; state and idx are intact
+
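+        ;; When a lane's length reaches zero its current pass is done. If the XCBC final
+        ;; block has not been run yet, queue it as a 16-byte message and loop again;
+        ;; otherwise extract the 12-byte ICV below.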
+len_is_0:
+ ; process completed job "idx"
+ imul lane_data, idx, _XCBC_LANE_DATA_size
+ lea lane_data, [state + _aes_xcbc_ldata + lane_data]
+ cmp dword [lane_data + _xcbc_final_done], 0
+ jne end_loop
+
+ mov dword [lane_data + _xcbc_final_done], 1
+ mov word [state + _aes_xcbc_lens + 2*idx], 16
+ lea tmp, [lane_data + _xcbc_final_block]
+ mov [state + _aes_xcbc_args_in + 8*idx], tmp
+ jmp copy_lane_data
+
+end_loop:
+ mov job_rax, [lane_data + _xcbc_job_in_lane]
+ mov icv, [job_rax + _auth_tag_output]
+ mov unused_lanes, [state + _aes_xcbc_unused_lanes]
+ mov qword [lane_data + _xcbc_job_in_lane], 0
+ or dword [job_rax + _status], STS_COMPLETED_HMAC
+ shl unused_lanes, 8
+ or unused_lanes, idx
+ shl idx, 4 ; multiply by 16
+ mov [state + _aes_xcbc_unused_lanes], unused_lanes
+
+ ; copy 12 bytes
+ movdqa xmm0, [state + _aes_xcbc_args_ICV + idx]
+ movq [icv], xmm0
+ pextrd [icv + 8], xmm0, 2
+
+
+%ifdef SAFE_DATA
+ pxor xmm0, xmm0
+
+ ;; Clear ICV's and final blocks in returned job and NULL lanes
+%assign I 0
+%rep 4
+ cmp qword [state + _aes_xcbc_ldata + I * _XCBC_LANE_DATA_size + _xcbc_job_in_lane], 0
+ jne APPEND(skip_clear_,I)
+ movdqa [state + _aes_xcbc_args_ICV + I*16], xmm0
+ lea lane_data, [state + _aes_xcbc_ldata + (I * _XCBC_LANE_DATA_size)]
+ movdqa [lane_data + _xcbc_final_block], xmm0
+ movdqa [lane_data + _xcbc_final_block + 16], xmm0
+APPEND(skip_clear_,I):
+%assign I (I+1)
+%endrep
+%endif
+return:
+
+ mov rbx, [rsp + _gpr_save + 8*0]
+ mov rbp, [rsp + _gpr_save + 8*1]
+ mov r12, [rsp + _gpr_save + 8*2]
+ mov r13, [rsp + _gpr_save + 8*3]
+ mov r14, [rsp + _gpr_save + 8*4]
+ mov r15, [rsp + _gpr_save + 8*5]
+%ifndef LINUX
+ mov rsi, [rsp + _gpr_save + 8*6]
+ mov rdi, [rsp + _gpr_save + 8*7]
+%endif
+ mov rsp, [rsp + _rsp_save] ; original SP
+
+ ret
+
+return_null:
+ xor job_rax, job_rax
+ jmp return
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/sse/mb_mgr_aes_xcbc_submit_sse.asm b/src/spdk/intel-ipsec-mb/sse/mb_mgr_aes_xcbc_submit_sse.asm
new file mode 100644
index 000000000..e61cc07b1
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/sse/mb_mgr_aes_xcbc_submit_sse.asm
@@ -0,0 +1,263 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%include "include/os.asm"
+%include "include/const.inc"
+%include "job_aes_hmac.asm"
+%include "mb_mgr_datastruct.asm"
+
+%include "include/reg_sizes.asm"
+%include "include/memcpy.asm"
+%ifndef AES_XCBC_X4
+%define AES_XCBC_X4 aes_xcbc_mac_128_x4
+%define SUBMIT_JOB_AES_XCBC submit_job_aes_xcbc_sse
+%endif
+
+; void AES_XCBC_X4(AES_XCBC_ARGS_x8 *args, UINT64 len_in_bytes);
+extern AES_XCBC_X4
+
+section .data
+default rel
+
+align 16
+x80: ;ddq 0x00000000000000000000000000000080
+ dq 0x0000000000000080, 0x0000000000000000
+
+section .text
+
+%ifdef LINUX
+%define arg1 rdi
+%define arg2 rsi
+%else
+%define arg1 rcx
+%define arg2 rdx
+%endif
+
+%define state arg1
+%define job arg2
+%define len2 arg2
+
+%define job_rax rax
+
+%if 1
+; idx needs to be in rbp
+%define idx rbp
+%define last_len rbp
+
+%define lane r8
+
+%define icv r9
+%define p2 r9
+
+%define tmp r10
+%define len r11
+%define lane_data r12
+%define p r13
+%define tmp2 r14
+
+%define unused_lanes rbx
+%endif
+
+; STACK_SPACE needs to be an odd multiple of 8
+; This routine and its callee clobber all GPRs
+struc STACK
+_gpr_save: resq 8
+_rsp_save: resq 1
+endstruc
+
+; JOB* SUBMIT_JOB_AES_XCBC(MB_MGR_AES_XCBC_OOO *state, JOB_AES_HMAC *job)
+; arg 1 : state
+; arg 2 : job
+MKGLOBAL(SUBMIT_JOB_AES_XCBC,function,internal)
+SUBMIT_JOB_AES_XCBC:
+
+ mov rax, rsp
+ sub rsp, STACK_size
+ and rsp, -16
+
+ mov [rsp + _gpr_save + 8*0], rbx
+ mov [rsp + _gpr_save + 8*1], rbp
+ mov [rsp + _gpr_save + 8*2], r12
+ mov [rsp + _gpr_save + 8*3], r13
+ mov [rsp + _gpr_save + 8*4], r14
+ mov [rsp + _gpr_save + 8*5], r15
+%ifndef LINUX
+ mov [rsp + _gpr_save + 8*6], rsi
+ mov [rsp + _gpr_save + 8*7], rdi
+%endif
+ mov [rsp + _rsp_save], rax ; original SP
+
+ mov unused_lanes, [state + _aes_xcbc_unused_lanes]
+ movzx lane, BYTE(unused_lanes)
+ shr unused_lanes, 8
+ imul lane_data, lane, _XCBC_LANE_DATA_size
+ lea lane_data, [state + _aes_xcbc_ldata + lane_data]
+ mov [state + _aes_xcbc_unused_lanes], unused_lanes
+ mov len, [job + _msg_len_to_hash_in_bytes]
+ mov [lane_data + _xcbc_job_in_lane], job
+ mov dword [lane_data + _xcbc_final_done], 0
+ mov tmp, [job + _k1_expanded]
+ mov [state + _aes_xcbc_args_keys + lane*8], tmp
+ mov p, [job + _src]
+ add p, [job + _hash_start_src_offset_in_bytes]
+
+ mov last_len, len
+
+ cmp len, 16
+ jle small_buffer
+
+ mov [state + _aes_xcbc_args_in + lane*8], p
+        add     p, len          ; set pointer to end of data
+
+ and last_len, 15 ; Check lsbs of msg len
+ jnz slow_copy ; if not 16B mult, do slow copy
+
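+        ;; AES-XCBC-MAC (RFC 3566) last-block handling: a complete final block is
+        ;; XORed with K2 (fast_copy); an incomplete one is padded with 0x80 and
+        ;; XORed with K3 (slow_copy).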
+fast_copy:
+ movdqu xmm0, [p - 16] ; load last block M[n]
+ mov tmp, [job + _k2] ; load K2 address
+ movdqu xmm1, [tmp] ; load K2
+ pxor xmm0, xmm1 ; M[n] XOR K2
+ movdqa [lane_data + _xcbc_final_block], xmm0
+ sub len, 16 ; take last block off length
+end_fast_copy:
+ pxor xmm0, xmm0
+ shl lane, 4 ; multiply by 16
+ movdqa [state + _aes_xcbc_args_ICV + lane], xmm0
+
+ ;; insert len into proper lane
+ movdqa xmm0, [state + _aes_xcbc_lens]
+ XPINSRW xmm0, xmm1, tmp, lane, len, no_scale
+ movdqa [state + _aes_xcbc_lens], xmm0
+
+ cmp unused_lanes, 0xff
+ jne return_null
+
+start_loop:
+ ; Find min length
+ phminposuw xmm1, xmm0
+ pextrw len2, xmm1, 0 ; min value
+ pextrw idx, xmm1, 1 ; min index (0...3)
+ cmp len2, 0
+ je len_is_0
+
+ pshuflw xmm1, xmm1, 0
+ psubw xmm0, xmm1
+ movdqa [state + _aes_xcbc_lens], xmm0
+
+ ; "state" and "args" are the same address, arg1
+ ; len is arg2
+ call AES_XCBC_X4
+ ; state and idx are intact
+
+len_is_0:
+ ; process completed job "idx"
+ imul lane_data, idx, _XCBC_LANE_DATA_size
+ lea lane_data, [state + _aes_xcbc_ldata + lane_data]
+ cmp dword [lane_data + _xcbc_final_done], 0
+ jne end_loop
+
+ mov dword [lane_data + _xcbc_final_done], 1
+ mov word [state + _aes_xcbc_lens + 2*idx], 16
+ lea tmp, [lane_data + _xcbc_final_block]
+ mov [state + _aes_xcbc_args_in + 8*idx], tmp
+ movdqa xmm0, [state + _aes_xcbc_lens]
+ jmp start_loop
+
+end_loop:
+ ; process completed job "idx"
+ mov job_rax, [lane_data + _xcbc_job_in_lane]
+ mov icv, [job_rax + _auth_tag_output]
+ mov unused_lanes, [state + _aes_xcbc_unused_lanes]
+ mov qword [lane_data + _xcbc_job_in_lane], 0
+ or dword [job_rax + _status], STS_COMPLETED_HMAC
+ shl unused_lanes, 8
+ or unused_lanes, idx
+ shl idx, 4 ; multiply by 16
+ mov [state + _aes_xcbc_unused_lanes], unused_lanes
+
+ ; copy 12 bytes
+ movdqa xmm0, [state + _aes_xcbc_args_ICV + idx]
+ movq [icv], xmm0
+ pextrd [icv + 8], xmm0, 2
+
+%ifdef SAFE_DATA
+ ;; Clear ICV
+ pxor xmm0, xmm0
+ movdqa [state + _aes_xcbc_args_ICV + idx], xmm0
+
+ ;; Clear final block (32 bytes)
+ movdqa [lane_data + _xcbc_final_block], xmm0
+ movdqa [lane_data + _xcbc_final_block + 16], xmm0
+%endif
+
+return:
+
+ mov rbx, [rsp + _gpr_save + 8*0]
+ mov rbp, [rsp + _gpr_save + 8*1]
+ mov r12, [rsp + _gpr_save + 8*2]
+ mov r13, [rsp + _gpr_save + 8*3]
+ mov r14, [rsp + _gpr_save + 8*4]
+ mov r15, [rsp + _gpr_save + 8*5]
+%ifndef LINUX
+ mov rsi, [rsp + _gpr_save + 8*6]
+ mov rdi, [rsp + _gpr_save + 8*7]
+%endif
+ mov rsp, [rsp + _rsp_save] ; original SP
+
+ ret
+
+small_buffer:
+ ; For buffers <= 16 Bytes
+ ; The input data is set to final block
+ lea tmp, [lane_data + _xcbc_final_block] ; final block
+ mov [state + _aes_xcbc_args_in + lane*8], tmp
+        add     p, len          ; set pointer to end of data
+ cmp len, 16
+ je fast_copy
+
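+        ;; Copy the message tail so it ends at offset 16 of the final block area,
+        ;; write the 0x80 padding block right behind it, then XOR the padded 16 bytes with K3.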
+slow_copy:
+ and len, ~15 ; take final block off len
+ sub p, last_len ; adjust data pointer
+ lea p2, [lane_data + _xcbc_final_block + 16] ; upper part of final
+ sub p2, last_len ; adjust data pointer backwards
+ memcpy_sse_16_1 p2, p, last_len, tmp, tmp2
+ movdqa xmm0, [rel x80] ; fill reg with padding
+ movdqu [lane_data + _xcbc_final_block + 16], xmm0 ; add padding
+ movdqu xmm0, [p2] ; load final block to process
+ mov tmp, [job + _k3] ; load K3 address
+ movdqu xmm1, [tmp] ; load K3
+ pxor xmm0, xmm1 ; M[n] XOR K3
+ movdqu [lane_data + _xcbc_final_block], xmm0 ; write final block
+ jmp end_fast_copy
+
+return_null:
+ xor job_rax, job_rax
+ jmp return
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/sse/mb_mgr_hmac_flush_ni_sse.asm b/src/spdk/intel-ipsec-mb/sse/mb_mgr_hmac_flush_ni_sse.asm
new file mode 100644
index 000000000..ac1bb8691
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/sse/mb_mgr_hmac_flush_ni_sse.asm
@@ -0,0 +1,305 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+;; In System V AMD64 ABI
+;; callee saves: RBX, RBP, R12-R15
+;; Windows x64 ABI
+;; callee saves: RBX, RBP, RDI, RSI, RSP, R12-R15
+;;
+;; Registers: RAX RBX RCX RDX RBP RSI RDI R8 R9 R10 R11 R12 R13 R14 R15
+;; -----------------------------------------------------------
+;; Windows clobbers: RAX RCX RDX R8
+;; Windows preserves: RBX RBP RSI RDI R9 R10 R11 R12 R13 R14 R15
+;; -----------------------------------------------------------
+;; Linux clobbers: RAX RSI RDI R8
+;; Linux preserves: RBX RCX RDX RBP R9 R10 R11 R12 R13 R14 R15
+;; -----------------------------------------------------------
+;;
+;; Linux/Windows clobbers: xmm0 - xmm15
+;;
+
+%include "include/os.asm"
+%include "job_aes_hmac.asm"
+%include "mb_mgr_datastruct.asm"
+%include "include/reg_sizes.asm"
+
+;%define DO_DBGPRINT
+%include "include/dbgprint.asm"
+
+extern sha1_ni
+
+section .data
+default rel
+
+align 16
+byteswap: ;ddq 0x0c0d0e0f08090a0b0405060700010203
+ dq 0x0405060700010203, 0x0c0d0e0f08090a0b
+one:
+ dq 1
+
+section .text
+
+%ifdef LINUX
+%define arg1 rdi
+%define arg2 rsi
+%else
+%define arg1 rcx
+%define arg2 rdx
+%endif
+
+%define state arg1
+%define job arg2
+%define len2 arg2
+
+
+; idx needs to be in rbx, rbp, r12-r15
+%define idx rbp
+
+%define unused_lanes rbx
+%define lane_data rbx
+%define tmp2 rbx
+
+%define job_rax rax
+%define tmp1 rax
+%define size_offset rax
+%define tmp rax
+%define start_offset rax
+
+%define tmp3 arg1
+
+%define extra_blocks arg2
+%define p arg2
+
+%define tmp4 r8
+%define p2 r8
+
+; This routine clobbers rbx, rbp
+struc STACK
+_gpr_save: resq 4
+_rsp_save: resq 1
+endstruc
+
+%define APPEND(a,b) a %+ b
+
+; JOB* flush_job_hmac_ni_sse(MB_MGR_HMAC_SHA_1_OOO *state)
+; arg 1 : state
+MKGLOBAL(flush_job_hmac_ni_sse,function,internal)
+flush_job_hmac_ni_sse:
+
+ mov rax, rsp
+ sub rsp, STACK_size
+ and rsp, -16
+
+ mov [rsp + _gpr_save + 8*0], rbx
+ mov [rsp + _gpr_save + 8*1], rbp
+%ifndef LINUX
+ mov [rsp + _gpr_save + 8*2], rsi
+ mov [rsp + _gpr_save + 8*3], rdi
+%endif
+ mov [rsp + _rsp_save], rax ; original SP
+
+ DBGPRINTL "enter sha1-ni-sse flush"
+ mov unused_lanes, [state + _unused_lanes]
+ bt unused_lanes, 16+7
+ jc return_null
+
+ ; find a lane with a non-null job, assume it is 0 then check 1
+ xor idx, idx
+ cmp qword [state + _ldata + 1 * _HMAC_SHA1_LANE_DATA_size + _job_in_lane], 0
+ cmovne idx, [rel one]
+ DBGPRINTL64 "idx:", idx
+
+copy_lane_data:
+ ; copy valid lane (idx) to empty lanes
+ mov tmp, [state + _args_data_ptr + PTR_SZ*idx]
+ movzx len2, word [state + _lens + idx*2]
+
+ DBGPRINTL64 "ptr", tmp
+
+        ; there are only two lanes, so the other (possibly empty) lane is simply idx ^ 1
+ xor idx, 1
+ mov [state + _args_data_ptr + PTR_SZ*idx], tmp
+ xor idx, 1
+
+ ; No need to find min length - only two lanes available
+ cmp len2, 0
+ je len_is_0
+
+ ; Set length on both lanes to 0
+ mov dword [state + _lens], 0
+
+ ; "state" and "args" are the same address, arg1
+ ; len is arg2
+ call sha1_ni
+ ; state is intact
+
+len_is_0:
+ ; process completed job "idx"
+ imul lane_data, idx, _HMAC_SHA1_LANE_DATA_size
+ lea lane_data, [state + _ldata + lane_data]
+ mov DWORD(extra_blocks), [lane_data + _extra_blocks]
+ cmp extra_blocks, 0
+ jne proc_extra_blocks
+ cmp dword [lane_data + _outer_done], 0
+ jne end_loop
+
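+        ;; HMAC outer pass: the finished inner digest (byte-swapped to big endian)
+        ;; becomes the 20-byte outer message and the lane digest is re-seeded from
+        ;; the precomputed key-XOR-opad state before hashing one final block.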
+proc_outer:
+ mov dword [lane_data + _outer_done], 1
+ mov DWORD(size_offset), [lane_data + _size_offset]
+ mov qword [lane_data + _extra_block + size_offset], 0
+ mov word [state + _lens + 2*idx], 1
+ DBGPRINTL64 "outer-block-index", idx
+ lea tmp, [lane_data + _outer_block]
+ DBGPRINTL64 "outer block ptr:", tmp
+ mov [state + _args_data_ptr + PTR_SZ*idx], tmp
+
+        ;; with SHA-NI each lane's digest is stored as 5 contiguous dwords
+        ;; (row size 20), so p2 = idx*5 selects this lane's digest words
+%if SHA1NI_DIGEST_ROW_SIZE != 20
+%error "Below code has been optimized for SHA1NI_DIGEST_ROW_SIZE = 20!"
+%endif
+ lea p2, [idx + idx*4]
+ movdqu xmm0, [state + _args_digest + p2*4]
+ pshufb xmm0, [rel byteswap]
+ mov DWORD(tmp), [state + _args_digest + p2*4 + 4*SHA1_DIGEST_WORD_SIZE]
+ bswap DWORD(tmp)
+ movdqa [lane_data + _outer_block], xmm0
+ mov [lane_data + _outer_block + 4*SHA1_DIGEST_WORD_SIZE], DWORD(tmp)
+ DBGPRINTL_XMM "sha1 outer hash input words[0-3]", xmm0
+ DBGPRINTL64 "sha1 outer hash input word 4", tmp
+ mov job, [lane_data + _job_in_lane]
+ mov tmp, [job + _auth_key_xor_opad]
+ movdqu xmm0, [tmp]
+ mov DWORD(tmp), [tmp + 4*SHA1_DIGEST_WORD_SIZE]
+ movdqu [state + _args_digest + p2*4], xmm0
+ mov [state + _args_digest + p2*4 + 4*SHA1_DIGEST_WORD_SIZE], DWORD(tmp)
+
+ jmp copy_lane_data
+
+ align 16
+proc_extra_blocks:
+ mov DWORD(start_offset), [lane_data + _start_offset]
+ DBGPRINTL64 "extra blocks-start offset", start_offset
+ mov [state + _lens + 2*idx], WORD(extra_blocks)
+ DBGPRINTL64 "extra blocks-len", extra_blocks
+ lea tmp, [lane_data + _extra_block + start_offset]
+ DBGPRINTL64 "extra block ptr", tmp
+ mov [state + _args_data_ptr + PTR_SZ*idx], tmp
+ mov dword [lane_data + _extra_blocks], 0
+ jmp copy_lane_data
+
+return_null:
+ xor job_rax, job_rax
+ jmp return
+
+ align 16
+end_loop:
+ mov job_rax, [lane_data + _job_in_lane]
+ mov qword [lane_data + _job_in_lane], 0
+ or dword [job_rax + _status], STS_COMPLETED_HMAC
+ mov unused_lanes, [state + _unused_lanes]
+ shl unused_lanes, 8
+ or unused_lanes, idx
+ mov [state + _unused_lanes], unused_lanes
+
+ mov p, [job_rax + _auth_tag_output]
+
+ ; copy 12 bytes
+%if SHA1NI_DIGEST_ROW_SIZE != 20
+%error "Below code has been optimized for SHA1NI_DIGEST_ROW_SIZE = 20!"
+%endif
+ lea idx, [idx + idx*4]
+ mov DWORD(tmp2), [state + _args_digest + idx*4 + 0*SHA1_DIGEST_WORD_SIZE]
+ mov DWORD(tmp4), [state + _args_digest + idx*4 + 1*SHA1_DIGEST_WORD_SIZE]
+ bswap DWORD(tmp2)
+ bswap DWORD(tmp4)
+ mov [p + 0*4], DWORD(tmp2)
+ mov [p + 1*4], DWORD(tmp4)
+ mov DWORD(tmp2), [state + _args_digest + idx*4 + 2*SHA1_DIGEST_WORD_SIZE]
+ bswap DWORD(tmp2)
+ mov [p + 2*4], DWORD(tmp2)
+
+ cmp qword [job_rax + _auth_tag_output_len_in_bytes], 12
+ je clear_ret
+
+ ;; copy remaining 8 bytes to return 20 byte digest
+ mov DWORD(tmp2), [state + _args_digest + idx*4 + 3*SHA1_DIGEST_WORD_SIZE]
+ mov DWORD(tmp4), [state + _args_digest + idx*4 + 4*SHA1_DIGEST_WORD_SIZE]
+ bswap DWORD(tmp2)
+ bswap DWORD(tmp4)
+ mov [p + 3*4], DWORD(tmp2)
+ mov [p + 4*4], DWORD(tmp4)
+
+clear_ret:
+
+%ifdef SAFE_DATA
+ pxor xmm0, xmm0
+
+ ;; Clear digest (20B), outer_block (20B) and extra_block (64B)
+ ;; of returned job and NULL jobs
+%assign I 0
+%rep 2
+ cmp qword [state + _ldata + (I*_HMAC_SHA1_LANE_DATA_size) + _job_in_lane], 0
+ jne APPEND(skip_clear_,I)
+
+ ;; Clear digest
+ movdqu [state + _args_digest + I*20], xmm0
+ mov dword [state + _args_digest + I*20 + 16], 0
+
+ lea lane_data, [state + _ldata + (I*_HMAC_SHA1_LANE_DATA_size)]
+ ;; Clear first 64 bytes of extra_block
+%assign offset 0
+%rep 4
+ movdqa [lane_data + _extra_block + offset], xmm0
+%assign offset (offset + 16)
+%endrep
+
+ ;; Clear first 20 bytes of outer_block
+ movdqa [lane_data + _outer_block], xmm0
+ mov dword [lane_data + _outer_block + 16], 0
+
+APPEND(skip_clear_,I):
+%assign I (I+1)
+%endrep
+
+%endif ;; SAFE_DATA
+
+return:
+
+ mov rbx, [rsp + _gpr_save + 8*0]
+ mov rbp, [rsp + _gpr_save + 8*1]
+%ifndef LINUX
+ mov rsi, [rsp + _gpr_save + 8*2]
+ mov rdi, [rsp + _gpr_save + 8*3]
+%endif
+ mov rsp, [rsp + _rsp_save] ; original SP
+
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/sse/mb_mgr_hmac_flush_sse.asm b/src/spdk/intel-ipsec-mb/sse/mb_mgr_hmac_flush_sse.asm
new file mode 100644
index 000000000..0f760b01c
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/sse/mb_mgr_hmac_flush_sse.asm
@@ -0,0 +1,302 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%include "include/os.asm"
+%include "job_aes_hmac.asm"
+%include "mb_mgr_datastruct.asm"
+%include "include/reg_sizes.asm"
+
+;%define DO_DBGPRINT
+%include "include/dbgprint.asm"
+
+extern sha1_mult_sse
+
+section .data
+default rel
+
+align 16
+byteswap: ;ddq 0x0c0d0e0f08090a0b0405060700010203
+ dq 0x0405060700010203, 0x0c0d0e0f08090a0b
+x80: ;ddq 0x00000000000000000000000000000080
+ dq 0x0000000000000080, 0x0000000000000000
+x00: ;ddq 0x00000000000000000000000000000000
+ dq 0x0000000000000000, 0x0000000000000000
+len_masks:
+ ;ddq 0x0000000000000000000000000000FFFF
+ dq 0x000000000000FFFF, 0x0000000000000000
+ ;ddq 0x000000000000000000000000FFFF0000
+ dq 0x00000000FFFF0000, 0x0000000000000000
+ ;ddq 0x00000000000000000000FFFF00000000
+ dq 0x0000FFFF00000000, 0x0000000000000000
+ ;ddq 0x0000000000000000FFFF000000000000
+ dq 0xFFFF000000000000, 0x0000000000000000
+one: dq 1
+two: dq 2
+three: dq 3
+
+section .text
+
+%if 1
+%ifdef LINUX
+%define arg1 rdi
+%define arg2 rsi
+%else
+%define arg1 rcx
+%define arg2 rdx
+%endif
+
+%define state arg1
+%define job arg2
+%define len2 arg2
+
+
+; idx needs to be in rbx, rbp, r12-r15
+%define idx rbp
+
+%define unused_lanes rbx
+%define lane_data rbx
+%define tmp2 rbx
+
+%define job_rax rax
+%define tmp1 rax
+%define size_offset rax
+%define tmp rax
+%define start_offset rax
+
+%define tmp3 arg1
+
+%define extra_blocks arg2
+%define p arg2
+
+%define tmp4 r8
+
+%endif
+
+; This routine clobbers rbx, rbp
+struc STACK
+_gpr_save: resq 2
+_rsp_save: resq 1
+endstruc
+
+%define APPEND(a,b) a %+ b
+
+; JOB* flush_job_hmac_sse(MB_MGR_HMAC_SHA_1_OOO *state)
+; arg 1 : state
+MKGLOBAL(flush_job_hmac_sse,function,internal)
+flush_job_hmac_sse:
+
+ mov rax, rsp
+ sub rsp, STACK_size
+ and rsp, -16
+
+ mov [rsp + _gpr_save + 8*0], rbx
+ mov [rsp + _gpr_save + 8*1], rbp
+ mov [rsp + _rsp_save], rax ; original SP
+
+ DBGPRINTL "enter sha1-sse flush"
+ mov unused_lanes, [state + _unused_lanes]
+ bt unused_lanes, 32+7
+ jc return_null
+
+ ; find a lane with a non-null job
+ xor idx, idx
+ cmp qword [state + _ldata + 1 * _HMAC_SHA1_LANE_DATA_size + _job_in_lane], 0
+ cmovne idx, [rel one]
+ cmp qword [state + _ldata + 2 * _HMAC_SHA1_LANE_DATA_size + _job_in_lane], 0
+ cmovne idx, [rel two]
+ cmp qword [state + _ldata + 3 * _HMAC_SHA1_LANE_DATA_size + _job_in_lane], 0
+ cmovne idx, [rel three]
+copy_lane_data:
+ ; copy valid lane (idx) to empty lanes
+ movdqa xmm0, [state + _lens]
+ mov tmp, [state + _args_data_ptr + PTR_SZ*idx]
+
+%assign I 0
+%rep 4
+ cmp qword [state + _ldata + I * _HMAC_SHA1_LANE_DATA_size + _job_in_lane], 0
+ jne APPEND(skip_,I)
+ mov [state + _args_data_ptr + PTR_SZ*I], tmp
+ por xmm0, [rel len_masks + 16*I]
+APPEND(skip_,I):
+%assign I (I+1)
+%endrep
+
+ movdqa [state + _lens], xmm0
+
+ phminposuw xmm1, xmm0
+ pextrw len2, xmm1, 0 ; min value
+ pextrw idx, xmm1, 1 ; min index (0...3)
+ cmp len2, 0
+ je len_is_0
+
+ pshuflw xmm1, xmm1, 0
+ psubw xmm0, xmm1
+ movdqa [state + _lens], xmm0
+
+ ; "state" and "args" are the same address, arg1
+ ; len is arg2
+ call sha1_mult_sse
+ ; state is intact
+
+len_is_0:
+ ; process completed job "idx"
+ imul lane_data, idx, _HMAC_SHA1_LANE_DATA_size
+ lea lane_data, [state + _ldata + lane_data]
+ mov DWORD(extra_blocks), [lane_data + _extra_blocks]
+ cmp extra_blocks, 0
+ jne proc_extra_blocks
+ cmp dword [lane_data + _outer_done], 0
+ jne end_loop
+
+proc_outer:
+ mov dword [lane_data + _outer_done], 1
+ mov DWORD(size_offset), [lane_data + _size_offset]
+ mov qword [lane_data + _extra_block + size_offset], 0
+ mov word [state + _lens + 2*idx], 1
+ lea tmp, [lane_data + _outer_block]
+ mov job, [lane_data + _job_in_lane]
+ mov [state + _args_data_ptr + PTR_SZ*idx], tmp
+
+ ;; idx determines which column
+ ;; read off from consecutive rows
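+        ;; (word J of lane idx lives at _args_digest + SHA1_DIGEST_WORD_SIZE*idx + J*SHA1_DIGEST_ROW_SIZE)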
+ movd xmm0, [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 0*SHA1_DIGEST_ROW_SIZE]
+ pinsrd xmm0, [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 1*SHA1_DIGEST_ROW_SIZE], 1
+ pinsrd xmm0, [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 2*SHA1_DIGEST_ROW_SIZE], 2
+ pinsrd xmm0, [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 3*SHA1_DIGEST_ROW_SIZE], 3
+ pshufb xmm0, [rel byteswap]
+ mov DWORD(tmp), [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 4*SHA1_DIGEST_ROW_SIZE]
+ bswap DWORD(tmp)
+ movdqa [lane_data + _outer_block], xmm0
+ mov [lane_data + _outer_block + 4*4], DWORD(tmp)
+ DBGPRINTL_XMM "sha1 outer hash input words[0-3]", xmm0
+ DBGPRINTL64 "sha1 outer hash input word 4", tmp
+ mov tmp, [job + _auth_key_xor_opad]
+ movdqu xmm0, [tmp]
+ mov DWORD(tmp), [tmp + 4*4]
+ movd [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 0*SHA1_DIGEST_ROW_SIZE], xmm0
+ pextrd [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 1*SHA1_DIGEST_ROW_SIZE], xmm0, 1
+ pextrd [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 2*SHA1_DIGEST_ROW_SIZE], xmm0, 2
+ pextrd [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 3*SHA1_DIGEST_ROW_SIZE], xmm0, 3
+ mov [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 4*SHA1_DIGEST_ROW_SIZE], DWORD(tmp)
+
+ jmp copy_lane_data
+
+ align 16
+proc_extra_blocks:
+ mov DWORD(start_offset), [lane_data + _start_offset]
+ mov [state + _lens + 2*idx], WORD(extra_blocks)
+ lea tmp, [lane_data + _extra_block + start_offset]
+ mov [state + _args_data_ptr + PTR_SZ*idx], tmp
+ mov dword [lane_data + _extra_blocks], 0
+ jmp copy_lane_data
+
+return_null:
+ xor job_rax, job_rax
+ jmp return
+
+ align 16
+end_loop:
+ mov job_rax, [lane_data + _job_in_lane]
+ mov qword [lane_data + _job_in_lane], 0
+ or dword [job_rax + _status], STS_COMPLETED_HMAC
+ mov unused_lanes, [state + _unused_lanes]
+ shl unused_lanes, 8
+ or unused_lanes, idx
+ mov [state + _unused_lanes], unused_lanes
+
+ mov p, [job_rax + _auth_tag_output]
+
+ ; copy 12 bytes
+ mov DWORD(tmp2), [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 0*SHA1_DIGEST_ROW_SIZE]
+ mov DWORD(tmp4), [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 1*SHA1_DIGEST_ROW_SIZE]
+ bswap DWORD(tmp2)
+ bswap DWORD(tmp4)
+ mov [p + 0*4], DWORD(tmp2)
+ mov [p + 1*4], DWORD(tmp4)
+ mov DWORD(tmp2), [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 2*SHA1_DIGEST_ROW_SIZE]
+ bswap DWORD(tmp2)
+ mov [p + 2*4], DWORD(tmp2)
+
+ cmp qword [job_rax + _auth_tag_output_len_in_bytes], 12
+ je clear_ret
+
+ ;; copy remaining 8 bytes to return 20 byte digest
+ mov DWORD(tmp2), [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 3*SHA1_DIGEST_ROW_SIZE]
+ mov DWORD(tmp4), [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 4*SHA1_DIGEST_ROW_SIZE]
+ bswap DWORD(tmp2)
+ bswap DWORD(tmp4)
+ mov [p + 3*SHA1_DIGEST_WORD_SIZE], DWORD(tmp2)
+ mov [p + 4*SHA1_DIGEST_WORD_SIZE], DWORD(tmp4)
+
+clear_ret:
+
+%ifdef SAFE_DATA
+ pxor xmm0, xmm0
+
+ ;; Clear digest (20B), outer_block (20B) and extra_block (64B)
+ ;; of returned job and NULL jobs
+%assign I 0
+%rep 4
+ cmp qword [state + _ldata + (I*_HMAC_SHA1_LANE_DATA_size) + _job_in_lane], 0
+ jne APPEND(skip_clear_,I)
+
+ ;; Clear digest
+ mov dword [state + _args_digest + SHA1_DIGEST_WORD_SIZE*I + 0*SHA1_DIGEST_ROW_SIZE], 0
+ mov dword [state + _args_digest + SHA1_DIGEST_WORD_SIZE*I + 1*SHA1_DIGEST_ROW_SIZE], 0
+ mov dword [state + _args_digest + SHA1_DIGEST_WORD_SIZE*I + 2*SHA1_DIGEST_ROW_SIZE], 0
+ mov dword [state + _args_digest + SHA1_DIGEST_WORD_SIZE*I + 3*SHA1_DIGEST_ROW_SIZE], 0
+ mov dword [state + _args_digest + SHA1_DIGEST_WORD_SIZE*I + 4*SHA1_DIGEST_ROW_SIZE], 0
+
+ lea lane_data, [state + _ldata + (I*_HMAC_SHA1_LANE_DATA_size)]
+ ;; Clear first 64 bytes of extra_block
+%assign offset 0
+%rep 4
+ movdqa [lane_data + _extra_block + offset], xmm0
+%assign offset (offset + 16)
+%endrep
+
+ ;; Clear first 20 bytes of outer_block
+ movdqa [lane_data + _outer_block], xmm0
+ mov dword [lane_data + _outer_block + 16], 0
+
+APPEND(skip_clear_,I):
+%assign I (I+1)
+%endrep
+
+%endif ;; SAFE_DATA
+
+return:
+
+ mov rbx, [rsp + _gpr_save + 8*0]
+ mov rbp, [rsp + _gpr_save + 8*1]
+ mov rsp, [rsp + _rsp_save] ; original SP
+
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/sse/mb_mgr_hmac_md5_flush_sse.asm b/src/spdk/intel-ipsec-mb/sse/mb_mgr_hmac_md5_flush_sse.asm
new file mode 100644
index 000000000..d23f37976
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/sse/mb_mgr_hmac_md5_flush_sse.asm
@@ -0,0 +1,318 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%include "include/os.asm"
+%include "job_aes_hmac.asm"
+%include "mb_mgr_datastruct.asm"
+%include "include/reg_sizes.asm"
+
+extern md5_x4x2_sse
+
+section .data
+default rel
+align 16
+dupw: ;ddq 0x01000100010001000100010001000100
+ dq 0x0100010001000100, 0x0100010001000100
+len_masks:
+ ;ddq 0x0000000000000000000000000000FFFF
+ dq 0x000000000000FFFF, 0x0000000000000000
+ ;ddq 0x000000000000000000000000FFFF0000
+ dq 0x00000000FFFF0000, 0x0000000000000000
+ ;ddq 0x00000000000000000000FFFF00000000
+ dq 0x0000FFFF00000000, 0x0000000000000000
+ ;ddq 0x0000000000000000FFFF000000000000
+ dq 0xFFFF000000000000, 0x0000000000000000
+ ;ddq 0x000000000000FFFF0000000000000000
+ dq 0x0000000000000000, 0x000000000000FFFF
+ ;ddq 0x00000000FFFF00000000000000000000
+ dq 0x0000000000000000, 0x00000000FFFF0000
+ ;ddq 0x0000FFFF000000000000000000000000
+ dq 0x0000000000000000, 0x0000FFFF00000000
+ ;ddq 0xFFFF0000000000000000000000000000
+ dq 0x0000000000000000, 0xFFFF000000000000
+one: dq 1
+two: dq 2
+three: dq 3
+four: dq 4
+five: dq 5
+six: dq 6
+seven: dq 7
+
+section .text
+
+%if 1
+%ifdef LINUX
+%define arg1 rdi
+%define arg2 rsi
+%else
+%define arg1 rcx
+%define arg2 rdx
+%endif
+
+%define state arg1
+%define job arg2
+%define len2 arg2
+
+
+; idx needs to be in rbp
+%define idx rbp
+
+; unused_lanes must be in rax-rdx
+%define unused_lanes rbx
+%define lane_data rbx
+%define tmp2 rbx
+
+%define job_rax rax
+%define tmp1 rax
+%define size_offset rax
+%define tmp rax
+%define start_offset rax
+
+%define tmp3 arg1
+
+%define extra_blocks arg2
+%define p arg2
+
+%define tmp4 r8
+%define tmp5 r9
+
+%endif
+
+; This routine and/or the called routine clobbers all GPRs
+struc STACK
+_gpr_save: resq 8
+_rsp_save: resq 1
+endstruc
+
+%define APPEND(a,b) a %+ b
+
+; JOB* flush_job_hmac_md5_sse(MB_MGR_HMAC_MD5_OOO *state)
+; arg 1 : rcx : state
+MKGLOBAL(flush_job_hmac_md5_sse,function,internal)
+flush_job_hmac_md5_sse:
+
+ mov rax, rsp
+ sub rsp, STACK_size
+ and rsp, -16
+
+ mov [rsp + _gpr_save + 8*0], rbx
+ mov [rsp + _gpr_save + 8*1], rbp
+ mov [rsp + _gpr_save + 8*2], r12
+ mov [rsp + _gpr_save + 8*3], r13
+ mov [rsp + _gpr_save + 8*4], r14
+ mov [rsp + _gpr_save + 8*5], r15
+%ifndef LINUX
+ mov [rsp + _gpr_save + 8*6], rsi
+ mov [rsp + _gpr_save + 8*7], rdi
+%endif
+ mov [rsp + _rsp_save], rax ; original SP
+
+ mov unused_lanes, [state + _unused_lanes_md5]
+ bt unused_lanes, 32+3
+ jc return_null
+
+ ; find a lane with a non-null job
+ xor idx, idx
+ cmp qword [state + _ldata_md5 + 1 * _HMAC_SHA1_LANE_DATA_size + _job_in_lane],0
+ cmovne idx, [rel one]
+ cmp qword [state + _ldata_md5 + 2 * _HMAC_SHA1_LANE_DATA_size + _job_in_lane],0
+ cmovne idx, [rel two]
+ cmp qword [state + _ldata_md5 + 3 * _HMAC_SHA1_LANE_DATA_size + _job_in_lane],0
+ cmovne idx, [rel three]
+ cmp qword [state + _ldata_md5 + 4 * _HMAC_SHA1_LANE_DATA_size + _job_in_lane],0
+ cmovne idx, [rel four]
+ cmp qword [state + _ldata_md5 + 5 * _HMAC_SHA1_LANE_DATA_size + _job_in_lane],0
+ cmovne idx, [rel five]
+ cmp qword [state + _ldata_md5 + 6 * _HMAC_SHA1_LANE_DATA_size + _job_in_lane],0
+ cmovne idx, [rel six]
+ cmp qword [state + _ldata_md5 + 7 * _HMAC_SHA1_LANE_DATA_size + _job_in_lane],0
+ cmovne idx, [rel seven]
+
+copy_lane_data:
+ ; copy good lane (idx) to empty lanes
+ movdqa xmm0, [state + _lens_md5]
+ mov tmp, [state + _args_data_ptr_md5 + PTR_SZ*idx]
+
+%assign I 0
+%rep 8
+ cmp qword [state + _ldata_md5 + I * _HMAC_SHA1_LANE_DATA_size + _job_in_lane], 0
+ jne APPEND(skip_,I)
+ mov [state + _args_data_ptr_md5 + PTR_SZ*I], tmp
+ por xmm0, [rel len_masks + 16*I]
+APPEND(skip_,I):
+%assign I (I+1)
+%endrep
+
+ movdqa [state + _lens_md5], xmm0
+
+ phminposuw xmm1, xmm0
+ pextrw len2, xmm1, 0 ; min value
+        pextrw  idx, xmm1, 1    ; min index (0...7)
+ cmp len2, 0
+ je len_is_0
+
+ pshufb xmm1, [rel dupw] ; duplicate words across all lanes
+ psubw xmm0, xmm1
+ movdqa [state + _lens_md5], xmm0
+
+ ; "state" and "args" are the same address, arg1
+ ; len is arg2
+ call md5_x4x2_sse
+ ; state and idx are intact
+
+len_is_0:
+ ; process completed job "idx"
+ imul lane_data, idx, _HMAC_SHA1_LANE_DATA_size
+ lea lane_data, [state + _ldata_md5 + lane_data]
+ mov DWORD(extra_blocks), [lane_data + _extra_blocks]
+ cmp extra_blocks, 0
+ jne proc_extra_blocks
+ cmp dword [lane_data + _outer_done], 0
+ jne end_loop
+
+proc_outer:
+ mov dword [lane_data + _outer_done], 1
+ mov DWORD(size_offset), [lane_data + _size_offset]
+ mov qword [lane_data + _extra_block + size_offset], 0
+ mov word [state + _lens_md5 + 2*idx], 1
+ lea tmp, [lane_data + _outer_block]
+ mov job, [lane_data + _job_in_lane]
+ mov [state + _args_data_ptr_md5 + PTR_SZ*idx], tmp
+
+ movd xmm0, [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 0*MD5_DIGEST_ROW_SIZE]
+ pinsrd xmm0, [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 1*MD5_DIGEST_ROW_SIZE], 1
+ pinsrd xmm0, [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 2*MD5_DIGEST_ROW_SIZE], 2
+ pinsrd xmm0, [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 3*MD5_DIGEST_ROW_SIZE], 3
+; pshufb xmm0, [byteswap wrt rip]
+ movdqa [lane_data + _outer_block], xmm0
+
+ mov tmp, [job + _auth_key_xor_opad]
+ movdqu xmm0, [tmp]
+ movd [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 0*MD5_DIGEST_ROW_SIZE], xmm0
+ pextrd [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 1*MD5_DIGEST_ROW_SIZE], xmm0, 1
+ pextrd [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 2*MD5_DIGEST_ROW_SIZE], xmm0, 2
+ pextrd [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 3*MD5_DIGEST_ROW_SIZE], xmm0, 3
+ jmp copy_lane_data
+
+ align 16
+proc_extra_blocks:
+ mov DWORD(start_offset), [lane_data + _start_offset]
+ mov [state + _lens_md5 + 2*idx], WORD(extra_blocks)
+ lea tmp, [lane_data + _extra_block + start_offset]
+ mov [state + _args_data_ptr_md5 + PTR_SZ*idx], tmp
+ mov dword [lane_data + _extra_blocks], 0
+ jmp copy_lane_data
+
+return_null:
+ xor job_rax, job_rax
+ jmp return
+
+ align 16
+end_loop:
+ mov job_rax, [lane_data + _job_in_lane]
+ mov qword [lane_data + _job_in_lane], 0
+ or dword [job_rax + _status], STS_COMPLETED_HMAC
+ mov unused_lanes, [state + _unused_lanes_md5]
+ shl unused_lanes, 4
+ or unused_lanes, idx
+ mov [state + _unused_lanes_md5], unused_lanes
+
+ mov p, [job_rax + _auth_tag_output]
+
+ ; copy 12 bytes
+ mov DWORD(tmp2), [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 0*MD5_DIGEST_ROW_SIZE]
+ mov DWORD(tmp4), [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 1*MD5_DIGEST_ROW_SIZE]
+ mov DWORD(tmp5), [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 2*MD5_DIGEST_ROW_SIZE]
+; bswap DWORD(tmp2)
+; bswap DWORD(tmp4)
+; bswap DWORD(tmp3)
+ mov [p + 0*4], DWORD(tmp2)
+ mov [p + 1*4], DWORD(tmp4)
+ mov [p + 2*4], DWORD(tmp5)
+
+ cmp DWORD [job_rax + _auth_tag_output_len_in_bytes], 12
+ je clear_ret
+
+        ; copy remaining 4 bytes to return a full 16 byte digest
+ mov DWORD(tmp5), [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 3*MD5_DIGEST_ROW_SIZE]
+ mov [p + 3*4], DWORD(tmp5)
+
+clear_ret:
+
+%ifdef SAFE_DATA
+ pxor xmm0, xmm0
+
+ ;; Clear digest (16B), outer_block (16B) and extra_block (64B)
+ ;; of returned job and NULL jobs
+%assign I 0
+%rep 8
+ cmp qword [state + _ldata_md5 + (I*_HMAC_SHA1_LANE_DATA_size) + _job_in_lane], 0
+ jne APPEND(skip_clear_,I)
+
+ ;; Clear digest (16 bytes)
+%assign J 0
+%rep 4
+ mov dword [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*I + J*MD5_DIGEST_ROW_SIZE], 0
+%assign J (J+1)
+%endrep
+
+ lea lane_data, [state + _ldata_md5 + (I*_HMAC_SHA1_LANE_DATA_size)]
+ ;; Clear first 64 bytes of extra_block
+%assign offset 0
+%rep 4
+ movdqa [lane_data + _extra_block + offset], xmm0
+%assign offset (offset + 16)
+%endrep
+
+ ;; Clear first 16 bytes of outer_block
+ movdqa [lane_data + _outer_block], xmm0
+
+APPEND(skip_clear_,I):
+%assign I (I+1)
+%endrep
+
+%endif ;; SAFE_DATA
+
+return:
+
+ mov rbx, [rsp + _gpr_save + 8*0]
+ mov rbp, [rsp + _gpr_save + 8*1]
+ mov r12, [rsp + _gpr_save + 8*2]
+ mov r13, [rsp + _gpr_save + 8*3]
+ mov r14, [rsp + _gpr_save + 8*4]
+ mov r15, [rsp + _gpr_save + 8*5]
+%ifndef LINUX
+ mov rsi, [rsp + _gpr_save + 8*6]
+ mov rdi, [rsp + _gpr_save + 8*7]
+%endif
+ mov rsp, [rsp + _rsp_save] ; original SP
+
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/sse/mb_mgr_hmac_md5_submit_sse.asm b/src/spdk/intel-ipsec-mb/sse/mb_mgr_hmac_md5_submit_sse.asm
new file mode 100644
index 000000000..acf78fd6d
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/sse/mb_mgr_hmac_md5_submit_sse.asm
@@ -0,0 +1,356 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%include "include/os.asm"
+%include "job_aes_hmac.asm"
+%include "mb_mgr_datastruct.asm"
+%include "include/memcpy.asm"
+%include "include/reg_sizes.asm"
+%include "include/const.inc"
+
+extern md5_x4x2_sse
+
+section .data
+default rel
+align 16
+;byteswap: ddq 0x0c0d0e0f08090a0b0405060700010203
+dupw: ;ddq 0x01000100010001000100010001000100
+ dq 0x0100010001000100, 0x0100010001000100
+
+section .text
+
+%if 1
+%ifdef LINUX
+%define arg1 rdi
+%define arg2 rsi
+%define reg3 rcx
+%define reg4 rdx
+%else
+%define arg1 rcx
+%define arg2 rdx
+%define reg3 rdi
+%define reg4 rsi
+%endif
+
+%define state arg1
+%define job arg2
+%define len2 arg2
+
+
+; idx needs to be in rbp
+%define last_len rbp
+%define idx rbp
+
+%define p r11
+%define start_offset r11
+
+%define unused_lanes rbx
+%define tmp4 rbx
+
+%define job_rax rax
+%define len rax
+
+%define size_offset reg3
+%define tmp2 reg3
+
+%define lane reg4
+%define tmp3 reg4
+
+%define extra_blocks r8
+
+%define tmp r9
+%define p2 r9
+
+%define lane_data r10
+
+%endif
+
+; This routine and/or the called routine clobbers all GPRs
+struc STACK
+_gpr_save: resq 8
+_rsp_save: resq 1
+endstruc
+
+; JOB* submit_job_hmac_md5_sse(MB_MGR_HMAC_MD5_OOO *state, JOB_AES_HMAC *job)
+; arg 1 : rcx : state
+; arg 2 : rdx : job
+MKGLOBAL(submit_job_hmac_md5_sse,function,internal)
+submit_job_hmac_md5_sse:
+
+ mov rax, rsp
+ sub rsp, STACK_size
+ and rsp, -16
+
+ mov [rsp + _gpr_save + 8*0], rbx
+ mov [rsp + _gpr_save + 8*1], rbp
+ mov [rsp + _gpr_save + 8*2], r12
+ mov [rsp + _gpr_save + 8*3], r13
+ mov [rsp + _gpr_save + 8*4], r14
+ mov [rsp + _gpr_save + 8*5], r15
+%ifndef LINUX
+ mov [rsp + _gpr_save + 8*6], rsi
+ mov [rsp + _gpr_save + 8*7], rdi
+%endif
+ mov [rsp + _rsp_save], rax ; original SP
+
+ mov unused_lanes, [state + _unused_lanes_md5]
+ mov lane, unused_lanes
+ and lane, 0xF
+ shr unused_lanes, 4
+ imul lane_data, lane, _HMAC_SHA1_LANE_DATA_size
+ lea lane_data, [state + _ldata_md5 + lane_data]
+ mov [state + _unused_lanes_md5], unused_lanes
+ mov len, [job + _msg_len_to_hash_in_bytes]
+ mov tmp, len
+ shr tmp, 6 ; divide by 64, len in terms of blocks
+
+ mov [lane_data + _job_in_lane], job
+ mov dword [lane_data + _outer_done], 0
+
+ ;; insert len into proper lane
+ movdqa xmm0, [state + _lens_md5]
+ XPINSRW xmm0, xmm1, p, lane, tmp, scale_x16
+ movdqa [state + _lens_md5], xmm0
+
+ mov last_len, len
+ and last_len, 63
+ lea extra_blocks, [last_len + 9 + 63]
+ shr extra_blocks, 6
+ mov [lane_data + _extra_blocks], DWORD(extra_blocks)
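+        ;; extra_blocks = ceil((last_len + 1 pad byte + 8 length bytes) / 64):
+        ;; number of trailing 64-byte blocks needed for the message tail plus MD5 padding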
+
+ mov p, [job + _src]
+ add p, [job + _hash_start_src_offset_in_bytes]
+ mov [state + _args_data_ptr_md5 + PTR_SZ*lane], p
+
+ cmp len, 64
+ jb copy_lt64
+
+fast_copy:
+ add p, len
+ movdqu xmm0, [p - 64 + 0*16]
+ movdqu xmm1, [p - 64 + 1*16]
+ movdqu xmm2, [p - 64 + 2*16]
+ movdqu xmm3, [p - 64 + 3*16]
+ movdqa [lane_data + _extra_block + 0*16], xmm0
+ movdqa [lane_data + _extra_block + 1*16], xmm1
+ movdqa [lane_data + _extra_block + 2*16], xmm2
+ movdqa [lane_data + _extra_block + 3*16], xmm3
+end_fast_copy:
+
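+        ;; The padded tail occupies extra_block[start_offset .. start_offset + extra_blocks*64);
+        ;; size_offset marks its last 8 bytes, where the bit-length field is stored,
+        ;; and start_offset = 64 - last_len is where the message tail begins.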
+ mov size_offset, extra_blocks
+ shl size_offset, 6
+ sub size_offset, last_len
+ add size_offset, 64-8
+ mov [lane_data + _size_offset], DWORD(size_offset)
+ mov start_offset, 64
+ sub start_offset, last_len
+ mov [lane_data + _start_offset], DWORD(start_offset)
+
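+        ;; MD5 length field: total bits = (64-byte ipad block + len) * 8,
+        ;; stored little-endian, hence no bswap (unlike SHA)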
+ lea tmp, [8*64 + 8*len]
+; bswap tmp
+ mov [lane_data + _extra_block + size_offset], tmp
+
+ mov tmp, [job + _auth_key_xor_ipad]
+ movdqu xmm0, [tmp]
+ movd [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*lane + 0*MD5_DIGEST_ROW_SIZE], xmm0
+ pextrd [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*lane + 1*MD5_DIGEST_ROW_SIZE], xmm0, 1
+ pextrd [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*lane + 2*MD5_DIGEST_ROW_SIZE], xmm0, 2
+ pextrd [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*lane + 3*MD5_DIGEST_ROW_SIZE], xmm0, 3
+
+ test len, ~63
+ jnz ge64_bytes
+
+lt64_bytes:
+ movdqa xmm0, [state + _lens_md5]
+ XPINSRW xmm0, xmm1, tmp, lane, extra_blocks, scale_x16
+ movdqa [state + _lens_md5], xmm0
+
+ lea tmp, [lane_data + _extra_block + start_offset]
+ mov [state + _args_data_ptr_md5 + PTR_SZ*lane], tmp
+ mov dword [lane_data + _extra_blocks], 0
+
+ge64_bytes:
+ cmp unused_lanes, 0xf
+ jne return_null
+ jmp start_loop
+
+ align 16
+start_loop:
+ ; Find min length
+ movdqa xmm0, [state + _lens_md5]
+ phminposuw xmm1, xmm0
+ pextrw len2, xmm1, 0 ; min value
+        pextrw  idx, xmm1, 1    ; min index (0...7)
+ cmp len2, 0
+ je len_is_0
+
+ pshufb xmm1, [rel dupw] ; duplicate words across all lanes
+ psubw xmm0, xmm1
+ movdqa [state + _lens_md5], xmm0
+
+ ; "state" and "args" are the same address, arg1
+ ; len is arg2
+ call md5_x4x2_sse
+ ; state and idx are intact
+
+len_is_0:
+ ; process completed job "idx"
+ imul lane_data, idx, _HMAC_SHA1_LANE_DATA_size
+ lea lane_data, [state + _ldata_md5 + lane_data]
+ mov DWORD(extra_blocks), [lane_data + _extra_blocks]
+ cmp extra_blocks, 0
+ jne proc_extra_blocks
+ cmp dword [lane_data + _outer_done], 0
+ jne end_loop
+
+proc_outer:
+ mov dword [lane_data + _outer_done], 1
+ mov DWORD(size_offset), [lane_data + _size_offset]
+ mov qword [lane_data + _extra_block + size_offset], 0
+
+ movdqa xmm0, [state + _lens_md5]
+ XPINSRW xmm0, xmm1, tmp, idx, 1, scale_x16
+ movdqa [state + _lens_md5], xmm0
+
+ lea tmp, [lane_data + _outer_block]
+ mov job, [lane_data + _job_in_lane]
+ mov [state + _args_data_ptr_md5 + PTR_SZ*idx], tmp
+
+ movd xmm0, [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 0*MD5_DIGEST_ROW_SIZE]
+ pinsrd xmm0, [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 1*MD5_DIGEST_ROW_SIZE], 1
+ pinsrd xmm0, [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 2*MD5_DIGEST_ROW_SIZE], 2
+ pinsrd xmm0, [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 3*MD5_DIGEST_ROW_SIZE], 3
+; pshufb xmm0, [rel byteswap]
+ movdqa [lane_data + _outer_block], xmm0
+
+ mov tmp, [job + _auth_key_xor_opad]
+ movdqu xmm0, [tmp]
+ movd [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 0*MD5_DIGEST_ROW_SIZE], xmm0
+ pextrd [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 1*MD5_DIGEST_ROW_SIZE], xmm0, 1
+ pextrd [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 2*MD5_DIGEST_ROW_SIZE], xmm0, 2
+ pextrd [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 3*MD5_DIGEST_ROW_SIZE], xmm0, 3
+ jmp start_loop
+
+ align 16
+proc_extra_blocks:
+ mov DWORD(start_offset), [lane_data + _start_offset]
+
+ movdqa xmm0, [state + _lens_md5]
+ XPINSRW xmm0, xmm1, tmp, idx, extra_blocks, scale_x16
+ movdqa [state + _lens_md5], xmm0
+
+ lea tmp, [lane_data + _extra_block + start_offset]
+ mov [state + _args_data_ptr_md5 + PTR_SZ*idx], tmp
+ mov dword [lane_data + _extra_blocks], 0
+ jmp start_loop
+
+ align 16
+
+copy_lt64:
+ ;; less than one message block of data
+ ;; beginning of source block
+        ;; destination is extra_block, shifted back by len from where the 0x80 byte was pre-populated
+ ;; p2 clobbers unused_lanes, undo before exiting
+ lea p2, [lane_data + _extra_block + 64]
+ sub p2, len
+ memcpy_sse_64_1 p2, p, len, tmp4, tmp2, xmm0, xmm1, xmm2, xmm3
+ mov unused_lanes, [state + _unused_lanes_md5]
+ jmp end_fast_copy
+
+return_null:
+ xor job_rax, job_rax
+ jmp return
+
+ align 16
+end_loop:
+ mov job_rax, [lane_data + _job_in_lane]
+ mov unused_lanes, [state + _unused_lanes_md5]
+ mov qword [lane_data + _job_in_lane], 0
+ or dword [job_rax + _status], STS_COMPLETED_HMAC
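+        ;; return the completed lane to the free-lane list (4 bits per entry)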
+ shl unused_lanes, 4
+ or unused_lanes, idx
+ mov [state + _unused_lanes_md5], unused_lanes
+
+ mov p, [job_rax + _auth_tag_output]
+
+ ; copy 12 bytes
+ mov DWORD(tmp), [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 0*MD5_DIGEST_ROW_SIZE]
+ mov DWORD(tmp2), [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 1*MD5_DIGEST_ROW_SIZE]
+ mov DWORD(tmp3), [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 2*MD5_DIGEST_ROW_SIZE]
+ mov [p + 0*4], DWORD(tmp)
+ mov [p + 1*4], DWORD(tmp2)
+ mov [p + 2*4], DWORD(tmp3)
+
+ cmp DWORD [job_rax + _auth_tag_output_len_in_bytes], 12
+ je clear_ret
+
+ ; copy 16 bytes
+ mov DWORD(tmp3), [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 3*MD5_DIGEST_ROW_SIZE]
+ mov [p + 3*4], DWORD(tmp3)
+
+clear_ret:
+
+%ifdef SAFE_DATA
+ ;; Clear digest (16B), outer_block (16B) and extra_block (64B) of returned job
+ mov dword [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 0*MD5_DIGEST_ROW_SIZE], 0
+ mov dword [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 1*MD5_DIGEST_ROW_SIZE], 0
+ mov dword [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 2*MD5_DIGEST_ROW_SIZE], 0
+ mov dword [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 3*MD5_DIGEST_ROW_SIZE], 0
+
+ pxor xmm0, xmm0
+ imul lane_data, idx, _HMAC_SHA1_LANE_DATA_size
+ lea lane_data, [state + _ldata_md5 + lane_data]
+ ;; Clear first 64 bytes of extra_block
+%assign offset 0
+%rep 4
+ movdqa [lane_data + _extra_block + offset], xmm0
+%assign offset (offset + 16)
+%endrep
+
+ ;; Clear first 16 bytes of outer_block
+ movdqa [lane_data + _outer_block], xmm0
+%endif
+
+return:
+
+ mov rbx, [rsp + _gpr_save + 8*0]
+ mov rbp, [rsp + _gpr_save + 8*1]
+ mov r12, [rsp + _gpr_save + 8*2]
+ mov r13, [rsp + _gpr_save + 8*3]
+ mov r14, [rsp + _gpr_save + 8*4]
+ mov r15, [rsp + _gpr_save + 8*5]
+%ifndef LINUX
+ mov rsi, [rsp + _gpr_save + 8*6]
+ mov rdi, [rsp + _gpr_save + 8*7]
+%endif
+ mov rsp, [rsp + _rsp_save] ; original SP
+
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/sse/mb_mgr_hmac_sha_224_flush_ni_sse.asm b/src/spdk/intel-ipsec-mb/sse/mb_mgr_hmac_sha_224_flush_ni_sse.asm
new file mode 100644
index 000000000..23fcd74d7
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/sse/mb_mgr_hmac_sha_224_flush_ni_sse.asm
@@ -0,0 +1,28 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
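+
+;; HMAC-SHA-224 shares the SHA-256 SHA-NI flush implementation below;
+;; defining SHA224 selects the 28-byte digest handling in the included file.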
+%define SHA224
+%include "sse/mb_mgr_hmac_sha_256_flush_ni_sse.asm"
diff --git a/src/spdk/intel-ipsec-mb/sse/mb_mgr_hmac_sha_224_flush_sse.asm b/src/spdk/intel-ipsec-mb/sse/mb_mgr_hmac_sha_224_flush_sse.asm
new file mode 100644
index 000000000..e1f11a44f
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/sse/mb_mgr_hmac_sha_224_flush_sse.asm
@@ -0,0 +1,31 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%define FUNC flush_job_hmac_sha_224_sse
+%define SHA224
+
+%include "sse/mb_mgr_hmac_sha_256_flush_sse.asm"
diff --git a/src/spdk/intel-ipsec-mb/sse/mb_mgr_hmac_sha_224_submit_ni_sse.asm b/src/spdk/intel-ipsec-mb/sse/mb_mgr_hmac_sha_224_submit_ni_sse.asm
new file mode 100644
index 000000000..12c0350af
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/sse/mb_mgr_hmac_sha_224_submit_ni_sse.asm
@@ -0,0 +1,28 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+%define SHA224
+%include "sse/mb_mgr_hmac_sha_256_submit_ni_sse.asm"
diff --git a/src/spdk/intel-ipsec-mb/sse/mb_mgr_hmac_sha_224_submit_sse.asm b/src/spdk/intel-ipsec-mb/sse/mb_mgr_hmac_sha_224_submit_sse.asm
new file mode 100644
index 000000000..111f5092c
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/sse/mb_mgr_hmac_sha_224_submit_sse.asm
@@ -0,0 +1,31 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%define FUNC submit_job_hmac_sha_224_sse
+%define SHA224
+
+%include "sse/mb_mgr_hmac_sha_256_submit_sse.asm"
diff --git a/src/spdk/intel-ipsec-mb/sse/mb_mgr_hmac_sha_256_flush_ni_sse.asm b/src/spdk/intel-ipsec-mb/sse/mb_mgr_hmac_sha_256_flush_ni_sse.asm
new file mode 100644
index 000000000..9a2f20ffc
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/sse/mb_mgr_hmac_sha_256_flush_ni_sse.asm
@@ -0,0 +1,333 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+;; In System V AMD64 ABI
+;; callee saves: RBX, RBP, R12-R15
+;; Windows x64 ABI
+;; callee saves: RBX, RBP, RDI, RSI, RSP, R12-R15
+;;
+;; Linux/Windows clobbers: xmm0 - xmm15
+;;
+
+%include "include/os.asm"
+%include "job_aes_hmac.asm"
+%include "mb_mgr_datastruct.asm"
+%include "include/reg_sizes.asm"
+
+;%define DO_DBGPRINT
+%include "include/dbgprint.asm"
+
+extern sha256_ni
+
+%ifdef LINUX
+%define arg1 rdi
+%define arg2 rsi
+%else
+%define arg1 rcx
+%define arg2 rdx
+%endif
+
+%define state arg1
+%define job arg2
+%define len2 arg2
+
+
+; idx needs to be in rbx, rbp, r13-r15
+%define idx rbp
+
+%define unused_lanes rbx
+%define lane_data rbx
+%define tmp2 rbx
+
+%define job_rax rax
+%define tmp1 rax
+%define size_offset rax
+%define tmp rax
+%define start_offset rax
+
+%define tmp3 arg1
+
+%define extra_blocks arg2
+%define p arg2
+
+%define tmp4 r8
+
+%define tmp5 r9
+
+%define tmp6 r10
+
+%define bswap_xmm4 xmm4
+
+struc STACK
+_gpr_save: resq 4 ;rbx, rbp, rsi (win), rdi (win)
+_rsp_save: resq 1
+endstruc
+
+%define APPEND(a,b) a %+ b
+
+section .data
+default rel
+
+align 16
+byteswap:
+ dq 0x0405060700010203
+ dq 0x0c0d0e0f08090a0b
+
+one: dq 1
+
+section .text
+
+%ifdef SHA224
+;; JOB* flush_job_hmac_sha_224_ni_sse(MB_MGR_HMAC_SHA_256_OOO *state)
+;; arg1 : state
+MKGLOBAL(flush_job_hmac_sha_224_ni_sse,function,internal)
+flush_job_hmac_sha_224_ni_sse:
+%else
+;; JOB* flush_job_hmac_sha_256_ni_sse(MB_MGR_HMAC_SHA_256_OOO *state)
+;; arg1 : state
+MKGLOBAL(flush_job_hmac_sha_256_ni_sse,function,internal)
+flush_job_hmac_sha_256_ni_sse:
+%endif
+ mov rax, rsp
+ sub rsp, STACK_size
+ and rsp, -16
+
+ mov [rsp + _gpr_save + 8*0], rbx
+ mov [rsp + _gpr_save + 8*1], rbp
+%ifndef LINUX
+ mov [rsp + _gpr_save + 8*2], rsi
+ mov [rsp + _gpr_save + 8*3], rdi
+%endif
+ mov [rsp + _rsp_save], rax ; original SP
+
+ DBGPRINTL "enter sha256-ni-sse flush"
+
+ mov unused_lanes, [state + _unused_lanes_sha256]
+ bt unused_lanes, 16+7
+ jc return_null
+
+ ; find a lane with a non-null job, assume it is 0 then check 1
+ xor idx, idx
+ cmp qword [state + _ldata_sha256 + 1 * _HMAC_SHA1_LANE_DATA_size + _job_in_lane], 0
+ cmovne idx, [rel one]
+ DBGPRINTL64 "idx:", idx
+
+copy_lane_data:
+ ; copy idx to empty lanes
+ mov tmp, [state + _args_data_ptr_sha256 + PTR_SZ*idx]
+ xor len2, len2
+ mov WORD(len2), word [state + _lens_sha256 + idx*2]
+
+ ; there are only two lanes so if one is empty it is easy to determine which one
+ xor idx, 1
+ mov [state + _args_data_ptr_sha256 + PTR_SZ*idx], tmp
+ xor idx, 1
+
+ ; No need to find min length - only two lanes available
+ cmp len2, 0
+ je len_is_0
+
+ ; set length on both lanes to 0
+ mov dword [state + _lens_sha256], 0
+
+ ; "state" and "args" are the same address, arg1
+ ; len is arg2
+ call sha256_ni
+ ; state and idx are intact
+
+len_is_0:
+ ; process completed job "idx"
+ imul lane_data, idx, _HMAC_SHA1_LANE_DATA_size
+ lea lane_data, [state + _ldata_sha256 + lane_data]
+ mov DWORD(extra_blocks), [lane_data + _extra_blocks]
+ cmp extra_blocks, 0
+ jne proc_extra_blocks
+ movdqa bswap_xmm4, [rel byteswap]
+ cmp dword [lane_data + _outer_done], 0
+ jne end_loop
+
+proc_outer:
+ mov dword [lane_data + _outer_done], 1
+ mov DWORD(size_offset), [lane_data + _size_offset]
+ mov qword [lane_data + _extra_block + size_offset], 0
+ mov word [state + _lens_sha256 + 2*idx], 1
+ lea tmp, [lane_data + _outer_block]
+ mov job, [lane_data + _job_in_lane]
+ mov [state + _args_data_ptr_sha256 + PTR_SZ*idx], tmp
+
+%if SHA256NI_DIGEST_ROW_SIZE != 32
+%error "Below code has been optimized for SHA256NI_DIGEST_ROW_SIZE = 32!"
+%endif
+ lea tmp4, [idx*8] ; x8 here + scale factor x4 below give x32
+ movdqu xmm0, [state + _args_digest_sha256 + tmp4*4]
+ movdqu xmm1, [state + _args_digest_sha256 + tmp4*4 + 4*4]
+ pshufb xmm0, bswap_xmm4
+ pshufb xmm1, bswap_xmm4
+ movdqa [lane_data + _outer_block], xmm0
+ movdqa [lane_data + _outer_block + 4*4], xmm1
+%ifdef SHA224
+ ;; overwrite top 4 bytes with 0x80
+ mov dword [lane_data + _outer_block + 7*4], 0x80
+%endif
+ DBGPRINTL "sha256 outer hash input words:"
+ DBGPRINT_XMM xmm0
+ DBGPRINT_XMM xmm1
+
+ mov tmp, [job + _auth_key_xor_opad]
+ movdqu xmm0, [tmp]
+ movdqu xmm1, [tmp + 4*4]
+ DBGPRINTL64 "auth_key_xor_opad", tmp
+ movdqu [state + _args_digest_sha256 + tmp4*4], xmm0
+ movdqu [state + _args_digest_sha256 + tmp4*4 + 4*4], xmm1
+ DBGPRINTL "new digest args"
+ DBGPRINT_XMM xmm0
+ DBGPRINT_XMM xmm1
+ jmp copy_lane_data
+
+ align 16
+proc_extra_blocks:
+ mov DWORD(start_offset), [lane_data + _start_offset]
+ mov [state + _lens_sha256 + 2*idx], WORD(extra_blocks)
+ lea tmp, [lane_data + _extra_block + start_offset]
+ mov [state + _args_data_ptr_sha256 + PTR_SZ*idx], tmp
+ mov dword [lane_data + _extra_blocks], 0
+ jmp copy_lane_data
+
+return_null:
+ xor job_rax, job_rax
+ jmp return
+
+ align 16
+end_loop:
+ mov job_rax, [lane_data + _job_in_lane]
+ mov qword [lane_data + _job_in_lane], 0
+ or dword [job_rax + _status], STS_COMPLETED_HMAC
+ mov unused_lanes, [state + _unused_lanes_sha256]
+ shl unused_lanes, 8
+ or unused_lanes, idx
+ mov [state + _unused_lanes_sha256], unused_lanes
+
+ mov p, [job_rax + _auth_tag_output]
+
+ ; copy 16 bytes for SHA256, 14 bytes for SHA224
+%if SHA256NI_DIGEST_ROW_SIZE != 32
+%error "Below code has been optimized for SHA256NI_DIGEST_ROW_SIZE = 32!"
+%endif
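+        ;; convert lane index to a byte offset into the digest array (32 bytes per lane)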
+ shl idx, 5
+
+%ifdef SHA224
+ cmp qword [job_rax + _auth_tag_output_len_in_bytes], 14
+ jne copy_full_digest
+%else
+ cmp qword [job_rax + _auth_tag_output_len_in_bytes], 16
+ jne copy_full_digest
+%endif
+ movdqu xmm0, [state + _args_digest_sha256 + idx]
+ pshufb xmm0, bswap_xmm4
+%ifdef SHA224
+ ;; SHA224
+ movq [p + 0*4], xmm0
+ pextrd [p + 2*4], xmm0, 2
+ pextrw [p + 3*4], xmm0, 6
+%else
+ ;; SHA256
+ movdqu [p], xmm0
+%endif
+ DBGPRINTL "auth_tag_output:"
+ DBGPRINT_XMM xmm0
+ jmp clear_ret
+
+copy_full_digest:
+ movdqu xmm0, [state + _args_digest_sha256 + idx]
+ movdqu xmm1, [state + _args_digest_sha256 + idx + 16]
+ pshufb xmm0, bswap_xmm4
+ pshufb xmm1, bswap_xmm4
+%ifdef SHA224
+ ;; SHA224
+ movdqu [p], xmm0
+ movq [p + 16], xmm1
+ pextrd [p + 16 + 8], xmm1, 2
+%else
+ ;; SHA256
+ movdqu [p], xmm0
+ movdqu [p + 16], xmm1
+%endif
+
+clear_ret:
+
+%ifdef SAFE_DATA
+ pxor xmm0, xmm0
+
+ ;; Clear digest, outer_block (28B/32B) and extra_block (64B)
+ ;; of returned job and NULL jobs
+%assign I 0
+%rep 2
+ cmp qword [state + _ldata_sha256 + (I*_HMAC_SHA1_LANE_DATA_size) + _job_in_lane], 0
+ jne APPEND(skip_clear_,I)
+
+ ;; Clear digest
+ movdqa [state + _args_digest_sha256 + I*32], xmm0
+ movdqa [state + _args_digest_sha256 + I*32 + 16], xmm0
+
+ lea lane_data, [state + _ldata_sha256 + (I*_HMAC_SHA1_LANE_DATA_size)]
+ ;; Clear first 64 bytes of extra_block
+%assign offset 0
+%rep 4
+ movdqa [lane_data + _extra_block + offset], xmm0
+%assign offset (offset + 16)
+%endrep
+
+ ;; Clear first 28 bytes (SHA-224) or 32 bytes (SHA-256) of outer_block
+ movdqa [lane_data + _outer_block], xmm0
+%ifdef SHA224
+ mov qword [lane_data + _outer_block + 16], 0
+ mov dword [lane_data + _outer_block + 24], 0
+%else
+ movdqa [lane_data + _outer_block + 16], xmm0
+%endif
+
+APPEND(skip_clear_,I):
+%assign I (I+1)
+%endrep
+
+%endif ;; SAFE_DATA
+
+return:
+ DBGPRINTL "exit sha256-ni-sse flush"
+
+ mov rbx, [rsp + _gpr_save + 8*0]
+ mov rbp, [rsp + _gpr_save + 8*1]
+%ifndef LINUX
+ mov rsi, [rsp + _gpr_save + 8*2]
+ mov rdi, [rsp + _gpr_save + 8*3]
+%endif
+ mov rsp, [rsp + _rsp_save] ; original SP
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/sse/mb_mgr_hmac_sha_256_flush_sse.asm b/src/spdk/intel-ipsec-mb/sse/mb_mgr_hmac_sha_256_flush_sse.asm
new file mode 100644
index 000000000..5ab064b89
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/sse/mb_mgr_hmac_sha_256_flush_sse.asm
@@ -0,0 +1,356 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%include "include/os.asm"
+%include "job_aes_hmac.asm"
+%include "mb_mgr_datastruct.asm"
+%include "include/reg_sizes.asm"
+
+extern sha_256_mult_sse
+
+section .data
+default rel
+
+align 16
+byteswap: ;ddq 0x0c0d0e0f08090a0b0405060700010203
+ dq 0x0405060700010203, 0x0c0d0e0f08090a0b
+len_masks:
+ ;ddq 0x0000000000000000000000000000FFFF
+ dq 0x000000000000FFFF, 0x0000000000000000
+ ;ddq 0x000000000000000000000000FFFF0000
+ dq 0x00000000FFFF0000, 0x0000000000000000
+ ;ddq 0x00000000000000000000FFFF00000000
+ dq 0x0000FFFF00000000, 0x0000000000000000
+ ;ddq 0x0000000000000000FFFF000000000000
+ dq 0xFFFF000000000000, 0x0000000000000000
+one: dq 1
+two: dq 2
+three: dq 3
+
+section .text
+
+%ifndef FUNC
+%define FUNC flush_job_hmac_sha_256_sse
+%endif
+
+%if 1
+%ifdef LINUX
+%define arg1 rdi
+%define arg2 rsi
+%else
+%define arg1 rcx
+%define arg2 rdx
+%endif
+
+%define state arg1
+%define job arg2
+%define len2 arg2
+
+
+; idx needs to be in rbx, rbp, r13-r15
+%define idx rbp
+
+%define unused_lanes rbx
+%define lane_data rbx
+%define tmp2 rbx
+
+%define job_rax rax
+%define tmp1 rax
+%define size_offset rax
+%define tmp rax
+%define start_offset rax
+
+%define tmp3 arg1
+
+%define extra_blocks arg2
+%define p arg2
+
+%define tmp4 r8
+
+%define tmp5 r9
+
+%define tmp6 r10
+
+%endif
+
+; This routine clobbers rbx, rbp; called routine also clobbers r12
+struc STACK
+_gpr_save: resq 3
+_rsp_save: resq 1
+endstruc
+
+%define APPEND(a,b) a %+ b
+
+; JOB* FUNC(MB_MGR_HMAC_SHA_256_OOO *state)
+; arg 1 : rcx : state
+MKGLOBAL(FUNC,function,internal)
+FUNC:
+
+ mov rax, rsp
+ sub rsp, STACK_size
+ and rsp, -16
+
+ mov [rsp + _gpr_save + 8*0], rbx
+ mov [rsp + _gpr_save + 8*1], rbp
+ mov [rsp + _gpr_save + 8*2], r12
+ mov [rsp + _rsp_save], rax ; original SP
+
+ mov unused_lanes, [state + _unused_lanes_sha256]
+ bt unused_lanes, 32+7
+ jc return_null
+
+ ; find a lane with a non-null job
+ xor idx, idx
+ cmp qword [state + _ldata_sha256 + 1 * _HMAC_SHA1_LANE_DATA_size + _job_in_lane], 0
+ cmovne idx, [rel one]
+ cmp qword [state + _ldata_sha256 + 2 * _HMAC_SHA1_LANE_DATA_size + _job_in_lane], 0
+ cmovne idx, [rel two]
+ cmp qword [state + _ldata_sha256 + 3 * _HMAC_SHA1_LANE_DATA_size + _job_in_lane], 0
+ cmovne idx, [rel three]
+
+copy_lane_data:
+ ; copy idx to empty lanes
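+        ; set empty lanes' length to 0xFFFF (via len_masks) so they never win the min-length search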
+ movdqa xmm0, [state + _lens_sha256]
+ mov tmp, [state + _args_data_ptr_sha256 + 8*idx]
+
+%assign I 0
+%rep 4
+ cmp qword [state + _ldata_sha256 + I * _HMAC_SHA1_LANE_DATA_size + _job_in_lane], 0
+ jne APPEND(skip_,I)
+ mov [state + _args_data_ptr_sha256 + 8*I], tmp
+ por xmm0, [rel len_masks + 16*I]
+APPEND(skip_,I):
+%assign I (I+1)
+%endrep
+
+ movdqa [state + _lens_sha256], xmm0
+
+ phminposuw xmm1, xmm0
+ pextrw len2, xmm1, 0 ; min value
+ pextrw idx, xmm1, 1 ; min index (0...3)
+ cmp len2, 0
+ je len_is_0
+
+ pshuflw xmm1, xmm1, 0
+ psubw xmm0, xmm1
+ movdqa [state + _lens_sha256], xmm0
+
+ ; "state" and "args" are the same address, arg1
+ ; len is arg2
+ call sha_256_mult_sse
+ ; state and idx are intact
+
+len_is_0:
+ ; process completed job "idx"
+ imul lane_data, idx, _HMAC_SHA1_LANE_DATA_size
+ lea lane_data, [state + _ldata_sha256 + lane_data]
+ mov DWORD(extra_blocks), [lane_data + _extra_blocks]
+ cmp extra_blocks, 0
+ jne proc_extra_blocks
+ cmp dword [lane_data + _outer_done], 0
+ jne end_loop
+
+proc_outer:
+ mov dword [lane_data + _outer_done], 1
+ mov DWORD(size_offset), [lane_data + _size_offset]
+ mov qword [lane_data + _extra_block + size_offset], 0
+ mov word [state + _lens_sha256 + 2*idx], 1
+ lea tmp, [lane_data + _outer_block]
+ mov job, [lane_data + _job_in_lane]
+ mov [state + _args_data_ptr_sha256 + 8*idx], tmp
+
+ movd xmm0, [state + _args_digest_sha256 + 4*idx + 0*SHA256_DIGEST_ROW_SIZE]
+ pinsrd xmm0, [state + _args_digest_sha256 + 4*idx + 1*SHA256_DIGEST_ROW_SIZE], 1
+ pinsrd xmm0, [state + _args_digest_sha256 + 4*idx + 2*SHA256_DIGEST_ROW_SIZE], 2
+ pinsrd xmm0, [state + _args_digest_sha256 + 4*idx + 3*SHA256_DIGEST_ROW_SIZE], 3
+ pshufb xmm0, [rel byteswap]
+ movd xmm1, [state + _args_digest_sha256 + 4*idx + 4*SHA256_DIGEST_ROW_SIZE]
+ pinsrd xmm1, [state + _args_digest_sha256 + 4*idx + 5*SHA256_DIGEST_ROW_SIZE], 1
+ pinsrd xmm1, [state + _args_digest_sha256 + 4*idx + 6*SHA256_DIGEST_ROW_SIZE], 2
+%ifndef SHA224
+ pinsrd xmm1, [state + _args_digest_sha256 + 4*idx + 7*SHA256_DIGEST_ROW_SIZE], 3
+%endif
+ pshufb xmm1, [rel byteswap]
+ movdqa [lane_data + _outer_block], xmm0
+ movdqa [lane_data + _outer_block + 4*4], xmm1
+%ifdef SHA224
+ mov dword [lane_data + _outer_block + 7*4], 0x80
+%endif
+
+ mov tmp, [job + _auth_key_xor_opad]
+ movdqu xmm0, [tmp]
+ movdqu xmm1, [tmp + 4*4]
+ movd [state + _args_digest_sha256 + 4*idx + 0*SHA256_DIGEST_ROW_SIZE], xmm0
+ pextrd [state + _args_digest_sha256 + 4*idx + 1*SHA256_DIGEST_ROW_SIZE], xmm0, 1
+ pextrd [state + _args_digest_sha256 + 4*idx + 2*SHA256_DIGEST_ROW_SIZE], xmm0, 2
+ pextrd [state + _args_digest_sha256 + 4*idx + 3*SHA256_DIGEST_ROW_SIZE], xmm0, 3
+ movd [state + _args_digest_sha256 + 4*idx + 4*SHA256_DIGEST_ROW_SIZE], xmm1
+ pextrd [state + _args_digest_sha256 + 4*idx + 5*SHA256_DIGEST_ROW_SIZE], xmm1, 1
+ pextrd [state + _args_digest_sha256 + 4*idx + 6*SHA256_DIGEST_ROW_SIZE], xmm1, 2
+ pextrd [state + _args_digest_sha256 + 4*idx + 7*SHA256_DIGEST_ROW_SIZE], xmm1, 3
+ jmp copy_lane_data
+
+ align 16
+proc_extra_blocks:
+ mov DWORD(start_offset), [lane_data + _start_offset]
+ mov [state + _lens_sha256 + 2*idx], WORD(extra_blocks)
+ lea tmp, [lane_data + _extra_block + start_offset]
+ mov [state + _args_data_ptr_sha256 + 8*idx], tmp
+ mov dword [lane_data + _extra_blocks], 0
+ jmp copy_lane_data
+
+return_null:
+ xor job_rax, job_rax
+ jmp return
+
+ align 16
+end_loop:
+ mov job_rax, [lane_data + _job_in_lane]
+ mov qword [lane_data + _job_in_lane], 0
+ or dword [job_rax + _status], STS_COMPLETED_HMAC
+ mov unused_lanes, [state + _unused_lanes_sha256]
+ shl unused_lanes, 8
+ or unused_lanes, idx
+ mov [state + _unused_lanes_sha256], unused_lanes
+
+ mov p, [job_rax + _auth_tag_output]
+
+%ifdef SHA224
+ cmp qword [job_rax + _auth_tag_output_len_in_bytes], 14
+ jne copy_full_digest
+%else
+ cmp qword [job_rax + _auth_tag_output_len_in_bytes], 16
+ jne copy_full_digest
+%endif
+ ;; copy 14 bytes for SHA224 / 16 bytes for SHA256
+ mov DWORD(tmp2), [state + _args_digest_sha256 + 4*idx + 0*SHA256_DIGEST_ROW_SIZE]
+ mov DWORD(tmp4), [state + _args_digest_sha256 + 4*idx + 1*SHA256_DIGEST_ROW_SIZE]
+ mov DWORD(tmp6), [state + _args_digest_sha256 + 4*idx + 2*SHA256_DIGEST_ROW_SIZE]
+ mov DWORD(tmp5), [state + _args_digest_sha256 + 4*idx + 3*SHA256_DIGEST_ROW_SIZE]
+ bswap DWORD(tmp2)
+ bswap DWORD(tmp4)
+ bswap DWORD(tmp6)
+ bswap DWORD(tmp5)
+ mov [p + 0*4], DWORD(tmp2)
+ mov [p + 1*4], DWORD(tmp4)
+ mov [p + 2*4], DWORD(tmp6)
+%ifdef SHA224
+ mov [p + 3*4], WORD(tmp5)
+%else
+ mov [p + 3*4], DWORD(tmp5)
+%endif
+ jmp clear_ret
+
+copy_full_digest:
+ ;; copy 28 bytes for SHA224 / 32 bytes for SHA256
+ mov DWORD(tmp2), [state + _args_digest_sha256 + 4*idx + 0*SHA256_DIGEST_ROW_SIZE]
+ mov DWORD(tmp4), [state + _args_digest_sha256 + 4*idx + 1*SHA256_DIGEST_ROW_SIZE]
+ mov DWORD(tmp6), [state + _args_digest_sha256 + 4*idx + 2*SHA256_DIGEST_ROW_SIZE]
+ mov DWORD(tmp5), [state + _args_digest_sha256 + 4*idx + 3*SHA256_DIGEST_ROW_SIZE]
+ bswap DWORD(tmp2)
+ bswap DWORD(tmp4)
+ bswap DWORD(tmp6)
+ bswap DWORD(tmp5)
+ mov [p + 0*4], DWORD(tmp2)
+ mov [p + 1*4], DWORD(tmp4)
+ mov [p + 2*4], DWORD(tmp6)
+ mov [p + 3*4], DWORD(tmp5)
+
+ mov DWORD(tmp2), [state + _args_digest_sha256 + 4*idx + 4*SHA256_DIGEST_ROW_SIZE]
+ mov DWORD(tmp4), [state + _args_digest_sha256 + 4*idx + 5*SHA256_DIGEST_ROW_SIZE]
+ mov DWORD(tmp6), [state + _args_digest_sha256 + 4*idx + 6*SHA256_DIGEST_ROW_SIZE]
+%ifndef SHA224
+ mov DWORD(tmp5), [state + _args_digest_sha256 + 4*idx + 7*SHA256_DIGEST_ROW_SIZE]
+%endif
+ bswap DWORD(tmp2)
+ bswap DWORD(tmp4)
+ bswap DWORD(tmp6)
+%ifndef SHA224
+ bswap DWORD(tmp5)
+%endif
+ mov [p + 4*4], DWORD(tmp2)
+ mov [p + 5*4], DWORD(tmp4)
+ mov [p + 6*4], DWORD(tmp6)
+%ifndef SHA224
+ mov [p + 7*4], DWORD(tmp5)
+%endif
+
+clear_ret:
+
+%ifdef SAFE_DATA
+ pxor xmm0, xmm0
+
+ ;; Clear digest (28B/32B), outer_block (28B/32B) and extra_block (64B)
+ ;; of returned job and NULL jobs
+%assign I 0
+%rep 4
+ cmp qword [state + _ldata_sha256 + (I*_HMAC_SHA1_LANE_DATA_size) + _job_in_lane], 0
+ jne APPEND(skip_clear_,I)
+
+        ;; Clear digest (28 bytes for SHA-224, 32 bytes for SHA-256)
+%assign J 0
+%rep 7
+ mov dword [state + _args_digest_sha256 + SHA256_DIGEST_WORD_SIZE*I + J*SHA256_DIGEST_ROW_SIZE], 0
+%assign J (J+1)
+%endrep
+%ifndef SHA224
+ mov dword [state + _args_digest_sha256 + SHA256_DIGEST_WORD_SIZE*I + 7*SHA256_DIGEST_ROW_SIZE], 0
+%endif
+
+ lea lane_data, [state + _ldata_sha256 + (I*_HMAC_SHA1_LANE_DATA_size)]
+ ;; Clear first 64 bytes of extra_block
+%assign offset 0
+%rep 4
+ movdqa [lane_data + _extra_block + offset], xmm0
+%assign offset (offset + 16)
+%endrep
+
+ ;; Clear first 28 bytes (SHA-224) or 32 bytes (SHA-256) of outer_block
+ movdqa [lane_data + _outer_block], xmm0
+%ifdef SHA224
+ mov qword [lane_data + _outer_block + 16], 0
+ mov dword [lane_data + _outer_block + 24], 0
+%else
+ movdqa [lane_data + _outer_block + 16], xmm0
+%endif
+
+APPEND(skip_clear_,I):
+%assign I (I+1)
+%endrep
+
+%endif ;; SAFE_DATA
+
+return:
+ mov rbx, [rsp + _gpr_save + 8*0]
+ mov rbp, [rsp + _gpr_save + 8*1]
+ mov r12, [rsp + _gpr_save + 8*2]
+ mov rsp, [rsp + _rsp_save] ; original SP
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/sse/mb_mgr_hmac_sha_256_submit_ni_sse.asm b/src/spdk/intel-ipsec-mb/sse/mb_mgr_hmac_sha_256_submit_ni_sse.asm
new file mode 100644
index 000000000..d4ded1f6d
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/sse/mb_mgr_hmac_sha_256_submit_ni_sse.asm
@@ -0,0 +1,401 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+;; In System V AMD64 ABI
+;; callee saves: RBX, RBP, R12-R15
+;; Windows x64 ABI
+;; callee saves: RBX, RBP, RDI, RSI, RSP, R12-R15
+;;
+;; Linux/Windows clobbers: xmm0 - xmm15
+;;
+
+%include "include/os.asm"
+%include "job_aes_hmac.asm"
+%include "mb_mgr_datastruct.asm"
+%include "include/reg_sizes.asm"
+%include "include/memcpy.asm"
+
+;%define DO_DBGPRINT
+%include "include/dbgprint.asm"
+
+extern sha256_ni
+
+%ifdef LINUX
+%define arg1 rdi
+%define arg2 rsi
+%define reg3 rcx
+%define reg4 rdx
+%else
+%define arg1 rcx
+%define arg2 rdx
+%define reg3 rdi
+%define reg4 rsi
+%endif
+
+%define state arg1
+%define job arg2
+%define len2 arg2
+
+
+; idx needs to be in rbx, rbp, r13-r15
+%define last_len rbp
+%define idx rbp
+
+%define p r11
+%define start_offset r11
+
+%define unused_lanes rbx
+%define tmp4 rbx
+
+%define job_rax rax
+%define len rax
+
+%define size_offset reg3
+%define tmp2 reg3
+
+%define lane reg4
+
+%define extra_blocks r8
+
+%define tmp r9
+%define p2 r9
+
+%define lane_data r10
+
+%define bswap_xmm4 xmm4
+
+struc STACK
+_gpr_save: resq 4 ; rbx, rbp, rsi (win), rdi (win)
+_rsp_save: resq 1
+endstruc
+
+section .data
+default rel
+
+align 16
+byteswap:
+ dq 0x0405060700010203
+ dq 0x0c0d0e0f08090a0b
+
+section .text
+
+%ifdef SHA224
+; JOB* submit_job_hmac_sha_224_ni_sse(MB_MGR_HMAC_SHA_256_OOO *state, JOB_AES_HMAC *job)
+; arg 1 : state
+; arg 2 : job
+MKGLOBAL(submit_job_hmac_sha_224_ni_sse,function,internal)
+submit_job_hmac_sha_224_ni_sse:
+
+%else
+
+; JOB* submit_job_hmac_sha_256_ni_sse(MB_MGR_HMAC_SHA_256_OOO *state, JOB_AES_HMAC *job)
+; arg 1 : state
+; arg 2 : job
+MKGLOBAL(submit_job_hmac_sha_256_ni_sse,function,internal)
+submit_job_hmac_sha_256_ni_sse:
+%endif
+
+ mov rax, rsp
+ sub rsp, STACK_size
+ and rsp, -16
+
+ mov [rsp + _gpr_save + 8*0], rbx
+ mov [rsp + _gpr_save + 8*1], rbp
+%ifndef LINUX
+ mov [rsp + _gpr_save + 8*2], rsi
+ mov [rsp + _gpr_save + 8*3], rdi
+%endif
+ mov [rsp + _rsp_save], rax ; original SP
+
+ DBGPRINTL "enter sha256-ni-sse submit"
+
+ mov unused_lanes, [state + _unused_lanes_sha256]
+ movzx lane, BYTE(unused_lanes)
+ DBGPRINTL64 "lane: ", lane
+ shr unused_lanes, 8
+ imul lane_data, lane, _HMAC_SHA1_LANE_DATA_size ; SHA1 & SHA256 lane data is the same
+ lea lane_data, [state + _ldata_sha256 + lane_data]
+ mov [state + _unused_lanes_sha256], unused_lanes
+ mov len, [job + _msg_len_to_hash_in_bytes]
+ DBGPRINTL64 "length: ", len
+ mov tmp, len
+ shr tmp, 6 ; divide by 64, len in terms of blocks
+
+ mov [lane_data + _job_in_lane], job
+ mov dword [lane_data + _outer_done], 0
+ mov [state + _lens_sha256 + 2*lane], WORD(tmp)
+
+ mov last_len, len
+ and last_len, 63
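+        ;; extra_blocks = ceil((last_len + 1 pad byte + 8 length bytes) / 64)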
+ lea extra_blocks, [last_len + 9 + 63]
+ shr extra_blocks, 6
+ mov [lane_data + _extra_blocks], DWORD(extra_blocks)
+
+ mov p, [job + _src]
+ add p, [job + _hash_start_src_offset_in_bytes]
+ mov [state + _args_data_ptr_sha256 + 8*lane], p
+
+ cmp len, 64
+ jb copy_lt64
+
+fast_copy:
+ add p, len
+ movdqu xmm0, [p - 64 + 0*16]
+ movdqu xmm1, [p - 64 + 1*16]
+ movdqu xmm2, [p - 64 + 2*16]
+ movdqu xmm3, [p - 64 + 3*16]
+ movdqa [lane_data + _extra_block + 0*16], xmm0
+ movdqa [lane_data + _extra_block + 1*16], xmm1
+ movdqa [lane_data + _extra_block + 2*16], xmm2
+ movdqa [lane_data + _extra_block + 3*16], xmm3
+end_fast_copy:
+
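+        ;; size_offset  = offset of the 8-byte length field (last 8 bytes of the final padded block)
+        ;; start_offset = offset where processing of the extra block(s) begins, so that the
+        ;;                copied message tail ends exactly at offset 64 of extra_block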
+ mov size_offset, extra_blocks
+ shl size_offset, 6
+ sub size_offset, last_len
+ add size_offset, 64-8
+ mov [lane_data + _size_offset], DWORD(size_offset)
+ mov start_offset, 64
+ sub start_offset, last_len
+ mov [lane_data + _start_offset], DWORD(start_offset)
+
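+        ;; total hashed length in bits: 8 * (64-byte ipad block + message len),
+        ;; stored big-endian as required by SHA-256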
+ lea tmp, [8*64 + 8*len]
+ bswap tmp
+ mov [lane_data + _extra_block + size_offset], tmp
+
+ mov tmp, [job + _auth_key_xor_ipad]
+ movdqu xmm0, [tmp]
+ movdqu xmm1, [tmp + 4*4]
+%if SHA256NI_DIGEST_ROW_SIZE != 32
+%error "Below code has been optimized for SHA256NI_DIGEST_ROW_SIZE = 32!"
+%endif
+ lea tmp, [lane*8] ; x8 here plus x4 scale factor give x32
+ movdqu [state + _args_digest_sha256 + tmp*4], xmm0
+ movdqu [state + _args_digest_sha256 + tmp*4 + 4*4], xmm1
+ DBGPRINTL "args digest:"
+ DBGPRINT_XMM xmm0
+ DBGPRINT_XMM xmm1
+ test len, ~63
+ jnz ge64_bytes
+
+lt64_bytes:
+ mov [state + _lens_sha256 + 2*lane], WORD(extra_blocks)
+ lea tmp, [lane_data + _extra_block + start_offset]
+ mov [state + _args_data_ptr_sha256 + 8*lane], tmp
+ mov dword [lane_data + _extra_blocks], 0
+
+ge64_bytes:
+ cmp unused_lanes, 0xff
+ jne return_null
+ jmp start_loop
+
+ align 16
+start_loop:
+ ; Find min length - only two lanes available
+ xor len2, len2
+ mov tmp, 0x10000
+ mov WORD(len2), word [state + _lens_sha256 + 0*2] ; [0:15] - lane 0 length, [16:31] - lane index (0)
+ mov WORD(tmp), word [state + _lens_sha256 + 1*2] ; [0:15] - lane 1 length, [16:31] - lane index (1)
+ cmp WORD(len2), WORD(tmp)
+ cmovg DWORD(len2), DWORD(tmp) ; move if lane 0 length is greater than lane 1 length
+
+ mov idx, len2 ; retrieve index & length from [16:31] and [0:15] bit fields
+ shr DWORD(idx), 16
+ and DWORD(len2), 0xffff
+ je len_is_0
+
+ sub word [state + _lens_sha256 + 0*2], WORD(len2)
+ sub word [state + _lens_sha256 + 1*2], WORD(len2)
+
+ ; "state" and "args" are the same address, arg1
+ ; len is arg2
+ call sha256_ni
+ ; state is intact
+len_is_0:
+ ; process completed job "idx"
+ imul lane_data, idx, _HMAC_SHA1_LANE_DATA_size
+ lea lane_data, [state + _ldata_sha256 + lane_data]
+ mov DWORD(extra_blocks), [lane_data + _extra_blocks]
+ cmp extra_blocks, 0
+ jne proc_extra_blocks
+ movdqa bswap_xmm4, [rel byteswap]
+ cmp dword [lane_data + _outer_done], 0
+ jne end_loop
+
+proc_outer:
+ mov dword [lane_data + _outer_done], 1
+ mov DWORD(size_offset), [lane_data + _size_offset]
+ mov qword [lane_data + _extra_block + size_offset], 0
+ mov word [state + _lens_sha256 + 2*idx], 1
+ lea tmp, [lane_data + _outer_block]
+ mov job, [lane_data + _job_in_lane]
+ mov [state + _args_data_ptr_sha256 + PTR_SZ*idx], tmp
+
+%if SHA256NI_DIGEST_ROW_SIZE != 32
+%error "Below code has been optimized for SHA256NI_DIGEST_ROW_SIZE = 32!"
+%endif
+ lea tmp4, [idx*8] ; x8 here + scale factor x4 below give x32
+ movdqu xmm0, [state + _args_digest_sha256 + tmp4*4]
+ movdqu xmm1, [state + _args_digest_sha256 + tmp4*4 + 4*4]
+ pshufb xmm0, bswap_xmm4
+ pshufb xmm1, bswap_xmm4
+ movdqa [lane_data + _outer_block], xmm0
+ movdqa [lane_data + _outer_block + 4*4], xmm1
+%ifdef SHA224
+ ;; overwrite top 4 bytes with 0x80
+ mov dword [lane_data + _outer_block + 7*4], 0x80
+%endif
+
+ mov tmp, [job + _auth_key_xor_opad]
+ movdqu xmm0, [tmp]
+ movdqu xmm1, [tmp + 4*4]
+ movdqu [state + _args_digest_sha256 + tmp4*4], xmm0
+ movdqu [state + _args_digest_sha256 + tmp4*4 + 4*4], xmm1
+ jmp start_loop
+
+ align 16
+proc_extra_blocks:
+ mov DWORD(start_offset), [lane_data + _start_offset]
+ mov [state + _lens_sha256 + 2*idx], WORD(extra_blocks)
+ lea tmp, [lane_data + _extra_block + start_offset]
+ mov [state + _args_data_ptr_sha256 + PTR_SZ*idx], tmp
+ mov dword [lane_data + _extra_blocks], 0
+ jmp start_loop
+
+ align 16
+
+copy_lt64:
+ ;; less than one message block of data
+ ;; beginning of source block
+        ;; destination is extra_block, shifted back by len from where the 0x80 byte was pre-populated
+ ;; p2 clobbers unused_lanes, undo before exit
+ lea p2, [lane_data + _extra_block + 64]
+ sub p2, len
+ memcpy_sse_64_1 p2, p, len, tmp4, tmp2, xmm0, xmm1, xmm2, xmm3
+ mov unused_lanes, [state + _unused_lanes_sha256]
+ jmp end_fast_copy
+
+return_null:
+ xor job_rax, job_rax
+ jmp return
+
+ align 16
+end_loop:
+ mov job_rax, [lane_data + _job_in_lane]
+ mov unused_lanes, [state + _unused_lanes_sha256]
+ mov qword [lane_data + _job_in_lane], 0
+ or dword [job_rax + _status], STS_COMPLETED_HMAC
+ shl unused_lanes, 8
+ or unused_lanes, idx
+ mov [state + _unused_lanes_sha256], unused_lanes
+
+ mov p, [job_rax + _auth_tag_output]
+
+ ; copy 16 bytes for SHA256, 14 for SHA224
+%if SHA256NI_DIGEST_ROW_SIZE != 32
+%error "Below code has been optimized for SHA256NI_DIGEST_ROW_SIZE = 32!"
+%endif
+ shl idx, 5
+
+%ifdef SHA224
+ cmp qword [job_rax + _auth_tag_output_len_in_bytes], 14
+ jne copy_full_digest
+%else
+ cmp qword [job_rax + _auth_tag_output_len_in_bytes], 16
+ jne copy_full_digest
+%endif
+
+ movdqu xmm0, [state + _args_digest_sha256 + idx]
+ pshufb xmm0, bswap_xmm4
+%ifdef SHA224
+ ;; SHA224
+ movq [p + 0*4], xmm0
+ pextrd [p + 2*4], xmm0, 2
+ pextrw [p + 3*4], xmm0, 6
+%else
+ ;; SHA256
+ movdqu [p], xmm0
+%endif
+ jmp clear_ret
+
+copy_full_digest:
+ movdqu xmm0, [state + _args_digest_sha256 + idx]
+ movdqu xmm1, [state + _args_digest_sha256 + idx + 16]
+ pshufb xmm0, bswap_xmm4
+ pshufb xmm1, bswap_xmm4
+%ifdef SHA224
+ ;; SHA224
+ movdqu [p], xmm0
+ movq [p + 16], xmm1
+ pextrd [p + 16 + 8], xmm1, 2
+%else
+ ;; SHA256
+ movdqu [p], xmm0
+ movdqu [p + 16], xmm1
+%endif
+
+clear_ret:
+
+%ifdef SAFE_DATA
+ pxor xmm0, xmm0
+ ;; Clear digest, outer_block (28B/32B) and extra_block (64B) of returned job
+ movdqa [state + _args_digest_sha256 + idx], xmm0
+ movdqa [state + _args_digest_sha256 + idx + 16], xmm0
+
+ shr idx, 5 ;; Restore lane idx to 0 or 1
+ imul lane_data, idx, _HMAC_SHA1_LANE_DATA_size
+ lea lane_data, [state + _ldata_sha256 + lane_data]
+ ;; Clear first 64 bytes of extra_block
+%assign offset 0
+%rep 4
+ movdqa [lane_data + _extra_block + offset], xmm0
+%assign offset (offset + 16)
+%endrep
+
+ ;; Clear first 28 bytes (SHA-224) or 32 bytes (SHA-256) of outer_block
+ movdqa [lane_data + _outer_block], xmm0
+%ifdef SHA224
+ mov qword [lane_data + _outer_block + 16], 0
+ mov dword [lane_data + _outer_block + 24], 0
+%else
+ movdqa [lane_data + _outer_block + 16], xmm0
+%endif
+%endif ;; SAFE_DATA
+
+return:
+ mov rbx, [rsp + _gpr_save + 8*0]
+ mov rbp, [rsp + _gpr_save + 8*1]
+%ifndef LINUX
+ mov rsi, [rsp + _gpr_save + 8*2]
+ mov rdi, [rsp + _gpr_save + 8*3]
+%endif
+ mov rsp, [rsp + _rsp_save] ; original SP
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/sse/mb_mgr_hmac_sha_256_submit_sse.asm b/src/spdk/intel-ipsec-mb/sse/mb_mgr_hmac_sha_256_submit_sse.asm
new file mode 100644
index 000000000..8025b2f96
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/sse/mb_mgr_hmac_sha_256_submit_sse.asm
@@ -0,0 +1,427 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%include "include/os.asm"
+%include "job_aes_hmac.asm"
+%include "mb_mgr_datastruct.asm"
+%include "include/reg_sizes.asm"
+%include "include/memcpy.asm"
+%include "include/const.inc"
+
+extern sha_256_mult_sse
+
+section .data
+default rel
+align 16
+byteswap: ;ddq 0x0c0d0e0f08090a0b0405060700010203
+ dq 0x0405060700010203, 0x0c0d0e0f08090a0b
+
+section .text
+
+%ifndef FUNC
+%define FUNC submit_job_hmac_sha_256_sse
+%endif
+
+%if 1
+%ifdef LINUX
+%define arg1 rdi
+%define arg2 rsi
+%define reg3 rcx
+%define reg4 rdx
+%else
+%define arg1 rcx
+%define arg2 rdx
+%define reg3 rdi
+%define reg4 rsi
+%endif
+
+%define state arg1
+%define job arg2
+%define len2 arg2
+
+
+; idx needs to be in rbx, rbp, r13-r15
+%define last_len rbp
+%define idx rbp
+
+%define p r11
+%define start_offset r11
+
+%define unused_lanes rbx
+%define tmp4 rbx
+
+%define job_rax rax
+%define len rax
+
+%define size_offset reg3
+%define tmp2 reg3
+
+%define lane reg4
+%define tmp3 reg4
+
+%define extra_blocks r8
+
+%define tmp r9
+%define p2 r9
+
+%define lane_data r10
+
+%endif
+
+; This routine clobbers rbx, rbp, rsi, rdi; called routine also clobbers r12
+struc STACK
+_gpr_save: resq 5
+_rsp_save: resq 1
+endstruc
+
+; JOB* FUNC(MB_MGR_HMAC_SHA_256_OOO *state, JOB_AES_HMAC *job)
+; arg 1 : rcx : state
+; arg 2 : rdx : job
+MKGLOBAL(FUNC,function,internal)
+FUNC:
+
+ mov rax, rsp
+ sub rsp, STACK_size
+ and rsp, -16
+
+ mov [rsp + _gpr_save + 8*0], rbx
+ mov [rsp + _gpr_save + 8*1], rbp
+ mov [rsp + _gpr_save + 8*2], r12
+%ifndef LINUX
+ mov [rsp + _gpr_save + 8*3], rsi
+ mov [rsp + _gpr_save + 8*4], rdi
+%endif
+ mov [rsp + _rsp_save], rax ; original SP
+
+ mov unused_lanes, [state + _unused_lanes_sha256]
+ movzx lane, BYTE(unused_lanes)
+ shr unused_lanes, 8
+ imul lane_data, lane, _HMAC_SHA1_LANE_DATA_size
+ lea lane_data, [state + _ldata_sha256 + lane_data]
+ mov [state + _unused_lanes_sha256], unused_lanes
+ mov len, [job + _msg_len_to_hash_in_bytes]
+ mov tmp, len
+ shr tmp, 6 ; divide by 64, len in terms of blocks
+
+ mov [lane_data + _job_in_lane], job
+ mov dword [lane_data + _outer_done], 0
+
+ movdqa xmm0, [state + _lens_sha256]
+ XPINSRW xmm0, xmm1, p, lane, tmp, scale_x16
+ movdqa [state + _lens_sha256], xmm0
+
+ mov last_len, len
+ and last_len, 63
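+        ;; extra_blocks = ceil((last_len + 1 pad byte + 8 length bytes) / 64)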
+ lea extra_blocks, [last_len + 9 + 63]
+ shr extra_blocks, 6
+ mov [lane_data + _extra_blocks], DWORD(extra_blocks)
+
+ mov p, [job + _src]
+ add p, [job + _hash_start_src_offset_in_bytes]
+ mov [state + _args_data_ptr_sha256 + 8*lane], p
+
+ cmp len, 64
+ jb copy_lt64
+
+fast_copy:
+ add p, len
+ movdqu xmm0, [p - 64 + 0*16]
+ movdqu xmm1, [p - 64 + 1*16]
+ movdqu xmm2, [p - 64 + 2*16]
+ movdqu xmm3, [p - 64 + 3*16]
+ movdqa [lane_data + _extra_block + 0*16], xmm0
+ movdqa [lane_data + _extra_block + 1*16], xmm1
+ movdqa [lane_data + _extra_block + 2*16], xmm2
+ movdqa [lane_data + _extra_block + 3*16], xmm3
+end_fast_copy:
+
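+        ;; size_offset  = offset of the 8-byte length field (last 8 bytes of the final padded block)
+        ;; start_offset = offset where processing of the extra block(s) begins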
+ mov size_offset, extra_blocks
+ shl size_offset, 6
+ sub size_offset, last_len
+ add size_offset, 64-8
+ mov [lane_data + _size_offset], DWORD(size_offset)
+ mov start_offset, 64
+ sub start_offset, last_len
+ mov [lane_data + _start_offset], DWORD(start_offset)
+
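+        ;; total hashed length in bits: 8 * (64-byte ipad block + message len), stored big-endian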
+ lea tmp, [8*64 + 8*len]
+ bswap tmp
+ mov [lane_data + _extra_block + size_offset], tmp
+
+ mov tmp, [job + _auth_key_xor_ipad]
+ movdqu xmm0, [tmp]
+ movdqu xmm1, [tmp + 4*4]
+ movd [state + _args_digest_sha256 + 4*lane + 0*SHA256_DIGEST_ROW_SIZE], xmm0
+ pextrd [state + _args_digest_sha256 + 4*lane + 1*SHA256_DIGEST_ROW_SIZE], xmm0, 1
+ pextrd [state + _args_digest_sha256 + 4*lane + 2*SHA256_DIGEST_ROW_SIZE], xmm0, 2
+ pextrd [state + _args_digest_sha256 + 4*lane + 3*SHA256_DIGEST_ROW_SIZE], xmm0, 3
+ movd [state + _args_digest_sha256 + 4*lane + 4*SHA256_DIGEST_ROW_SIZE], xmm1
+ pextrd [state + _args_digest_sha256 + 4*lane + 5*SHA256_DIGEST_ROW_SIZE], xmm1, 1
+ pextrd [state + _args_digest_sha256 + 4*lane + 6*SHA256_DIGEST_ROW_SIZE], xmm1, 2
+ pextrd [state + _args_digest_sha256 + 4*lane + 7*SHA256_DIGEST_ROW_SIZE], xmm1, 3
+ test len, ~63
+ jnz ge64_bytes
+
+lt64_bytes:
+ movdqa xmm0, [state + _lens_sha256]
+ XPINSRW xmm0, xmm1, tmp, lane, extra_blocks, scale_x16
+ movdqa [state + _lens_sha256], xmm0
+
+ lea tmp, [lane_data + _extra_block + start_offset]
+ mov [state + _args_data_ptr_sha256 + 8*lane], tmp
+ mov dword [lane_data + _extra_blocks], 0
+
+ge64_bytes:
+ cmp unused_lanes, 0xff
+ jne return_null
+ jmp start_loop
+
+ align 16
+start_loop:
+ ; Find min length
+ movdqa xmm0, [state + _lens_sha256]
+ phminposuw xmm1, xmm0
+ pextrw len2, xmm1, 0 ; min value
+ pextrw idx, xmm1, 1 ; min index (0...3)
+ cmp len2, 0
+ je len_is_0
+
+ pshuflw xmm1, xmm1, 0
+ psubw xmm0, xmm1
+ movdqa [state + _lens_sha256], xmm0
+
+ ; "state" and "args" are the same address, arg1
+ ; len is arg2
+ call sha_256_mult_sse
+ ; state and idx are intact
+
+len_is_0:
+ ; process completed job "idx"
+ imul lane_data, idx, _HMAC_SHA1_LANE_DATA_size
+ lea lane_data, [state + _ldata_sha256 + lane_data]
+ mov DWORD(extra_blocks), [lane_data + _extra_blocks]
+ cmp extra_blocks, 0
+ jne proc_extra_blocks
+ cmp dword [lane_data + _outer_done], 0
+ jne end_loop
+
+proc_outer:
+ mov dword [lane_data + _outer_done], 1
+ mov DWORD(size_offset), [lane_data + _size_offset]
+ mov qword [lane_data + _extra_block + size_offset], 0
+
+ movdqa xmm0, [state + _lens_sha256]
+ XPINSRW xmm0, xmm1, tmp, idx, 1, scale_x16
+ movdqa [state + _lens_sha256], xmm0
+
+ lea tmp, [lane_data + _outer_block]
+ mov job, [lane_data + _job_in_lane]
+ mov [state + _args_data_ptr_sha256 + 8*idx], tmp
+
+ movd xmm0, [state + _args_digest_sha256 + 4*idx + 0*SHA256_DIGEST_ROW_SIZE]
+ pinsrd xmm0, [state + _args_digest_sha256 + 4*idx + 1*SHA256_DIGEST_ROW_SIZE], 1
+ pinsrd xmm0, [state + _args_digest_sha256 + 4*idx + 2*SHA256_DIGEST_ROW_SIZE], 2
+ pinsrd xmm0, [state + _args_digest_sha256 + 4*idx + 3*SHA256_DIGEST_ROW_SIZE], 3
+ pshufb xmm0, [rel byteswap]
+ movd xmm1, [state + _args_digest_sha256 + 4*idx + 4*SHA256_DIGEST_ROW_SIZE]
+ pinsrd xmm1, [state + _args_digest_sha256 + 4*idx + 5*SHA256_DIGEST_ROW_SIZE], 1
+ pinsrd xmm1, [state + _args_digest_sha256 + 4*idx + 6*SHA256_DIGEST_ROW_SIZE], 2
+%ifndef SHA224
+ pinsrd xmm1, [state + _args_digest_sha256 + 4*idx + 7*SHA256_DIGEST_ROW_SIZE], 3
+%endif
+ pshufb xmm1, [rel byteswap]
+ movdqa [lane_data + _outer_block], xmm0
+ movdqa [lane_data + _outer_block + 4*4], xmm1
+%ifdef SHA224
+ mov dword [lane_data + _outer_block + 7*4], 0x80
+%endif
+
+
+ mov tmp, [job + _auth_key_xor_opad]
+ movdqu xmm0, [tmp]
+ movdqu xmm1, [tmp + 4*4]
+ movd [state + _args_digest_sha256 + 4*idx + 0*SHA256_DIGEST_ROW_SIZE], xmm0
+ pextrd [state + _args_digest_sha256 + 4*idx + 1*SHA256_DIGEST_ROW_SIZE], xmm0, 1
+ pextrd [state + _args_digest_sha256 + 4*idx + 2*SHA256_DIGEST_ROW_SIZE], xmm0, 2
+ pextrd [state + _args_digest_sha256 + 4*idx + 3*SHA256_DIGEST_ROW_SIZE], xmm0, 3
+ movd [state + _args_digest_sha256 + 4*idx + 4*SHA256_DIGEST_ROW_SIZE], xmm1
+ pextrd [state + _args_digest_sha256 + 4*idx + 5*SHA256_DIGEST_ROW_SIZE], xmm1, 1
+ pextrd [state + _args_digest_sha256 + 4*idx + 6*SHA256_DIGEST_ROW_SIZE], xmm1, 2
+ pextrd [state + _args_digest_sha256 + 4*idx + 7*SHA256_DIGEST_ROW_SIZE], xmm1, 3
+ jmp start_loop
+
+ align 16
+proc_extra_blocks:
+ mov DWORD(start_offset), [lane_data + _start_offset]
+
+ movdqa xmm0, [state + _lens_sha256]
+ XPINSRW xmm0, xmm1, tmp, idx, extra_blocks, scale_x16
+ movdqa [state + _lens_sha256], xmm0
+
+ lea tmp, [lane_data + _extra_block + start_offset]
+ mov [state + _args_data_ptr_sha256 + 8*idx], tmp
+ mov dword [lane_data + _extra_blocks], 0
+ jmp start_loop
+
+ align 16
+
+copy_lt64:
+ ;; less than one message block of data
+ ;; beginning of source block
+        ;; destination is extra_block, shifted back by len from where the 0x80 byte was pre-populated
+ ;; p2 clobbers unused_lanes, undo before exit
+ lea p2, [lane_data + _extra_block + 64]
+ sub p2, len
+ memcpy_sse_64_1 p2, p, len, tmp4, tmp2, xmm0, xmm1, xmm2, xmm3
+ mov unused_lanes, [state + _unused_lanes_sha256]
+ jmp end_fast_copy
+
+return_null:
+ xor job_rax, job_rax
+ jmp return
+
+ align 16
+end_loop:
+ mov job_rax, [lane_data + _job_in_lane]
+ mov unused_lanes, [state + _unused_lanes_sha256]
+ mov qword [lane_data + _job_in_lane], 0
+ or dword [job_rax + _status], STS_COMPLETED_HMAC
+ shl unused_lanes, 8
+ or unused_lanes, idx
+ mov [state + _unused_lanes_sha256], unused_lanes
+
+ mov p, [job_rax + _auth_tag_output]
+
+%ifdef SHA224
+ cmp qword [job_rax + _auth_tag_output_len_in_bytes], 14
+ jne copy_full_digest
+%else
+ cmp qword [job_rax + _auth_tag_output_len_in_bytes], 16
+ jne copy_full_digest
+%endif
+
+ ;; copy 14 bytes for SHA224 / 16 bytes for SHA256
+ mov DWORD(tmp), [state + _args_digest_sha256 + 4*idx + 0*SHA256_DIGEST_ROW_SIZE]
+ mov DWORD(tmp2), [state + _args_digest_sha256 + 4*idx + 1*SHA256_DIGEST_ROW_SIZE]
+ mov DWORD(tmp3), [state + _args_digest_sha256 + 4*idx + 2*SHA256_DIGEST_ROW_SIZE]
+ mov DWORD(tmp4), [state + _args_digest_sha256 + 4*idx + 3*SHA256_DIGEST_ROW_SIZE]
+ bswap DWORD(tmp)
+ bswap DWORD(tmp2)
+ bswap DWORD(tmp3)
+ bswap DWORD(tmp4)
+ mov [p + 0*4], DWORD(tmp)
+ mov [p + 1*4], DWORD(tmp2)
+ mov [p + 2*4], DWORD(tmp3)
+%ifdef SHA224
+ mov [p + 3*4], WORD(tmp4)
+%else
+ mov [p + 3*4], DWORD(tmp4)
+%endif
+ jmp clear_ret
+
+copy_full_digest:
+ ;; copy 28 bytes for SHA224 / 32 bytes for SHA256
+ mov DWORD(tmp), [state + _args_digest_sha256 + 4*idx + 0*SHA256_DIGEST_ROW_SIZE]
+ mov DWORD(tmp2), [state + _args_digest_sha256 + 4*idx + 1*SHA256_DIGEST_ROW_SIZE]
+ mov DWORD(tmp3), [state + _args_digest_sha256 + 4*idx + 2*SHA256_DIGEST_ROW_SIZE]
+ mov DWORD(tmp4), [state + _args_digest_sha256 + 4*idx + 3*SHA256_DIGEST_ROW_SIZE]
+ bswap DWORD(tmp)
+ bswap DWORD(tmp2)
+ bswap DWORD(tmp3)
+ bswap DWORD(tmp4)
+ mov [p + 0*4], DWORD(tmp)
+ mov [p + 1*4], DWORD(tmp2)
+ mov [p + 2*4], DWORD(tmp3)
+ mov [p + 3*4], DWORD(tmp4)
+
+ mov DWORD(tmp), [state + _args_digest_sha256 + 4*idx + 4*SHA256_DIGEST_ROW_SIZE]
+ mov DWORD(tmp2), [state + _args_digest_sha256 + 4*idx + 5*SHA256_DIGEST_ROW_SIZE]
+ mov DWORD(tmp3), [state + _args_digest_sha256 + 4*idx + 6*SHA256_DIGEST_ROW_SIZE]
+%ifndef SHA224
+ mov DWORD(tmp4), [state + _args_digest_sha256 + 4*idx + 7*SHA256_DIGEST_ROW_SIZE]
+%endif
+ bswap DWORD(tmp)
+ bswap DWORD(tmp2)
+ bswap DWORD(tmp3)
+%ifndef SHA224
+ bswap DWORD(tmp4)
+%endif
+ mov [p + 4*4], DWORD(tmp)
+ mov [p + 5*4], DWORD(tmp2)
+ mov [p + 6*4], DWORD(tmp3)
+%ifndef SHA224
+ mov [p + 7*4], DWORD(tmp4)
+%endif
+
+clear_ret:
+
+%ifdef SAFE_DATA
+ ;; Clear digest (28B/32B), outer_block (28B/32B) and extra_block (64B) of returned job
+%assign J 0
+%rep 7
+ mov dword [state + _args_digest_sha256 + SHA256_DIGEST_WORD_SIZE*idx + J*SHA256_DIGEST_ROW_SIZE], 0
+%assign J (J+1)
+%endrep
+%ifndef SHA224
+ mov dword [state + _args_digest_sha256 + SHA256_DIGEST_WORD_SIZE*idx + 7*SHA256_DIGEST_ROW_SIZE], 0
+%endif
+
+ pxor xmm0, xmm0
+ imul lane_data, idx, _HMAC_SHA1_LANE_DATA_size
+ lea lane_data, [state + _ldata_sha256 + lane_data]
+ ;; Clear first 64 bytes of extra_block
+%assign offset 0
+%rep 4
+ movdqa [lane_data + _extra_block + offset], xmm0
+%assign offset (offset + 16)
+%endrep
+
+ ;; Clear first 28 bytes (SHA-224) or 32 bytes (SHA-256) of outer_block
+ movdqa [lane_data + _outer_block], xmm0
+%ifdef SHA224
+ mov qword [lane_data + _outer_block + 16], 0
+ mov dword [lane_data + _outer_block + 24], 0
+%else
+ movdqa [lane_data + _outer_block + 16], xmm0
+%endif
+%endif ;; SAFE_DATA
+
+return:
+ mov rbx, [rsp + _gpr_save + 8*0]
+ mov rbp, [rsp + _gpr_save + 8*1]
+ mov r12, [rsp + _gpr_save + 8*2]
+%ifndef LINUX
+ mov rsi, [rsp + _gpr_save + 8*3]
+ mov rdi, [rsp + _gpr_save + 8*4]
+%endif
+ mov rsp, [rsp + _rsp_save] ; original SP
+
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/sse/mb_mgr_hmac_sha_384_flush_sse.asm b/src/spdk/intel-ipsec-mb/sse/mb_mgr_hmac_sha_384_flush_sse.asm
new file mode 100644
index 000000000..bc7305001
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/sse/mb_mgr_hmac_sha_384_flush_sse.asm
@@ -0,0 +1,31 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%define FUNC flush_job_hmac_sha_384_sse
+%define SHA_X_DIGEST_SIZE 384
+
+%include "sse/mb_mgr_hmac_sha_512_flush_sse.asm"
diff --git a/src/spdk/intel-ipsec-mb/sse/mb_mgr_hmac_sha_384_submit_sse.asm b/src/spdk/intel-ipsec-mb/sse/mb_mgr_hmac_sha_384_submit_sse.asm
new file mode 100644
index 000000000..04d7d3aaf
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/sse/mb_mgr_hmac_sha_384_submit_sse.asm
@@ -0,0 +1,31 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%define FUNC submit_job_hmac_sha_384_sse
+%define SHA_X_DIGEST_SIZE 384
+
+%include "sse/mb_mgr_hmac_sha_512_submit_sse.asm"
diff --git a/src/spdk/intel-ipsec-mb/sse/mb_mgr_hmac_sha_512_flush_sse.asm b/src/spdk/intel-ipsec-mb/sse/mb_mgr_hmac_sha_512_flush_sse.asm
new file mode 100644
index 000000000..40f61fa4d
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/sse/mb_mgr_hmac_sha_512_flush_sse.asm
@@ -0,0 +1,331 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%include "include/os.asm"
+%include "job_aes_hmac.asm"
+%include "mb_mgr_datastruct.asm"
+%include "include/reg_sizes.asm"
+
+extern sha512_x2_sse
+
+section .data
+default rel
+align 16
+byteswap: ;ddq 0x08090a0b0c0d0e0f0001020304050607
+ dq 0x0001020304050607, 0x08090a0b0c0d0e0f
+len_masks:
+ ;ddq 0x0000000000000000000000000000FFFF
+ dq 0x000000000000FFFF, 0x0000000000000000
+ ;ddq 0x000000000000000000000000FFFF0000
+ dq 0x00000000FFFF0000, 0x0000000000000000
+one: dq 1
+
+section .text
+
+%ifndef FUNC
+%define FUNC flush_job_hmac_sha_512_sse
+%define SHA_X_DIGEST_SIZE 512
+%endif
+
+%if 1
+%ifdef LINUX
+%define arg1 rdi
+%define arg2 rsi
+%else
+%define arg1 rcx
+%define arg2 rdx
+%endif
+
+%define state arg1
+%define job arg2
+%define len2 arg2
+
+
+; idx needs to be in rbx, rbp, r12-r15
+%define idx rbp
+
+%define unused_lanes rbx
+%define lane_data rbx
+%define tmp2 rbx
+
+%define job_rax rax
+%define tmp1 rax
+%define size_offset rax
+%define tmp rax
+%define start_offset rax
+
+%define tmp3 arg1
+
+%define extra_blocks arg2
+%define p arg2
+
+%define tmp4 r8
+
+%define tmp5 r9
+
+%define tmp6 r10
+
+%endif
+
+; This routine clobbers rbx, rbp
+struc STACK
+_gpr_save: resq 2
+_rsp_save: resq 1
+endstruc
+
+%define APPEND(a,b) a %+ b
+
+; JOB* FUNC(MB_MGR_HMAC_SHA_512_OOO *state)
+; arg 1 : rcx : state
+MKGLOBAL(FUNC,function,internal)
+FUNC:
+
+ mov rax, rsp
+ sub rsp, STACK_size
+ and rsp, -16
+
+ mov [rsp + _gpr_save + 8*0], rbx
+ mov [rsp + _gpr_save + 8*1], rbp
+ mov [rsp + _rsp_save], rax ; original SP
+
+ mov unused_lanes, [state + _unused_lanes_sha512]
+ bt unused_lanes, 16+7
+ jc return_null
+
+ ; find a lane with a non-null job
+ xor idx, idx
+ cmp qword [state + _ldata_sha512 + 1 * _SHA512_LANE_DATA_size + _job_in_lane_sha512], 0
+ cmovne idx, [rel one]
+copy_lane_data:
+ ; copy good lane (idx) to empty lanes
+ movdqa xmm0, [state + _lens_sha512]
+ mov tmp, [state + _args_sha512 + _data_ptr_sha512 + PTR_SZ*idx]
+
+%assign I 0
+%rep 2
+ cmp qword [state + _ldata_sha512 + I * _SHA512_LANE_DATA_size + _job_in_lane_sha512], 0
+ jne APPEND(skip_,I)
+ mov [state + _args_sha512 + _data_ptr_sha512 + PTR_SZ*I], tmp
+ por xmm0, [rel len_masks + 16*I]
+APPEND(skip_,I):
+%assign I (I+1)
+%endrep
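+ ;; idle lanes now point at the good lane's data and their lengths are
+ ;; forced to 0xFFFF, so they never win the minimum-length search below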
+
+ movdqa [state + _lens_sha512], xmm0
+
+ phminposuw xmm1, xmm0
+ pextrw len2, xmm1, 0 ; min value
+ pextrw idx, xmm1, 1 ; min index (0...1)
+ cmp len2, 0
+ je len_is_0
+
+ pshuflw xmm1, xmm1, 0xA0
+ psubw xmm0, xmm1
+ movdqa [state + _lens_sha512], xmm0
+
+ ; "state" and "args" are the same address, arg1
+ ; len is arg2
+ call sha512_x2_sse
+ ; state and idx are intact
+
+len_is_0:
+ ; process completed job "idx"
+ imul lane_data, idx, _SHA512_LANE_DATA_size
+ lea lane_data, [state + _ldata_sha512 + lane_data]
+ mov DWORD(extra_blocks), [lane_data + _extra_blocks_sha512]
+ cmp extra_blocks, 0
+ jne proc_extra_blocks
+ cmp dword [lane_data + _outer_done_sha512], 0
+ jne end_loop
+
+proc_outer:
+ mov dword [lane_data + _outer_done_sha512], 1
+ mov DWORD(size_offset), [lane_data + _size_offset_sha512]
+ mov qword [lane_data + _extra_block_sha512 + size_offset], 0
+ mov word [state + _lens_sha512 + 2*idx], 1
+ lea tmp, [lane_data + _outer_block_sha512]
+ mov job, [lane_data + _job_in_lane_sha512]
+ mov [state + _args_data_ptr_sha512 + PTR_SZ*idx], tmp
+
+%assign I 0
+%rep (SHA_X_DIGEST_SIZE / (8*16))
+ movq xmm0, [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + (2*I)*SHA512_DIGEST_ROW_SIZE]
+ pinsrq xmm0, [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + (2*I + 1) *SHA512_DIGEST_ROW_SIZE], 1
+ pshufb xmm0, [rel byteswap]
+ movdqa [lane_data + _outer_block_sha512 + I*16], xmm0
+%assign I (I+1)
+%endrep
+
+ mov tmp, [job + _auth_key_xor_opad]
+%assign I 0
+%rep 4
+ movdqu xmm0, [tmp + I * 16]
+ movq [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 2*I*SHA512_DIGEST_ROW_SIZE], xmm0
+ pextrq [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + (2*I + 1)*SHA512_DIGEST_ROW_SIZE], xmm0, 1
+%assign I (I+1)
+%endrep
+ jmp copy_lane_data
+
+ align 16
+proc_extra_blocks:
+ mov DWORD(start_offset), [lane_data + _start_offset_sha512]
+ mov [state + _lens_sha512 + 2*idx], WORD(extra_blocks)
+ lea tmp, [lane_data + _extra_block_sha512 + start_offset]
+ mov [state + _args_data_ptr_sha512 + PTR_SZ*idx], tmp
+ mov dword [lane_data + _extra_blocks_sha512], 0
+ jmp copy_lane_data
+
+return_null:
+ xor job_rax, job_rax
+ jmp return
+
+ align 16
+end_loop:
+ mov job_rax, [lane_data + _job_in_lane_sha512]
+ mov qword [lane_data + _job_in_lane_sha512], 0
+ or dword [job_rax + _status], STS_COMPLETED_HMAC
+ mov unused_lanes, [state + _unused_lanes_sha512]
+ shl unused_lanes, 8
+ or unused_lanes, idx
+ mov [state + _unused_lanes_sha512], unused_lanes
+
+ mov p, [job_rax + _auth_tag_output]
+
+%if (SHA_X_DIGEST_SIZE != 384)
+ cmp qword [job_rax + _auth_tag_output_len_in_bytes], 32
+ jne copy_full_digest
+%else
+ cmp qword [job_rax + _auth_tag_output_len_in_bytes], 24
+ jne copy_full_digest
+%endif
+ ;; copy 32 bytes for SHA512 / 24 bytes for SHA384
+ mov QWORD(tmp2), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 0*SHA512_DIGEST_ROW_SIZE]
+ mov QWORD(tmp4), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 1*SHA512_DIGEST_ROW_SIZE]
+ mov QWORD(tmp6), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 2*SHA512_DIGEST_ROW_SIZE]
+%if (SHA_X_DIGEST_SIZE != 384)
+ mov QWORD(tmp5), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 3*SHA512_DIGEST_ROW_SIZE]
+%endif
+ bswap QWORD(tmp2)
+ bswap QWORD(tmp4)
+ bswap QWORD(tmp6)
+%if (SHA_X_DIGEST_SIZE != 384)
+ bswap QWORD(tmp5)
+%endif
+ mov [p + 0*8], QWORD(tmp2)
+ mov [p + 1*8], QWORD(tmp4)
+ mov [p + 2*8], QWORD(tmp6)
+%if (SHA_X_DIGEST_SIZE != 384)
+ mov [p + 3*8], QWORD(tmp5)
+%endif
+ jmp clear_ret
+copy_full_digest:
+ ;; copy 64 bytes for SHA512 / 48 bytes for SHA384
+ mov QWORD(tmp2), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 0*SHA512_DIGEST_ROW_SIZE]
+ mov QWORD(tmp4), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 1*SHA512_DIGEST_ROW_SIZE]
+ mov QWORD(tmp6), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 2*SHA512_DIGEST_ROW_SIZE]
+ mov QWORD(tmp5), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 3*SHA512_DIGEST_ROW_SIZE]
+ bswap QWORD(tmp2)
+ bswap QWORD(tmp4)
+ bswap QWORD(tmp6)
+ bswap QWORD(tmp5)
+ mov [p + 0*8], QWORD(tmp2)
+ mov [p + 1*8], QWORD(tmp4)
+ mov [p + 2*8], QWORD(tmp6)
+ mov [p + 3*8], QWORD(tmp5)
+
+ mov QWORD(tmp2), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 4*SHA512_DIGEST_ROW_SIZE]
+ mov QWORD(tmp4), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 5*SHA512_DIGEST_ROW_SIZE]
+%if (SHA_X_DIGEST_SIZE != 384)
+ mov QWORD(tmp6), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 6*SHA512_DIGEST_ROW_SIZE]
+ mov QWORD(tmp5), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 7*SHA512_DIGEST_ROW_SIZE]
+%endif
+ bswap QWORD(tmp2)
+ bswap QWORD(tmp4)
+%if (SHA_X_DIGEST_SIZE != 384)
+ bswap QWORD(tmp6)
+ bswap QWORD(tmp5)
+%endif
+ mov [p + 4*8], QWORD(tmp2)
+ mov [p + 5*8], QWORD(tmp4)
+%if (SHA_X_DIGEST_SIZE != 384)
+ mov [p + 6*8], QWORD(tmp6)
+ mov [p + 7*8], QWORD(tmp5)
+%endif
+
+clear_ret:
+
+%ifdef SAFE_DATA
+ pxor xmm0, xmm0
+
+ ;; Clear digest (48B/64B), outer_block (48B/64B) and extra_block (128B) of returned job
+%assign I 0
+%rep 2
+ cmp qword [state + _ldata_sha512 + (I*_SHA512_LANE_DATA_size) + _job_in_lane_sha512], 0
+ jne APPEND(skip_clear_,I)
+
+ ;; Clear digest (48 bytes for SHA-384, 64 bytes for SHA-512)
+%assign J 0
+%rep 6
+ mov qword [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*I + J*SHA512_DIGEST_ROW_SIZE], 0
+%assign J (J+1)
+%endrep
+%if (SHA_X_DIGEST_SIZE != 384)
+ mov qword [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*I + 6*SHA512_DIGEST_ROW_SIZE], 0
+ mov qword [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*I + 7*SHA512_DIGEST_ROW_SIZE], 0
+%endif
+
+ lea lane_data, [state + _ldata_sha512 + (I*_SHA512_LANE_DATA_size)]
+ ;; Clear first 128 bytes of extra_block
+%assign offset 0
+%rep 8
+ movdqa [lane_data + _extra_block + offset], xmm0
+%assign offset (offset + 16)
+%endrep
+
+ ;; Clear first 48 bytes (SHA-384) or 64 bytes (SHA-512) of outer_block
+ movdqa [lane_data + _outer_block], xmm0
+ movdqa [lane_data + _outer_block + 16], xmm0
+ movdqa [lane_data + _outer_block + 32], xmm0
+%if (SHA_X_DIGEST_SIZE != 384)
+ movdqa [lane_data + _outer_block + 48], xmm0
+%endif
+
+APPEND(skip_clear_,I):
+%assign I (I+1)
+%endrep
+
+%endif ;; SAFE_DATA
+
+return:
+ mov rbx, [rsp + _gpr_save + 8*0]
+ mov rbp, [rsp + _gpr_save + 8*1]
+ mov rsp, [rsp + _rsp_save] ; original SP
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/sse/mb_mgr_hmac_sha_512_submit_sse.asm b/src/spdk/intel-ipsec-mb/sse/mb_mgr_hmac_sha_512_submit_sse.asm
new file mode 100644
index 000000000..0d6da7bce
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/sse/mb_mgr_hmac_sha_512_submit_sse.asm
@@ -0,0 +1,412 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%include "include/os.asm"
+%include "job_aes_hmac.asm"
+%include "mb_mgr_datastruct.asm"
+%include "include/reg_sizes.asm"
+%include "include/memcpy.asm"
+%include "include/const.inc"
+
+extern sha512_x2_sse
+
+section .data
+default rel
+align 16
+byteswap: ;ddq 0x08090a0b0c0d0e0f0001020304050607
+ dq 0x0001020304050607, 0x08090a0b0c0d0e0f
+
+section .text
+
+%ifndef FUNC
+%define FUNC submit_job_hmac_sha_512_sse
+%define SHA_X_DIGEST_SIZE 512
+%endif
+
+%if 1
+%ifdef LINUX
+%define arg1 rdi
+%define arg2 rsi
+%define reg3 rcx
+%define reg4 rdx
+%else
+%define arg1 rcx
+%define arg2 rdx
+%define reg3 rdi
+%define reg4 rsi
+%endif
+
+%define state arg1
+%define job arg2
+%define len2 arg2
+
+
+; idx needs to be in rbx, rbp, r12-r15
+%define last_len rbp
+%define idx rbp
+
+%define p r11
+%define start_offset r11
+
+%define unused_lanes rbx
+%define tmp4 rbx
+
+%define job_rax rax
+%define len rax
+
+%define size_offset reg3
+%define tmp2 reg3
+
+%define lane reg4
+%define tmp3 reg4
+
+%define extra_blocks r8
+
+%define tmp r9
+%define p2 r9
+
+%define lane_data r10
+
+%endif
+
+; This routine clobbers rbx, rbp, rsi, rdi
+struc STACK
+_gpr_save: resq 4
+_rsp_save: resq 1
+endstruc
+
+; JOB* FUNC(MB_MGR_HMAC_SHA_512_OOO *state, JOB_AES_HMAC *job)
+; arg 1 : rcx : state
+; arg 2 : rdx : job
+MKGLOBAL(FUNC,function,internal)
+FUNC:
+
+ mov rax, rsp
+ sub rsp, STACK_size
+ and rsp, -16
+
+ mov [rsp + _gpr_save + 8*0], rbx
+ mov [rsp + _gpr_save + 8*1], rbp
+%ifndef LINUX
+ mov [rsp + _gpr_save + 8*2], rsi
+ mov [rsp + _gpr_save + 8*3], rdi
+%endif
+ mov [rsp + _rsp_save], rax ; original SP
+
+ mov unused_lanes, [state + _unused_lanes_sha512]
+ movzx lane, BYTE(unused_lanes)
+ shr unused_lanes, 8
+ imul lane_data, lane, _SHA512_LANE_DATA_size
+ lea lane_data, [state + _ldata_sha512+ lane_data]
+ mov [state + _unused_lanes_sha512], unused_lanes
+ mov len, [job + _msg_len_to_hash_in_bytes]
+ mov tmp, len
+ shr tmp, 7 ; divide by 128, len in terms of sha512 blocks
+
+ mov [lane_data + _job_in_lane_sha512], job
+ mov dword [lane_data + _outer_done_sha512], 0
+
+ movdqa xmm0, [state + _lens_sha512]
+ XPINSRW xmm0, xmm1, p, lane, tmp, scale_x16
+ movdqa [state + _lens_sha512], xmm0
+
+ mov last_len, len
+ and last_len, 127
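+ ;; extra_blocks = number of additional 128-byte blocks needed for the leftover
+ ;; bytes plus the 0x80 pad byte and the 16-byte length field (17 bytes), rounded up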
+ lea extra_blocks, [last_len + 17 + 127]
+ shr extra_blocks, 7
+ mov [lane_data + _extra_blocks_sha512], DWORD(extra_blocks)
+
+ mov p, [job + _src]
+ add p, [job + _hash_start_src_offset_in_bytes]
+ mov [state + _args_data_ptr_sha512 + PTR_SZ*lane], p
+
+ cmp len, 128
+ jb copy_lt128
+
+fast_copy:
+ add p, len
+%assign I 0
+%rep 2
+ movdqu xmm0, [p - 128 + I*4*16 + 0*16]
+ movdqu xmm1, [p - 128 + I*4*16 + 1*16]
+ movdqu xmm2, [p - 128 + I*4*16 + 2*16]
+ movdqu xmm3, [p - 128 + I*4*16 + 3*16]
+ movdqa [lane_data + _extra_block_sha512 + I*4*16 + 0*16], xmm0
+ movdqa [lane_data + _extra_block_sha512 + I*4*16 + 1*16], xmm1
+ movdqa [lane_data + _extra_block_sha512 + I*4*16 + 2*16], xmm2
+ movdqa [lane_data + _extra_block_sha512 + I*4*16 + 3*16], xmm3
+%assign I (I+1)
+%endrep
+end_fast_copy:
+
+ mov size_offset, extra_blocks
+ shl size_offset, 7
+ sub size_offset, last_len
+ add size_offset, 128-8
+ mov [lane_data + _size_offset_sha512], DWORD(size_offset)
+ mov start_offset, 128
+ sub start_offset, last_len
+ mov [lane_data + _start_offset_sha512], DWORD(start_offset)
+
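+ ;; total length in bits (128-byte ipad block + message), byte-swapped to
+ ;; big-endian and stored as the last 8 bytes of the padded message
+ ;; (the low 64 bits of SHA-512's 128-bit length field)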
+ lea tmp, [8*128 + 8*len]
+ bswap tmp
+ mov [lane_data + _extra_block_sha512 + size_offset], tmp
+
+ mov tmp, [job + _auth_key_xor_ipad]
+ %assign I 0
+ %rep 4
+ movdqu xmm0, [tmp + I * 2 * SHA512_DIGEST_WORD_SIZE]
+ movq [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*lane + (2*I)*SHA512_DIGEST_ROW_SIZE], xmm0
+ pextrq [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*lane + (2*I + 1)*SHA512_DIGEST_ROW_SIZE], xmm0, 1
+ %assign I (I+1)
+ %endrep
+ test len, ~127
+ jnz ge128_bytes
+
+lt128_bytes:
+ movdqa xmm0, [state + _lens_sha512]
+ XPINSRW xmm0, xmm1, tmp, lane, extra_blocks, scale_x16
+ movdqa [state + _lens_sha512], xmm0
+
+ lea tmp, [lane_data + _extra_block_sha512 + start_offset]
+ mov [state + _args_data_ptr_sha512 + PTR_SZ*lane], tmp ;; 8 to hold a UINT8
+ mov dword [lane_data + _extra_blocks_sha512], 0
+
+ge128_bytes:
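+ ;; only start hashing once every lane holds a job (unused_lanes == 0xff);
+ ;; otherwise leave the job queued and return NULL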
+ cmp unused_lanes, 0xff
+ jne return_null
+ jmp start_loop
+
+ align 16
+start_loop:
+ ; Find min length
+ movdqa xmm0, [state + _lens_sha512]
+ phminposuw xmm1, xmm0
+ pextrw DWORD(len2), xmm1, 0 ; min value
+ pextrw DWORD(idx), xmm1, 1 ; min index (0...1)
+ cmp len2, 0
+ je len_is_0
+
+ pshuflw xmm1, xmm1, 0XA0
+ psubw xmm0, xmm1
+ movdqa [state + _lens_sha512], xmm0
+
+ ; "state" and "args" are the same address, arg1
+ ; len is arg2
+ call sha512_x2_sse
+ ; state and idx are intact
+
+len_is_0:
+ ; process completed job "idx"
+ imul lane_data, idx, _SHA512_LANE_DATA_size
+ lea lane_data, [state + _ldata_sha512 + lane_data]
+ mov DWORD(extra_blocks), [lane_data + _extra_blocks_sha512]
+ cmp extra_blocks, 0
+ jne proc_extra_blocks
+ cmp dword [lane_data + _outer_done_sha512], 0
+ jne end_loop
+
+proc_outer:
+ mov dword [lane_data + _outer_done_sha512], 1
+ mov DWORD(size_offset), [lane_data + _size_offset_sha512]
+ mov qword [lane_data + _extra_block_sha512 + size_offset], 0
+
+ movdqa xmm0, [state + _lens_sha512]
+ XPINSRW xmm0, xmm1, tmp, idx, 1, scale_x16
+ movdqa [state + _lens_sha512], xmm0
+
+ lea tmp, [lane_data + _outer_block_sha512]
+ mov job, [lane_data + _job_in_lane_sha512]
+ mov [state + _args_data_ptr_sha512 + PTR_SZ*idx], tmp
+
+%assign I 0
+%rep (SHA_X_DIGEST_SIZE / (8 * 16))
+ movq xmm0, [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + (2*I)*SHA512_DIGEST_ROW_SIZE]
+ pinsrq xmm0, [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + (2*I + 1)*SHA512_DIGEST_ROW_SIZE], 1
+ pshufb xmm0, [rel byteswap]
+ movdqa [lane_data + _outer_block_sha512 + I*16], xmm0
+%assign I (I+1)
+%endrep
+
+ mov tmp, [job + _auth_key_xor_opad]
+%assign I 0
+%rep 4
+ movdqu xmm0, [tmp + I*16]
+ movq [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 2*I*SHA512_DIGEST_ROW_SIZE], xmm0
+ pextrq [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + (2*I + 1)*SHA512_DIGEST_ROW_SIZE], xmm0, 1
+%assign I (I+1)
+%endrep
+ jmp start_loop
+
+ align 16
+proc_extra_blocks:
+ mov DWORD(start_offset), [lane_data + _start_offset_sha512]
+
+ movdqa xmm0, [state + _lens_sha512]
+ XPINSRW xmm0, xmm1, tmp, idx, extra_blocks, scale_x16
+ movdqa [state + _lens_sha512], xmm0
+
+ lea tmp, [lane_data + _extra_block_sha512 + start_offset]
+ mov [state + _args_data_ptr_sha512 + PTR_SZ*idx], tmp
+ mov dword [lane_data + _extra_blocks_sha512], 0
+ jmp start_loop
+
+ align 16
+copy_lt128:
+ ;; less than one message block of data
+ ;; beginning of source block
+ ;; destination extra block but backwards by len from where 0x80 pre-populated
+ lea p2, [lane_data + _extra_block + 128]
+ sub p2, len
+ memcpy_sse_128_1 p2, p, len, tmp4, tmp2, xmm0, xmm1, xmm2, xmm3
+ mov unused_lanes, [state + _unused_lanes_sha512]
+ jmp end_fast_copy
+
+return_null:
+ xor job_rax, job_rax
+ jmp return
+
+ align 16
+end_loop:
+ mov job_rax, [lane_data + _job_in_lane_sha512]
+ mov unused_lanes, [state + _unused_lanes_sha512]
+ mov qword [lane_data + _job_in_lane_sha512], 0
+ or dword [job_rax + _status], STS_COMPLETED_HMAC
+ shl unused_lanes, 8
+ or unused_lanes, idx
+ mov [state + _unused_lanes_sha512], unused_lanes
+
+ mov p, [job_rax + _auth_tag_output]
+
+%if (SHA_X_DIGEST_SIZE != 384)
+ cmp qword [job_rax + _auth_tag_output_len_in_bytes], 32
+ jne copy_full_digest
+%else
+ cmp qword [job_rax + _auth_tag_output_len_in_bytes], 24
+ jne copy_full_digest
+%endif
+
+ ;; copy 32 bytes for SHA512 / 24 bytes for SHA384
+ mov QWORD(tmp), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 0*SHA512_DIGEST_ROW_SIZE]
+ mov QWORD(tmp2), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 1*SHA512_DIGEST_ROW_SIZE]
+ mov QWORD(tmp3), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 2*SHA512_DIGEST_ROW_SIZE]
+%if (SHA_X_DIGEST_SIZE != 384)
+ mov QWORD(tmp4), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 3*SHA512_DIGEST_ROW_SIZE] ; this line of code will run only for SHA512
+%endif
+ bswap QWORD(tmp)
+ bswap QWORD(tmp2)
+ bswap QWORD(tmp3)
+%if (SHA_X_DIGEST_SIZE != 384)
+ bswap QWORD(tmp4)
+%endif
+ mov [p + 0*8], QWORD(tmp)
+ mov [p + 1*8], QWORD(tmp2)
+ mov [p + 2*8], QWORD(tmp3)
+%if (SHA_X_DIGEST_SIZE != 384)
+ mov [p + 3*8], QWORD(tmp4)
+%endif
+ jmp clear_ret
+
+copy_full_digest:
+ ;; copy 64 bytes for SHA512 / 48 bytes for SHA384
+ mov QWORD(tmp), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 0*SHA512_DIGEST_ROW_SIZE]
+ mov QWORD(tmp2), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 1*SHA512_DIGEST_ROW_SIZE]
+ mov QWORD(tmp3), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 2*SHA512_DIGEST_ROW_SIZE]
+ mov QWORD(tmp4), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 3*SHA512_DIGEST_ROW_SIZE] ; this line of code will run only for SHA512
+ bswap QWORD(tmp)
+ bswap QWORD(tmp2)
+ bswap QWORD(tmp3)
+ bswap QWORD(tmp4)
+ mov [p + 0*8], QWORD(tmp)
+ mov [p + 1*8], QWORD(tmp2)
+ mov [p + 2*8], QWORD(tmp3)
+ mov [p + 3*8], QWORD(tmp4)
+ mov QWORD(tmp), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 4*SHA512_DIGEST_ROW_SIZE]
+ mov QWORD(tmp2), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 5*SHA512_DIGEST_ROW_SIZE]
+%if (SHA_X_DIGEST_SIZE != 384)
+ mov QWORD(tmp3), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 6*SHA512_DIGEST_ROW_SIZE]
+ mov QWORD(tmp4), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 7*SHA512_DIGEST_ROW_SIZE] ; this line of code will run only for SHA512
+%endif
+ bswap QWORD(tmp)
+ bswap QWORD(tmp2)
+%if (SHA_X_DIGEST_SIZE != 384)
+ bswap QWORD(tmp3)
+ bswap QWORD(tmp4)
+%endif
+ mov [p + 4*8], QWORD(tmp)
+ mov [p + 5*8], QWORD(tmp2)
+%if (SHA_X_DIGEST_SIZE != 384)
+ mov [p + 6*8], QWORD(tmp3)
+ mov [p + 7*8], QWORD(tmp4)
+%endif
+
+clear_ret:
+
+%ifdef SAFE_DATA
+ ;; Clear digest (48B/64B), outer_block (48B/64B) and extra_block (128B) of returned job
+%assign J 0
+%rep 6
+ mov qword [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + J*SHA512_DIGEST_ROW_SIZE], 0
+%assign J (J+1)
+%endrep
+%if (SHA_X_DIGEST_SIZE != 384)
+ mov qword [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 6*SHA512_DIGEST_ROW_SIZE], 0
+ mov qword [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 7*SHA512_DIGEST_ROW_SIZE], 0
+%endif
+
+ pxor xmm0, xmm0
+ imul lane_data, idx, _SHA512_LANE_DATA_size
+ lea lane_data, [state + _ldata_sha512 + lane_data]
+ ;; Clear first 128 bytes of extra_block
+%assign offset 0
+%rep 8
+ movdqa [lane_data + _extra_block + offset], xmm0
+%assign offset (offset + 16)
+%endrep
+
+ ;; Clear first 48 bytes (SHA-384) or 64 bytes (SHA-512) of outer_block
+ movdqa [lane_data + _outer_block], xmm0
+ movdqa [lane_data + _outer_block + 16], xmm0
+ movdqa [lane_data + _outer_block + 32], xmm0
+%if (SHA_X_DIGEST_SIZE != 384)
+ movdqa [lane_data + _outer_block + 48], xmm0
+%endif
+%endif ;; SAFE_DATA
+
+return:
+ mov rbx, [rsp + _gpr_save + 8*0]
+ mov rbp, [rsp + _gpr_save + 8*1]
+%ifndef LINUX
+ mov rsi, [rsp + _gpr_save + 8*2]
+ mov rdi, [rsp + _gpr_save + 8*3]
+%endif
+ mov rsp, [rsp + _rsp_save] ; original SP
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/sse/mb_mgr_hmac_submit_ni_sse.asm b/src/spdk/intel-ipsec-mb/sse/mb_mgr_hmac_submit_ni_sse.asm
new file mode 100644
index 000000000..e0b0460f4
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/sse/mb_mgr_hmac_submit_ni_sse.asm
@@ -0,0 +1,370 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+;; In System V AMD64 ABI
+;; callee saves: RBX, RBP, R12-R15
+;; Windows x64 ABI
+;; callee saves: RBX, RBP, RDI, RSI, RSP, R12-R15
+;;
+;; Registers: RAX RBX RCX RDX RBP RSI RDI R8 R9 R10 R11 R12 R13 R14 R15
+;; -----------------------------------------------------------
+;; Windows clobbers: RAX RCX RDX R8 R9 R10 R11
+;; Windows preserves: RBX RBP RSI RDI R12 R13 R14 R15
+;; -----------------------------------------------------------
+;; Linux clobbers: RAX RCX RDX RSI RDI R8 R9 R10 R11
+;; Linux preserves: RBX RBP R12 R13 R14 R15
+;; -----------------------------------------------------------
+;;
+;; Linux/Windows clobbers: xmm0 - xmm15
+;;
+
+%include "include/os.asm"
+%include "job_aes_hmac.asm"
+%include "mb_mgr_datastruct.asm"
+%include "include/reg_sizes.asm"
+%include "include/memcpy.asm"
+
+;%define DO_DBGPRINT
+%include "include/dbgprint.asm"
+
+extern sha1_ni
+
+section .data
+default rel
+
+align 16
+byteswap:
+ dq 0x0405060700010203
+ dq 0x0c0d0e0f08090a0b
+
+section .text
+
+%ifdef LINUX
+%define arg1 rdi
+%define arg2 rsi
+%define reg3 rcx
+%define reg4 rdx
+%else
+%define arg1 rcx
+%define arg2 rdx
+%define reg3 rdi
+%define reg4 rsi
+%endif
+
+%define state arg1
+%define job arg2
+%define len2 arg2
+
+; idx needs to be in rbx, rbp, r12-r15
+%define last_len rbp
+%define idx rbp
+%define p4 rbp
+
+%define p r11
+%define start_offset r11
+
+%define unused_lanes rbx
+%define tmp4 rbx
+%define p3 rbx
+
+%define job_rax rax
+%define len rax
+
+%define size_offset reg3
+%define tmp2 reg3
+
+%define lane reg4
+%define tmp3 reg4
+
+%define extra_blocks r8
+
+%define tmp r9
+%define p2 r9
+
+%define lane_data r10
+
+struc STACK
+_gpr_save: resq 4
+_rsp_save: resq 1
+endstruc
+
+; JOB* submit_job_hmac_ni_sse(MB_MGR_HMAC_SHA_1_OOO *state, JOB_AES_HMAC *job)
+; arg 1 : rcx : state
+; arg 2 : rdx : job
+MKGLOBAL(submit_job_hmac_ni_sse,function,internal)
+submit_job_hmac_ni_sse:
+
+ mov rax, rsp
+ sub rsp, STACK_size
+ and rsp, -16
+
+ mov [rsp + _gpr_save + 8*0], rbx
+ mov [rsp + _gpr_save + 8*1], rbp
+%ifndef LINUX
+ mov [rsp + _gpr_save + 8*2], rsi
+ mov [rsp + _gpr_save + 8*3], rdi
+%endif
+ mov [rsp + _rsp_save], rax ; original SP
+
+ DBGPRINTL "enter sha1-ni-sse submit"
+ mov unused_lanes, [state + _unused_lanes]
+ movzx lane, BYTE(unused_lanes)
+ DBGPRINTL64 "lane: ", lane
+ shr unused_lanes, 8
+ imul lane_data, lane, _HMAC_SHA1_LANE_DATA_size
+ lea lane_data, [state + _ldata + lane_data]
+ mov [state + _unused_lanes], unused_lanes
+ mov len, [job + _msg_len_to_hash_in_bytes]
+ DBGPRINTL64 "length: ", len
+ mov tmp, len
+ shr tmp, 6 ; divide by 64, len in terms of blocks
+
+ mov [lane_data + _job_in_lane], job
+ mov dword [lane_data + _outer_done], 0
+ mov [state + _lens + 2*lane], WORD(tmp)
+
+ mov last_len, len
+ and last_len, 63
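+ ;; extra_blocks = number of additional 64-byte blocks needed for the leftover
+ ;; bytes plus the 0x80 pad byte and the 8-byte length field (9 bytes), rounded up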
+ lea extra_blocks, [last_len + 9 + 63]
+ shr extra_blocks, 6
+ mov [lane_data + _extra_blocks], DWORD(extra_blocks)
+
+ mov p, [job + _src]
+ add p, [job + _hash_start_src_offset_in_bytes]
+ DBGPRINTL64 "src pointer + offset:", p
+ mov [state + _args_data_ptr + PTR_SZ*lane], p
+ cmp len, 64
+ jb copy_lt64
+
+fast_copy:
+ add p, len
+ movdqu xmm0, [p - 64 + 0*16]
+ movdqu xmm1, [p - 64 + 1*16]
+ movdqu xmm2, [p - 64 + 2*16]
+ movdqu xmm3, [p - 64 + 3*16]
+ movdqa [lane_data + _extra_block + 0*16], xmm0
+ movdqa [lane_data + _extra_block + 1*16], xmm1
+ movdqa [lane_data + _extra_block + 2*16], xmm2
+ movdqa [lane_data + _extra_block + 3*16], xmm3
+end_fast_copy:
+
+ mov size_offset, extra_blocks
+ shl size_offset, 6
+ sub size_offset, last_len
+ add size_offset, 64-8
+ mov [lane_data + _size_offset], DWORD(size_offset)
+ mov start_offset, 64
+ sub start_offset, last_len
+ mov [lane_data + _start_offset], DWORD(start_offset)
+
+ lea tmp, [8*64 + 8*len]
+ bswap tmp
+ mov [lane_data + _extra_block + size_offset], tmp
+
+ mov tmp, [job + _auth_key_xor_ipad]
+ movdqu xmm0, [tmp]
+ mov DWORD(tmp), [tmp + 4*SHA1_DIGEST_WORD_SIZE]
+%if SHA1NI_DIGEST_ROW_SIZE != 20
+%error "Below code has been optimized for SHA1NI_DIGEST_ROW_SIZE = 20!"
+%endif
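+ ;; each SHA1-NI digest row is 20 bytes (5 dwords); compute p4 = lane*5 so that
+ ;; p4*4 below addresses this lane's digest row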
+ lea p4, [lane + lane*4]
+ movdqu [state + _args_digest + p4*4 + 0*SHA1_DIGEST_WORD_SIZE], xmm0
+ mov [state + _args_digest + p4*4 + 4*SHA1_DIGEST_WORD_SIZE], DWORD(tmp)
+ test len, ~63
+ jnz ge64_bytes
+
+lt64_bytes:
+ mov [state + _lens + 2*lane], WORD(extra_blocks)
+ lea tmp, [lane_data + _extra_block + start_offset]
+ mov [state + _args_data_ptr + PTR_SZ*lane], tmp
+ mov dword [lane_data + _extra_blocks], 0
+
+ge64_bytes:
+ cmp unused_lanes, 0xff
+ jne return_null
+ jmp start_loop
+
+ align 16
+start_loop:
+ ; Find min length - only two lanes available
+ xor len2, len2
+ mov p3, 0x10000
+ mov WORD(len2), word [state + _lens + 0*2] ; [0:15] - lane 0 length, [16:31] - lane index (0)
+ mov WORD(p3), word [state + _lens + 1*2] ; [0:15] - lane 1 length, [16:31] - lane index (1)
+ cmp WORD(len2), WORD(p3)
+ cmovg DWORD(len2), DWORD(p3) ; move if lane 0 length is greater than lane 1 length
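+ ;; DWORD(len2) now holds the shorter lane's length in bits [0:15]
+ ;; and that lane's index in bits [16:31]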
+
+ mov idx, len2 ; retrieve index & length from [16:31] and [0:15] bit fields
+ shr DWORD(idx), 16
+ and DWORD(len2), 0xffff
+ je len_is_0
+
+ sub word [state + _lens + 0*2], WORD(len2)
+ sub word [state + _lens + 1*2], WORD(len2)
+
+ ; "state" and "args" are the same address, arg1
+ ; len is arg2
+ call sha1_ni
+ ; state is intact
+
+len_is_0:
+ ; process completed job "idx"
+ imul lane_data, idx, _HMAC_SHA1_LANE_DATA_size
+ lea lane_data, [state + _ldata + lane_data]
+ mov DWORD(extra_blocks), [lane_data + _extra_blocks]
+ cmp extra_blocks, 0
+ jne proc_extra_blocks
+ cmp dword [lane_data + _outer_done], 0
+ jne end_loop
+
+proc_outer:
+ mov dword [lane_data + _outer_done], 1
+ mov DWORD(size_offset), [lane_data + _size_offset]
+ mov qword [lane_data + _extra_block + size_offset], 0
+ mov word [state + _lens + 2*idx], 1
+ lea tmp, [lane_data + _outer_block]
+ mov job, [lane_data + _job_in_lane]
+ mov [state + _args_data_ptr + PTR_SZ*idx], tmp
+
+%if SHA1NI_DIGEST_ROW_SIZE != 20
+%error "Below code has been optimized for SHA1NI_DIGEST_ROW_SIZE = 20!"
+%endif
+ lea p3, [idx + idx*4]
+ movdqu xmm0, [state + _args_digest + p3*4 + 0*SHA1_DIGEST_WORD_SIZE]
+ pshufb xmm0, [rel byteswap]
+ mov DWORD(tmp), [state + _args_digest + p3*4 + 4*SHA1_DIGEST_WORD_SIZE]
+ bswap DWORD(tmp)
+ movdqa [lane_data + _outer_block], xmm0
+ mov [lane_data + _outer_block + 4*SHA1_DIGEST_WORD_SIZE], DWORD(tmp)
+
+ mov tmp, [job + _auth_key_xor_opad]
+ movdqu xmm0, [tmp]
+ mov DWORD(tmp), [tmp + 4*SHA1_DIGEST_WORD_SIZE]
+ movdqu [state + _args_digest + p3*4 + 0*SHA1_DIGEST_WORD_SIZE], xmm0
+ mov [state + _args_digest + p3*4 + 4*SHA1_DIGEST_WORD_SIZE], DWORD(tmp)
+ jmp start_loop
+
+ align 16
+proc_extra_blocks:
+ mov DWORD(start_offset), [lane_data + _start_offset]
+ mov [state + _lens + 2*idx], WORD(extra_blocks)
+ lea tmp, [lane_data + _extra_block + start_offset]
+ mov [state + _args_data_ptr + PTR_SZ*idx], tmp
+ mov dword [lane_data + _extra_blocks], 0
+ jmp start_loop
+
+ align 16
+copy_lt64:
+ ;; less than one message block of data
+ ;; beginning of source block
+ ;; destination extra block but backwards by len from where 0x80 pre-populated
+ lea p2, [lane_data + _extra_block + 64]
+ sub p2, len
+ memcpy_sse_64_1 p2, p, len, tmp4, tmp2, xmm0, xmm1, xmm2, xmm3
+ mov unused_lanes, [state + _unused_lanes]
+ jmp end_fast_copy
+
+return_null:
+ xor job_rax, job_rax
+ jmp return
+
+ align 16
+end_loop:
+ mov job_rax, [lane_data + _job_in_lane]
+ mov unused_lanes, [state + _unused_lanes]
+ mov qword [lane_data + _job_in_lane], 0
+ or dword [job_rax + _status], STS_COMPLETED_HMAC
+ shl unused_lanes, 8
+ or unused_lanes, idx
+ mov [state + _unused_lanes], unused_lanes
+
+ mov p, [job_rax + _auth_tag_output]
+
+ ; copy 12 bytes
+%if SHA1NI_DIGEST_ROW_SIZE != 20
+%error "Below code has been optimized for SHA1NI_DIGEST_ROW_SIZE = 20!"
+%endif
+ lea idx, [idx + 4*idx]
+ mov DWORD(tmp), [state + _args_digest + idx*4 + 0*SHA1_DIGEST_WORD_SIZE]
+ mov DWORD(tmp2), [state + _args_digest + idx*4 + 1*SHA1_DIGEST_WORD_SIZE]
+ mov DWORD(tmp3), [state + _args_digest + idx*4 + 2*SHA1_DIGEST_WORD_SIZE]
+ bswap DWORD(tmp)
+ bswap DWORD(tmp2)
+ bswap DWORD(tmp3)
+ mov [p + 0*SHA1_DIGEST_WORD_SIZE], DWORD(tmp)
+ mov [p + 1*SHA1_DIGEST_WORD_SIZE], DWORD(tmp2)
+ mov [p + 2*SHA1_DIGEST_WORD_SIZE], DWORD(tmp3)
+
+ cmp qword [job_rax + _auth_tag_output_len_in_bytes], 12
+ je clear_ret
+
+ ;; copy remaining 8 bytes to return 20 byte digest
+ mov DWORD(tmp), [state + _args_digest + idx*4 + 3*SHA1_DIGEST_WORD_SIZE]
+ mov DWORD(tmp2), [state + _args_digest + idx*4 + 4*SHA1_DIGEST_WORD_SIZE]
+ bswap DWORD(tmp)
+ bswap DWORD(tmp2)
+ mov [p + 3*4], DWORD(tmp)
+ mov [p + 4*4], DWORD(tmp2)
+
+clear_ret:
+
+%ifdef SAFE_DATA
+ pxor xmm0, xmm0
+ ;; Clear digest (20B), outer_block (20B) and extra_block (64B)
+ ;; idx = 0 or 5 (depending on lane)
+ movdqu [state + _args_digest + idx*4], xmm0
+ mov dword [state + _args_digest + idx*4 + 16], 0
+
+ shr idx, 2 ;; idx == 5 ? 1 : 0
+ imul lane_data, idx, _HMAC_SHA1_LANE_DATA_size
+ lea lane_data, [state + _ldata + lane_data]
+ ;; Clear first 64 bytes of extra_block
+%assign offset 0
+%rep 4
+ movdqa [lane_data + _extra_block + offset], xmm0
+%assign offset (offset + 16)
+%endrep
+
+ ;; Clear 20 bytes of outer_block
+ movdqa [lane_data + _outer_block], xmm0
+ mov dword [lane_data + _outer_block + 16], 0
+%endif
+
+return:
+ mov rbx, [rsp + _gpr_save + 8*0]
+ mov rbp, [rsp + _gpr_save + 8*1]
+%ifndef LINUX
+ mov rsi, [rsp + _gpr_save + 8*2]
+ mov rdi, [rsp + _gpr_save + 8*3]
+%endif
+ mov rsp, [rsp + _rsp_save] ; original SP
+
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/sse/mb_mgr_hmac_submit_sse.asm b/src/spdk/intel-ipsec-mb/sse/mb_mgr_hmac_submit_sse.asm
new file mode 100644
index 000000000..bc59e7943
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/sse/mb_mgr_hmac_submit_sse.asm
@@ -0,0 +1,364 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%include "include/os.asm"
+%include "job_aes_hmac.asm"
+%include "mb_mgr_datastruct.asm"
+%include "include/reg_sizes.asm"
+%include "include/memcpy.asm"
+%include "include/const.inc"
+
+;%define DO_DBGPRINT
+%include "include/dbgprint.asm"
+
+extern sha1_mult_sse
+
+section .data
+default rel
+
+align 16
+byteswap: ;ddq 0x0c0d0e0f08090a0b0405060700010203
+ dq 0x0405060700010203, 0x0c0d0e0f08090a0b
+
+section .text
+
+%if 1
+%ifdef LINUX
+%define arg1 rdi
+%define arg2 rsi
+%define reg3 rcx
+%define reg4 rdx
+%else
+%define arg1 rcx
+%define arg2 rdx
+%define reg3 rdi
+%define reg4 rsi
+%endif
+
+%define state arg1
+%define job arg2
+%define len2 arg2
+
+
+; idx needs to be in rbx, rbp, r12-r15
+%define last_len rbp
+%define idx rbp
+
+%define p r11
+%define start_offset r11
+
+%define unused_lanes rbx
+%define tmp4 rbx
+
+%define job_rax rax
+%define len rax
+
+%define size_offset reg3
+%define tmp2 reg3
+
+%define lane reg4
+%define tmp3 reg4
+
+%define extra_blocks r8
+
+%define tmp r9
+%define p2 r9
+
+%define lane_data r10
+
+%endif
+
+; This routine clobbers rdi, rsi, rbx, rbp
+struc STACK
+_gpr_save: resq 4
+_rsp_save: resq 1
+endstruc
+
+; JOB* submit_job_hmac_sse(MB_MGR_HMAC_SHA_1_OOO *state, JOB_AES_HMAC *job)
+; arg 1 : rcx : state
+; arg 2 : rdx : job
+MKGLOBAL(submit_job_hmac_sse,function,internal)
+submit_job_hmac_sse:
+
+ mov rax, rsp
+ sub rsp, STACK_size
+ and rsp, -16
+
+ mov [rsp + _gpr_save + 8*0], rbx
+ mov [rsp + _gpr_save + 8*1], rbp
+%ifndef LINUX
+ mov [rsp + _gpr_save + 8*2], rsi
+ mov [rsp + _gpr_save + 8*3], rdi
+%endif
+ mov [rsp + _rsp_save], rax ; original SP
+
+ DBGPRINTL "enter sha1-sse submit"
+ mov unused_lanes, [state + _unused_lanes]
+ movzx lane, BYTE(unused_lanes)
+ shr unused_lanes, 8
+ imul lane_data, lane, _HMAC_SHA1_LANE_DATA_size
+ lea lane_data, [state + _ldata + lane_data]
+ mov [state + _unused_lanes], unused_lanes
+ mov len, [job + _msg_len_to_hash_in_bytes]
+ mov tmp, len
+ shr tmp, 6 ; divide by 64, len in terms of blocks
+
+ mov [lane_data + _job_in_lane], job
+ mov dword [lane_data + _outer_done], 0
+
+ movdqa xmm0, [state + _lens]
+ XPINSRW xmm0, xmm1, p, lane, tmp, scale_x16
+ movdqa [state + _lens], xmm0
+
+ mov last_len, len
+ and last_len, 63
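+ ;; extra_blocks = number of additional 64-byte blocks needed for the leftover
+ ;; bytes plus the 0x80 pad byte and the 8-byte length field (9 bytes), rounded up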
+ lea extra_blocks, [last_len + 9 + 63]
+ shr extra_blocks, 6
+ mov [lane_data + _extra_blocks], DWORD(extra_blocks)
+
+ mov p, [job + _src]
+ add p, [job + _hash_start_src_offset_in_bytes]
+ mov [state + _args_data_ptr + PTR_SZ*lane], p
+ cmp len, 64
+ jb copy_lt64
+
+fast_copy:
+ add p, len
+ movdqu xmm0, [p - 64 + 0*16]
+ movdqu xmm1, [p - 64 + 1*16]
+ movdqu xmm2, [p - 64 + 2*16]
+ movdqu xmm3, [p - 64 + 3*16]
+ movdqa [lane_data + _extra_block + 0*16], xmm0
+ movdqa [lane_data + _extra_block + 1*16], xmm1
+ movdqa [lane_data + _extra_block + 2*16], xmm2
+ movdqa [lane_data + _extra_block + 3*16], xmm3
+end_fast_copy:
+
+ mov size_offset, extra_blocks
+ shl size_offset, 6
+ sub size_offset, last_len
+ add size_offset, 64-8
+ mov [lane_data + _size_offset], DWORD(size_offset)
+ mov start_offset, 64
+ sub start_offset, last_len
+ mov [lane_data + _start_offset], DWORD(start_offset)
+
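+ ;; total length in bits (64-byte ipad block + message), byte-swapped to
+ ;; big-endian and stored as SHA-1's 64-bit length field at the end of
+ ;; the padded message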
+ lea tmp, [8*64 + 8*len]
+ bswap tmp
+ mov [lane_data + _extra_block + size_offset], tmp
+
+ mov tmp, [job + _auth_key_xor_ipad]
+ movdqu xmm0, [tmp]
+ mov DWORD(tmp), [tmp + 4*4]
+ movd [state + _args_digest + SHA1_DIGEST_WORD_SIZE*lane + 0*SHA1_DIGEST_ROW_SIZE], xmm0
+ pextrd [state + _args_digest + SHA1_DIGEST_WORD_SIZE*lane + 1*SHA1_DIGEST_ROW_SIZE], xmm0, 1
+ pextrd [state + _args_digest + SHA1_DIGEST_WORD_SIZE*lane + 2*SHA1_DIGEST_ROW_SIZE], xmm0, 2
+ pextrd [state + _args_digest + SHA1_DIGEST_WORD_SIZE*lane + 3*SHA1_DIGEST_ROW_SIZE], xmm0, 3
+ mov [state + _args_digest + SHA1_DIGEST_WORD_SIZE*lane + 4*SHA1_DIGEST_ROW_SIZE], DWORD(tmp)
+
+ test len, ~63
+ jnz ge64_bytes
+
+lt64_bytes:
+ movdqa xmm0, [state + _lens]
+ XPINSRW xmm0, xmm1, tmp, lane, extra_blocks, scale_x16
+ movdqa [state + _lens], xmm0
+
+ lea tmp, [lane_data + _extra_block + start_offset]
+ mov [state + _args_data_ptr + PTR_SZ*lane], tmp
+ mov dword [lane_data + _extra_blocks], 0
+
+ge64_bytes:
+ cmp unused_lanes, 0xff
+ jne return_null
+ movdqa xmm0, [state + _lens]
+ jmp start_loop
+
+ align 16
+start_loop:
+ ; Find min length
+ phminposuw xmm1, xmm0
+ pextrw len2, xmm1, 0 ; min value
+ pextrw idx, xmm1, 1 ; min index (0...3)
+ cmp len2, 0
+ je len_is_0
+
+ pshuflw xmm1, xmm1, 0
+ psubw xmm0, xmm1
+ movdqa [state + _lens], xmm0
+
+ ; "state" and "args" are the same address, arg1
+ ; len is arg2
+ call sha1_mult_sse
+ ; state is intact
+
+len_is_0:
+ ; process completed job "idx"
+ imul lane_data, idx, _HMAC_SHA1_LANE_DATA_size
+ lea lane_data, [state + _ldata + lane_data]
+ mov DWORD(extra_blocks), [lane_data + _extra_blocks]
+ cmp extra_blocks, 0
+ jne proc_extra_blocks
+ cmp dword [lane_data + _outer_done], 0
+ jne end_loop
+
+proc_outer:
+ mov dword [lane_data + _outer_done], 1
+ mov DWORD(size_offset), [lane_data + _size_offset]
+ mov qword [lane_data + _extra_block + size_offset], 0
+
+ movdqa xmm1, [state + _lens]
+ XPINSRW xmm1, xmm2, tmp, idx, 1, scale_x16
+ movdqa [state + _lens], xmm1
+
+ lea tmp, [lane_data + _outer_block]
+ mov job, [lane_data + _job_in_lane]
+ mov [state + _args_data_ptr + PTR_SZ*idx], tmp
+
+ movd xmm0, [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 0*SHA1_DIGEST_ROW_SIZE]
+ pinsrd xmm0, [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 1*SHA1_DIGEST_ROW_SIZE], 1
+ pinsrd xmm0, [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 2*SHA1_DIGEST_ROW_SIZE], 2
+ pinsrd xmm0, [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 3*SHA1_DIGEST_ROW_SIZE], 3
+ pshufb xmm0, [rel byteswap]
+ mov DWORD(tmp), [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 4*SHA1_DIGEST_ROW_SIZE]
+ bswap DWORD(tmp)
+ movdqa [lane_data + _outer_block], xmm0
+ mov [lane_data + _outer_block + 4*SHA1_DIGEST_WORD_SIZE], DWORD(tmp)
+
+ mov tmp, [job + _auth_key_xor_opad]
+ movdqu xmm0, [tmp]
+ mov DWORD(tmp), [tmp + 4*4]
+ movd [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 0*SHA1_DIGEST_ROW_SIZE], xmm0
+ pextrd [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 1*SHA1_DIGEST_ROW_SIZE], xmm0, 1
+ pextrd [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 2*SHA1_DIGEST_ROW_SIZE], xmm0, 2
+ pextrd [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 3*SHA1_DIGEST_ROW_SIZE], xmm0, 3
+ mov [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 4*SHA1_DIGEST_ROW_SIZE], DWORD(tmp)
+ movdqa xmm0, xmm1
+ jmp start_loop
+
+ align 16
+proc_extra_blocks:
+ mov DWORD(start_offset), [lane_data + _start_offset]
+
+ movdqa xmm0, [state + _lens]
+ XPINSRW xmm0, xmm1, tmp, idx, extra_blocks, scale_x16
+ movdqa [state + _lens], xmm0
+
+ lea tmp, [lane_data + _extra_block + start_offset]
+ mov [state + _args_data_ptr + PTR_SZ*idx], tmp
+ mov dword [lane_data + _extra_blocks], 0
+ jmp start_loop
+
+ align 16
+copy_lt64:
+ ;; less than one message block of data
+ ;; beginning of source block
+ ;; destination extra block but backwards by len from where 0x80 pre-populated
+ lea p2, [lane_data + _extra_block + 64]
+ sub p2, len
+ memcpy_sse_64_1 p2, p, len, tmp4, tmp2, xmm0, xmm1, xmm2, xmm3
+ mov unused_lanes, [state + _unused_lanes]
+ jmp end_fast_copy
+
+return_null:
+ xor job_rax, job_rax
+ jmp return
+
+ align 16
+end_loop:
+ mov job_rax, [lane_data + _job_in_lane]
+ mov unused_lanes, [state + _unused_lanes]
+ mov qword [lane_data + _job_in_lane], 0
+ or dword [job_rax + _status], STS_COMPLETED_HMAC
+ shl unused_lanes, 8
+ or unused_lanes, idx
+ mov [state + _unused_lanes], unused_lanes
+
+ mov p, [job_rax + _auth_tag_output]
+
+ ; copy 12 bytes
+ mov DWORD(tmp), [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 0*SHA1_DIGEST_ROW_SIZE]
+ mov DWORD(tmp2), [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 1*SHA1_DIGEST_ROW_SIZE]
+ mov DWORD(tmp3), [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 2*SHA1_DIGEST_ROW_SIZE]
+ bswap DWORD(tmp)
+ bswap DWORD(tmp2)
+ bswap DWORD(tmp3)
+ mov [p + 0*SHA1_DIGEST_WORD_SIZE], DWORD(tmp)
+ mov [p + 1*SHA1_DIGEST_WORD_SIZE], DWORD(tmp2)
+ mov [p + 2*SHA1_DIGEST_WORD_SIZE], DWORD(tmp3)
+
+ cmp qword [job_rax + _auth_tag_output_len_in_bytes], 12
+ je clear_ret
+
+ ;; copy remaining 8 bytes to return 20 byte digest
+ mov DWORD(tmp), [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 3*SHA1_DIGEST_ROW_SIZE]
+ mov DWORD(tmp2), [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 4*SHA1_DIGEST_ROW_SIZE]
+ bswap DWORD(tmp)
+ bswap DWORD(tmp2)
+ mov [p + 3*SHA1_DIGEST_WORD_SIZE], DWORD(tmp)
+ mov [p + 4*SHA1_DIGEST_WORD_SIZE], DWORD(tmp2)
+
+clear_ret:
+
+%ifdef SAFE_DATA
+ ;; Clear digest (20B), outer_block (20B) and extra_block (64B) of returned job
+ mov dword [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 0*SHA1_DIGEST_ROW_SIZE], 0
+ mov dword [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 1*SHA1_DIGEST_ROW_SIZE], 0
+ mov dword [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 2*SHA1_DIGEST_ROW_SIZE], 0
+ mov dword [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 3*SHA1_DIGEST_ROW_SIZE], 0
+ mov dword [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 4*SHA1_DIGEST_ROW_SIZE], 0
+
+ pxor xmm0, xmm0
+ imul lane_data, idx, _HMAC_SHA1_LANE_DATA_size
+ lea lane_data, [state + _ldata + lane_data]
+ ;; Clear first 64 bytes of extra_block
+%assign offset 0
+%rep 4
+ movdqa [lane_data + _extra_block + offset], xmm0
+%assign offset (offset + 16)
+%endrep
+
+ ;; Clear first 20 bytes of outer_block
+ movdqa [lane_data + _outer_block], xmm0
+ mov dword [lane_data + _outer_block + 16], 0
+%endif
+
+return:
+
+ mov rbx, [rsp + _gpr_save + 8*0]
+ mov rbp, [rsp + _gpr_save + 8*1]
+%ifndef LINUX
+ mov rsi, [rsp + _gpr_save + 8*2]
+ mov rdi, [rsp + _gpr_save + 8*3]
+%endif
+ mov rsp, [rsp + _rsp_save] ; original SP
+
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/sse/mb_mgr_sse.c b/src/spdk/intel-ipsec-mb/sse/mb_mgr_sse.c
new file mode 100644
index 000000000..4d862cba2
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/sse/mb_mgr_sse.c
@@ -0,0 +1,809 @@
+/*******************************************************************************
+ Copyright (c) 2012-2018, Intel Corporation
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*******************************************************************************/
+
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#define CLEAR_SCRATCH_SIMD_REGS clear_scratch_xmms_sse
+
+#include "intel-ipsec-mb.h"
+#include "include/kasumi_internal.h"
+#include "include/zuc_internal.h"
+#include "include/snow3g.h"
+
+#include "save_xmms.h"
+#include "asm.h"
+#include "des.h"
+#include "cpu_feature.h"
+#include "noaesni.h"
+
+JOB_AES_HMAC *submit_job_aes128_enc_sse(MB_MGR_AES_OOO *state,
+ JOB_AES_HMAC *job);
+JOB_AES_HMAC *flush_job_aes128_enc_sse(MB_MGR_AES_OOO *state);
+
+JOB_AES_HMAC *submit_job_aes192_enc_sse(MB_MGR_AES_OOO *state,
+ JOB_AES_HMAC *job);
+JOB_AES_HMAC *flush_job_aes192_enc_sse(MB_MGR_AES_OOO *state);
+
+JOB_AES_HMAC *submit_job_aes256_enc_sse(MB_MGR_AES_OOO *state,
+ JOB_AES_HMAC *job);
+JOB_AES_HMAC *flush_job_aes256_enc_sse(MB_MGR_AES_OOO *state);
+
+JOB_AES_HMAC *submit_job_hmac_sse(MB_MGR_HMAC_SHA_1_OOO *state,
+ JOB_AES_HMAC *job);
+JOB_AES_HMAC *flush_job_hmac_sse(MB_MGR_HMAC_SHA_1_OOO *state);
+
+JOB_AES_HMAC *submit_job_hmac_ni_sse(MB_MGR_HMAC_SHA_1_OOO *state,
+ JOB_AES_HMAC *job);
+JOB_AES_HMAC *flush_job_hmac_ni_sse(MB_MGR_HMAC_SHA_1_OOO *state);
+
+JOB_AES_HMAC *submit_job_hmac_sha_224_sse(MB_MGR_HMAC_SHA_256_OOO *state,
+ JOB_AES_HMAC *job);
+JOB_AES_HMAC *flush_job_hmac_sha_224_sse(MB_MGR_HMAC_SHA_256_OOO *state);
+
+JOB_AES_HMAC *submit_job_hmac_sha_224_ni_sse(MB_MGR_HMAC_SHA_256_OOO *state,
+ JOB_AES_HMAC *job);
+JOB_AES_HMAC *flush_job_hmac_sha_224_ni_sse(MB_MGR_HMAC_SHA_256_OOO *state);
+
+JOB_AES_HMAC *submit_job_hmac_sha_256_sse(MB_MGR_HMAC_SHA_256_OOO *state,
+ JOB_AES_HMAC *job);
+JOB_AES_HMAC *flush_job_hmac_sha_256_sse(MB_MGR_HMAC_SHA_256_OOO *state);
+
+JOB_AES_HMAC *submit_job_hmac_sha_256_ni_sse(MB_MGR_HMAC_SHA_256_OOO *state,
+ JOB_AES_HMAC *job);
+JOB_AES_HMAC *flush_job_hmac_sha_256_ni_sse(MB_MGR_HMAC_SHA_256_OOO *state);
+
+JOB_AES_HMAC *submit_job_hmac_sha_384_sse(MB_MGR_HMAC_SHA_512_OOO *state,
+ JOB_AES_HMAC *job);
+JOB_AES_HMAC *flush_job_hmac_sha_384_sse(MB_MGR_HMAC_SHA_512_OOO *state);
+
+JOB_AES_HMAC *submit_job_hmac_sha_512_sse(MB_MGR_HMAC_SHA_512_OOO *state,
+ JOB_AES_HMAC *job);
+JOB_AES_HMAC *flush_job_hmac_sha_512_sse(MB_MGR_HMAC_SHA_512_OOO *state);
+
+JOB_AES_HMAC *submit_job_hmac_md5_sse(MB_MGR_HMAC_MD5_OOO *state,
+ JOB_AES_HMAC *job);
+JOB_AES_HMAC *flush_job_hmac_md5_sse(MB_MGR_HMAC_MD5_OOO *state);
+
+
+JOB_AES_HMAC *submit_job_aes_xcbc_sse(MB_MGR_AES_XCBC_OOO *state,
+ JOB_AES_HMAC *job);
+JOB_AES_HMAC *flush_job_aes_xcbc_sse(MB_MGR_AES_XCBC_OOO *state);
+
+JOB_AES_HMAC *submit_job_aes_cmac_auth_sse(MB_MGR_CMAC_OOO *state,
+ JOB_AES_HMAC *job);
+
+JOB_AES_HMAC *flush_job_aes_cmac_auth_sse(MB_MGR_CMAC_OOO *state);
+
+JOB_AES_HMAC *submit_job_aes_ccm_auth_sse(MB_MGR_CCM_OOO *state,
+ JOB_AES_HMAC *job);
+
+JOB_AES_HMAC *flush_job_aes_ccm_auth_sse(MB_MGR_CCM_OOO *state);
+
+JOB_AES_HMAC *submit_job_aes_cntr_sse(JOB_AES_HMAC *job);
+
+JOB_AES_HMAC *submit_job_aes_cntr_bit_sse(JOB_AES_HMAC *job);
+
+#define SAVE_XMMS save_xmms
+#define RESTORE_XMMS restore_xmms
+
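+/*
+ * The defines below bind the generic job manager hook names to the
+ * SSE-specific implementations declared above.
+ */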
+#define SUBMIT_JOB_AES128_ENC submit_job_aes128_enc_sse
+#define SUBMIT_JOB_AES128_DEC submit_job_aes128_dec_sse
+#define FLUSH_JOB_AES128_ENC flush_job_aes128_enc_sse
+#define SUBMIT_JOB_AES192_ENC submit_job_aes192_enc_sse
+#define SUBMIT_JOB_AES192_DEC submit_job_aes192_dec_sse
+#define FLUSH_JOB_AES192_ENC flush_job_aes192_enc_sse
+#define SUBMIT_JOB_AES256_ENC submit_job_aes256_enc_sse
+#define SUBMIT_JOB_AES256_DEC submit_job_aes256_dec_sse
+#define FLUSH_JOB_AES256_ENC flush_job_aes256_enc_sse
+#define SUBMIT_JOB_AES_ECB_128_ENC submit_job_aes_ecb_128_enc_sse
+#define SUBMIT_JOB_AES_ECB_128_DEC submit_job_aes_ecb_128_dec_sse
+#define SUBMIT_JOB_AES_ECB_192_ENC submit_job_aes_ecb_192_enc_sse
+#define SUBMIT_JOB_AES_ECB_192_DEC submit_job_aes_ecb_192_dec_sse
+#define SUBMIT_JOB_AES_ECB_256_ENC submit_job_aes_ecb_256_enc_sse
+#define SUBMIT_JOB_AES_ECB_256_DEC submit_job_aes_ecb_256_dec_sse
+#define SUBMIT_JOB_HMAC submit_job_hmac_sse
+#define FLUSH_JOB_HMAC flush_job_hmac_sse
+#define SUBMIT_JOB_HMAC_NI submit_job_hmac_ni_sse
+#define FLUSH_JOB_HMAC_NI flush_job_hmac_ni_sse
+#define SUBMIT_JOB_HMAC_SHA_224 submit_job_hmac_sha_224_sse
+#define FLUSH_JOB_HMAC_SHA_224 flush_job_hmac_sha_224_sse
+#define SUBMIT_JOB_HMAC_SHA_224_NI submit_job_hmac_sha_224_ni_sse
+#define FLUSH_JOB_HMAC_SHA_224_NI flush_job_hmac_sha_224_ni_sse
+#define SUBMIT_JOB_HMAC_SHA_256 submit_job_hmac_sha_256_sse
+#define FLUSH_JOB_HMAC_SHA_256 flush_job_hmac_sha_256_sse
+#define SUBMIT_JOB_HMAC_SHA_256_NI submit_job_hmac_sha_256_ni_sse
+#define FLUSH_JOB_HMAC_SHA_256_NI flush_job_hmac_sha_256_ni_sse
+#define SUBMIT_JOB_HMAC_SHA_384 submit_job_hmac_sha_384_sse
+#define FLUSH_JOB_HMAC_SHA_384 flush_job_hmac_sha_384_sse
+#define SUBMIT_JOB_HMAC_SHA_512 submit_job_hmac_sha_512_sse
+#define FLUSH_JOB_HMAC_SHA_512 flush_job_hmac_sha_512_sse
+#define SUBMIT_JOB_HMAC_MD5 submit_job_hmac_md5_sse
+#define FLUSH_JOB_HMAC_MD5 flush_job_hmac_md5_sse
+#define SUBMIT_JOB_AES_XCBC submit_job_aes_xcbc_sse
+#define FLUSH_JOB_AES_XCBC flush_job_aes_xcbc_sse
+
+#define SUBMIT_JOB_AES_CNTR submit_job_aes_cntr_sse
+#define SUBMIT_JOB_AES_CNTR_BIT submit_job_aes_cntr_bit_sse
+
+#define AES_CBC_DEC_128 aes_cbc_dec_128_sse
+#define AES_CBC_DEC_192 aes_cbc_dec_192_sse
+#define AES_CBC_DEC_256 aes_cbc_dec_256_sse
+
+#define AES_CNTR_128 aes_cntr_128_sse
+#define AES_CNTR_192 aes_cntr_192_sse
+#define AES_CNTR_256 aes_cntr_256_sse
+
+#define AES_CNTR_CCM_128 aes_cntr_ccm_128_sse
+
+#define AES_ECB_ENC_128 aes_ecb_enc_128_sse
+#define AES_ECB_ENC_192 aes_ecb_enc_192_sse
+#define AES_ECB_ENC_256 aes_ecb_enc_256_sse
+#define AES_ECB_DEC_128 aes_ecb_dec_128_sse
+#define AES_ECB_DEC_192 aes_ecb_dec_192_sse
+#define AES_ECB_DEC_256 aes_ecb_dec_256_sse
+
+#define SUBMIT_JOB_PON_ENC submit_job_pon_enc_sse
+#define SUBMIT_JOB_PON_DEC submit_job_pon_dec_sse
+#define SUBMIT_JOB_PON_ENC_NO_CTR submit_job_pon_enc_no_ctr_sse
+#define SUBMIT_JOB_PON_DEC_NO_CTR submit_job_pon_dec_no_ctr_sse
+
+#ifndef NO_GCM
+#define AES_GCM_DEC_128 aes_gcm_dec_128_sse
+#define AES_GCM_ENC_128 aes_gcm_enc_128_sse
+#define AES_GCM_DEC_192 aes_gcm_dec_192_sse
+#define AES_GCM_ENC_192 aes_gcm_enc_192_sse
+#define AES_GCM_DEC_256 aes_gcm_dec_256_sse
+#define AES_GCM_ENC_256 aes_gcm_enc_256_sse
+
+#define SUBMIT_JOB_AES_GCM_DEC submit_job_aes_gcm_dec_sse
+#define FLUSH_JOB_AES_GCM_DEC flush_job_aes_gcm_dec_sse
+#define SUBMIT_JOB_AES_GCM_ENC submit_job_aes_gcm_enc_sse
+#define FLUSH_JOB_AES_GCM_ENC flush_job_aes_gcm_enc_sse
+#endif /* NO_GCM */
+
+/* ====================================================================== */
+
+#define SUBMIT_JOB submit_job_sse
+#define FLUSH_JOB flush_job_sse
+#define SUBMIT_JOB_NOCHECK submit_job_nocheck_sse
+#define GET_NEXT_JOB get_next_job_sse
+#define GET_COMPLETED_JOB get_completed_job_sse
+
+#define SUBMIT_JOB_AES128_DEC submit_job_aes128_dec_sse
+#define SUBMIT_JOB_AES192_DEC submit_job_aes192_dec_sse
+#define SUBMIT_JOB_AES256_DEC submit_job_aes256_dec_sse
+#define QUEUE_SIZE queue_size_sse
+
+/* ====================================================================== */
+
+#define SUBMIT_JOB_AES_ENC SUBMIT_JOB_AES_ENC_SSE
+#define FLUSH_JOB_AES_ENC FLUSH_JOB_AES_ENC_SSE
+#define SUBMIT_JOB_AES_DEC SUBMIT_JOB_AES_DEC_SSE
+#define SUBMIT_JOB_HASH SUBMIT_JOB_HASH_SSE
+#define FLUSH_JOB_HASH FLUSH_JOB_HASH_SSE
+
+/* ====================================================================== */
+
+#define AES_CFB_128_ONE aes_cfb_128_one_sse
+
+void aes128_cbc_mac_x4(AES_ARGS *args, uint64_t len);
+
+#define AES128_CBC_MAC aes128_cbc_mac_x4
+
+#define FLUSH_JOB_AES_CCM_AUTH flush_job_aes_ccm_auth_sse
+#define SUBMIT_JOB_AES_CCM_AUTH submit_job_aes_ccm_auth_sse
+
+#define FLUSH_JOB_AES_CMAC_AUTH flush_job_aes_cmac_auth_sse
+#define SUBMIT_JOB_AES_CMAC_AUTH submit_job_aes_cmac_auth_sse
+
+/* ====================================================================== */
+
+/*
+ * Used to decide whether the SHA1/SHA256 SIMD or the SHA-NI out-of-order
+ * scheduler should be called.
+ */
+#define HASH_USE_SHAEXT 1
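+/*
+ * With HASH_USE_SHAEXT defined, init_mb_mgr_sse() below additionally checks
+ * IMB_FEATURE_SHANI at run time and re-initializes the SHA1/SHA224/SHA256
+ * out-of-order managers for the 2-lane SHA-NI schedulers.
+ */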
+
+
+/* ====================================================================== */
+
+/*
+ * GCM submit / flush API for SSE arch
+ */
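+/*
+ * GCM jobs are processed synchronously inside submit (there are no
+ * out-of-order lanes for GCM), so the flush handlers below have nothing
+ * to drain and simply return NULL.
+ */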
+#ifndef NO_GCM
+static JOB_AES_HMAC *
+submit_job_aes_gcm_dec_sse(MB_MGR *state, JOB_AES_HMAC *job)
+{
+ DECLARE_ALIGNED(struct gcm_context_data ctx, 16);
+ (void) state;
+
+ if (16 == job->aes_key_len_in_bytes)
+ AES_GCM_DEC_128(job->aes_dec_key_expanded, &ctx, job->dst,
+ job->src +
+ job->cipher_start_src_offset_in_bytes,
+ job->msg_len_to_cipher_in_bytes,
+ job->iv,
+ job->u.GCM.aad, job->u.GCM.aad_len_in_bytes,
+ job->auth_tag_output,
+ job->auth_tag_output_len_in_bytes);
+ else if (24 == job->aes_key_len_in_bytes)
+ AES_GCM_DEC_192(job->aes_dec_key_expanded, &ctx, job->dst,
+ job->src +
+ job->cipher_start_src_offset_in_bytes,
+ job->msg_len_to_cipher_in_bytes,
+ job->iv,
+ job->u.GCM.aad, job->u.GCM.aad_len_in_bytes,
+ job->auth_tag_output,
+ job->auth_tag_output_len_in_bytes);
+ else /* assume 32 bytes */
+ AES_GCM_DEC_256(job->aes_dec_key_expanded, &ctx, job->dst,
+ job->src +
+ job->cipher_start_src_offset_in_bytes,
+ job->msg_len_to_cipher_in_bytes,
+ job->iv,
+ job->u.GCM.aad, job->u.GCM.aad_len_in_bytes,
+ job->auth_tag_output,
+ job->auth_tag_output_len_in_bytes);
+
+ job->status = STS_COMPLETED;
+ return job;
+}
+
+static JOB_AES_HMAC *
+flush_job_aes_gcm_dec_sse(MB_MGR *state, JOB_AES_HMAC *job)
+{
+ (void) state;
+ (void) job;
+ return NULL;
+}
+
+static JOB_AES_HMAC *
+submit_job_aes_gcm_enc_sse(MB_MGR *state, JOB_AES_HMAC *job)
+{
+ DECLARE_ALIGNED(struct gcm_context_data ctx, 16);
+ (void) state;
+
+ if (16 == job->aes_key_len_in_bytes)
+ AES_GCM_ENC_128(job->aes_enc_key_expanded, &ctx, job->dst,
+ job->src +
+ job->cipher_start_src_offset_in_bytes,
+ job->msg_len_to_cipher_in_bytes, job->iv,
+ job->u.GCM.aad, job->u.GCM.aad_len_in_bytes,
+ job->auth_tag_output,
+ job->auth_tag_output_len_in_bytes);
+ else if (24 == job->aes_key_len_in_bytes)
+ AES_GCM_ENC_192(job->aes_enc_key_expanded, &ctx, job->dst,
+ job->src +
+ job->cipher_start_src_offset_in_bytes,
+ job->msg_len_to_cipher_in_bytes, job->iv,
+ job->u.GCM.aad, job->u.GCM.aad_len_in_bytes,
+ job->auth_tag_output,
+ job->auth_tag_output_len_in_bytes);
+ else /* assume 32 bytes */
+ AES_GCM_ENC_256(job->aes_enc_key_expanded, &ctx, job->dst,
+ job->src +
+ job->cipher_start_src_offset_in_bytes,
+ job->msg_len_to_cipher_in_bytes, job->iv,
+ job->u.GCM.aad, job->u.GCM.aad_len_in_bytes,
+ job->auth_tag_output,
+ job->auth_tag_output_len_in_bytes);
+
+ job->status = STS_COMPLETED;
+ return job;
+}
+
+static JOB_AES_HMAC *
+flush_job_aes_gcm_enc_sse(MB_MGR *state, JOB_AES_HMAC *job)
+{
+ (void) state;
+ (void) job;
+ return NULL;
+}
+#endif /* NO_GCM */
+
+IMB_DLL_LOCAL JOB_AES_HMAC *
+submit_job_aes_cntr_sse(JOB_AES_HMAC *job)
+{
+ if (16 == job->aes_key_len_in_bytes)
+ AES_CNTR_128(job->src + job->cipher_start_src_offset_in_bytes,
+ job->iv,
+ job->aes_enc_key_expanded,
+ job->dst,
+ job->msg_len_to_cipher_in_bytes,
+ job->iv_len_in_bytes);
+ else if (24 == job->aes_key_len_in_bytes)
+ AES_CNTR_192(job->src + job->cipher_start_src_offset_in_bytes,
+ job->iv,
+ job->aes_enc_key_expanded,
+ job->dst,
+ job->msg_len_to_cipher_in_bytes,
+ job->iv_len_in_bytes);
+ else /* assume 32 bytes */
+ AES_CNTR_256(job->src + job->cipher_start_src_offset_in_bytes,
+ job->iv,
+ job->aes_enc_key_expanded,
+ job->dst,
+ job->msg_len_to_cipher_in_bytes,
+ job->iv_len_in_bytes);
+
+ job->status |= STS_COMPLETED_AES;
+ return job;
+}
+
+IMB_DLL_LOCAL JOB_AES_HMAC *
+submit_job_aes_cntr_bit_sse(JOB_AES_HMAC *job)
+{
+ if (16 == job->aes_key_len_in_bytes)
+ aes_cntr_bit_128_sse(job->src +
+ job->cipher_start_src_offset_in_bytes,
+ job->iv,
+ job->aes_enc_key_expanded,
+ job->dst,
+ job->msg_len_to_cipher_in_bits,
+ job->iv_len_in_bytes);
+ else if (24 == job->aes_key_len_in_bytes)
+ aes_cntr_bit_192_sse(job->src +
+ job->cipher_start_src_offset_in_bytes,
+ job->iv,
+ job->aes_enc_key_expanded,
+ job->dst,
+ job->msg_len_to_cipher_in_bits,
+ job->iv_len_in_bytes);
+ else /* assume 32 bytes */
+ aes_cntr_bit_256_sse(job->src +
+ job->cipher_start_src_offset_in_bytes,
+ job->iv,
+ job->aes_enc_key_expanded,
+ job->dst,
+ job->msg_len_to_cipher_in_bits,
+ job->iv_len_in_bytes);
+
+ job->status |= STS_COMPLETED_AES;
+ return job;
+}
+
+/* ====================================================================== */
+
+void
+init_mb_mgr_sse(MB_MGR *state)
+{
+ unsigned int j;
+ uint8_t *p;
+ size_t size;
+
+ state->features = cpu_feature_adjust(state->flags,
+ cpu_feature_detect());
+
+ if (!(state->features & IMB_FEATURE_AESNI)) {
+ init_mb_mgr_sse_no_aesni(state);
+ return;
+ }
+
+ /* Init AES out-of-order fields */
+ memset(state->aes128_ooo.lens, 0xFF,
+ sizeof(state->aes128_ooo.lens));
+ memset(&state->aes128_ooo.lens[0], 0,
+ sizeof(state->aes128_ooo.lens[0]) * 4);
+ memset(state->aes128_ooo.job_in_lane, 0,
+ sizeof(state->aes128_ooo.job_in_lane));
+ state->aes128_ooo.unused_lanes = 0xFF03020100;
+ state->aes128_ooo.num_lanes_inuse = 0;
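+ /*
+ * Note: unused_lanes packs the ids of the free lanes (0xFF acts as the
+ * end marker) and a lens[] entry of 0xFFFF marks a lane that is not
+ * used on this architecture.
+ */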
+
+
+ memset(state->aes192_ooo.lens, 0xFF,
+ sizeof(state->aes192_ooo.lens));
+ memset(&state->aes192_ooo.lens[0], 0,
+ sizeof(state->aes192_ooo.lens[0]) * 4);
+ memset(state->aes192_ooo.job_in_lane, 0,
+ sizeof(state->aes192_ooo.job_in_lane));
+ state->aes192_ooo.unused_lanes = 0xFF03020100;
+ state->aes192_ooo.num_lanes_inuse = 0;
+
+
+ memset(state->aes256_ooo.lens, 0xFF,
+ sizeof(state->aes256_ooo.lens));
+ memset(&state->aes256_ooo.lens[0], 0,
+ sizeof(state->aes256_ooo.lens[0]) * 4);
+ memset(state->aes256_ooo.job_in_lane, 0,
+ sizeof(state->aes256_ooo.job_in_lane));
+ state->aes256_ooo.unused_lanes = 0xFF03020100;
+ state->aes256_ooo.num_lanes_inuse = 0;
+
+
+ /* DOCSIS SEC BPI uses the same settings as AES128 CBC */
+ memset(state->docsis_sec_ooo.lens, 0xFF,
+ sizeof(state->docsis_sec_ooo.lens));
+ memset(&state->docsis_sec_ooo.lens[0], 0,
+ sizeof(state->docsis_sec_ooo.lens[0]) * 4);
+ memset(state->docsis_sec_ooo.job_in_lane, 0,
+ sizeof(state->docsis_sec_ooo.job_in_lane));
+ state->docsis_sec_ooo.unused_lanes = 0xFF03020100;
+ state->docsis_sec_ooo.num_lanes_inuse = 0;
+
+
+ /* Init HMAC/SHA1 out-of-order fields */
+ state->hmac_sha_1_ooo.lens[0] = 0;
+ state->hmac_sha_1_ooo.lens[1] = 0;
+ state->hmac_sha_1_ooo.lens[2] = 0;
+ state->hmac_sha_1_ooo.lens[3] = 0;
+ state->hmac_sha_1_ooo.lens[4] = 0xFFFF;
+ state->hmac_sha_1_ooo.lens[5] = 0xFFFF;
+ state->hmac_sha_1_ooo.lens[6] = 0xFFFF;
+ state->hmac_sha_1_ooo.lens[7] = 0xFFFF;
+ state->hmac_sha_1_ooo.unused_lanes = 0xFF03020100;
+ for (j = 0; j < SSE_NUM_SHA1_LANES; j++) {
+ state->hmac_sha_1_ooo.ldata[j].job_in_lane = NULL;
+ state->hmac_sha_1_ooo.ldata[j].extra_block[64] = 0x80;
+ memset(state->hmac_sha_1_ooo.ldata[j].extra_block + 65,
+ 0x00,
+ 64+7);
+ p = state->hmac_sha_1_ooo.ldata[j].outer_block;
+ memset(p + 5*4 + 1,
+ 0x00,
+ 64 - 5*4 - 1 - 2);
+ p[5*4] = 0x80;
+ p[64-2] = 0x02;
+ p[64-1] = 0xA0;
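+ /* length field = 0x02A0 bits (512-bit opad block + 160-bit digest) */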
+ }
+
+#ifdef HASH_USE_SHAEXT
+ if (state->features & IMB_FEATURE_SHANI) {
+ /* Init HMAC/SHA1 NI out-of-order fields */
+ state->hmac_sha_1_ooo.lens[0] = 0;
+ state->hmac_sha_1_ooo.lens[1] = 0;
+ state->hmac_sha_1_ooo.lens[2] = 0xFFFF;
+ state->hmac_sha_1_ooo.lens[3] = 0xFFFF;
+ state->hmac_sha_1_ooo.lens[4] = 0xFFFF;
+ state->hmac_sha_1_ooo.lens[5] = 0xFFFF;
+ state->hmac_sha_1_ooo.lens[6] = 0xFFFF;
+ state->hmac_sha_1_ooo.lens[7] = 0xFFFF;
+ state->hmac_sha_1_ooo.unused_lanes = 0xFF0100;
+ }
+#endif /* HASH_USE_SHAEXT */
+
+ /* Init HMAC/SHA224 out-of-order fields */
+ state->hmac_sha_224_ooo.lens[0] = 0;
+ state->hmac_sha_224_ooo.lens[1] = 0;
+ state->hmac_sha_224_ooo.lens[2] = 0;
+ state->hmac_sha_224_ooo.lens[3] = 0;
+ state->hmac_sha_224_ooo.lens[4] = 0xFFFF;
+ state->hmac_sha_224_ooo.lens[5] = 0xFFFF;
+ state->hmac_sha_224_ooo.lens[6] = 0xFFFF;
+ state->hmac_sha_224_ooo.lens[7] = 0xFFFF;
+ state->hmac_sha_224_ooo.unused_lanes = 0xFF03020100;
+ for (j = 0; j < SSE_NUM_SHA256_LANES; j++) {
+ state->hmac_sha_224_ooo.ldata[j].job_in_lane = NULL;
+
+ p = state->hmac_sha_224_ooo.ldata[j].extra_block;
+ size = sizeof(state->hmac_sha_224_ooo.ldata[j].extra_block);
+ memset (p, 0x00, size);
+ p[64] = 0x80;
+
+ p = state->hmac_sha_224_ooo.ldata[j].outer_block;
+ size = sizeof(state->hmac_sha_224_ooo.ldata[j].outer_block);
+ memset(p, 0x00, size);
+ p[7*4] = 0x80; /* digest is 7 words long */
+ p[64-2] = 0x02; /* length field = 0x02E0 bits */
+ p[64-1] = 0xE0; /* (512-bit opad block + 224-bit digest) */
+ }
+#ifdef HASH_USE_SHAEXT
+ if (state->features & IMB_FEATURE_SHANI) {
+ /* Init HMAC/SHA224 NI out-of-order fields */
+ state->hmac_sha_224_ooo.lens[0] = 0;
+ state->hmac_sha_224_ooo.lens[1] = 0;
+ state->hmac_sha_224_ooo.lens[2] = 0xFFFF;
+ state->hmac_sha_224_ooo.lens[3] = 0xFFFF;
+ state->hmac_sha_224_ooo.lens[4] = 0xFFFF;
+ state->hmac_sha_224_ooo.lens[5] = 0xFFFF;
+ state->hmac_sha_224_ooo.lens[6] = 0xFFFF;
+ state->hmac_sha_224_ooo.lens[7] = 0xFFFF;
+ state->hmac_sha_224_ooo.unused_lanes = 0xFF0100;
+ }
+#endif /* HASH_USE_SHAEXT */
+
+ /* Init HMAC/SHA_256 out-of-order fields */
+ state->hmac_sha_256_ooo.lens[0] = 0;
+ state->hmac_sha_256_ooo.lens[1] = 0;
+ state->hmac_sha_256_ooo.lens[2] = 0;
+ state->hmac_sha_256_ooo.lens[3] = 0;
+ state->hmac_sha_256_ooo.lens[4] = 0xFFFF;
+ state->hmac_sha_256_ooo.lens[5] = 0xFFFF;
+ state->hmac_sha_256_ooo.lens[6] = 0xFFFF;
+ state->hmac_sha_256_ooo.lens[7] = 0xFFFF;
+ state->hmac_sha_256_ooo.unused_lanes = 0xFF03020100;
+ for (j = 0; j < SSE_NUM_SHA256_LANES; j++) {
+ state->hmac_sha_256_ooo.ldata[j].job_in_lane = NULL;
+ state->hmac_sha_256_ooo.ldata[j].extra_block[64] = 0x80;
+ memset(state->hmac_sha_256_ooo.ldata[j].extra_block + 65,
+ 0x00,
+ 64+7);
+ p = state->hmac_sha_256_ooo.ldata[j].outer_block;
+ memset(p + 8*4 + 1,
+ 0x00,
+ 64 - 8*4 - 1 - 2); /* digest is 8*4 bytes long */
+ p[8*4] = 0x80;
+ p[64-2] = 0x03; /* length field = 0x0300 bits
+ * (512-bit opad block + 256-bit digest) */
+ p[64-1] = 0x00;
+ }
+#ifdef HASH_USE_SHAEXT
+ if (state->features & IMB_FEATURE_SHANI) {
+ /* Init HMAC/SHA256 NI out-of-order fields */
+ state->hmac_sha_256_ooo.lens[0] = 0;
+ state->hmac_sha_256_ooo.lens[1] = 0;
+ state->hmac_sha_256_ooo.lens[2] = 0xFFFF;
+ state->hmac_sha_256_ooo.lens[3] = 0xFFFF;
+ state->hmac_sha_256_ooo.lens[4] = 0xFFFF;
+ state->hmac_sha_256_ooo.lens[5] = 0xFFFF;
+ state->hmac_sha_256_ooo.lens[6] = 0xFFFF;
+ state->hmac_sha_256_ooo.lens[7] = 0xFFFF;
+ state->hmac_sha_256_ooo.unused_lanes = 0xFF0100;
+ }
+#endif /* HASH_USE_SHAEXT */
+
+ /* Init HMAC/SHA384 out-of-order fields */
+ state->hmac_sha_384_ooo.lens[0] = 0;
+ state->hmac_sha_384_ooo.lens[1] = 0;
+ state->hmac_sha_384_ooo.lens[2] = 0xFFFF;
+ state->hmac_sha_384_ooo.lens[3] = 0xFFFF;
+ state->hmac_sha_384_ooo.lens[4] = 0xFFFF;
+ state->hmac_sha_384_ooo.lens[5] = 0xFFFF;
+ state->hmac_sha_384_ooo.lens[6] = 0xFFFF;
+ state->hmac_sha_384_ooo.lens[7] = 0xFFFF;
+ state->hmac_sha_384_ooo.unused_lanes = 0xFF0100;
+ for (j = 0; j < SSE_NUM_SHA512_LANES; j++) {
+ MB_MGR_HMAC_SHA_512_OOO *ctx = &state->hmac_sha_384_ooo;
+
+ ctx->ldata[j].job_in_lane = NULL;
+ ctx->ldata[j].extra_block[SHA_384_BLOCK_SIZE] = 0x80;
+ memset(ctx->ldata[j].extra_block + (SHA_384_BLOCK_SIZE + 1),
+ 0x00, SHA_384_BLOCK_SIZE + 7);
+
+ p = ctx->ldata[j].outer_block;
+ memset(p + SHA384_DIGEST_SIZE_IN_BYTES + 1, 0x00,
+ /* special end point because this length is constant */
+ SHA_384_BLOCK_SIZE -
+ SHA384_DIGEST_SIZE_IN_BYTES - 1 - 2);
+ p[SHA384_DIGEST_SIZE_IN_BYTES] = 0x80; /* mark the end */
+ /*
+ * The HMAC outer block length is fixed: one whole message block
+ * of opad (1024 bits) plus the inner digest (384 bits), i.e.
+ * 1408 bits == 0x0580. The block is converted to big endian
+ * inside the SHA implementation before use.
+ */
+ p[SHA_384_BLOCK_SIZE - 2] = 0x05;
+ p[SHA_384_BLOCK_SIZE - 1] = 0x80;
+ }
+
+ /* Init HMAC/SHA512 out-of-order fields */
+ state->hmac_sha_512_ooo.lens[0] = 0;
+ state->hmac_sha_512_ooo.lens[1] = 0;
+ state->hmac_sha_512_ooo.lens[2] = 0xFFFF;
+ state->hmac_sha_512_ooo.lens[3] = 0xFFFF;
+ state->hmac_sha_512_ooo.lens[4] = 0xFFFF;
+ state->hmac_sha_512_ooo.lens[5] = 0xFFFF;
+ state->hmac_sha_512_ooo.lens[6] = 0xFFFF;
+ state->hmac_sha_512_ooo.lens[7] = 0xFFFF;
+ state->hmac_sha_512_ooo.unused_lanes = 0xFF0100;
+ for (j = 0; j < SSE_NUM_SHA512_LANES; j++) {
+ MB_MGR_HMAC_SHA_512_OOO *ctx = &state->hmac_sha_512_ooo;
+
+ ctx->ldata[j].job_in_lane = NULL;
+ ctx->ldata[j].extra_block[SHA_512_BLOCK_SIZE] = 0x80;
+ memset(ctx->ldata[j].extra_block + (SHA_512_BLOCK_SIZE + 1),
+ 0x00, SHA_512_BLOCK_SIZE + 7);
+
+ p = ctx->ldata[j].outer_block;
+ memset(p + SHA512_DIGEST_SIZE_IN_BYTES + 1, 0x00,
+ /* special end point because this length is constant */
+ SHA_512_BLOCK_SIZE -
+ SHA512_DIGEST_SIZE_IN_BYTES - 1 - 2);
+ p[SHA512_DIGEST_SIZE_IN_BYTES] = 0x80; /* mark the end */
+ /*
+ * The HMAC outer block length is fixed: one whole message block
+ * of opad (1024 bits) plus the inner digest (512 bits), i.e.
+ * 1536 bits == 0x0600. The block is converted to big endian
+ * inside the SHA implementation before use.
+ */
+ p[SHA_512_BLOCK_SIZE - 2] = 0x06;
+ p[SHA_512_BLOCK_SIZE - 1] = 0x00;
+ }
+
+ /* Init HMAC/MD5 out-of-order fields */
+ state->hmac_md5_ooo.lens[0] = 0;
+ state->hmac_md5_ooo.lens[1] = 0;
+ state->hmac_md5_ooo.lens[2] = 0;
+ state->hmac_md5_ooo.lens[3] = 0;
+ state->hmac_md5_ooo.lens[4] = 0;
+ state->hmac_md5_ooo.lens[5] = 0;
+ state->hmac_md5_ooo.lens[6] = 0;
+ state->hmac_md5_ooo.lens[7] = 0;
+ state->hmac_md5_ooo.lens[8] = 0xFFFF;
+ state->hmac_md5_ooo.lens[9] = 0xFFFF;
+ state->hmac_md5_ooo.lens[10] = 0xFFFF;
+ state->hmac_md5_ooo.lens[11] = 0xFFFF;
+ state->hmac_md5_ooo.lens[12] = 0xFFFF;
+ state->hmac_md5_ooo.lens[13] = 0xFFFF;
+ state->hmac_md5_ooo.lens[14] = 0xFFFF;
+ state->hmac_md5_ooo.lens[15] = 0xFFFF;
+ state->hmac_md5_ooo.unused_lanes = 0xF76543210;
+ for (j = 0; j < SSE_NUM_MD5_LANES; j++) {
+ state->hmac_md5_ooo.ldata[j].job_in_lane = NULL;
+
+ p = state->hmac_md5_ooo.ldata[j].extra_block;
+ size = sizeof(state->hmac_md5_ooo.ldata[j].extra_block);
+ memset (p, 0x00, size);
+ p[64] = 0x80;
+
+ p = state->hmac_md5_ooo.ldata[j].outer_block;
+ size = sizeof(state->hmac_md5_ooo.ldata[j].outer_block);
+ memset(p, 0x00, size);
+ p[4*4] = 0x80;
+ p[64-7] = 0x02;
+ p[64-8] = 0x80;
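+ /* length field = 0x0280 bits stored little endian
+ * (512-bit opad block + 128-bit MD5 digest) */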
+ }
+
+ /* Init AES/XCBC OOO fields */
+ state->aes_xcbc_ooo.lens[0] = 0;
+ state->aes_xcbc_ooo.lens[1] = 0;
+ state->aes_xcbc_ooo.lens[2] = 0;
+ state->aes_xcbc_ooo.lens[3] = 0;
+ state->aes_xcbc_ooo.lens[4] = 0xFFFF;
+ state->aes_xcbc_ooo.lens[5] = 0xFFFF;
+ state->aes_xcbc_ooo.lens[6] = 0xFFFF;
+ state->aes_xcbc_ooo.lens[7] = 0xFFFF;
+ state->aes_xcbc_ooo.unused_lanes = 0xFF03020100;
+ for (j = 0; j < 4; j++) {
+ state->aes_xcbc_ooo.ldata[j].job_in_lane = NULL;
+ state->aes_xcbc_ooo.ldata[j].final_block[16] = 0x80;
+ memset(state->aes_xcbc_ooo.ldata[j].final_block + 17, 0x00, 15);
+ }
+
+ /* Init AES-CCM auth out-of-order fields */
+ for (j = 0; j < 4; j++) {
+ state->aes_ccm_ooo.init_done[j] = 0;
+ state->aes_ccm_ooo.lens[j] = 0;
+ state->aes_ccm_ooo.job_in_lane[j] = NULL;
+ }
+ for (; j < 8; j++)
+ state->aes_ccm_ooo.lens[j] = 0xFFFF;
+
+ state->aes_ccm_ooo.unused_lanes = 0xF3210;
+
+ /* Init AES-CMAC auth out-of-order fields */
+ state->aes_cmac_ooo.lens[0] = 0;
+ state->aes_cmac_ooo.lens[1] = 0;
+ state->aes_cmac_ooo.lens[2] = 0;
+ state->aes_cmac_ooo.lens[3] = 0;
+ state->aes_cmac_ooo.lens[4] = 0xFFFF;
+ state->aes_cmac_ooo.lens[5] = 0xFFFF;
+ state->aes_cmac_ooo.lens[6] = 0xFFFF;
+ state->aes_cmac_ooo.lens[7] = 0xFFFF;
+ for (j = 0; j < 4; j++) {
+ state->aes_cmac_ooo.init_done[j] = 0;
+ state->aes_cmac_ooo.job_in_lane[j] = NULL;
+ }
+ state->aes_cmac_ooo.unused_lanes = 0xF3210;
+
+ /* Init "in order" components */
+ state->next_job = 0;
+ state->earliest_job = -1;
+
+ /* set SSE handlers */
+ state->get_next_job = get_next_job_sse;
+ state->submit_job = submit_job_sse;
+ state->submit_job_nocheck = submit_job_nocheck_sse;
+ state->get_completed_job = get_completed_job_sse;
+ state->flush_job = flush_job_sse;
+ state->queue_size = queue_size_sse;
+ state->keyexp_128 = aes_keyexp_128_sse;
+ state->keyexp_192 = aes_keyexp_192_sse;
+ state->keyexp_256 = aes_keyexp_256_sse;
+ state->cmac_subkey_gen_128 = aes_cmac_subkey_gen_sse;
+ state->xcbc_keyexp = aes_xcbc_expand_key_sse;
+ state->des_key_sched = des_key_schedule;
+ state->sha1_one_block = sha1_one_block_sse;
+ state->sha1 = sha1_sse;
+ state->sha224_one_block = sha224_one_block_sse;
+ state->sha224 = sha224_sse;
+ state->sha256_one_block = sha256_one_block_sse;
+ state->sha256 = sha256_sse;
+ state->sha384_one_block = sha384_one_block_sse;
+ state->sha384 = sha384_sse;
+ state->sha512_one_block = sha512_one_block_sse;
+ state->sha512 = sha512_sse;
+ state->md5_one_block = md5_one_block_sse;
+ state->aes128_cfb_one = aes_cfb_128_one_sse;
+
+ state->eea3_1_buffer = zuc_eea3_1_buffer_sse;
+ state->eea3_4_buffer = zuc_eea3_4_buffer_sse;
+ state->eea3_n_buffer = zuc_eea3_n_buffer_sse;
+ state->eia3_1_buffer = zuc_eia3_1_buffer_sse;
+
+ state->f8_1_buffer = kasumi_f8_1_buffer_sse;
+ state->f8_1_buffer_bit = kasumi_f8_1_buffer_bit_sse;
+ state->f8_2_buffer = kasumi_f8_2_buffer_sse;
+ state->f8_3_buffer = kasumi_f8_3_buffer_sse;
+ state->f8_4_buffer = kasumi_f8_4_buffer_sse;
+ state->f8_n_buffer = kasumi_f8_n_buffer_sse;
+ state->f9_1_buffer = kasumi_f9_1_buffer_sse;
+ state->f9_1_buffer_user = kasumi_f9_1_buffer_user_sse;
+ state->kasumi_init_f8_key_sched = kasumi_init_f8_key_sched_sse;
+ state->kasumi_init_f9_key_sched = kasumi_init_f9_key_sched_sse;
+ state->kasumi_key_sched_size = kasumi_key_sched_size_sse;
+
+ state->snow3g_f8_1_buffer_bit = snow3g_f8_1_buffer_bit_sse;
+ state->snow3g_f8_1_buffer = snow3g_f8_1_buffer_sse;
+ state->snow3g_f8_2_buffer = snow3g_f8_2_buffer_sse;
+ state->snow3g_f8_4_buffer = snow3g_f8_4_buffer_sse;
+ state->snow3g_f8_8_buffer = snow3g_f8_8_buffer_sse;
+ state->snow3g_f8_n_buffer = snow3g_f8_n_buffer_sse;
+ state->snow3g_f8_8_buffer_multikey = snow3g_f8_8_buffer_multikey_sse;
+ state->snow3g_f8_n_buffer_multikey = snow3g_f8_n_buffer_multikey_sse;
+ state->snow3g_f9_1_buffer = snow3g_f9_1_buffer_sse;
+ state->snow3g_init_key_sched = snow3g_init_key_sched_sse;
+ state->snow3g_key_sched_size = snow3g_key_sched_size_sse;
+
+#ifndef NO_GCM
+ state->gcm128_enc = aes_gcm_enc_128_sse;
+ state->gcm192_enc = aes_gcm_enc_192_sse;
+ state->gcm256_enc = aes_gcm_enc_256_sse;
+ state->gcm128_dec = aes_gcm_dec_128_sse;
+ state->gcm192_dec = aes_gcm_dec_192_sse;
+ state->gcm256_dec = aes_gcm_dec_256_sse;
+ state->gcm128_init = aes_gcm_init_128_sse;
+ state->gcm192_init = aes_gcm_init_192_sse;
+ state->gcm256_init = aes_gcm_init_256_sse;
+ state->gcm128_enc_update = aes_gcm_enc_128_update_sse;
+ state->gcm192_enc_update = aes_gcm_enc_192_update_sse;
+ state->gcm256_enc_update = aes_gcm_enc_256_update_sse;
+ state->gcm128_dec_update = aes_gcm_dec_128_update_sse;
+ state->gcm192_dec_update = aes_gcm_dec_192_update_sse;
+ state->gcm256_dec_update = aes_gcm_dec_256_update_sse;
+ state->gcm128_enc_finalize = aes_gcm_enc_128_finalize_sse;
+ state->gcm192_enc_finalize = aes_gcm_enc_192_finalize_sse;
+ state->gcm256_enc_finalize = aes_gcm_enc_256_finalize_sse;
+ state->gcm128_dec_finalize = aes_gcm_dec_128_finalize_sse;
+ state->gcm192_dec_finalize = aes_gcm_dec_192_finalize_sse;
+ state->gcm256_dec_finalize = aes_gcm_dec_256_finalize_sse;
+ state->gcm128_precomp = aes_gcm_precomp_128_sse;
+ state->gcm192_precomp = aes_gcm_precomp_192_sse;
+ state->gcm256_precomp = aes_gcm_precomp_256_sse;
+ state->gcm128_pre = aes_gcm_pre_128_sse;
+ state->gcm192_pre = aes_gcm_pre_192_sse;
+ state->gcm256_pre = aes_gcm_pre_256_sse;
+#endif
+}
+
+#include "mb_mgr_code.h"
diff --git a/src/spdk/intel-ipsec-mb/sse/md5_x4x2_sse.asm b/src/spdk/intel-ipsec-mb/sse/md5_x4x2_sse.asm
new file mode 100644
index 000000000..581e3fade
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/sse/md5_x4x2_sse.asm
@@ -0,0 +1,787 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+;; code to compute 8 MD5 digests in parallel ("octal" MD5) using SSE
+
+;; Stack must be aligned to 16 bytes before call
+;; Windows clobbers: rax rbx rdx rsi rdi r8 r9 r10 r11 r12 r13 r14 r15
+;; Windows preserves: rcx rbp
+;;
+;; Linux clobbers: rax rbx rcx rdx rsi r8 r9 r10 r11 r12 r13 r14 r15
+;; Linux preserves: rdi rbp
+;;
+;; clobbers xmm0-15
+
+%include "include/os.asm"
+%include "mb_mgr_datastruct.asm"
+
+section .data align=64
+default rel
+
+align 64
+MKGLOBAL(MD5_TABLE,data,internal)
+MD5_TABLE:
+ dd 0xd76aa478, 0xd76aa478, 0xd76aa478, 0xd76aa478
+ dd 0xe8c7b756, 0xe8c7b756, 0xe8c7b756, 0xe8c7b756
+ dd 0x242070db, 0x242070db, 0x242070db, 0x242070db
+ dd 0xc1bdceee, 0xc1bdceee, 0xc1bdceee, 0xc1bdceee
+ dd 0xf57c0faf, 0xf57c0faf, 0xf57c0faf, 0xf57c0faf
+ dd 0x4787c62a, 0x4787c62a, 0x4787c62a, 0x4787c62a
+ dd 0xa8304613, 0xa8304613, 0xa8304613, 0xa8304613
+ dd 0xfd469501, 0xfd469501, 0xfd469501, 0xfd469501
+ dd 0x698098d8, 0x698098d8, 0x698098d8, 0x698098d8
+ dd 0x8b44f7af, 0x8b44f7af, 0x8b44f7af, 0x8b44f7af
+ dd 0xffff5bb1, 0xffff5bb1, 0xffff5bb1, 0xffff5bb1
+ dd 0x895cd7be, 0x895cd7be, 0x895cd7be, 0x895cd7be
+ dd 0x6b901122, 0x6b901122, 0x6b901122, 0x6b901122
+ dd 0xfd987193, 0xfd987193, 0xfd987193, 0xfd987193
+ dd 0xa679438e, 0xa679438e, 0xa679438e, 0xa679438e
+ dd 0x49b40821, 0x49b40821, 0x49b40821, 0x49b40821
+ dd 0xf61e2562, 0xf61e2562, 0xf61e2562, 0xf61e2562
+ dd 0xc040b340, 0xc040b340, 0xc040b340, 0xc040b340
+ dd 0x265e5a51, 0x265e5a51, 0x265e5a51, 0x265e5a51
+ dd 0xe9b6c7aa, 0xe9b6c7aa, 0xe9b6c7aa, 0xe9b6c7aa
+ dd 0xd62f105d, 0xd62f105d, 0xd62f105d, 0xd62f105d
+ dd 0x02441453, 0x02441453, 0x02441453, 0x02441453
+ dd 0xd8a1e681, 0xd8a1e681, 0xd8a1e681, 0xd8a1e681
+ dd 0xe7d3fbc8, 0xe7d3fbc8, 0xe7d3fbc8, 0xe7d3fbc8
+ dd 0x21e1cde6, 0x21e1cde6, 0x21e1cde6, 0x21e1cde6
+ dd 0xc33707d6, 0xc33707d6, 0xc33707d6, 0xc33707d6
+ dd 0xf4d50d87, 0xf4d50d87, 0xf4d50d87, 0xf4d50d87
+ dd 0x455a14ed, 0x455a14ed, 0x455a14ed, 0x455a14ed
+ dd 0xa9e3e905, 0xa9e3e905, 0xa9e3e905, 0xa9e3e905
+ dd 0xfcefa3f8, 0xfcefa3f8, 0xfcefa3f8, 0xfcefa3f8
+ dd 0x676f02d9, 0x676f02d9, 0x676f02d9, 0x676f02d9
+ dd 0x8d2a4c8a, 0x8d2a4c8a, 0x8d2a4c8a, 0x8d2a4c8a
+ dd 0xfffa3942, 0xfffa3942, 0xfffa3942, 0xfffa3942
+ dd 0x8771f681, 0x8771f681, 0x8771f681, 0x8771f681
+ dd 0x6d9d6122, 0x6d9d6122, 0x6d9d6122, 0x6d9d6122
+ dd 0xfde5380c, 0xfde5380c, 0xfde5380c, 0xfde5380c
+ dd 0xa4beea44, 0xa4beea44, 0xa4beea44, 0xa4beea44
+ dd 0x4bdecfa9, 0x4bdecfa9, 0x4bdecfa9, 0x4bdecfa9
+ dd 0xf6bb4b60, 0xf6bb4b60, 0xf6bb4b60, 0xf6bb4b60
+ dd 0xbebfbc70, 0xbebfbc70, 0xbebfbc70, 0xbebfbc70
+ dd 0x289b7ec6, 0x289b7ec6, 0x289b7ec6, 0x289b7ec6
+ dd 0xeaa127fa, 0xeaa127fa, 0xeaa127fa, 0xeaa127fa
+ dd 0xd4ef3085, 0xd4ef3085, 0xd4ef3085, 0xd4ef3085
+ dd 0x04881d05, 0x04881d05, 0x04881d05, 0x04881d05
+ dd 0xd9d4d039, 0xd9d4d039, 0xd9d4d039, 0xd9d4d039
+ dd 0xe6db99e5, 0xe6db99e5, 0xe6db99e5, 0xe6db99e5
+ dd 0x1fa27cf8, 0x1fa27cf8, 0x1fa27cf8, 0x1fa27cf8
+ dd 0xc4ac5665, 0xc4ac5665, 0xc4ac5665, 0xc4ac5665
+ dd 0xf4292244, 0xf4292244, 0xf4292244, 0xf4292244
+ dd 0x432aff97, 0x432aff97, 0x432aff97, 0x432aff97
+ dd 0xab9423a7, 0xab9423a7, 0xab9423a7, 0xab9423a7
+ dd 0xfc93a039, 0xfc93a039, 0xfc93a039, 0xfc93a039
+ dd 0x655b59c3, 0x655b59c3, 0x655b59c3, 0x655b59c3
+ dd 0x8f0ccc92, 0x8f0ccc92, 0x8f0ccc92, 0x8f0ccc92
+ dd 0xffeff47d, 0xffeff47d, 0xffeff47d, 0xffeff47d
+ dd 0x85845dd1, 0x85845dd1, 0x85845dd1, 0x85845dd1
+ dd 0x6fa87e4f, 0x6fa87e4f, 0x6fa87e4f, 0x6fa87e4f
+ dd 0xfe2ce6e0, 0xfe2ce6e0, 0xfe2ce6e0, 0xfe2ce6e0
+ dd 0xa3014314, 0xa3014314, 0xa3014314, 0xa3014314
+ dd 0x4e0811a1, 0x4e0811a1, 0x4e0811a1, 0x4e0811a1
+ dd 0xf7537e82, 0xf7537e82, 0xf7537e82, 0xf7537e82
+ dd 0xbd3af235, 0xbd3af235, 0xbd3af235, 0xbd3af235
+ dd 0x2ad7d2bb, 0x2ad7d2bb, 0x2ad7d2bb, 0x2ad7d2bb
+ dd 0xeb86d391, 0xeb86d391, 0xeb86d391, 0xeb86d391
+
+ONES:
+ dd 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff
+
+section .text
+
+%ifdef LINUX
+;; Linux Registers
+%define arg1 rdi
+%define arg2 rsi
+%define mem1 rcx
+%define mem2 rdx
+%else
+%define arg1 rcx
+%define arg2 rdx
+%define mem1 rdi
+%define mem2 rsi
+%endif
+
+;; rbp is not clobbered
+
+%define inp0 r8
+%define inp1 r9
+%define inp2 r10
+%define inp3 r11
+%define inp4 r12
+%define inp5 r13
+%define inp6 r14
+%define inp7 r15
+
+%define TBL rax
+%define IDX rbx
+
+%define A xmm0
+%define B xmm1
+%define C xmm2
+%define D xmm3
+%define E xmm4 ; tmp
+%define F xmm5 ; tmp
+
+%define A2 xmm6
+%define B2 xmm7
+%define C2 xmm8
+%define D2 xmm9
+
+
+%define FUN E
+%define TMP F
+%define FUN2 xmm10
+%define TMP2 xmm11
+
+%define T0 xmm10
+%define T1 xmm11
+%define T2 xmm12
+%define T3 xmm13
+%define T4 xmm14
+%define T5 xmm15
+
+; Stack Layout
+;
+; 470 DD2
+; 460 CC2
+; 450 BB2
+; 440 AA2
+; 430 DD
+; 420 CC
+; 410 BB
+; 400 AA
+;
+; 3F0 data2[15] for lanes 7...4 \
+; ... \
+; 300 data2[0] for lanes 7...4 \
+; 2F0 data2[15] for lanes 3...0 > mem block 2
+; ... /
+; 210 data2[1] for lanes 3...0 /
+; 200 data2[0] for lanes 3...0 /
+;
+; 1F0 data1[15] for lanes 7...4 \
+; ... \
+; 100 data1[0] for lanes 7...4 \
+; F0 data1[15] for lanes 3...0 > mem block 1
+; ... /
+; 10 data1[1] for lanes 3...0 /
+; 0 data1[0] for lanes 3...0 /
+
+; stack size must be an odd multiple of 8 bytes
+struc STACK
+_DATA: reso 2*2*16 ; 2 blocks * 2 sets of lanes * 16 regs
+_DIGEST: reso 8 ; stores AA-DD, AA2-DD2
+ resb 8 ; for alignment
+endstruc
+%define STACK_SIZE STACK_size
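+;; STACK_size = (2*2*16 + 8)*16 + 8 = 1160 bytes = 145*8, i.e. an odd
+;; multiple of 8: together with the return address pushed by the call it
+;; keeps rsp 16-byte aligned for the movdqa accesses below.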
+
+%define AA rsp + _DIGEST + 16*0
+%define BB rsp + _DIGEST + 16*1
+%define CC rsp + _DIGEST + 16*2
+%define DD rsp + _DIGEST + 16*3
+%define AA2 rsp + _DIGEST + 16*4
+%define BB2 rsp + _DIGEST + 16*5
+%define CC2 rsp + _DIGEST + 16*6
+%define DD2 rsp + _DIGEST + 16*7
+
+;;
+;; MD5 left rotations (number of bits)
+;;
+rot11 equ 7
+rot12 equ 12
+rot13 equ 17
+rot14 equ 22
+rot21 equ 5
+rot22 equ 9
+rot23 equ 14
+rot24 equ 20
+rot31 equ 4
+rot32 equ 11
+rot33 equ 16
+rot34 equ 23
+rot41 equ 6
+rot42 equ 10
+rot43 equ 15
+rot44 equ 21
+
+; transpose r0, r1, r2, r3, t0, t1
+; "transpose" data in {r0..r3} using temps {t0..t3}
+; Input looks like: {r0 r1 r2 r3}
+; r0 = {a3 a2 a1 a0}
+; r1 = {b3 b2 b1 b0}
+; r2 = {c3 c2 c1 c0}
+; r3 = {d3 d2 d1 d0}
+;
+; output looks like: {t0 r1 r0 r3}
+; t0 = {d0 c0 b0 a0}
+; r1 = {d1 c1 b1 a1}
+; r0 = {d2 c2 b2 a2}
+; r3 = {d3 c3 b3 a3}
+;
+%macro TRANSPOSE 6
+%define %%r0 %1
+%define %%r1 %2
+%define %%r2 %3
+%define %%r3 %4
+%define %%t0 %5
+%define %%t1 %6
+ movdqa %%t0, %%r0
+ shufps %%t0, %%r1, 0x44 ; t0 = {b1 b0 a1 a0}
+ shufps %%r0, %%r1, 0xEE ; r0 = {b3 b2 a3 a2}
+
+ movdqa %%t1, %%r2
+ shufps %%t1, %%r3, 0x44 ; t1 = {d1 d0 c1 c0}
+ shufps %%r2, %%r3, 0xEE ; r2 = {d3 d2 c3 c2}
+
+ movdqa %%r1, %%t0
+ shufps %%r1, %%t1, 0xDD ; r1 = {d1 c1 b1 a1}
+
+ movdqa %%r3, %%r0
+ shufps %%r3, %%r2, 0xDD ; r3 = {d3 c3 b3 a3}
+
+ shufps %%r0, %%r2, 0x88 ; r0 = {d2 c2 b2 a2}
+ shufps %%t0, %%t1, 0x88 ; t0 = {d0 c0 b0 a0}
+%endmacro
+
+;;
+;; Magic functions defined in RFC 1321
+;;
+; macro MAGIC_F F,X,Y,Z ;; F = ((Z) ^ ((X) & ((Y) ^ (Z))))
+%macro MAGIC_F 4
+%define %%F %1
+%define %%X %2
+%define %%Y %3
+%define %%Z %4
+ movdqa %%F,%%Z
+ pxor %%F,%%Y
+ pand %%F,%%X
+ pxor %%F,%%Z
+%endmacro
+
+; macro MAGIC_G F,X,Y,Z ;; F = F((Z),(X),(Y))
+%macro MAGIC_G 4
+%define %%F %1
+%define %%X %2
+%define %%Y %3
+%define %%Z %4
+ MAGIC_F %%F,%%Z,%%X,%%Y
+%endmacro
+
+; macro MAGIC_H F,X,Y,Z ;; F = ((X) ^ (Y) ^ (Z))
+%macro MAGIC_H 4
+%define %%F %1
+%define %%X %2
+%define %%Y %3
+%define %%Z %4
+ movdqa %%F,%%Z
+ pxor %%F,%%Y
+ pxor %%F,%%X
+%endmacro
+
+; macro MAGIC_I F,X,Y,Z ;; F = ((Y) ^ ((X) | ~(Z)))
+%macro MAGIC_I 4
+%define %%F %1
+%define %%X %2
+%define %%Y %3
+%define %%Z %4
+ movdqa %%F,%%Z
+ pxor %%F,[rel ONES] ; pnot %%F
+ por %%F,%%X
+ pxor %%F,%%Y
+%endmacro
+
+; PROLD reg, imm, tmp
+%macro PROLD 3
+%define %%reg %1
+%define %%imm %2
+%define %%tmp %3
+ movdqa %%tmp, %%reg
+ psrld %%tmp, (32-%%imm)
+ pslld %%reg, %%imm
+ por %%reg, %%tmp
+%endmacro
+
+;;
+;; single MD5 step
+;;
+;; A = B +ROL32((A +MAGIC(B,C,D) +data +const), nrot)
+;;
+; macro MD5_STEP1 MAGIC_FUN, A,B,C,D, A2,B2,C2,D2, FUN, TMP, data, MD5const, nrot
+%macro MD5_STEP1 14
+%define %%MAGIC_FUN %1
+%define %%A %2
+%define %%B %3
+%define %%C %4
+%define %%D %5
+%define %%A2 %6
+%define %%B2 %7
+%define %%C2 %8
+%define %%D2 %9
+%define %%FUN %10
+%define %%TMP %11
+%define %%data %12
+%define %%MD5const %13
+%define %%nrot %14
+
+ paddd %%A, %%MD5const
+ paddd %%A2, %%MD5const
+ paddd %%A, [%%data]
+ paddd %%A2, [%%data + 16*16]
+ %%MAGIC_FUN %%FUN, %%B,%%C,%%D
+ paddd %%A, %%FUN
+ %%MAGIC_FUN %%FUN, %%B2,%%C2,%%D2
+ paddd %%A2, %%FUN
+ PROLD %%A,%%nrot, %%TMP
+ PROLD %%A2,%%nrot, %%TMP
+ paddd %%A, %%B
+ paddd %%A2, %%B2
+%endmacro
+
+;;
+;; single MD5 step
+;;
+;; A = B +ROL32((A +MAGIC(B,C,D) +data +const), nrot)
+;;
+; macro MD5_STEP MAGIC_FUN, A,B,C,D, A2,B2,C2,D2, FUN, TMP, FUN2, TMP2, data,
+; MD5const, nrot
+%macro MD5_STEP 16
+%define %%MAGIC_FUN %1
+%define %%A %2
+%define %%B %3
+%define %%C %4
+%define %%D %5
+%define %%A2 %6
+%define %%B2 %7
+%define %%C2 %8
+%define %%D2 %9
+%define %%FUN %10
+%define %%TMP %11
+%define %%FUN2 %12
+%define %%TMP2 %13
+%define %%data %14
+%define %%MD5const %15
+%define %%nrot %16
+
+ paddd %%A, %%MD5const
+ paddd %%A2, %%MD5const
+ paddd %%A, [%%data]
+ paddd %%A2, [%%data + 16*16]
+ %%MAGIC_FUN %%FUN, %%B,%%C,%%D
+ %%MAGIC_FUN %%FUN2, %%B2,%%C2,%%D2
+ paddd %%A, %%FUN
+ paddd %%A2, %%FUN2
+ PROLD %%A,%%nrot, %%TMP
+ PROLD %%A2,%%nrot, %%TMP2
+ paddd %%A, %%B
+ paddd %%A2, %%B2
+%endmacro
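+
+;; MD5_STEP1 reuses a single FUN/TMP pair for both lane sets, while MD5_STEP
+;; uses an independent FUN2/TMP2 pair (aliased to T0/T1) and is therefore
+;; only used for the last block, where no data loads are interleaved.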
+
+; void md5_x4x2_sse(MD5_ARGS *args, UINT64 num_blks)
+; arg 1 : pointer to MD5_ARGS structure
+; arg 2 : number of blocks (>=1)
+;
+align 32
+MKGLOBAL(md5_x4x2_sse,function,internal)
+md5_x4x2_sse:
+
+ sub rsp, STACK_SIZE
+
+ ;; Each transposed digest row is split into 2 halves:
+ ;; lanes 0-3 are kept in A..D and lanes 4-7 in A2..D2
+ ;; Initialize digests
+ movdqa A,[arg1 + 0*16 + 0*MD5_DIGEST_ROW_SIZE]
+ movdqa B,[arg1 + 0*16 + 1*MD5_DIGEST_ROW_SIZE]
+ movdqa C,[arg1 + 0*16 + 2*MD5_DIGEST_ROW_SIZE]
+ movdqa D,[arg1 + 0*16 + 3*MD5_DIGEST_ROW_SIZE]
+
+ ;; Initialize digests
+ movdqa A2,[arg1 + 1*16 + 0*MD5_DIGEST_ROW_SIZE]
+ movdqa B2,[arg1 + 1*16 + 1*MD5_DIGEST_ROW_SIZE]
+ movdqa C2,[arg1 + 1*16 + 2*MD5_DIGEST_ROW_SIZE]
+ movdqa D2,[arg1 + 1*16 + 3*MD5_DIGEST_ROW_SIZE]
+
+ lea TBL, [rel MD5_TABLE]
+
+ ;; load input pointers
+ mov inp0,[arg1+_data_ptr_md5 +0*PTR_SZ]
+ mov inp1,[arg1+_data_ptr_md5 +1*PTR_SZ]
+ mov inp2,[arg1+_data_ptr_md5 +2*PTR_SZ]
+ mov inp3,[arg1+_data_ptr_md5 +3*PTR_SZ]
+ mov inp4,[arg1+_data_ptr_md5 +4*PTR_SZ]
+ mov inp5,[arg1+_data_ptr_md5 +5*PTR_SZ]
+ mov inp6,[arg1+_data_ptr_md5 +6*PTR_SZ]
+ mov inp7,[arg1+_data_ptr_md5 +7*PTR_SZ]
+ xor IDX, IDX
+
+ ; Make ping-pong pointers to the two memory blocks
+ mov mem1, rsp
+ lea mem2, [rsp + 16*16*2]
+
+
+;; Load first block of data and save back to stack
+%assign I 0
+%rep 4
+ movdqu T2,[inp0+IDX+I*16]
+ movdqu T1,[inp1+IDX+I*16]
+ movdqu T4,[inp2+IDX+I*16]
+ movdqu T3,[inp3+IDX+I*16]
+ TRANSPOSE T2, T1, T4, T3, T0, T5
+ movdqa [mem1+(I*4+0)*16],T0
+ movdqa [mem1+(I*4+1)*16],T1
+ movdqa [mem1+(I*4+2)*16],T2
+ movdqa [mem1+(I*4+3)*16],T3
+
+ movdqu T2,[inp4+IDX+I*16]
+ movdqu T1,[inp5+IDX+I*16]
+ movdqu T4,[inp6+IDX+I*16]
+ movdqu T3,[inp7+IDX+I*16]
+ TRANSPOSE T2, T1, T4, T3, T0, T5
+ movdqa [mem1+(I*4+0)*16 + 16*16],T0
+ movdqa [mem1+(I*4+1)*16 + 16*16],T1
+ movdqa [mem1+(I*4+2)*16 + 16*16],T2
+ movdqa [mem1+(I*4+3)*16 + 16*16],T3
+%assign I (I+1)
+%endrep
+
+lloop:
+ ; save old digests
+ movdqa [AA], A
+ movdqa [BB], B
+ movdqa [CC], C
+ movdqa [DD], D
+ ; save old digests
+ movdqa [AA2], A2
+ movdqa [BB2], B2
+ movdqa [CC2], C2
+ movdqa [DD2], D2
+
+ add IDX, 4*16
+ sub arg2, 1
+ je lastblock
+
+ MD5_STEP1 MAGIC_F, A,B,C,D, A2,B2,C2,D2, FUN,TMP, mem1 + 0*16, [TBL+ 0*16], rot11
+ MD5_STEP1 MAGIC_F, D,A,B,C, D2,A2,B2,C2, FUN,TMP, mem1 + 1*16, [TBL+ 1*16], rot12
+ MD5_STEP1 MAGIC_F, C,D,A,B, C2,D2,A2,B2, FUN,TMP, mem1 + 2*16, [TBL+ 2*16], rot13
+ MD5_STEP1 MAGIC_F, B,C,D,A, B2,C2,D2,A2, FUN,TMP, mem1 + 3*16, [TBL+ 3*16], rot14
+ MD5_STEP1 MAGIC_F, A,B,C,D, A2,B2,C2,D2, FUN,TMP, mem1 + 4*16, [TBL+ 4*16], rot11
+ MD5_STEP1 MAGIC_F, D,A,B,C, D2,A2,B2,C2, FUN,TMP, mem1 + 5*16, [TBL+ 5*16], rot12
+ MD5_STEP1 MAGIC_F, C,D,A,B, C2,D2,A2,B2, FUN,TMP, mem1 + 6*16, [TBL+ 6*16], rot13
+ MD5_STEP1 MAGIC_F, B,C,D,A, B2,C2,D2,A2, FUN,TMP, mem1 + 7*16, [TBL+ 7*16], rot14
+
+%assign I 0
+ movdqu T2,[inp0+IDX+I*16]
+ movdqu T1,[inp1+IDX+I*16]
+ movdqu T4,[inp2+IDX+I*16]
+ movdqu T3,[inp3+IDX+I*16]
+ TRANSPOSE T2, T1, T4, T3, T0, T5
+ movdqa [mem2+(I*4+0)*16],T0
+ movdqa [mem2+(I*4+1)*16],T1
+ movdqa [mem2+(I*4+2)*16],T2
+ movdqa [mem2+(I*4+3)*16],T3
+
+ MD5_STEP1 MAGIC_F, A,B,C,D, A2,B2,C2,D2, FUN,TMP, mem1 + 8*16, [TBL+ 8*16], rot11
+ MD5_STEP1 MAGIC_F, D,A,B,C, D2,A2,B2,C2, FUN,TMP, mem1 + 9*16, [TBL+ 9*16], rot12
+ MD5_STEP1 MAGIC_F, C,D,A,B, C2,D2,A2,B2, FUN,TMP, mem1 +10*16, [TBL+10*16], rot13
+ MD5_STEP1 MAGIC_F, B,C,D,A, B2,C2,D2,A2, FUN,TMP, mem1 +11*16, [TBL+11*16], rot14
+ MD5_STEP1 MAGIC_F, A,B,C,D, A2,B2,C2,D2, FUN,TMP, mem1 +12*16, [TBL+12*16], rot11
+ MD5_STEP1 MAGIC_F, D,A,B,C, D2,A2,B2,C2, FUN,TMP, mem1 +13*16, [TBL+13*16], rot12
+ MD5_STEP1 MAGIC_F, C,D,A,B, C2,D2,A2,B2, FUN,TMP, mem1 +14*16, [TBL+14*16], rot13
+ MD5_STEP1 MAGIC_F, B,C,D,A, B2,C2,D2,A2, FUN,TMP, mem1 +15*16, [TBL+15*16], rot14
+
+
+ movdqu T2,[inp4+IDX+I*16]
+ movdqu T1,[inp5+IDX+I*16]
+ movdqu T4,[inp6+IDX+I*16]
+ movdqu T3,[inp7+IDX+I*16]
+ TRANSPOSE T2, T1, T4, T3, T0, T5
+ movdqa [mem2+(I*4+0)*16 + 16*16],T0
+ movdqa [mem2+(I*4+1)*16 + 16*16],T1
+ movdqa [mem2+(I*4+2)*16 + 16*16],T2
+ movdqa [mem2+(I*4+3)*16 + 16*16],T3
+%assign I (I+1)
+
+ MD5_STEP1 MAGIC_G, A,B,C,D, A2,B2,C2,D2, FUN,TMP, mem1 + 1*16, [TBL+16*16], rot21
+ MD5_STEP1 MAGIC_G, D,A,B,C, D2,A2,B2,C2, FUN,TMP, mem1 + 6*16, [TBL+17*16], rot22
+ MD5_STEP1 MAGIC_G, C,D,A,B, C2,D2,A2,B2, FUN,TMP, mem1 +11*16, [TBL+18*16], rot23
+ MD5_STEP1 MAGIC_G, B,C,D,A, B2,C2,D2,A2, FUN,TMP, mem1 + 0*16, [TBL+19*16], rot24
+ MD5_STEP1 MAGIC_G, A,B,C,D, A2,B2,C2,D2, FUN,TMP, mem1 + 5*16, [TBL+20*16], rot21
+ MD5_STEP1 MAGIC_G, D,A,B,C, D2,A2,B2,C2, FUN,TMP, mem1 +10*16, [TBL+21*16], rot22
+ MD5_STEP1 MAGIC_G, C,D,A,B, C2,D2,A2,B2, FUN,TMP, mem1 +15*16, [TBL+22*16], rot23
+ MD5_STEP1 MAGIC_G, B,C,D,A, B2,C2,D2,A2, FUN,TMP, mem1 + 4*16, [TBL+23*16], rot24
+
+ movdqu T2,[inp0+IDX+I*16]
+ movdqu T1,[inp1+IDX+I*16]
+ movdqu T4,[inp2+IDX+I*16]
+ movdqu T3,[inp3+IDX+I*16]
+ TRANSPOSE T2, T1, T4, T3, T0, T5
+ movdqa [mem2+(I*4+0)*16],T0
+ movdqa [mem2+(I*4+1)*16],T1
+ movdqa [mem2+(I*4+2)*16],T2
+ movdqa [mem2+(I*4+3)*16],T3
+
+ MD5_STEP1 MAGIC_G, A,B,C,D, A2,B2,C2,D2, FUN,TMP, mem1 + 9*16, [TBL+24*16], rot21
+ MD5_STEP1 MAGIC_G, D,A,B,C, D2,A2,B2,C2, FUN,TMP, mem1 +14*16, [TBL+25*16], rot22
+ MD5_STEP1 MAGIC_G, C,D,A,B, C2,D2,A2,B2, FUN,TMP, mem1 + 3*16, [TBL+26*16], rot23
+ MD5_STEP1 MAGIC_G, B,C,D,A, B2,C2,D2,A2, FUN,TMP, mem1 + 8*16, [TBL+27*16], rot24
+ MD5_STEP1 MAGIC_G, A,B,C,D, A2,B2,C2,D2, FUN,TMP, mem1 +13*16, [TBL+28*16], rot21
+ MD5_STEP1 MAGIC_G, D,A,B,C, D2,A2,B2,C2, FUN,TMP, mem1 + 2*16, [TBL+29*16], rot22
+ MD5_STEP1 MAGIC_G, C,D,A,B, C2,D2,A2,B2, FUN,TMP, mem1 + 7*16, [TBL+30*16], rot23
+ MD5_STEP1 MAGIC_G, B,C,D,A, B2,C2,D2,A2, FUN,TMP, mem1 +12*16, [TBL+31*16], rot24
+
+ movdqu T2,[inp4+IDX+I*16]
+ movdqu T1,[inp5+IDX+I*16]
+ movdqu T4,[inp6+IDX+I*16]
+ movdqu T3,[inp7+IDX+I*16]
+ TRANSPOSE T2, T1, T4, T3, T0, T5
+ movdqa [mem2+(I*4+0)*16 + 16*16],T0
+ movdqa [mem2+(I*4+1)*16 + 16*16],T1
+ movdqa [mem2+(I*4+2)*16 + 16*16],T2
+ movdqa [mem2+(I*4+3)*16 + 16*16],T3
+%assign I (I+1)
+
+ MD5_STEP1 MAGIC_H, A,B,C,D, A2,B2,C2,D2, FUN,TMP, mem1 + 5*16, [TBL+32*16], rot31
+ MD5_STEP1 MAGIC_H, D,A,B,C, D2,A2,B2,C2, FUN,TMP, mem1 + 8*16, [TBL+33*16], rot32
+ MD5_STEP1 MAGIC_H, C,D,A,B, C2,D2,A2,B2, FUN,TMP, mem1 +11*16, [TBL+34*16], rot33
+ MD5_STEP1 MAGIC_H, B,C,D,A, B2,C2,D2,A2, FUN,TMP, mem1 +14*16, [TBL+35*16], rot34
+ MD5_STEP1 MAGIC_H, A,B,C,D, A2,B2,C2,D2, FUN,TMP, mem1 + 1*16, [TBL+36*16], rot31
+ MD5_STEP1 MAGIC_H, D,A,B,C, D2,A2,B2,C2, FUN,TMP, mem1 + 4*16, [TBL+37*16], rot32
+ MD5_STEP1 MAGIC_H, C,D,A,B, C2,D2,A2,B2, FUN,TMP, mem1 + 7*16, [TBL+38*16], rot33
+ MD5_STEP1 MAGIC_H, B,C,D,A, B2,C2,D2,A2, FUN,TMP, mem1 +10*16, [TBL+39*16], rot34
+
+ movdqu T2,[inp0+IDX+I*16]
+ movdqu T1,[inp1+IDX+I*16]
+ movdqu T4,[inp2+IDX+I*16]
+ movdqu T3,[inp3+IDX+I*16]
+ TRANSPOSE T2, T1, T4, T3, T0, T5
+ movdqa [mem2+(I*4+0)*16],T0
+ movdqa [mem2+(I*4+1)*16],T1
+ movdqa [mem2+(I*4+2)*16],T2
+ movdqa [mem2+(I*4+3)*16],T3
+
+ MD5_STEP1 MAGIC_H, A,B,C,D, A2,B2,C2,D2, FUN,TMP, mem1 +13*16, [TBL+40*16], rot31
+ MD5_STEP1 MAGIC_H, D,A,B,C, D2,A2,B2,C2, FUN,TMP, mem1 + 0*16, [TBL+41*16], rot32
+ MD5_STEP1 MAGIC_H, C,D,A,B, C2,D2,A2,B2, FUN,TMP, mem1 + 3*16, [TBL+42*16], rot33
+ MD5_STEP1 MAGIC_H, B,C,D,A, B2,C2,D2,A2, FUN,TMP, mem1 + 6*16, [TBL+43*16], rot34
+ MD5_STEP1 MAGIC_H, A,B,C,D, A2,B2,C2,D2, FUN,TMP, mem1 + 9*16, [TBL+44*16], rot31
+ MD5_STEP1 MAGIC_H, D,A,B,C, D2,A2,B2,C2, FUN,TMP, mem1 +12*16, [TBL+45*16], rot32
+ MD5_STEP1 MAGIC_H, C,D,A,B, C2,D2,A2,B2, FUN,TMP, mem1 +15*16, [TBL+46*16], rot33
+ MD5_STEP1 MAGIC_H, B,C,D,A, B2,C2,D2,A2, FUN,TMP, mem1 + 2*16, [TBL+47*16], rot34
+
+ movdqu T2,[inp4+IDX+I*16]
+ movdqu T1,[inp5+IDX+I*16]
+ movdqu T4,[inp6+IDX+I*16]
+ movdqu T3,[inp7+IDX+I*16]
+ TRANSPOSE T2, T1, T4, T3, T0, T5
+ movdqa [mem2+(I*4+0)*16 + 16*16],T0
+ movdqa [mem2+(I*4+1)*16 + 16*16],T1
+ movdqa [mem2+(I*4+2)*16 + 16*16],T2
+ movdqa [mem2+(I*4+3)*16 + 16*16],T3
+%assign I (I+1)
+
+ MD5_STEP1 MAGIC_I, A,B,C,D, A2,B2,C2,D2, FUN,TMP, mem1 + 0*16, [TBL+48*16], rot41
+ MD5_STEP1 MAGIC_I, D,A,B,C, D2,A2,B2,C2, FUN,TMP, mem1 + 7*16, [TBL+49*16], rot42
+ MD5_STEP1 MAGIC_I, C,D,A,B, C2,D2,A2,B2, FUN,TMP, mem1 +14*16, [TBL+50*16], rot43
+ MD5_STEP1 MAGIC_I, B,C,D,A, B2,C2,D2,A2, FUN,TMP, mem1 + 5*16, [TBL+51*16], rot44
+ MD5_STEP1 MAGIC_I, A,B,C,D, A2,B2,C2,D2, FUN,TMP, mem1 +12*16, [TBL+52*16], rot41
+ MD5_STEP1 MAGIC_I, D,A,B,C, D2,A2,B2,C2, FUN,TMP, mem1 + 3*16, [TBL+53*16], rot42
+ MD5_STEP1 MAGIC_I, C,D,A,B, C2,D2,A2,B2, FUN,TMP, mem1 +10*16, [TBL+54*16], rot43
+ MD5_STEP1 MAGIC_I, B,C,D,A, B2,C2,D2,A2, FUN,TMP, mem1 + 1*16, [TBL+55*16], rot44
+
+ movdqu T2,[inp0+IDX+I*16]
+ movdqu T1,[inp1+IDX+I*16]
+ movdqu T4,[inp2+IDX+I*16]
+ movdqu T3,[inp3+IDX+I*16]
+ TRANSPOSE T2, T1, T4, T3, T0, T5
+ movdqa [mem2+(I*4+0)*16],T0
+ movdqa [mem2+(I*4+1)*16],T1
+ movdqa [mem2+(I*4+2)*16],T2
+ movdqa [mem2+(I*4+3)*16],T3
+
+ MD5_STEP1 MAGIC_I, A,B,C,D, A2,B2,C2,D2, FUN,TMP, mem1 + 8*16, [TBL+56*16], rot41
+ MD5_STEP1 MAGIC_I, D,A,B,C, D2,A2,B2,C2, FUN,TMP, mem1 +15*16, [TBL+57*16], rot42
+ MD5_STEP1 MAGIC_I, C,D,A,B, C2,D2,A2,B2, FUN,TMP, mem1 + 6*16, [TBL+58*16], rot43
+ MD5_STEP1 MAGIC_I, B,C,D,A, B2,C2,D2,A2, FUN,TMP, mem1 +13*16, [TBL+59*16], rot44
+ MD5_STEP1 MAGIC_I, A,B,C,D, A2,B2,C2,D2, FUN,TMP, mem1 + 4*16, [TBL+60*16], rot41
+ MD5_STEP1 MAGIC_I, D,A,B,C, D2,A2,B2,C2, FUN,TMP, mem1 +11*16, [TBL+61*16], rot42
+ MD5_STEP1 MAGIC_I, C,D,A,B, C2,D2,A2,B2, FUN,TMP, mem1 + 2*16, [TBL+62*16], rot43
+ MD5_STEP1 MAGIC_I, B,C,D,A, B2,C2,D2,A2, FUN,TMP, mem1 + 9*16, [TBL+63*16], rot44
+
+ movdqu T2,[inp4+IDX+I*16]
+ movdqu T1,[inp5+IDX+I*16]
+ movdqu T4,[inp6+IDX+I*16]
+ movdqu T3,[inp7+IDX+I*16]
+ TRANSPOSE T2, T1, T4, T3, T0, T5
+ movdqa [mem2+(I*4+0)*16 + 16*16],T0
+ movdqa [mem2+(I*4+1)*16 + 16*16],T1
+ movdqa [mem2+(I*4+2)*16 + 16*16],T2
+ movdqa [mem2+(I*4+3)*16 + 16*16],T3
+%assign I (I+1)
+
+
+ paddd A,[AA]
+ paddd B,[BB]
+ paddd C,[CC]
+ paddd D,[DD]
+
+ paddd A2,[AA2]
+ paddd B2,[BB2]
+ paddd C2,[CC2]
+ paddd D2,[DD2]
+
+ ; swap mem1 and mem2
+ xchg mem1, mem2
+
+ jmp lloop
+
+lastblock:
+
+ MD5_STEP MAGIC_F, A,B,C,D, A2,B2,C2,D2, FUN,TMP, FUN2,TMP2, mem1 + 0*16, [TBL+ 0*16], rot11
+ MD5_STEP MAGIC_F, D,A,B,C, D2,A2,B2,C2, FUN,TMP, FUN2,TMP2, mem1 + 1*16, [TBL+ 1*16], rot12
+ MD5_STEP MAGIC_F, C,D,A,B, C2,D2,A2,B2, FUN,TMP, FUN2,TMP2, mem1 + 2*16, [TBL+ 2*16], rot13
+ MD5_STEP MAGIC_F, B,C,D,A, B2,C2,D2,A2, FUN,TMP, FUN2,TMP2, mem1 + 3*16, [TBL+ 3*16], rot14
+ MD5_STEP MAGIC_F, A,B,C,D, A2,B2,C2,D2, FUN,TMP, FUN2,TMP2, mem1 + 4*16, [TBL+ 4*16], rot11
+ MD5_STEP MAGIC_F, D,A,B,C, D2,A2,B2,C2, FUN,TMP, FUN2,TMP2, mem1 + 5*16, [TBL+ 5*16], rot12
+ MD5_STEP MAGIC_F, C,D,A,B, C2,D2,A2,B2, FUN,TMP, FUN2,TMP2, mem1 + 6*16, [TBL+ 6*16], rot13
+ MD5_STEP MAGIC_F, B,C,D,A, B2,C2,D2,A2, FUN,TMP, FUN2,TMP2, mem1 + 7*16, [TBL+ 7*16], rot14
+ MD5_STEP MAGIC_F, A,B,C,D, A2,B2,C2,D2, FUN,TMP, FUN2,TMP2, mem1 + 8*16, [TBL+ 8*16], rot11
+ MD5_STEP MAGIC_F, D,A,B,C, D2,A2,B2,C2, FUN,TMP, FUN2,TMP2, mem1 + 9*16, [TBL+ 9*16], rot12
+ MD5_STEP MAGIC_F, C,D,A,B, C2,D2,A2,B2, FUN,TMP, FUN2,TMP2, mem1 +10*16, [TBL+10*16], rot13
+ MD5_STEP MAGIC_F, B,C,D,A, B2,C2,D2,A2, FUN,TMP, FUN2,TMP2, mem1 +11*16, [TBL+11*16], rot14
+ MD5_STEP MAGIC_F, A,B,C,D, A2,B2,C2,D2, FUN,TMP, FUN2,TMP2, mem1 +12*16, [TBL+12*16], rot11
+ MD5_STEP MAGIC_F, D,A,B,C, D2,A2,B2,C2, FUN,TMP, FUN2,TMP2, mem1 +13*16, [TBL+13*16], rot12
+ MD5_STEP MAGIC_F, C,D,A,B, C2,D2,A2,B2, FUN,TMP, FUN2,TMP2, mem1 +14*16, [TBL+14*16], rot13
+ MD5_STEP MAGIC_F, B,C,D,A, B2,C2,D2,A2, FUN,TMP, FUN2,TMP2, mem1 +15*16, [TBL+15*16], rot14
+
+ MD5_STEP MAGIC_G, A,B,C,D, A2,B2,C2,D2, FUN,TMP, FUN2,TMP2, mem1 + 1*16, [TBL+16*16], rot21
+ MD5_STEP MAGIC_G, D,A,B,C, D2,A2,B2,C2, FUN,TMP, FUN2,TMP2, mem1 + 6*16, [TBL+17*16], rot22
+ MD5_STEP MAGIC_G, C,D,A,B, C2,D2,A2,B2, FUN,TMP, FUN2,TMP2, mem1 +11*16, [TBL+18*16], rot23
+ MD5_STEP MAGIC_G, B,C,D,A, B2,C2,D2,A2, FUN,TMP, FUN2,TMP2, mem1 + 0*16, [TBL+19*16], rot24
+ MD5_STEP MAGIC_G, A,B,C,D, A2,B2,C2,D2, FUN,TMP, FUN2,TMP2, mem1 + 5*16, [TBL+20*16], rot21
+ MD5_STEP MAGIC_G, D,A,B,C, D2,A2,B2,C2, FUN,TMP, FUN2,TMP2, mem1 +10*16, [TBL+21*16], rot22
+ MD5_STEP MAGIC_G, C,D,A,B, C2,D2,A2,B2, FUN,TMP, FUN2,TMP2, mem1 +15*16, [TBL+22*16], rot23
+ MD5_STEP MAGIC_G, B,C,D,A, B2,C2,D2,A2, FUN,TMP, FUN2,TMP2, mem1 + 4*16, [TBL+23*16], rot24
+ MD5_STEP MAGIC_G, A,B,C,D, A2,B2,C2,D2, FUN,TMP, FUN2,TMP2, mem1 + 9*16, [TBL+24*16], rot21
+ MD5_STEP MAGIC_G, D,A,B,C, D2,A2,B2,C2, FUN,TMP, FUN2,TMP2, mem1 +14*16, [TBL+25*16], rot22
+ MD5_STEP MAGIC_G, C,D,A,B, C2,D2,A2,B2, FUN,TMP, FUN2,TMP2, mem1 + 3*16, [TBL+26*16], rot23
+ MD5_STEP MAGIC_G, B,C,D,A, B2,C2,D2,A2, FUN,TMP, FUN2,TMP2, mem1 + 8*16, [TBL+27*16], rot24
+ MD5_STEP MAGIC_G, A,B,C,D, A2,B2,C2,D2, FUN,TMP, FUN2,TMP2, mem1 +13*16, [TBL+28*16], rot21
+ MD5_STEP MAGIC_G, D,A,B,C, D2,A2,B2,C2, FUN,TMP, FUN2,TMP2, mem1 + 2*16, [TBL+29*16], rot22
+ MD5_STEP MAGIC_G, C,D,A,B, C2,D2,A2,B2, FUN,TMP, FUN2,TMP2, mem1 + 7*16, [TBL+30*16], rot23
+ MD5_STEP MAGIC_G, B,C,D,A, B2,C2,D2,A2, FUN,TMP, FUN2,TMP2, mem1 +12*16, [TBL+31*16], rot24
+
+ MD5_STEP MAGIC_H, A,B,C,D, A2,B2,C2,D2, FUN,TMP, FUN2,TMP2, mem1 + 5*16, [TBL+32*16], rot31
+ MD5_STEP MAGIC_H, D,A,B,C, D2,A2,B2,C2, FUN,TMP, FUN2,TMP2, mem1 + 8*16, [TBL+33*16], rot32
+ MD5_STEP MAGIC_H, C,D,A,B, C2,D2,A2,B2, FUN,TMP, FUN2,TMP2, mem1 +11*16, [TBL+34*16], rot33
+ MD5_STEP MAGIC_H, B,C,D,A, B2,C2,D2,A2, FUN,TMP, FUN2,TMP2, mem1 +14*16, [TBL+35*16], rot34
+ MD5_STEP MAGIC_H, A,B,C,D, A2,B2,C2,D2, FUN,TMP, FUN2,TMP2, mem1 + 1*16, [TBL+36*16], rot31
+ MD5_STEP MAGIC_H, D,A,B,C, D2,A2,B2,C2, FUN,TMP, FUN2,TMP2, mem1 + 4*16, [TBL+37*16], rot32
+ MD5_STEP MAGIC_H, C,D,A,B, C2,D2,A2,B2, FUN,TMP, FUN2,TMP2, mem1 + 7*16, [TBL+38*16], rot33
+ MD5_STEP MAGIC_H, B,C,D,A, B2,C2,D2,A2, FUN,TMP, FUN2,TMP2, mem1 +10*16, [TBL+39*16], rot34
+ MD5_STEP MAGIC_H, A,B,C,D, A2,B2,C2,D2, FUN,TMP, FUN2,TMP2, mem1 +13*16, [TBL+40*16], rot31
+ MD5_STEP MAGIC_H, D,A,B,C, D2,A2,B2,C2, FUN,TMP, FUN2,TMP2, mem1 + 0*16, [TBL+41*16], rot32
+ MD5_STEP MAGIC_H, C,D,A,B, C2,D2,A2,B2, FUN,TMP, FUN2,TMP2, mem1 + 3*16, [TBL+42*16], rot33
+ MD5_STEP MAGIC_H, B,C,D,A, B2,C2,D2,A2, FUN,TMP, FUN2,TMP2, mem1 + 6*16, [TBL+43*16], rot34
+ MD5_STEP MAGIC_H, A,B,C,D, A2,B2,C2,D2, FUN,TMP, FUN2,TMP2, mem1 + 9*16, [TBL+44*16], rot31
+ MD5_STEP MAGIC_H, D,A,B,C, D2,A2,B2,C2, FUN,TMP, FUN2,TMP2, mem1 +12*16, [TBL+45*16], rot32
+ MD5_STEP MAGIC_H, C,D,A,B, C2,D2,A2,B2, FUN,TMP, FUN2,TMP2, mem1 +15*16, [TBL+46*16], rot33
+ MD5_STEP MAGIC_H, B,C,D,A, B2,C2,D2,A2, FUN,TMP, FUN2,TMP2, mem1 + 2*16, [TBL+47*16], rot34
+
+ MD5_STEP MAGIC_I, A,B,C,D, A2,B2,C2,D2, FUN,TMP, FUN2,TMP2, mem1 + 0*16, [TBL+48*16], rot41
+ MD5_STEP MAGIC_I, D,A,B,C, D2,A2,B2,C2, FUN,TMP, FUN2,TMP2, mem1 + 7*16, [TBL+49*16], rot42
+ MD5_STEP MAGIC_I, C,D,A,B, C2,D2,A2,B2, FUN,TMP, FUN2,TMP2, mem1 +14*16, [TBL+50*16], rot43
+ MD5_STEP MAGIC_I, B,C,D,A, B2,C2,D2,A2, FUN,TMP, FUN2,TMP2, mem1 + 5*16, [TBL+51*16], rot44
+ MD5_STEP MAGIC_I, A,B,C,D, A2,B2,C2,D2, FUN,TMP, FUN2,TMP2, mem1 +12*16, [TBL+52*16], rot41
+ MD5_STEP MAGIC_I, D,A,B,C, D2,A2,B2,C2, FUN,TMP, FUN2,TMP2, mem1 + 3*16, [TBL+53*16], rot42
+ MD5_STEP MAGIC_I, C,D,A,B, C2,D2,A2,B2, FUN,TMP, FUN2,TMP2, mem1 +10*16, [TBL+54*16], rot43
+ MD5_STEP MAGIC_I, B,C,D,A, B2,C2,D2,A2, FUN,TMP, FUN2,TMP2, mem1 + 1*16, [TBL+55*16], rot44
+ MD5_STEP MAGIC_I, A,B,C,D, A2,B2,C2,D2, FUN,TMP, FUN2,TMP2, mem1 + 8*16, [TBL+56*16], rot41
+ MD5_STEP MAGIC_I, D,A,B,C, D2,A2,B2,C2, FUN,TMP, FUN2,TMP2, mem1 +15*16, [TBL+57*16], rot42
+ MD5_STEP MAGIC_I, C,D,A,B, C2,D2,A2,B2, FUN,TMP, FUN2,TMP2, mem1 + 6*16, [TBL+58*16], rot43
+ MD5_STEP MAGIC_I, B,C,D,A, B2,C2,D2,A2, FUN,TMP, FUN2,TMP2, mem1 +13*16, [TBL+59*16], rot44
+ MD5_STEP MAGIC_I, A,B,C,D, A2,B2,C2,D2, FUN,TMP, FUN2,TMP2, mem1 + 4*16, [TBL+60*16], rot41
+ MD5_STEP MAGIC_I, D,A,B,C, D2,A2,B2,C2, FUN,TMP, FUN2,TMP2, mem1 +11*16, [TBL+61*16], rot42
+ MD5_STEP MAGIC_I, C,D,A,B, C2,D2,A2,B2, FUN,TMP, FUN2,TMP2, mem1 + 2*16, [TBL+62*16], rot43
+ MD5_STEP MAGIC_I, B,C,D,A, B2,C2,D2,A2, FUN,TMP, FUN2,TMP2, mem1 + 9*16, [TBL+63*16], rot44
+
+ paddd A,[AA]
+ paddd B,[BB]
+ paddd C,[CC]
+ paddd D,[DD]
+
+ paddd A2,[AA2]
+ paddd B2,[BB2]
+ paddd C2,[CC2]
+ paddd D2,[DD2]
+
+ ; write out digests
+ movdqu [arg1 + 0*16 + 0*MD5_DIGEST_ROW_SIZE], A
+ movdqu [arg1 + 0*16 + 1*MD5_DIGEST_ROW_SIZE], B
+ movdqu [arg1 + 0*16 + 2*MD5_DIGEST_ROW_SIZE], C
+ movdqu [arg1 + 0*16 + 3*MD5_DIGEST_ROW_SIZE], D
+ movdqu [arg1 + 1*16 + 0*MD5_DIGEST_ROW_SIZE], A2
+ movdqu [arg1 + 1*16 + 1*MD5_DIGEST_ROW_SIZE], B2
+ movdqu [arg1 + 1*16 + 2*MD5_DIGEST_ROW_SIZE], C2
+ movdqu [arg1 + 1*16 + 3*MD5_DIGEST_ROW_SIZE], D2
+
+ ;; update input pointers
+ add inp0, IDX
+ add inp1, IDX
+ add inp2, IDX
+ add inp3, IDX
+ add inp4, IDX
+ add inp5, IDX
+ add inp6, IDX
+ add inp7, IDX
+ mov [arg1 +_data_ptr_md5 + 0*PTR_SZ], inp0
+ mov [arg1 +_data_ptr_md5 + 1*PTR_SZ], inp1
+ mov [arg1 +_data_ptr_md5 + 2*PTR_SZ], inp2
+ mov [arg1 +_data_ptr_md5 + 3*PTR_SZ], inp3
+ mov [arg1 +_data_ptr_md5 + 4*PTR_SZ], inp4
+ mov [arg1 +_data_ptr_md5 + 5*PTR_SZ], inp5
+ mov [arg1 +_data_ptr_md5 + 6*PTR_SZ], inp6
+ mov [arg1 +_data_ptr_md5 + 7*PTR_SZ], inp7
+
+ ;; Clear stack frame (72*16 bytes)
+%ifdef SAFE_DATA
+ pxor xmm0, xmm0
+%assign i 0
+%rep (2*2*16+8)
+ movdqa [rsp + i*16], xmm0
+%assign i (i+1)
+%endrep
+%endif
+
+ ;;;;;;;;;;;;;;;;
+ ;; Postamble
+ add rsp, STACK_SIZE
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/sse/pon_sse.asm b/src/spdk/intel-ipsec-mb/sse/pon_sse.asm
new file mode 100644
index 000000000..32585f5f8
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/sse/pon_sse.asm
@@ -0,0 +1,875 @@
+;;
+;; Copyright (c) 2019, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%include "job_aes_hmac.asm"
+%include "include/os.asm"
+%include "include/memcpy.asm"
+
+;;; This is an implementation of the stitched algorithms: AES128-CTR + CRC32 + BIP
+;;; This combination is required by the PON/xPON/gPON standards.
+;;; Note: BIP is a running XOR of 32-bit double words
+;;; Order of operations:
+;;; - encrypt: CRC32, then AES-CTR, then BIP
+;;; - decrypt: BIP, then AES-CTR, then CRC32
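+;;;
+;;; Rough per-block data flow implied by the order above (names are
+;;; illustrative only):
+;;; encrypt: crc = crc32_fold(crc, plaintext)
+;;; ct = plaintext XOR AES(counter++)
+;;; bip = bip XOR ct (as 4 x 32-bit words)
+;;; decrypt: bip = bip XOR ciphertext (as 4 x 32-bit words)
+;;; pt = ciphertext XOR AES(counter++)
+;;; crc = crc32_fold(crc, pt)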
+
+%ifndef DEC_FN_NAME
+%define DEC_FN_NAME submit_job_pon_dec_sse
+%endif
+%ifndef ENC_FN_NAME
+%define ENC_FN_NAME submit_job_pon_enc_sse
+%endif
+%ifndef ENC_NO_CTR_FN_NAME
+%define ENC_NO_CTR_FN_NAME submit_job_pon_enc_no_ctr_sse
+%endif
+%ifndef DEC_NO_CTR_FN_NAME
+%define DEC_NO_CTR_FN_NAME submit_job_pon_dec_no_ctr_sse
+%endif
+
+extern byteswap_const
+extern ddq_add_1
+
+section .data
+default rel
+
+;;; Precomputed constants for CRC32 (Ethernet FCS)
+;;; CRC parameters and the expected result for a 4-byte test buffer of
+;;; {0x01, 0x02, 0x03, 0x04}:
+;;; Result Poly Init RefIn RefOut XorOut
+;;; 0xB63CFBCD 0x04C11DB7 0xFFFFFFFF true true 0xFFFFFFFF
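+;;; The rk1/rk5/rk7 labels below each hold a pair of 64-bit constants and
+;;; appear to follow the usual PCLMULQDQ folding scheme: 128-bit folding,
+;;; final 128->64/32-bit reduction and Barrett reduction respectively.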
+align 16
+rk1:
+ dq 0x00000000ccaa009e, 0x00000001751997d0
+
+align 16
+rk5:
+ dq 0x00000000ccaa009e, 0x0000000163cd6124
+
+align 16
+rk7:
+ dq 0x00000001f7011640, 0x00000001db710640
+
+align 16
+pshufb_shf_table:
+ ;; use these values for shift registers with the pshufb instruction
+ dq 0x8786858483828100, 0x8f8e8d8c8b8a8988
+ dq 0x0706050403020100, 0x000e0d0c0b0a0908
+
+align 16
+init_crc_value:
+ dq 0x00000000FFFFFFFF, 0x0000000000000000
+
+align 16
+mask:
+ dq 0xFFFFFFFFFFFFFFFF, 0x0000000000000000
+
+align 16
+mask2:
+ dq 0xFFFFFFFF00000000, 0xFFFFFFFFFFFFFFFF
+align 16
+mask3:
+ dq 0x8080808080808080, 0x8080808080808080
+
+align 16
+mask_out_top_bytes:
+ dq 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF
+ dq 0x0000000000000000, 0x0000000000000000
+
+;; Precomputed constants for HEC calculation (XGEM header)
+;; POLY 0x53900000:
+;; k1 = 0xf9800000
+;; k2 = 0xa0900000
+;; k3 = 0x7cc00000
+;; q = 0x46b927ec
+;; p_res = 0x53900000
+
+align 16
+k3_q:
+ dq 0x7cc00000, 0x46b927ec
+
+align 16
+p_res:
+ dq 0x53900000, 0
+
+align 16
+mask_out_top_64bits:
+ dq 0xffffffff_ffffffff, 0
+
+section .text
+
+%define NUM_AES_ROUNDS 10
+
+;; note: leave xmm0 free for implicit blend
+%define xcounter xmm7
+%define xbip xmm1
+%define xcrc xmm2
+%define xcrckey xmm3
+%define xtmp1 xmm4
+%define xtmp2 xmm5
+%define xtmp3 xmm6
+%define xtmp4 xmm8
+
+%ifdef LINUX
+%define arg1 rdi
+%define arg2 rsi
+%define arg3 rdx
+%define arg4 rcx
+%define tmp_1 r8
+%define tmp_2 r9
+%define tmp_3 r10
+%define tmp_4 r11
+%define tmp_5 r12
+%define tmp_6 r13
+%define tmp_7 r14
+%else
+%define arg1 rcx
+%define arg2 rdx
+%define arg3 r8
+%define arg4 r9
+%define tmp_1 r10
+%define tmp_2 r11
+%define tmp_3 rax
+%define tmp_4 r12
+%define tmp_5 r13
+%define tmp_6 r14
+%define tmp_7 r15
+%endif
+
+%define job arg1
+
+%define p_in arg2
+%define p_keys arg3
+%define p_out arg4
+
+%define num_bytes tmp_1 ; bytes to cipher
+%define tmp tmp_2
+%define ctr_check tmp_3 ; counter block overflow check
+%define bytes_to_crc tmp_4 ; number of bytes to CRC ( < num_bytes)
+
+%define ethernet_fcs tmp_6 ; not used together with tmp3
+%define tmp2 tmp_5
+%define tmp3 tmp_6
+
+%define write_back_crc tmp_7
+%define decrypt_not_done tmp_7
+
+;;; ============================================================================
+;;; Does all AES encryption rounds
+%macro AES_ENC_ROUNDS 3
+%define %%KP %1 ; [in] pointer to expanded keys
+%define %%N_ROUNDS %2 ; [in] max rounds (128bit: 10, 12, 14)
+%define %%BLOCK %3 ; [in/out] XMM with encrypted block
+
+%assign round 0
+ pxor %%BLOCK, [%%KP + (round * 16)]
+
+%rep (%%N_ROUNDS - 1)
+%assign round (round + 1)
+ aesenc %%BLOCK, [%%KP + (round * 16)]
+%endrep
+
+%assign round (round + 1)
+ aesenclast %%BLOCK, [%%KP + (round * 16)]
+
+%endmacro
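+
+;;; Equivalent C-style sketch (illustrative only):
+;;;   block ^= rk[0];
+;;;   for (r = 1; r < n_rounds; r++)
+;;;           block = aesenc(block, rk[r]);     /* one AESENC round       */
+;;;   block = aesenclast(block, rk[n_rounds]);  /* final AESENCLAST round */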
+
+;;; ============================================================================
+;;; PON stitched algorithm round on a single AES block (16 bytes):
+;;; AES-CTR (optional, depending on %%CIPH)
+;;; - prepares counter blocks
+;;; - encrypts counter blocks
+;;; - loads text
+;;; - xor's text against encrypted blocks
+;;; - stores cipher text
+;;; BIP
+;;; - BIP update on 4 x 32-bits
+;;; CRC32
+;;; - CRC32 calculation
+;;; Note: selecting no_crc, no_bip, no_load or no_store changes the macro
+;;; behaviour to match the needs of the overall algorithm.
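+;;; Per 16-byte block this is roughly (illustrative sketch only):
+;;;   keystream = AES(counter); counter++; out = in ^ keystream   (CTR only)
+;;;   CRC: CLMUL-fold the running state ("next_crc"), then XOR in the
+;;;        plaintext block (the decrypted block when %%DIR is DEC)
+;;;   BIP: XOR the ciphertext block into the 4 x 32-bit BIP accumulator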
+%macro DO_PON 15
+%define %%KP %1 ; [in] GP, pointer to expanded keys
+%define %%N_ROUNDS %2 ; [in] number of AES rounds (10, 12 or 14)
+%define %%CTR %3 ; [in/out] XMM with counter block
+%define %%INP %4 ; [in/out] GP with input text pointer or "no_load"
+%define %%OUTP %5 ; [in/out] GP with output text pointer or "no_store"
+%define %%XBIP_IN_OUT %6 ; [in/out] XMM with BIP value or "no_bip"
+%define %%XCRC_IN_OUT %7 ; [in/out] XMM with CRC (can be anything if "no_crc" below)
+%define %%XCRC_MUL %8 ; [in] XMM with CRC multiplier constant (can be anything if "no_crc" below)
+%define %%TXMM0 %9 ; [clobbered|out] XMM temporary or data out (no_store)
+%define %%TXMM1 %10 ; [clobbered|in] XMM temporary or data in (no_load)
+%define %%TXMM2 %11 ; [clobbered] XMM temporary
+%define %%CRC_TYPE %12 ; [in] "first_crc" or "next_crc" or "no_crc"
+%define %%DIR %13 ; [in] "ENC" or "DEC"
+%define %%CIPH %14 ; [in] "CTR" or "NO_CTR"
+%define %%CTR_CHECK %15 ; [in/out] GP with 64bit counter (to identify overflow)
+
+%ifidn %%CIPH, CTR
+ ;; prepare counter blocks for encryption
+ movdqa %%TXMM0, %%CTR
+ pshufb %%TXMM0, [rel byteswap_const]
+ ;; perform 1 increment on whole 128 bits
+ movdqa %%TXMM2, [rel ddq_add_1]
+ paddq %%CTR, %%TXMM2
+ add %%CTR_CHECK, 1
+ jnc %%_no_ctr_overflow
+ ;; Add 1 to the top 64 bits: first shift the value 1 left by 64 bits.
+ pslldq %%TXMM2, 8
+ paddq %%CTR, %%TXMM2
+%%_no_ctr_overflow:
+%endif
+ ;; CRC calculation
+%ifidn %%CRC_TYPE, next_crc
+ movdqa %%TXMM2, %%XCRC_IN_OUT
+ pclmulqdq %%TXMM2, %%XCRC_MUL, 0x01
+ pclmulqdq %%XCRC_IN_OUT, %%XCRC_MUL, 0x10
+%endif
+
+%ifnidn %%INP, no_load
+ movdqu %%TXMM1, [%%INP]
+%endif
+
+%ifidn %%CIPH, CTR
+ ;; AES rounds
+ AES_ENC_ROUNDS %%KP, %%N_ROUNDS, %%TXMM0
+
+ ;; xor plaintext/ciphertext against encrypted counter blocks
+ pxor %%TXMM0, %%TXMM1
+%else ;; CIPH = NO_CTR
+ ;; if no encryption needs to be done, move from input to output reg
+ movdqa %%TXMM0, %%TXMM1
+%endif ;; CIPH = CTR
+
+%ifidn %%CIPH, CTR
+%ifidn %%DIR, ENC
+ ;; CRC calculation for ENCRYPTION
+%ifidn %%CRC_TYPE, first_crc
+ ;; in the first run just XOR initial CRC with the first block
+ pxor %%XCRC_IN_OUT, %%TXMM1
+%endif
+%ifidn %%CRC_TYPE, next_crc
+ ;; - XOR results of CLMUL's together
+ ;; - then XOR against text block
+ pxor %%XCRC_IN_OUT, %%TXMM2
+ pxor %%XCRC_IN_OUT, %%TXMM1
+%endif
+%else
+ ;; CRC calculation for DECRYPTION
+%ifidn %%CRC_TYPE, first_crc
+ ;; in the first run just XOR initial CRC with the first block
+ pxor %%XCRC_IN_OUT, %%TXMM0
+%endif
+%ifidn %%CRC_TYPE, next_crc
+ ;; - XOR results of CLMUL's together
+ ;; - then XOR against text block
+ pxor %%XCRC_IN_OUT, %%TXMM2
+ pxor %%XCRC_IN_OUT, %%TXMM0
+%endif
+%endif ; DECRYPT
+%else ;; CIPH = NO_CTR
+ ;; CRC calculation for DECRYPTION
+%ifidn %%CRC_TYPE, first_crc
+ ;; in the first run just XOR initial CRC with the first block
+ pxor %%XCRC_IN_OUT, %%TXMM1
+%endif
+%ifidn %%CRC_TYPE, next_crc
+ ;; - XOR results of CLMUL's together
+ ;; - then XOR against text block
+ pxor %%XCRC_IN_OUT, %%TXMM2
+ pxor %%XCRC_IN_OUT, %%TXMM1
+%endif
+
+%endif ;; CIPH = CTR
+
+ ;; store the result in the output buffer
+%ifnidn %%OUTP, no_store
+ movdqu [%%OUTP], %%TXMM0
+%endif
+
+ ;; update BIP value - always use cipher text for BIP
+%ifidn %%DIR, ENC
+%ifnidn %%XBIP_IN_OUT, no_bip
+ pxor %%XBIP_IN_OUT, %%TXMM0
+%endif
+%else
+%ifnidn %%XBIP_IN_OUT, no_bip
+ pxor %%XBIP_IN_OUT, %%TXMM1
+%endif
+%endif ; DECRYPT
+
+ ;; increment in/out pointers
+%ifnidn %%INP, no_load
+ add %%INP, 16
+%endif
+%ifnidn %%OUTP, no_store
+ add %%OUTP, 16
+%endif
+%endmacro ; DO_PON
+
+;;; ============================================================================
+;;; CIPHER and BIP specified number of bytes
+%macro CIPHER_BIP_REST 14
+%define %%NUM_BYTES %1 ; [in/clobbered] number of bytes to cipher
+%define %%DIR %2 ; [in] "ENC" or "DEC"
+%define %%CIPH %3 ; [in] "CTR" or "NO_CTR"
+%define %%PTR_IN %4 ; [in/clobbered] GPR pointer to input buffer
+%define %%PTR_OUT %5 ; [in/clobbered] GPR pointer to output buffer
+%define %%PTR_KEYS %6 ; [in] GPR pointer to expanded keys
+%define %%XBIP_IN_OUT %7 ; [in/out] XMM 128-bit BIP state
+%define %%XCTR_IN_OUT %8 ; [in/out] XMM 128-bit AES counter block
+%define %%XMMT1 %9 ; [clobbered] temporary XMM
+%define %%XMMT2 %10 ; [clobbered] temporary XMM
+%define %%XMMT3 %11 ; [clobbered] temporary XMM
+%define %%CTR_CHECK %12 ; [in/out] GP with 64bit counter (to identify overflow)
+%define %%GPT1 %13 ; [clobbered] temporary GP
+%define %%GPT2 %14 ; [clobbered] temporary GP
+
+%%_cipher_last_blocks:
+ cmp %%NUM_BYTES, 16
+ jb %%_partial_block_left
+
+ DO_PON %%PTR_KEYS, NUM_AES_ROUNDS, %%XCTR_IN_OUT, %%PTR_IN, %%PTR_OUT, %%XBIP_IN_OUT, \
+ no_crc, no_crc, %%XMMT1, %%XMMT2, %%XMMT3, no_crc, %%DIR, %%CIPH, %%CTR_CHECK
+ sub %%NUM_BYTES, 16
+ jz %%_bip_done
+ jmp %%_cipher_last_blocks
+
+%%_partial_block_left:
+ simd_load_sse_15_1 %%XMMT2, %%PTR_IN, %%NUM_BYTES
+
+ ;; DO_PON() neither loads nor stores the data in this case:
+ ;; XMMT2 = data in
+ ;; XMMT1 = data out
+ DO_PON %%PTR_KEYS, NUM_AES_ROUNDS, %%XCTR_IN_OUT, no_load, no_store, no_bip, \
+ no_crc, no_crc, %%XMMT1, %%XMMT2, %%XMMT3, no_crc, %%DIR, %%CIPH, %%CTR_CHECK
+
+ ;; BIP update for partial block (mask out bytes outside the message)
+ lea %%GPT1, [rel mask_out_top_bytes + 16]
+ sub %%GPT1, %%NUM_BYTES
+ movdqu %%XMMT3, [%%GPT1]
+ ;; put masked cipher text into XMMT2 for BIP update
+%ifidn %%DIR, ENC
+ movdqa %%XMMT2, %%XMMT1
+ pand %%XMMT2, %%XMMT3
+%else
+ pand %%XMMT2, %%XMMT3
+%endif
+ pxor %%XBIP_IN_OUT, %%XMMT2
+
+ ;; store partial bytes in the output buffer
+ simd_store_sse_15 %%PTR_OUT, %%XMMT1, %%NUM_BYTES, %%GPT1, %%GPT2
+
+%%_bip_done:
+%endmacro ; CIPHER_BIP_REST
+;; =============================================================================
+;; Barrett reduction from 128-bits to 32-bits modulo Ethernet FCS polynomial
+
+%macro CRC32_REDUCE_128_TO_32 5
+%define %%CRC %1 ; [out] GP to store 32-bit Ethernet FCS value
+%define %%XCRC %2 ; [in/clobbered] XMM with CRC
+%define %%XT1 %3 ; [clobbered] temporary xmm register
+%define %%XT2 %4 ; [clobbered] temporary xmm register
+%define %%XT3 %5 ; [clobbered] temporary xmm register
+
+%define %%XCRCKEY %%XT3
+
+ ;; compute CRC of a 128-bit value
+ movdqa %%XCRCKEY, [rel rk5]
+
+ ;; 64b fold
+ movdqa %%XT1, %%XCRC
+ pclmulqdq %%XT1, %%XCRCKEY, 0x00
+ psrldq %%XCRC, 8
+ pxor %%XCRC, %%XT1
+
+ ;; 32b fold
+ movdqa %%XT1, %%XCRC
+ pslldq %%XT1, 4
+ pclmulqdq %%XT1, %%XCRCKEY, 0x10
+ pxor %%XCRC, %%XT1
+
+%%_crc_barrett:
+ ;; Barrett reduction
+ pand %%XCRC, [rel mask2]
+ movdqa %%XT1, %%XCRC
+ movdqa %%XT2, %%XCRC
+ movdqa %%XCRCKEY, [rel rk7]
+
+ pclmulqdq %%XCRC, %%XCRCKEY, 0x00
+ pxor %%XCRC, %%XT2
+ pand %%XCRC, [rel mask]
+ movdqa %%XT2, %%XCRC
+ pclmulqdq %%XCRC, %%XCRCKEY, 0x10
+ pxor %%XCRC, %%XT2
+ pxor %%XCRC, %%XT1
+ pextrd DWORD(%%CRC), %%XCRC, 2 ; 32-bit CRC value
+ not DWORD(%%CRC)
+%endmacro
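+
+;; Reference note: the two folds above shrink the 128-bit CRC state to 64
+;; and then 32 significant bits via carry-less multiplies by the rk5
+;; constants; the Barrett step with the rk7 constants then yields the
+;; 32-bit remainder modulo the FCS polynomial, and the final NOT applies
+;; the XorOut value (0xFFFFFFFF).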
+
+;; =============================================================================
+;; Barrett reduction from 128-bits to 32-bits modulo 0x53900000 polynomial
+
+%macro HEC_REDUCE_128_TO_32 4
+%define %%XMM_IN_OUT %1 ; [in/out] xmm register with data in and out
+%define %%XT1 %2 ; [clobbered] temporary xmm register
+%define %%XT2 %3 ; [clobbered] temporary xmm register
+%define %%XT3 %4 ; [clobbered] temporary xmm register
+
+%define %%K3_Q %%XT1
+%define %%P_RES %%XT2
+%define %%XTMP %%XT3
+
+ ;; 128 to 64 bit reduction
+ movdqa %%K3_Q, [k3_q]
+ movdqa %%P_RES, [p_res]
+
+ movdqa %%XTMP, %%XMM_IN_OUT
+ pclmulqdq %%XTMP, %%K3_Q, 0x01 ; K3
+ pxor %%XTMP, %%XMM_IN_OUT
+
+ pclmulqdq %%XTMP, %%K3_Q, 0x01 ; K3
+ pxor %%XMM_IN_OUT, %%XTMP
+
+ pand %%XMM_IN_OUT, [rel mask_out_top_64bits]
+
+ ;; 64 to 32 bit reduction
+ movdqa %%XTMP, %%XMM_IN_OUT
+ psrldq %%XTMP, 4
+ pclmulqdq %%XTMP, %%K3_Q, 0x10 ; Q
+ pxor %%XTMP, %%XMM_IN_OUT
+ psrldq %%XTMP, 4
+
+ pclmulqdq %%XTMP, %%P_RES, 0x00 ; P
+ pxor %%XMM_IN_OUT, %%XTMP
+%endmacro
+
+;; =============================================================================
+;; Barrett reduction from 64-bits to 32-bits modulo 0x53900000 polynomial
+
+%macro HEC_REDUCE_64_TO_32 4
+%define %%XMM_IN_OUT %1 ; [in/out] xmm register with data in and out
+%define %%XT1 %2 ; [clobbered] temporary xmm register
+%define %%XT2 %3 ; [clobbered] temporary xmm register
+%define %%XT3 %4 ; [clobbered] temporary xmm register
+
+%define %%K3_Q %%XT1
+%define %%P_RES %%XT2
+%define %%XTMP %%XT3
+
+ movdqa %%K3_Q, [k3_q]
+ movdqa %%P_RES, [p_res]
+
+ ;; 64 to 32 bit reduction
+ movdqa %%XTMP, %%XMM_IN_OUT
+ psrldq %%XTMP, 4
+ pclmulqdq %%XTMP, %%K3_Q, 0x10 ; Q
+ pxor %%XTMP, %%XMM_IN_OUT
+ psrldq %%XTMP, 4
+
+ pclmulqdq %%XTMP, %%P_RES, 0x00 ; P
+ pxor %%XMM_IN_OUT, %%XTMP
+%endmacro
+
+;; =============================================================================
+;; HEC compute and header update for 32-bit XGEM headers
+%macro HEC_COMPUTE_32 6
+%define %%HEC_IN_OUT %1 ; [in/out] GP register with HEC in LE format
+%define %%GT1 %2 ; [clobbered] temporary GP register
+%define %%XT1 %3 ; [clobbered] temporary xmm register
+%define %%XT2 %4 ; [clobbered] temporary xmm register
+%define %%XT3 %5 ; [clobbered] temporary xmm register
+%define %%XT4 %6 ; [clobbered] temporary xmm register
+
+ mov DWORD(%%GT1), DWORD(%%HEC_IN_OUT)
+ ;; shift out 13 bits of HEC value for CRC computation
+ shr DWORD(%%GT1), 13
+
+ ;; mask out current HEC value to merge with an updated HEC at the end
+ and DWORD(%%HEC_IN_OUT), 0xffff_e000
+
+ ;; prepare the message for CRC computation
+ movd %%XT1, DWORD(%%GT1)
+ pslldq %%XT1, 4 ; shift left by 32-bits
+
+ HEC_REDUCE_64_TO_32 %%XT1, %%XT2, %%XT3, %%XT4
+
+ ;; extract 32-bit value
+ ;; - normally perform 20 bit shift right but bit 0 is a parity bit
+ movd DWORD(%%GT1), %%XT1
+ shr DWORD(%%GT1), (20 - 1)
+
+ ;; merge header bytes with updated 12-bit CRC value and
+ ;; compute parity
+ or DWORD(%%GT1), DWORD(%%HEC_IN_OUT)
+ popcnt DWORD(%%HEC_IN_OUT), DWORD(%%GT1)
+ and DWORD(%%HEC_IN_OUT), 1
+ or DWORD(%%HEC_IN_OUT), DWORD(%%GT1)
+%endmacro
+
+;; =============================================================================
+;; HEC compute and header update for 64-bit XGEM headers
+%macro HEC_COMPUTE_64 6
+%define %%HEC_IN_OUT %1 ; [in/out] GP register with HEC in LE format
+%define %%GT1 %2 ; [clobbered] temporary GP register
+%define %%XT1 %3 ; [clobbered] temporary xmm register
+%define %%XT2 %4 ; [clobbered] temporary xmm register
+%define %%XT3 %5 ; [clobbered] temporary xmm register
+%define %%XT4 %6 ; [clobbered] temporary xmm register
+
+ mov %%GT1, %%HEC_IN_OUT
+ ;; shift out 13 bits of HEC value for CRC computation
+ shr %%GT1, 13
+
+ ;; mask out current HEC value to merge with an updated HEC at the end
+ and %%HEC_IN_OUT, 0xffff_ffff_ffff_e000
+
+ ;; prepare the message for CRC computation
+ movq %%XT1, %%GT1
+ pslldq %%XT1, 4 ; shift left by 32-bits
+
+ HEC_REDUCE_128_TO_32 %%XT1, %%XT2, %%XT3, %%XT4
+
+ ;; extract 32-bit value
+ ;; - normally perform 20 bit shift right but bit 0 is a parity bit
+ movd DWORD(%%GT1), %%XT1
+ shr DWORD(%%GT1), (20 - 1)
+
+ ;; merge header bytes with updated 12-bit CRC value and
+ ;; compute parity
+ or %%GT1, %%HEC_IN_OUT
+ popcnt %%HEC_IN_OUT, %%GT1
+ and %%HEC_IN_OUT, 1
+ or %%HEC_IN_OUT, %%GT1
+%endmacro
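+
+;; Illustrative C-style sketch of the update above for a 64-bit header
+;; 'hdr' in LE format (helper names are descriptive only):
+;;   crc  = hec_crc13(hdr >> 13);             /* the CLMUL reduction above */
+;;   hdr  = (hdr & ~0x1fffULL) | (crc >> (20 - 1));
+;;   hdr |= popcount(hdr) & 1;                /* parity bit in bit 0       */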
+
+;;; ============================================================================
+;;; PON stitched algorithm of AES128-CTR, CRC and BIP
+;;; - this is the master macro that implements the encrypt/decrypt API
+;;; - calls other macros and directly uses registers
+;;; defined at the top of the file
+%macro AES128_CTR_PON 2
+%define %%DIR %1 ; [in] direction "ENC" or "DEC"
+%define %%CIPH %2 ; [in] cipher "CTR" or "NO_CTR"
+
+ push r12
+ push r13
+ push r14
+%ifndef LINUX
+ push r15
+%endif
+
+%ifidn %%DIR, ENC
+ ;; by default write back CRC for encryption
+ mov DWORD(write_back_crc), 1
+%else
+ ;; mark decryption as finished
+ mov DWORD(decrypt_not_done), 1
+%endif
+ ;; START BIP (and update HEC if encrypt direction)
+ ;; - load XGEM header (8 bytes) for BIP (not part of encrypted payload)
+ ;; - convert it into LE
+ ;; - update HEC field in the header
+ ;; - convert it into BE
+ ;; - store back the header (with updated HEC)
+ ;; - start BIP
+ ;; (free to use tmp_1, tmp_2 and tmp_3 at this stage)
+ mov tmp_2, [job + _src]
+ add tmp_2, [job + _hash_start_src_offset_in_bytes]
+ mov tmp_3, [tmp_2]
+%ifidn %%DIR, ENC
+ bswap tmp_3 ; go to LE
+ HEC_COMPUTE_64 tmp_3, tmp_1, xtmp1, xtmp2, xtmp3, xtmp4
+ mov bytes_to_crc, tmp_3
+ shr bytes_to_crc, (48 + 2) ; PLI = MSB 14 bits
+ bswap tmp_3 ; go back to BE
+ mov [tmp_2], tmp_3
+ movq xbip, tmp_3
+%else
+ movq xbip, tmp_3
+ mov bytes_to_crc, tmp_3
+ bswap bytes_to_crc ; go to LE
+ shr bytes_to_crc, (48 + 2) ; PLI = MSB 14 bits
+%endif
+ cmp bytes_to_crc, 4
+ ja %%_crc_not_zero
+ ;; XGEM payload shorter than or equal to 4 bytes
+%ifidn %%DIR, ENC
+ ;; Don't write Ethernet FCS on encryption
+ xor DWORD(write_back_crc), DWORD(write_back_crc)
+%else
+ ;; Mark decryption as not finished
+ ;; - Ethernet FCS is not computed
+ ;; - decrypt + BIP to be done at the end
+ xor DWORD(decrypt_not_done), DWORD(decrypt_not_done)
+%endif
+ mov DWORD(bytes_to_crc), 4 ; it will be zero after the sub (avoid jmp)
+%%_crc_not_zero:
+ sub bytes_to_crc, 4 ; subtract size of the CRC itself
+
+%ifidn %%CIPH, CTR
+ ;; - read 16 bytes of IV
+ ;; - convert to little endian format
+ ;; - save least significant 8 bytes in GP register for overflow check
+ mov tmp, [job + _iv]
+ movdqu xcounter, [tmp]
+ pshufb xcounter, [rel byteswap_const]
+ movq ctr_check, xcounter
+%endif
+
+ ;; get input buffer (after XGEM header)
+ mov p_in, [job + _src]
+ add p_in, [job + _cipher_start_src_offset_in_bytes]
+
+ ;; get output buffer
+ mov p_out, [job + _dst]
+
+%ifidn %%CIPH, CTR
+ ;; get key pointers
+ mov p_keys, [job + _aes_enc_key_expanded]
+%endif
+
+ ;; initial CRC value
+ movdqa xcrc, [rel init_crc_value]
+
+ ;; load CRC constants
+ movdqa xcrckey, [rel rk1] ; rk1 and rk2 in xcrckey
+
+ ;; get number of bytes to cipher
+%ifidn %%CIPH, CTR
+ mov num_bytes, [job + _msg_len_to_cipher_in_bytes]
+%else
+ ;; Message length to cipher is 0
+ ;; - length is obtained from message length to hash (BIP) minus XGEM header size
+ mov num_bytes, [job + _msg_len_to_hash_in_bytes]
+ sub num_bytes, 8
+%endif
+ or bytes_to_crc, bytes_to_crc
+ jz %%_crc_done
+
+ cmp bytes_to_crc, 32
+ jae %%_at_least_32_bytes
+
+%ifidn %%DIR, DEC
+ ;; decrypt the buffer first
+ mov tmp, num_bytes
+ CIPHER_BIP_REST tmp, %%DIR, %%CIPH, p_in, p_out, p_keys, xbip, \
+ xcounter, xtmp1, xtmp2, xtmp3, ctr_check, tmp2, tmp3
+
+ ;; correct in/out pointers - go back to start of the buffers
+ mov tmp, num_bytes
+ and tmp, -16 ; partial block handler doesn't increment pointers
+ sub p_in, tmp
+ sub p_out, tmp
+%endif ; DECRYPTION
+
+ ;; less than 32 bytes
+ cmp bytes_to_crc, 16
+ je %%_exact_16_left
+ jl %%_less_than_16_left
+ ;; load the plaintext
+%ifidn %%DIR, ENC
+ movdqu xtmp1, [p_in]
+%else
+ movdqu xtmp1, [p_out]
+%endif
+ pxor xcrc, xtmp1 ; xor the initial crc value
+ jmp %%_crc_two_xmms
+
+%%_exact_16_left:
+%ifidn %%DIR, ENC
+ movdqu xtmp1, [p_in]
+%else
+ movdqu xtmp1, [p_out]
+%endif
+ pxor xcrc, xtmp1 ; xor the initial CRC value
+ jmp %%_128_done
+
+%%_less_than_16_left:
+%ifidn %%DIR, ENC
+ simd_load_sse_15_1 xtmp1, p_in, bytes_to_crc
+%else
+ simd_load_sse_15_1 xtmp1, p_out, bytes_to_crc
+%endif
+ pxor xcrc, xtmp1 ; xor the initial CRC value
+
+ lea tmp, [rel pshufb_shf_table]
+ movdqu xtmp1, [tmp + bytes_to_crc]
+ pshufb xcrc, xtmp1
+ jmp %%_128_done
+
+%%_at_least_32_bytes:
+ DO_PON p_keys, NUM_AES_ROUNDS, xcounter, p_in, p_out, xbip, \
+ xcrc, xcrckey, xtmp1, xtmp2, xtmp3, first_crc, %%DIR, %%CIPH, ctr_check
+ sub num_bytes, 16
+ sub bytes_to_crc, 16
+
+%%_main_loop:
+ cmp bytes_to_crc, 16
+ jb %%_exit_loop
+ DO_PON p_keys, NUM_AES_ROUNDS, xcounter, p_in, p_out, xbip, \
+ xcrc, xcrckey, xtmp1, xtmp2, xtmp3, next_crc, %%DIR, %%CIPH, ctr_check
+ sub num_bytes, 16
+ sub bytes_to_crc, 16
+%ifidn %%DIR, ENC
+ jz %%_128_done
+%endif
+ jmp %%_main_loop
+
+%%_exit_loop:
+
+%ifidn %%DIR, DEC
+ ;; decrypt rest of the message including CRC and optional padding
+ mov tmp, num_bytes
+
+ CIPHER_BIP_REST tmp, %%DIR, %%CIPH, p_in, p_out, p_keys, xbip, \
+ xcounter, xtmp1, xtmp2, xtmp3, ctr_check, tmp2, tmp3
+
+ mov tmp, num_bytes ; correct in/out pointers - to point before cipher & BIP
+ and tmp, -16 ; partial block handler doesn't increment pointers
+ sub p_in, tmp
+ sub p_out, tmp
+
+ or bytes_to_crc, bytes_to_crc
+ jz %%_128_done
+%endif ; DECRYPTION
+
+ ;; Partial bytes left - complete CRC calculation
+%%_crc_two_xmms:
+ lea tmp, [rel pshufb_shf_table]
+ movdqu xtmp2, [tmp + bytes_to_crc]
+%ifidn %%DIR, ENC
+ movdqu xtmp1, [p_in - 16 + bytes_to_crc] ; xtmp1 = data for CRC
+%else
+ movdqu xtmp1, [p_out - 16 + bytes_to_crc] ; xtmp1 = data for CRC
+%endif
+ movdqa xtmp3, xcrc
+ pshufb xcrc, xtmp2 ; top num_bytes with LSB xcrc
+ pxor xtmp2, [rel mask3]
+ pshufb xtmp3, xtmp2 ; bottom (16 - num_bytes) with MSB xcrc
+
+ ;; data num_bytes (top) blended with MSB bytes of CRC (bottom)
+ movdqa xmm0, xtmp2
+ pblendvb xtmp3, xtmp1 ; xmm0 implicit
+
+ ;; final CRC calculation
+ movdqa xtmp1, xcrc
+ pclmulqdq xtmp1, xcrckey, 0x01
+ pclmulqdq xcrc, xcrckey, 0x10
+ pxor xcrc, xtmp3
+ pxor xcrc, xtmp1
+
+%%_128_done:
+ CRC32_REDUCE_128_TO_32 ethernet_fcs, xcrc, xtmp1, xtmp2, xcrckey
+
+%%_crc_done:
+ ;; @todo - store-to-load problem in ENC case (to be fixed later)
+ ;; - store CRC in input buffer and authentication tag output
+ ;; - encrypt remaining bytes
+%ifidn %%DIR, ENC
+ or DWORD(write_back_crc), DWORD(write_back_crc)
+ jz %%_skip_crc_write_back
+ mov [p_in + bytes_to_crc], DWORD(ethernet_fcs)
+%%_skip_crc_write_back:
+%endif
+ mov tmp, [job + _auth_tag_output]
+ mov [tmp + 4], DWORD(ethernet_fcs)
+
+ or num_bytes, num_bytes
+ jz %%_do_not_cipher_the_rest
+
+ ;; encrypt rest of the message
+ ;; - partial bytes including CRC and optional padding
+ ;; decrypt rest of the message
+ ;; - this may only happen when XGEM payload is short and padding is added
+%ifidn %%DIR, DEC
+ or DWORD(decrypt_not_done), DWORD(decrypt_not_done)
+ jnz %%_do_not_cipher_the_rest
+%endif
+ CIPHER_BIP_REST num_bytes, %%DIR, %%CIPH, p_in, p_out, p_keys, xbip, \
+ xcounter, xtmp1, xtmp2, xtmp3, ctr_check, tmp2, tmp3
+%%_do_not_cipher_the_rest:
+
+ ;; finalize BIP
+ movdqa xtmp1, xbip
+ movdqa xtmp2, xbip
+ movdqa xtmp3, xbip
+ psrldq xtmp1, 4
+ psrldq xtmp2, 8
+ psrldq xtmp3, 12
+ pxor xtmp1, xtmp2
+ pxor xbip, xtmp3
+ pxor xbip, xtmp1
+ movd [tmp], xbip
+
+ ;; set job status
+ or dword [job + _status], STS_COMPLETED
+
+ ;; return job
+ mov rax, job
+
+%ifndef LINUX
+ pop r15
+%endif
+ pop r14
+ pop r13
+ pop r12
+%endmacro ; AES128_CTR_PON
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;;; aes_cntr_128_pon_enc_sse(JOB_AES_HMAC *job)
+align 32
+MKGLOBAL(ENC_FN_NAME,function,internal)
+ENC_FN_NAME:
+ AES128_CTR_PON ENC, CTR
+ ret
+
+;;; aes_cntr_128_pon_dec_sse(JOB_AES_HMAC *job)
+align 32
+MKGLOBAL(DEC_FN_NAME,function,internal)
+DEC_FN_NAME:
+ AES128_CTR_PON DEC, CTR
+ ret
+
+;;; aes_cntr_128_pon_enc_no_ctr_sse(JOB_AES_HMAC *job)
+align 32
+MKGLOBAL(ENC_NO_CTR_FN_NAME,function,internal)
+ENC_NO_CTR_FN_NAME:
+ AES128_CTR_PON ENC, NO_CTR
+ ret
+
+;;; aes_cntr_128_pon_dec_no_ctr_sse(JOB_AES_HMAC *job)
+align 32
+MKGLOBAL(DEC_NO_CTR_FN_NAME,function,internal)
+DEC_NO_CTR_FN_NAME:
+ AES128_CTR_PON DEC, NO_CTR
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/sse/sha1_mult_sse.asm b/src/spdk/intel-ipsec-mb/sse/sha1_mult_sse.asm
new file mode 100644
index 000000000..355a38906
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/sse/sha1_mult_sse.asm
@@ -0,0 +1,435 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%include "include/os.asm"
+
+;%define DO_DBGPRINT
+%include "include/dbgprint.asm"
+
+%include "mb_mgr_datastruct.asm"
+
+section .data
+default rel
+align 16
+PSHUFFLE_BYTE_FLIP_MASK: ;ddq 0x0c0d0e0f08090a0b0405060700010203
+ dq 0x0405060700010203, 0x0c0d0e0f08090a0b
+K00_19: ;ddq 0x5A8279995A8279995A8279995A827999
+ dq 0x5A8279995A827999, 0x5A8279995A827999
+K20_39: ;ddq 0x6ED9EBA16ED9EBA16ED9EBA16ED9EBA1
+ dq 0x6ED9EBA16ED9EBA1, 0x6ED9EBA16ED9EBA1
+K40_59: ;ddq 0x8F1BBCDC8F1BBCDC8F1BBCDC8F1BBCDC
+ dq 0x8F1BBCDC8F1BBCDC, 0x8F1BBCDC8F1BBCDC
+K60_79: ;ddq 0xCA62C1D6CA62C1D6CA62C1D6CA62C1D6
+ dq 0xCA62C1D6CA62C1D6, 0xCA62C1D6CA62C1D6
+
+section .text
+
+;; code to compute quad SHA1 using SSE
+;; derived from ...\sha1_multiple\sha1_quad4.asm
+;; variation of sha1_mult2.asm : clobbers all xmm regs, rcx left intact
+;; rbx, rsi, rdi, rbp, r12-r15 left intact
+;; This version is not safe to call from C/C++
+
+;; Stack must be aligned to 16 bytes before call
+;; Windows clobbers: rax rdx r8 r9 r10 r11
+;; Windows preserves: rbx rcx rsi rdi rbp r12 r13 r14 r15
+;;
+;; Linux clobbers: rax rsi r8 r9 r10 r11
+;; Linux preserves: rbx rcx rdx rdi rbp r12 r13 r14 r15
+;;
+;; clobbers xmm0-15
+
+; transpose r0, r1, r2, r3, t0, t1
+; "transpose" data in {r0..r3} using temps {t0..t3}
+; Input looks like: {r0 r1 r2 r3}
+; r0 = {a3 a2 a1 a0}
+; r1 = {b3 b2 b1 b0}
+; r2 = {c3 c2 c1 c0}
+; r3 = {d3 d2 d1 d0}
+;
+; output looks like: {t0 r1 r0 r3}
+; t0 = {d0 c0 b0 a0}
+; r1 = {d1 c1 b1 a1}
+; r0 = {d2 c2 b2 a2}
+; r3 = {d3 c3 b3 a3}
+;
+%macro TRANSPOSE 6
+%define %%r0 %1
+%define %%r1 %2
+%define %%r2 %3
+%define %%r3 %4
+%define %%t0 %5
+%define %%t1 %6
+ movaps %%t0, %%r0 ; t0 = {a3 a2 a1 a0}
+ shufps %%t0, %%r1, 0x44 ; t0 = {b1 b0 a1 a0}
+ shufps %%r0, %%r1, 0xEE ; r0 = {b3 b2 a3 a2}
+
+ movaps %%t1, %%r2 ; t1 = {c3 c2 c1 c0}
+ shufps %%t1, %%r3, 0x44 ; t1 = {d1 d0 c1 c0}
+ shufps %%r2, %%r3, 0xEE ; r2 = {d3 d2 c3 c2}
+
+ movaps %%r1, %%t0 ; r1 = {b1 b0 a1 a0}
+ shufps %%r1, %%t1, 0xDD ; r1 = {d1 c1 b1 a1}
+
+ movaps %%r3, %%r0 ; r3 = {b3 b2 a3 a2}
+ shufps %%r3, %%r2, 0xDD ; r3 = {d3 c3 b3 a3}
+
+ shufps %%r0, %%r2, 0x88 ; r0 = {d2 c2 b2 a2}
+ shufps %%t0, %%t1, 0x88 ; t0 = {d0 c0 b0 a0}
+%endmacro
+;;
+;; Magic functions defined in FIPS 180-1
+;;
+; macro MAGIC_F0 F,B,C,D,T ;; F = (D ^ (B & (C ^ D)))
+%macro MAGIC_F0 5
+%define %%regF %1
+%define %%regB %2
+%define %%regC %3
+%define %%regD %4
+%define %%regT %5
+ movdqa %%regF,%%regC
+ pxor %%regF,%%regD
+ pand %%regF,%%regB
+ pxor %%regF,%%regD
+%endmacro
+
+; macro MAGIC_F1 F,B,C,D,T ;; F = (B ^ C ^ D)
+%macro MAGIC_F1 5
+%define %%regF %1
+%define %%regB %2
+%define %%regC %3
+%define %%regD %4
+%define %%regT %5
+ movdqa %%regF,%%regD
+ pxor %%regF,%%regC
+ pxor %%regF,%%regB
+%endmacro
+
+; macro MAGIC_F2 F,B,C,D,T ;; F = ((B & C) | (B & D) | (C & D))
+%macro MAGIC_F2 5
+%define %%regF %1
+%define %%regB %2
+%define %%regC %3
+%define %%regD %4
+%define %%regT %5
+ movdqa %%regF,%%regB
+ movdqa %%regT,%%regB
+ por %%regF,%%regC
+ pand %%regT,%%regC
+ pand %%regF,%%regD
+ por %%regF,%%regT
+%endmacro
+
+; macro MAGIC_F3 F,B,C,D,T ;; F = (B ^ C ^ D)
+%macro MAGIC_F3 5
+%define %%regF %1
+%define %%regB %2
+%define %%regC %3
+%define %%regD %4
+%define %%regT %5
+ MAGIC_F1 %%regF,%%regB,%%regC,%%regD,%%regT
+%endmacro
+
+; PROLD reg, imm, tmp
+%macro PROLD 3
+%define %%reg %1
+%define %%imm %2
+%define %%tmp %3
+ movdqa %%tmp, %%reg
+ pslld %%reg, %%imm
+ psrld %%tmp, (32-%%imm)
+ por %%reg, %%tmp
+%endmacro
+
+%macro SHA1_STEP_00_15 10
+%define %%regA %1
+%define %%regB %2
+%define %%regC %3
+%define %%regD %4
+%define %%regE %5
+%define %%regT %6
+%define %%regF %7
+%define %%memW %8
+%define %%immCNT %9
+%define %%MAGIC %10
+ paddd %%regE,%%immCNT
+ paddd %%regE,[rsp + (%%memW * 16)]
+ movdqa %%regT,%%regA
+ PROLD %%regT,5, %%regF
+ paddd %%regE,%%regT
+ %%MAGIC %%regF,%%regB,%%regC,%%regD,%%regT ;; FUN = MAGIC_Fi(B,C,D)
+ PROLD %%regB,30, %%regT
+ paddd %%regE,%%regF
+%endmacro
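+
+;; For reference (FIPS 180-1), each step above computes, per lane:
+;;   T = ROTL5(A) + Ft(B,C,D) + E + Kt + W[t]
+;;   E = D; D = C; C = ROTL30(B); B = A; A = T
+;; with the renaming handled by ROTATE_ARGS instead of register moves.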
+
+%macro SHA1_STEP_16_79 10
+%define %%regA %1
+%define %%regB %2
+%define %%regC %3
+%define %%regD %4
+%define %%regE %5
+%define %%regT %6
+%define %%regF %7
+%define %%memW %8
+%define %%immCNT %9
+%define %%MAGIC %10
+ paddd %%regE,%%immCNT
+ movdqa W14, [rsp + ((%%memW - 14) & 15) * 16]
+ pxor W16, W14
+ pxor W16, [rsp + ((%%memW - 8) & 15) * 16]
+ pxor W16, [rsp + ((%%memW - 3) & 15) * 16]
+ movdqa %%regF, W16
+ pslld W16, 1
+ psrld %%regF, (32-1)
+ por %%regF, W16
+ ROTATE_W
+
+ movdqa [rsp + ((%%memW - 0) & 15) * 16],%%regF
+ paddd %%regE,%%regF
+ movdqa %%regT,%%regA
+ PROLD %%regT,5, %%regF
+ paddd %%regE,%%regT
+ %%MAGIC %%regF,%%regB,%%regC,%%regD,%%regT ;; FUN = MAGIC_Fi(B,C,D)
+ PROLD %%regB,30, %%regT
+ paddd %%regE,%%regF
+%endmacro
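+
+;; For reference (FIPS 180-1), the schedule update above computes, four
+;; lanes at a time:
+;;   W[t] = ROTL1(W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16])
+;; with a 16-entry W ring buffer kept on the stack (indexed modulo 16).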
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; FRAMESZ must be an odd multiple of 8
+%define FRAMESZ 16*16 + 8
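+;; (16*16 + 8 = 264 = 33*8; the odd extra 8 bytes restore 16-byte RSP
+;;  alignment after the call pushed the return address, so the aligned
+;;  movdqa stores to the frame below are valid)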
+
+%define MOVPS movdqu
+
+%ifdef LINUX
+%define arg1 rdi
+%define arg2 rsi
+%else
+%define arg1 rcx
+%define arg2 rdx
+%endif
+
+%define inp0 r8
+%define inp1 r9
+%define inp2 r10
+%define inp3 r11
+
+%define IDX rax
+
+%define A xmm0
+%define B xmm1
+%define C xmm2
+%define D xmm3
+%define E xmm4
+%define F xmm5 ; tmp
+%define G xmm6 ; tmp
+
+%define TMP G
+%define FUN F
+%define K xmm7
+
+%define AA xmm8
+%define BB xmm9
+%define CC xmm10
+%define DD xmm11
+%define EE xmm12
+
+%define T0 xmm6
+%define T1 xmm7
+%define T2 xmm8
+%define T3 xmm9
+%define T4 xmm10
+%define T5 xmm11
+
+%define W14 xmm13
+%define W15 xmm14
+%define W16 xmm15
+
+%macro ROTATE_ARGS 0
+%xdefine TMP_ E
+%xdefine E D
+%xdefine D C
+%xdefine C B
+%xdefine B A
+%xdefine A TMP_
+%endm
+
+%macro ROTATE_W 0
+%xdefine TMP_ W16
+%xdefine W16 W15
+%xdefine W15 W14
+%xdefine W14 TMP_
+%endm
+
+align 32
+
+; XMM registers are clobbered. Saving/restoring must be done at a higher level
+
+; void sha1_mult_sse(SHA1_ARGS *args, UINT32 size_in_blocks);
+; arg 1 : rcx : pointer to args
+; arg 2 : rdx : size (in blocks) ;; assumed to be >= 1
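+;
+; Usage sketch (descriptive only): SHA1_ARGS holds the five digest words
+; for 4 lanes in transposed rows (SHA1_DIGEST_ROW_SIZE apart) and 4 input
+; pointers at _data_ptr_sha1; every lane is assumed to have at least
+; 'size_in_blocks' full 64-byte blocks of data available.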
+MKGLOBAL(sha1_mult_sse,function,internal)
+sha1_mult_sse:
+
+ sub rsp, FRAMESZ
+
+ ;; Initialize digests
+ movdqa A, [arg1 + 0*SHA1_DIGEST_ROW_SIZE]
+ movdqa B, [arg1 + 1*SHA1_DIGEST_ROW_SIZE]
+ movdqa C, [arg1 + 2*SHA1_DIGEST_ROW_SIZE]
+ movdqa D, [arg1 + 3*SHA1_DIGEST_ROW_SIZE]
+ movdqa E, [arg1 + 4*SHA1_DIGEST_ROW_SIZE]
+ DBGPRINTL_XMM "Sha1-SSE Incoming transposed digest", A, B, C, D, E
+ ;; load input pointers
+ mov inp0,[arg1 + _data_ptr_sha1 + 0*PTR_SZ]
+ mov inp1,[arg1 + _data_ptr_sha1 + 1*PTR_SZ]
+ mov inp2,[arg1 + _data_ptr_sha1 + 2*PTR_SZ]
+ mov inp3,[arg1 + _data_ptr_sha1 + 3*PTR_SZ]
+ DBGPRINTL64 "Sha1-SSE Incoming data ptrs", inp0, inp1, inp2, inp3
+ xor IDX, IDX
+lloop:
+ movdqa F, [rel PSHUFFLE_BYTE_FLIP_MASK]
+%assign I 0
+%rep 4
+ MOVPS T2,[inp0+IDX]
+ MOVPS T1,[inp1+IDX]
+ MOVPS T4,[inp2+IDX]
+ MOVPS T3,[inp3+IDX]
+ TRANSPOSE T2, T1, T4, T3, T0, T5
+ DBGPRINTL_XMM "sha1 incoming data", T0, T1, T2, T3
+ pshufb T0, F
+ movdqa [rsp+(I*4+0)*16],T0
+ pshufb T1, F
+ movdqa [rsp+(I*4+1)*16],T1
+ pshufb T2, F
+ movdqa [rsp+(I*4+2)*16],T2
+ pshufb T3, F
+ movdqa [rsp+(I*4+3)*16],T3
+ add IDX, 4*4
+%assign I (I+1)
+%endrep
+
+ ; save old digests
+ movdqa AA, A
+ movdqa BB, B
+ movdqa CC, C
+ movdqa DD, D
+ movdqa EE, E
+
+;;
+;; perform 0-79 steps
+;;
+ movdqa K, [rel K00_19]
+;; do rounds 0...15
+%assign I 0
+%rep 16
+ SHA1_STEP_00_15 A,B,C,D,E, TMP,FUN, I, K, MAGIC_F0
+ ROTATE_ARGS
+%assign I (I+1)
+%endrep
+
+;; do rounds 16...19
+ movdqa W16, [rsp + ((16 - 16) & 15) * 16]
+ movdqa W15, [rsp + ((16 - 15) & 15) * 16]
+%rep 4
+ SHA1_STEP_16_79 A,B,C,D,E, TMP,FUN, I, K, MAGIC_F0
+ ROTATE_ARGS
+%assign I (I+1)
+%endrep
+
+;; do rounds 20...39
+ movdqa K, [rel K20_39]
+%rep 20
+ SHA1_STEP_16_79 A,B,C,D,E, TMP,FUN, I, K, MAGIC_F1
+ ROTATE_ARGS
+%assign I (I+1)
+%endrep
+
+;; do rounds 40...59
+ movdqa K, [rel K40_59]
+%rep 20
+ SHA1_STEP_16_79 A,B,C,D,E, TMP,FUN, I, K, MAGIC_F2
+ ROTATE_ARGS
+%assign I (I+1)
+%endrep
+
+;; do rounds 60...79
+ movdqa K, [rel K60_79]
+%rep 20
+ SHA1_STEP_16_79 A,B,C,D,E, TMP,FUN, I, K, MAGIC_F3
+ ROTATE_ARGS
+%assign I (I+1)
+%endrep
+
+ paddd A,AA
+ paddd B,BB
+ paddd C,CC
+ paddd D,DD
+ paddd E,EE
+
+ sub arg2, 1
+ jne lloop
+
+ ; write out digests
+ movdqa [arg1 + 0*SHA1_DIGEST_ROW_SIZE], A
+ movdqa [arg1 + 1*SHA1_DIGEST_ROW_SIZE], B
+ movdqa [arg1 + 2*SHA1_DIGEST_ROW_SIZE], C
+ movdqa [arg1 + 3*SHA1_DIGEST_ROW_SIZE], D
+ movdqa [arg1 + 4*SHA1_DIGEST_ROW_SIZE], E
+ DBGPRINTL_XMM "Sha1 Outgoing transposed digest", A, B, C, D, E
+ ; update input pointers
+ add inp0, IDX
+ mov [arg1 + _data_ptr_sha1 + 0*PTR_SZ], inp0
+ add inp1, IDX
+ mov [arg1 + _data_ptr_sha1 + 1*PTR_SZ], inp1
+ add inp2, IDX
+ mov [arg1 + _data_ptr_sha1 + 2*PTR_SZ], inp2
+ add inp3, IDX
+ mov [arg1 + _data_ptr_sha1 + 3*PTR_SZ], inp3
+ DBGPRINTL64 "Sha1-sse outgoing data ptrs", inp0, inp1, inp2, inp3
+ ;;;;;;;;;;;;;;;;
+ ;; Postamble
+
+ ;; Clear stack frame (16*16 bytes)
+%ifdef SAFE_DATA
+ pxor xmm0, xmm0
+%assign i 0
+%rep 16
+ movdqa [rsp + i*16], xmm0
+%assign i (i+1)
+%endrep
+%endif
+
+ add rsp, FRAMESZ
+
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/sse/sha1_ni_x2_sse.asm b/src/spdk/intel-ipsec-mb/sse/sha1_ni_x2_sse.asm
new file mode 100644
index 000000000..c02c88eed
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/sse/sha1_ni_x2_sse.asm
@@ -0,0 +1,493 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+;; Stack must be aligned to 32 bytes before call
+;;
+;; Registers: RAX RBX RCX RDX RBP RSI RDI R8 R9 R10 R11 R12 R13 R14 R15
+;; -----------------------------------------------------------
+;; Windows clobbers: RDX R10 R11
+;; Windows preserves: RAX RBX RCX RBP RSI RDI R8 R9 R12 R13 R14 R15
+;; -----------------------------------------------------------
+;; Linux clobbers: RDI R10 R11
+;; Linux preserves: RAX RBX RCX RDX RBP RSI R8 R9 R12 R13 R14 R15
+;; -----------------------------------------------------------
+;;
+;; Linux/Windows clobbers: xmm0 - xmm15
+
+%include "include/os.asm"
+;%define DO_DBGPRINT
+%include "include/dbgprint.asm"
+
+%include "mb_mgr_datastruct.asm"
+
+%ifdef LINUX
+%define arg1 rdi
+%define arg2 rsi
+%define arg3 rcx
+%define arg4 rdx
+%else
+%define arg1 rcx
+%define arg2 rdx
+%define arg3 rdi
+%define arg4 rsi
+%endif
+
+%define args arg1
+%define NUM_BLKS arg2
+
+; reso = resdq => 16 bytes
+struc frame
+.ABCD_SAVE reso 1
+.E_SAVE reso 1
+.ABCD_SAVEb reso 1
+.E_SAVEb reso 1
+.align resq 1
+endstruc
+
+%define INP r10
+%define INPb r11
+
+%define ABCD xmm0
+%define E0 xmm1 ; Need two E's b/c they ping pong
+%define E1 xmm2
+%define MSG0 xmm3
+%define MSG1 xmm4
+%define MSG2 xmm5
+%define MSG3 xmm6
+
+%define ABCDb xmm7
+%define E0b xmm8 ; Need two E's b/c they ping pong
+%define E1b xmm9
+%define MSG0b xmm10
+%define MSG1b xmm11
+%define MSG2b xmm12
+%define MSG3b xmm13
+
+%define SHUF_MASK xmm14
+%define E_MASK xmm15
+
+section .data
+default rel
+align 64
+PSHUFFLE_BYTE_FLIP_MASK: ;ddq 0x000102030405060708090a0b0c0d0e0f
+ dq 0x08090a0b0c0d0e0f, 0x0001020304050607
+UPPER_WORD_MASK: ;ddq 0xFFFFFFFF000000000000000000000000
+ dq 0x0000000000000000, 0xFFFFFFFF00000000
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; void sha1_ni(SHA1_ARGS *args, UINT32 size_in_blocks)
+;; arg1 : pointer to args
+;; arg2 : size (in blocks) ;; assumed to be >= 1
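+;;
+;; Processes two lanes (jobs A and B) in parallel using the SHA-NI
+;; instructions; each sha1rnds4 below performs 4 of the 80 SHA-1 rounds.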
+
+section .text
+MKGLOBAL(sha1_ni,function,internal)
+align 32
+sha1_ni:
+ sub rsp, frame_size
+
+ DBGPRINTL "enter sha1-ni-x2"
+
+ shl NUM_BLKS, 6 ; convert to bytes
+ jz done_hash
+
+ ;; load input pointers
+ mov INP, [args + _data_ptr_sha1 + 0*PTR_SZ]
+ DBGPRINTL64 "jobA: pointer", INP
+ mov INPb, [args + _data_ptr_sha1 + 1*PTR_SZ]
+
+ add NUM_BLKS, INP ; pointer to end of data block -> loop exit condition
+
+ ;; load initial digest
+ movdqu ABCD, [args + 0*SHA1NI_DIGEST_ROW_SIZE]
+ pxor E0, E0
+ pinsrd E0, [args + 0*SHA1NI_DIGEST_ROW_SIZE + 4*SHA1_DIGEST_WORD_SIZE], 3
+ pshufd ABCD, ABCD, 0x1B
+
+ DBGPRINTL_XMM "jobA: digest in words[0-3]", ABCD
+ DBGPRINTL_XMM "jobA: digest in word 4", E0
+
+ movdqu ABCDb, [args + 1*SHA1NI_DIGEST_ROW_SIZE]
+ pxor E0b, E0b
+ pinsrd E0b, [args + 1*SHA1NI_DIGEST_ROW_SIZE + 4*SHA1_DIGEST_WORD_SIZE], 3
+ pshufd ABCDb, ABCDb, 0x1B
+
+ movdqa SHUF_MASK, [rel PSHUFFLE_BYTE_FLIP_MASK]
+ movdqa E_MASK, [rel UPPER_WORD_MASK]
+
+ DBGPRINTL "jobA data:"
+loop0:
+ ;; Copy digests
+ movdqa [rsp + frame.ABCD_SAVE], ABCD
+ movdqa [rsp + frame.E_SAVE], E0
+ movdqa [rsp + frame.ABCD_SAVEb], ABCDb
+ movdqa [rsp + frame.E_SAVEb], E0b
+
+ ;; Only needed if not using sha1nexte for rounds 0-3
+ pand E0, E_MASK
+ pand E0b, E_MASK
+
+ ;; Needed if using sha1nexte for rounds 0-3
+ ;; Need to rotate E right by 30
+ ;movdqa E1, E0
+ ;psrld E0, 30
+ ;pslld E1, 2
+ ;pxor E0, E1
+
+ ;; Rounds 0-3
+ movdqu MSG0, [INP + 0*16]
+ pshufb MSG0, SHUF_MASK
+ DBGPRINT_XMM MSG0
+ ;sha1nexte E0, MSG0
+ paddd E0, MSG0 ; instead of sha1nexte
+ movdqa E1, ABCD
+ sha1rnds4 ABCD, E0, 0
+ movdqu MSG0b, [INPb + 0*16]
+ pshufb MSG0b, SHUF_MASK
+ ;sha1nexte E0b, MSG0b
+ paddd E0b, MSG0b ; instead of sha1nexte
+ movdqa E1b, ABCDb
+ sha1rnds4 ABCDb, E0b, 0
+
+ ;; Rounds 4-7
+ movdqu MSG1, [INP + 1*16]
+ pshufb MSG1, SHUF_MASK
+ DBGPRINT_XMM MSG1
+ sha1nexte E1, MSG1
+ movdqa E0, ABCD
+ sha1rnds4 ABCD, E1, 0
+ sha1msg1 MSG0, MSG1
+ movdqu MSG1b, [INPb + 1*16]
+ pshufb MSG1b, SHUF_MASK
+ sha1nexte E1b, MSG1b
+ movdqa E0b, ABCDb
+ sha1rnds4 ABCDb, E1b, 0
+ sha1msg1 MSG0b, MSG1b
+
+ ;; Rounds 8-11
+ movdqu MSG2, [INP + 2*16]
+ pshufb MSG2, SHUF_MASK
+ DBGPRINT_XMM MSG2
+ sha1nexte E0, MSG2
+ movdqa E1, ABCD
+ sha1rnds4 ABCD, E0, 0
+ sha1msg1 MSG1, MSG2
+ pxor MSG0, MSG2
+ movdqu MSG2b, [INPb + 2*16]
+ pshufb MSG2b, SHUF_MASK
+ sha1nexte E0b, MSG2b
+ movdqa E1b, ABCDb
+ sha1rnds4 ABCDb, E0b, 0
+ sha1msg1 MSG1b, MSG2b
+ pxor MSG0b, MSG2b
+
+ ;; Rounds 12-15
+ movdqu MSG3, [INP + 3*16]
+ pshufb MSG3, SHUF_MASK
+ DBGPRINT_XMM MSG3
+ sha1nexte E1, MSG3
+ movdqa E0, ABCD
+ sha1msg2 MSG0, MSG3
+ sha1rnds4 ABCD, E1, 0
+ sha1msg1 MSG2, MSG3
+ pxor MSG1, MSG3
+ movdqu MSG3b, [INPb + 3*16]
+ pshufb MSG3b, SHUF_MASK
+ sha1nexte E1b, MSG3b
+ movdqa E0b, ABCDb
+ sha1msg2 MSG0b, MSG3b
+ sha1rnds4 ABCDb, E1b, 0
+ sha1msg1 MSG2b, MSG3b
+ pxor MSG1b, MSG3b
+
+
+ ;; Rounds 16-19
+ sha1nexte E0, MSG0
+ movdqa E1, ABCD
+ sha1msg2 MSG1, MSG0
+ sha1rnds4 ABCD, E0, 0
+ sha1msg1 MSG3, MSG0
+ pxor MSG2, MSG0
+ sha1nexte E0b, MSG0b
+ movdqa E1b, ABCDb
+ sha1msg2 MSG1b, MSG0b
+ sha1rnds4 ABCDb, E0b, 0
+ sha1msg1 MSG3b, MSG0b
+ pxor MSG2b, MSG0b
+
+ ;; Rounds 20-23
+ sha1nexte E1, MSG1
+ movdqa E0, ABCD
+ sha1msg2 MSG2, MSG1
+ sha1rnds4 ABCD, E1, 1
+ sha1msg1 MSG0, MSG1
+ pxor MSG3, MSG1
+ sha1nexte E1b, MSG1b
+ movdqa E0b, ABCDb
+ sha1msg2 MSG2b, MSG1b
+ sha1rnds4 ABCDb, E1b, 1
+ sha1msg1 MSG0b, MSG1b
+ pxor MSG3b, MSG1b
+
+ ;; Rounds 24-27
+ sha1nexte E0, MSG2
+ movdqa E1, ABCD
+ sha1msg2 MSG3, MSG2
+ sha1rnds4 ABCD, E0, 1
+ sha1msg1 MSG1, MSG2
+ pxor MSG0, MSG2
+ sha1nexte E0b, MSG2b
+ movdqa E1b, ABCDb
+ sha1msg2 MSG3b, MSG2b
+ sha1rnds4 ABCDb, E0b, 1
+ sha1msg1 MSG1b, MSG2b
+ pxor MSG0b, MSG2b
+
+ ;; Rounds 28-31
+ sha1nexte E1, MSG3
+ movdqa E0, ABCD
+ sha1msg2 MSG0, MSG3
+ sha1rnds4 ABCD, E1, 1
+ sha1msg1 MSG2, MSG3
+ pxor MSG1, MSG3
+ sha1nexte E1b, MSG3b
+ movdqa E0b, ABCDb
+ sha1msg2 MSG0b, MSG3b
+ sha1rnds4 ABCDb, E1b, 1
+ sha1msg1 MSG2b, MSG3b
+ pxor MSG1b, MSG3b
+
+ ;; Rounds 32-35
+ sha1nexte E0, MSG0
+ movdqa E1, ABCD
+ sha1msg2 MSG1, MSG0
+ sha1rnds4 ABCD, E0, 1
+ sha1msg1 MSG3, MSG0
+ pxor MSG2, MSG0
+ sha1nexte E0b, MSG0b
+ movdqa E1b, ABCDb
+ sha1msg2 MSG1b, MSG0b
+ sha1rnds4 ABCDb, E0b, 1
+ sha1msg1 MSG3b, MSG0b
+ pxor MSG2b, MSG0b
+
+ ;; Rounds 36-39
+ sha1nexte E1, MSG1
+ movdqa E0, ABCD
+ sha1msg2 MSG2, MSG1
+ sha1rnds4 ABCD, E1, 1
+ sha1msg1 MSG0, MSG1
+ pxor MSG3, MSG1
+ sha1nexte E1b, MSG1b
+ movdqa E0b, ABCDb
+ sha1msg2 MSG2b, MSG1b
+ sha1rnds4 ABCDb, E1b, 1
+ sha1msg1 MSG0b, MSG1b
+ pxor MSG3b, MSG1b
+
+ ;; Rounds 40-43
+ sha1nexte E0, MSG2
+ movdqa E1, ABCD
+ sha1msg2 MSG3, MSG2
+ sha1rnds4 ABCD, E0, 2
+ sha1msg1 MSG1, MSG2
+ pxor MSG0, MSG2
+ sha1nexte E0b, MSG2b
+ movdqa E1b, ABCDb
+ sha1msg2 MSG3b, MSG2b
+ sha1rnds4 ABCDb, E0b, 2
+ sha1msg1 MSG1b, MSG2b
+ pxor MSG0b, MSG2b
+
+ ;; Rounds 44-47
+ sha1nexte E1, MSG3
+ movdqa E0, ABCD
+ sha1msg2 MSG0, MSG3
+ sha1rnds4 ABCD, E1, 2
+ sha1msg1 MSG2, MSG3
+ pxor MSG1, MSG3
+ sha1nexte E1b, MSG3b
+ movdqa E0b, ABCDb
+ sha1msg2 MSG0b, MSG3b
+ sha1rnds4 ABCDb, E1b, 2
+ sha1msg1 MSG2b, MSG3b
+ pxor MSG1b, MSG3b
+
+ ;; Rounds 48-51
+ sha1nexte E0, MSG0
+ movdqa E1, ABCD
+ sha1msg2 MSG1, MSG0
+ sha1rnds4 ABCD, E0, 2
+ sha1msg1 MSG3, MSG0
+ pxor MSG2, MSG0
+ sha1nexte E0b, MSG0b
+ movdqa E1b, ABCDb
+ sha1msg2 MSG1b, MSG0b
+ sha1rnds4 ABCDb, E0b, 2
+ sha1msg1 MSG3b, MSG0b
+ pxor MSG2b, MSG0b
+
+ ;; Rounds 52-55
+ sha1nexte E1, MSG1
+ movdqa E0, ABCD
+ sha1msg2 MSG2, MSG1
+ sha1rnds4 ABCD, E1, 2
+ sha1msg1 MSG0, MSG1
+ pxor MSG3, MSG1
+ sha1nexte E1b, MSG1b
+ movdqa E0b, ABCDb
+ sha1msg2 MSG2b, MSG1b
+ sha1rnds4 ABCDb, E1b, 2
+ sha1msg1 MSG0b, MSG1b
+ pxor MSG3b, MSG1b
+
+ ;; Rounds 56-59
+ sha1nexte E0, MSG2
+ movdqa E1, ABCD
+ sha1msg2 MSG3, MSG2
+ sha1rnds4 ABCD, E0, 2
+ sha1msg1 MSG1, MSG2
+ pxor MSG0, MSG2
+ sha1nexte E0b, MSG2b
+ movdqa E1b, ABCDb
+ sha1msg2 MSG3b, MSG2b
+ sha1rnds4 ABCDb, E0b, 2
+ sha1msg1 MSG1b, MSG2b
+ pxor MSG0b, MSG2b
+
+ ;; Rounds 60-63
+ sha1nexte E1, MSG3
+ movdqa E0, ABCD
+ sha1msg2 MSG0, MSG3
+ sha1rnds4 ABCD, E1, 3
+ sha1msg1 MSG2, MSG3
+ pxor MSG1, MSG3
+ sha1nexte E1b, MSG3b
+ movdqa E0b, ABCDb
+ sha1msg2 MSG0b, MSG3b
+ sha1rnds4 ABCDb, E1b, 3
+ sha1msg1 MSG2b, MSG3b
+ pxor MSG1b, MSG3b
+
+ ;; Rounds 64-67
+ sha1nexte E0, MSG0
+ movdqa E1, ABCD
+ sha1msg2 MSG1, MSG0
+ sha1rnds4 ABCD, E0, 3
+ sha1msg1 MSG3, MSG0
+ pxor MSG2, MSG0
+ sha1nexte E0b, MSG0b
+ movdqa E1b, ABCDb
+ sha1msg2 MSG1b, MSG0b
+ sha1rnds4 ABCDb, E0b, 3
+ sha1msg1 MSG3b, MSG0b
+ pxor MSG2b, MSG0b
+
+ ;; Rounds 68-71
+ sha1nexte E1, MSG1
+ movdqa E0, ABCD
+ sha1msg2 MSG2, MSG1
+ sha1rnds4 ABCD, E1, 3
+ pxor MSG3, MSG1
+ sha1nexte E1b, MSG1b
+ movdqa E0b, ABCDb
+ sha1msg2 MSG2b, MSG1b
+ sha1rnds4 ABCDb, E1b, 3
+ pxor MSG3b, MSG1b
+
+ ;; Rounds 72-75
+ sha1nexte E0, MSG2
+ movdqa E1, ABCD
+ sha1msg2 MSG3, MSG2
+ sha1rnds4 ABCD, E0, 3
+ sha1nexte E0b, MSG2b
+ movdqa E1b, ABCDb
+ sha1msg2 MSG3b, MSG2b
+ sha1rnds4 ABCDb, E0b, 3
+
+ ;; Rounds 76-79
+ sha1nexte E1, MSG3
+ movdqa E0, ABCD
+ sha1rnds4 ABCD, E1, 3
+ sha1nexte E1b, MSG3b
+ movdqa E0b, ABCDb
+ sha1rnds4 ABCDb, E1b, 3
+
+ ;; Need to rotate E left by 30
+ movdqa E1, E0
+ pslld E0, 30
+ psrld E1, 2
+ pxor E0, E1
+ movdqa E1b, E0b
+ pslld E0b, 30
+ psrld E1b, 2
+ pxor E0b, E1b
+
+ paddd ABCD, [rsp + frame.ABCD_SAVE]
+ paddd E0, [rsp + frame.E_SAVE]
+ paddd ABCDb, [rsp + frame.ABCD_SAVEb]
+ paddd E0b, [rsp + frame.E_SAVEb]
+
+ add INP, 64
+ add INPb, 64
+ cmp INP, NUM_BLKS
+ jne loop0
+
+ ;; write out digests
+ pshufd ABCD, ABCD, 0x1B
+ movdqu [args + 0*SHA1NI_DIGEST_ROW_SIZE], ABCD
+ pextrd [args + 0*SHA1NI_DIGEST_ROW_SIZE + 4*SHA1_DIGEST_WORD_SIZE], E0, 3
+ DBGPRINTL_XMM "jobA: digest out words[0-3]", ABCD
+ DBGPRINTL_XMM "jobA: digest out word 4", E0
+
+ pshufd ABCDb, ABCDb, 0x1B
+ movdqu [args + 1*SHA1NI_DIGEST_ROW_SIZE], ABCDb
+ pextrd [args + 1*SHA1NI_DIGEST_ROW_SIZE + 4*SHA1_DIGEST_WORD_SIZE], E0b, 3
+
+ ;; update input pointers
+ mov [args + _data_ptr_sha1 + 0*PTR_SZ], INP
+ mov [args + _data_ptr_sha1 + 1*PTR_SZ], INPb
+
+done_hash:
+
+ ;; Clear stack frame (4*16 bytes)
+%ifdef SAFE_DATA
+ pxor xmm0, xmm0
+%assign i 0
+%rep 4
+ movdqa [rsp + i*16], xmm0
+%assign i (i+1)
+%endrep
+%endif
+
+ add rsp, frame_size
+
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/sse/sha1_one_block_sse.asm b/src/spdk/intel-ipsec-mb/sse/sha1_one_block_sse.asm
new file mode 100644
index 000000000..9039660cc
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/sse/sha1_one_block_sse.asm
@@ -0,0 +1,512 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+; SHA1 code, hybrid, rolled, interleaved
+; Uses SSE instructions
+%include "include/os.asm"
+
+section .data
+default rel
+align 16
+PSHUFFLE_BYTE_FLIP_MASK: ;ddq 0x0c0d0e0f08090a0b0405060700010203
+ dq 0x0405060700010203, 0x0c0d0e0f08090a0b
+K00_19: ;ddq 0x5A8279995A8279995A8279995A827999
+ dq 0x5A8279995A827999, 0x5A8279995A827999
+K20_39: ;ddq 0x6ED9EBA16ED9EBA16ED9EBA16ED9EBA1
+ dq 0x6ED9EBA16ED9EBA1, 0x6ED9EBA16ED9EBA1
+K40_59: ;ddq 0x8F1BBCDC8F1BBCDC8F1BBCDC8F1BBCDC
+ dq 0x8F1BBCDC8F1BBCDC, 0x8F1BBCDC8F1BBCDC
+K60_79: ;ddq 0xCA62C1D6CA62C1D6CA62C1D6CA62C1D6
+ dq 0xCA62C1D6CA62C1D6, 0xCA62C1D6CA62C1D6
+
+section .text
+
+%define MOVDQ movdqu ;; assume buffers not aligned
+
+%ifdef LINUX
+%define INP rdi ; 1st arg
+%define CTX rsi ; 2nd arg
+%define REG3 edx
+%define REG4 ecx
+%else
+%define INP rcx ; 1st arg
+%define CTX rdx ; 2nd arg
+%define REG3 edi
+%define REG4 esi
+%endif
+
+%define FRAMESZ 3*16 + 1*8
+%define _RSP FRAMESZ-1*8 + rsp
+
+%define a eax
+%define b ebx
+%define c REG3
+%define d REG4
+%define e r8d
+%define T1 r9d
+%define f r10d
+%define RND r11d
+%define g r12d
+%define h r13d
+
+%define XTMP0 xmm0
+%define XTMP1 xmm1
+%define XK xmm2
+
+%xdefine X0 xmm3
+%xdefine X1 xmm4
+%xdefine X2 xmm5
+%xdefine X3 xmm6
+%xdefine X4 xmm7
+
+%define XFER xmm8
+
+%define SZ 4
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; Define Macros
+
+%macro rotate_Xs 0
+%xdefine X_ X0
+%xdefine X0 X1
+%xdefine X1 X2
+%xdefine X2 X3
+%xdefine X3 X4
+%xdefine X4 X_
+%endmacro
+
+%macro ROTATE_ARGS 0
+%xdefine TMP_ h
+%xdefine h g
+%xdefine g f
+%xdefine f e
+%xdefine e d
+%xdefine d c
+%xdefine c b
+%xdefine b a
+%xdefine a TMP_
+%endm
+
+
+;; Magic functions defined in FIPS 180-1
+;;
+; macro MAGIC_F0 F,B,C,D,T ;; F = (D ^ (B & (C ^ D)))
+%macro MAGIC_F0 5
+%define %%regF %1
+%define %%regB %2
+%define %%regC %3
+%define %%regD %4
+%define %%regT %5
+ mov %%regF,%%regC
+ xor %%regF,%%regD
+ and %%regF,%%regB
+ xor %%regF,%%regD
+%endmacro
+
+; macro MAGIC_F1 F,B,C,D,T ;; F = (B ^ C ^ D)
+%macro MAGIC_F1 5
+%define %%regF %1
+%define %%regB %2
+%define %%regC %3
+%define %%regD %4
+%define %%regT %5
+ mov %%regF,%%regD
+ xor %%regF,%%regC
+ xor %%regF,%%regB
+%endmacro
+
+; macro MAGIC_F2 F,B,C,D,T ;; F = ((B & C) | (B & D) | (C & D))
+%macro MAGIC_F2 5
+%define %%regF %1
+%define %%regB %2
+%define %%regC %3
+%define %%regD %4
+%define %%regT %5
+ mov %%regF,%%regB
+ mov %%regT,%%regB
+ or %%regF,%%regC
+ and %%regT,%%regC
+ and %%regF,%%regD
+ or %%regF,%%regT
+%endmacro
+
+; macro MAGIC_F3 F,B,C,D,T ;; F = (B ^ C ^ D)
+%macro MAGIC_F3 5
+%define %%regF %1
+%define %%regB %2
+%define %%regC %3
+%define %%regD %4
+%define %%regT %5
+ MAGIC_F1 %%regF,%%regB,%%regC,%%regD,%%regT
+%endmacro
+
+;; input is T1
+%macro ROUND 1
+%define %%MAGIC %1
+ add e,T1
+ mov T1,a
+ rol T1,5
+ add e,T1
+ %%MAGIC h,b,c,d,T1 ;; FUN = MAGIC_Fi(B,C,D)
+ rol b,30
+ add h,e
+ROTATE_ARGS
+%endmacro
+
+%macro do_4i 1
+ movdqa XFER, XK
+ paddd XFER, X0
+ pextrd T1, XFER, 0
+ ;ROUND %1
+ add e,T1
+ ;SCHEDULE_4
+ movdqa XTMP0, X1
+ palignr XTMP0, X0, 8 ; XTMP0 = W[-14]
+ mov T1,a
+ movdqa XTMP1, X2
+ rol T1,5
+ pxor XTMP1, X0 ; XTMP1 = W[-8] ^ W[-16]
+ add e,T1
+ pxor XTMP0, XTMP1 ; XTMP0 = W[-8] ^ W[-14] ^ W[-16]
+ %1 h,b,c,d,T1 ;; FUN = MAGIC_Fi(B,C,D)
+
+ ;; Finish low half
+ movdqa X4, X3
+ rol b,30
+ psrldq X4, 4 ; X4 = W[-3] {xxBA}
+ add h,e
+ROTATE_ARGS
+ pextrd T1, XFER, 1
+ ;ROUND %1
+ add e,T1
+ pxor X4, XTMP0 ;
+ mov T1,a
+ movdqa XTMP1, X4
+ rol T1,5
+ ;; rotate X4 left 1
+ psrld XTMP1, (32-1)
+ add e,T1
+ pslld X4, 1
+ %1 h,b,c,d,T1 ;; FUN = MAGIC_Fi(B,C,D)
+ pxor X4, XTMP1 ; X4 = W[0] {xxBA}
+ rol b,30
+ add h,e
+ROTATE_ARGS
+ pextrd T1, XFER, 2
+ ;ROUND %1
+ add e,T1
+ movdqa XTMP1, X4
+ mov T1,a
+
+ ;; Finish high half
+ palignr XTMP1, X3, 4 ; XTMP1 = w[-3] {DCxx}
+ rol T1,5
+ add e,T1
+ pxor XTMP0, XTMP1
+ %1 h,b,c,d,T1 ;; FUN = MAGIC_Fi(B,C,D)
+ ;; rotate XTMP0 left 1
+ movdqa XTMP1, XTMP0
+ psrld XTMP1, (32-1)
+ rol b,30
+ add h,e
+ROTATE_ARGS
+ pextrd T1, XFER, 3
+ ;ROUND %1
+ add e,T1
+ mov T1,a
+ pslld XTMP0, 1
+ rol T1,5
+ add e,T1
+ pxor XTMP0, XTMP1 ; XTMP0 = W[0] {DCxx}
+ %1 h,b,c,d,T1 ;; FUN = MAGIC_Fi(B,C,D)
+ ;; COMBINE HALVES
+ shufps X4, XTMP0, 11100100b ; X4 = X[0] {DCBA}
+ rol b,30
+ add h,e
+
+ rotate_Xs
+ROTATE_ARGS
+%endmacro
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; void sha1_block_sse(void *input_data, UINT32 digest[5])
+;; arg 1 : (in) pointer to one block of data
+;; arg 2 : (in/out) pointer to read/write digest
+MKGLOBAL(sha1_block_sse,function,internal)
+align 32
+sha1_block_sse:
+ push rbx
+ push rsi
+ push rdi
+ push r12
+ push r13
+
+ movdqa XTMP0, [rel PSHUFFLE_BYTE_FLIP_MASK]
+
+%ifndef LINUX
+ mov rax, rsp ; copy rsp
+ sub rsp, FRAMESZ
+ and rsp, -16 ; align stack frame
+ mov [_RSP],rax ; save copy of rsp
+ movdqa [rsp + 0 * 16], xmm6
+ movdqa [rsp + 1 * 16], xmm7
+ movdqa [rsp + 2 * 16], xmm8
+
+%endif
+ MOVDQ X0, [INP + 0*16]
+ MOVDQ X1, [INP + 1*16]
+
+ ;; load next message block
+ MOVDQ X2, [INP + 2*16]
+ MOVDQ X3, [INP + 3*16]
+
+ ;; set up a-f based on h0-h4
+ ;; byte swap first 16 dwords
+ mov a, [SZ*0 + CTX]
+ pshufb X0, XTMP0
+ mov b, [SZ*1 + CTX]
+ pshufb X1, XTMP0
+ mov c, [SZ*2 + CTX]
+ pshufb X2, XTMP0
+ mov d, [SZ*3 + CTX]
+ pshufb X3, XTMP0
+ mov e, [SZ*4 + CTX]
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;; do rounds 00-19
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ movdqa XK, [rel K00_19]
+ mov RND, 3
+ ROTATE_ARGS
+ ROTATE_ARGS
+ ROTATE_ARGS
+ ROTATE_ARGS
+ rotate_Xs
+ rotate_Xs
+ rotate_Xs
+ rotate_Xs
+ jmp loop1_5
+align 16
+loop1:
+
+ do_4i MAGIC_F0
+
+loop1_5:
+ do_4i MAGIC_F0
+
+ rotate_Xs
+ rotate_Xs
+ rotate_Xs
+ rotate_Xs
+ movdqa X0, X2
+ movdqa X2, X4
+ movdqa X4, X1
+ movdqa X1, X3
+
+ sub RND, 1
+ jne loop1
+
+ rotate_Xs
+ rotate_Xs
+ rotate_Xs
+ rotate_Xs
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;; end rounds 00-19
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;; do rounds 20-39
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ movdqa XK, [rel K20_39]
+ mov RND, 3
+ ROTATE_ARGS
+ ROTATE_ARGS
+ ROTATE_ARGS
+ ROTATE_ARGS
+ rotate_Xs
+ rotate_Xs
+ rotate_Xs
+ rotate_Xs
+ jmp loop2_5
+align 16
+loop2:
+
+ do_4i MAGIC_F1
+
+loop2_5:
+ do_4i MAGIC_F1
+
+ rotate_Xs
+ rotate_Xs
+ rotate_Xs
+ rotate_Xs
+ movdqa X0, X2
+ movdqa X2, X4
+ movdqa X4, X1
+ movdqa X1, X3
+
+ sub RND, 1
+ jne loop2
+
+ rotate_Xs
+ rotate_Xs
+ rotate_Xs
+ rotate_Xs
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;; end rounds 20-39
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;; do rounds 40-59
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ movdqa XK, [rel K40_59]
+ mov RND, 3
+ ROTATE_ARGS
+ ROTATE_ARGS
+ ROTATE_ARGS
+ ROTATE_ARGS
+ rotate_Xs
+ rotate_Xs
+ rotate_Xs
+ rotate_Xs
+ jmp loop3_5
+align 16
+loop3:
+
+ do_4i MAGIC_F2
+
+loop3_5:
+ do_4i MAGIC_F2
+
+ rotate_Xs
+ rotate_Xs
+ rotate_Xs
+ rotate_Xs
+ movdqa X0, X2
+ movdqa X2, X4
+ movdqa X4, X1
+ movdqa X1, X3
+
+ sub RND, 1
+ jne loop3
+
+ rotate_Xs
+ rotate_Xs
+ rotate_Xs
+ rotate_Xs
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;; end rounds 40-59
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;; do rounds 60-79
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ movdqa XK, [rel K60_79]
+
+ do_4i MAGIC_F3
+
+ movdqa XFER, XK
+ paddd XFER, X0
+ pextrd T1, XFER, 0
+ ROUND MAGIC_F3
+ pextrd T1, XFER, 1
+ ROUND MAGIC_F3
+ pextrd T1, XFER, 2
+ ROUND MAGIC_F3
+ pextrd T1, XFER, 3
+ ROUND MAGIC_F3
+
+ movdqa XFER, XK
+ paddd XFER, X1
+ pextrd T1, XFER, 0
+ ROUND MAGIC_F3
+ pextrd T1, XFER, 1
+ ROUND MAGIC_F3
+ pextrd T1, XFER, 2
+ ROUND MAGIC_F3
+ pextrd T1, XFER, 3
+ ROUND MAGIC_F3
+
+ movdqa XFER, XK
+ paddd XFER, X2
+ pextrd T1, XFER, 0
+ ROUND MAGIC_F3
+ pextrd T1, XFER, 1
+ ROUND MAGIC_F3
+ pextrd T1, XFER, 2
+ ROUND MAGIC_F3
+ pextrd T1, XFER, 3
+ ROUND MAGIC_F3
+
+ movdqa XFER, XK
+ paddd XFER, X3
+ pextrd T1, XFER, 0
+ ROUND MAGIC_F3
+ pextrd T1, XFER, 1
+ ROUND MAGIC_F3
+ pextrd T1, XFER, 2
+ ROUND MAGIC_F3
+ pextrd T1, XFER, 3
+ ROUND MAGIC_F3
+
+ ;; update result digest h0-h4
+ add [SZ*0 + CTX], a
+ add [SZ*1 + CTX], b
+ add [SZ*2 + CTX], c
+ add [SZ*3 + CTX], d
+ add [SZ*4 + CTX], e
+
+%ifndef LINUX
+ movdqa xmm8, [rsp + 2 * 16]
+ movdqa xmm7, [rsp + 1 * 16]
+ movdqa xmm6, [rsp + 0 * 16]
+
+%ifdef SAFE_DATA
+ ;; Clear potential sensitive data stored in stack
+ pxor xmm0, xmm0
+ movdqa [rsp + 0 * 16], xmm0
+ movdqa [rsp + 1 * 16], xmm0
+ movdqa [rsp + 2 * 16], xmm0
+%endif
+
+ mov rsp, [_RSP]
+%endif ;; LINUX
+
+ pop r13
+ pop r12
+ pop rdi
+ pop rsi
+ pop rbx
+
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/sse/sha224_one_block_sse.asm b/src/spdk/intel-ipsec-mb/sse/sha224_one_block_sse.asm
new file mode 100644
index 000000000..f0914d799
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/sse/sha224_one_block_sse.asm
@@ -0,0 +1,33 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+; This code schedules 1 block at a time, with 4 lanes per block
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+%define FUNC sha224_block_sse
+
+%include "sse/sha256_one_block_sse.asm"
diff --git a/src/spdk/intel-ipsec-mb/sse/sha256_ni_x2_sse.asm b/src/spdk/intel-ipsec-mb/sse/sha256_ni_x2_sse.asm
new file mode 100644
index 000000000..fa593defa
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/sse/sha256_ni_x2_sse.asm
@@ -0,0 +1,614 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+;; Stack must be aligned to 32 bytes before call
+;;
+;; Registers: RAX RBX RCX RDX RBP RSI RDI R8 R9 R10 R11 R12 R13 R14 R15
+;; -----------------------------------------------------------
+;; Windows clobbers: RCX RDX RSI RDI R11
+;; Windows preserves: RAX RBX RBP R8 R9 R10 R12 R13 R14 R15
+;; -----------------------------------------------------------
+;; Linux clobbers: RCX RDX RSI RDI R11
+;; Linux preserves: RAX RBX RBP R8 R9 R10 R12 R13 R14 R15
+;; -----------------------------------------------------------
+;;
+;; Linux/Windows clobbers: xmm0 - xmm15
+
+%include "include/os.asm"
+;%define DO_DBGPRINT
+%include "include/dbgprint.asm"
+
+%include "mb_mgr_datastruct.asm"
+
+; reso 1 => reserve one 16-byte (oword) slot
+struc frame
+.ABEF_SAVE reso 1
+.CDGH_SAVE reso 1
+.ABEF_SAVEb reso 1
+.CDGH_SAVEb reso 1
+.align resq 1
+endstruc
+
+%ifdef LINUX
+%define arg1 rdi
+%define arg2 rsi
+%define arg3 rcx
+%define arg4 rdx
+%else
+%define arg1 rcx
+%define arg2 rdx
+%define arg3 rdi
+%define arg4 rsi
+%endif
+
+%define args arg1
+%define NUM_BLKS arg2
+
+%define INP arg3
+%define INPb arg4
+
+
+%define SHA256CONSTANTS r11
+
+;; MSG MUST be xmm0 (implicit argument)
+%define MSG xmm0
+%define STATE0 xmm1
+%define STATE1 xmm2
+%define MSGTMP0 xmm3
+%define MSGTMP1 xmm4
+%define MSGTMP2 xmm5
+%define MSGTMP3 xmm6
+%define MSGTMP4 xmm7
+
+%define STATE0b xmm8
+%define STATE1b xmm9
+%define MSGTMP0b xmm10
+%define MSGTMP1b xmm11
+%define MSGTMP2b xmm12
+%define MSGTMP3b xmm13
+%define MSGTMP xmm14
+
+%define SHUF_MASK xmm15
+
+section .data
+default rel
+align 64
+K256:
+ dd 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5
+ dd 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5
+ dd 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3
+ dd 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174
+ dd 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc
+ dd 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da
+ dd 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7
+ dd 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967
+ dd 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13
+ dd 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85
+ dd 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3
+ dd 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070
+ dd 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5
+ dd 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3
+ dd 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208
+ dd 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
+
+PSHUFFLE_BYTE_FLIP_MASK:
+ dq 0x0405060700010203, 0x0c0d0e0f08090a0b
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; void sha256_ni(SHA256_ARGS *args, UINT32 size_in_blocks)
+;; arg1 : pointer to args
+;; arg2 : size (in blocks) ;; assumed to be >= 1
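+;;
+;; The two lanes (A and B) are walked in lock step: every 4-round group below
+;; issues sha256rnds2 for lane A and then for lane B, so the two independent
+;; dependency chains overlap and the sha256rnds2 latency is hidden.  A rough
+;; C-style sketch of the per-block loop shape (illustrative only, not part of
+;; this library; do_4_rounds() stands for one sha256rnds2/msg1/msg2 group):
+;;
+;;     for (size_t blk = 0; blk < num_blks; blk++) {
+;;         for (int grp = 0; grp < 16; grp++) {     /* 16 groups of 4 rounds */
+;;             do_4_rounds(&stateA, &schedA, grp);
+;;             do_4_rounds(&stateB, &schedB, grp);  /* independent lane */
+;;         }
+;;     }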
+section .text
+MKGLOBAL(sha256_ni,function,internal)
+align 32
+sha256_ni:
+ sub rsp, frame_size
+
+ DBGPRINTL "enter sha256-ni-x2"
+
+ shl NUM_BLKS, 6 ; convert to bytes
+ jz done_hash
+
+ DBGPRINTL64 "jobA/B byte size:", NUM_BLKS
+
+ ;; load input pointers
+ mov INP, [args + _data_ptr_sha256 + 0*PTR_SZ]
+ mov INPb, [args + _data_ptr_sha256 + 1*PTR_SZ]
+
+ add NUM_BLKS, INP ; pointer to end of data
+
+ ;; load initial digest
+	;; reorder the digest words into the layout sha256rnds2 expects
+ ;; DCBA, HGFE -> ABEF, CDGH
+
+ movdqu STATE0, [args + 0*SHA256NI_DIGEST_ROW_SIZE]
+ movdqu STATE1, [args + 0*SHA256NI_DIGEST_ROW_SIZE + 16]
+ movdqu STATE0b, [args + 1*SHA256NI_DIGEST_ROW_SIZE]
+ movdqu STATE1b, [args + 1*SHA256NI_DIGEST_ROW_SIZE + 16]
+ DBGPRINTL "jobA digest in:"
+ DBGPRINT_XMM STATE0
+ DBGPRINT_XMM STATE1
+ DBGPRINTL "jobB digest in:"
+ DBGPRINT_XMM STATE0b
+ DBGPRINT_XMM STATE1b
+
+ pshufd STATE0, STATE0, 0xB1 ; CDAB
+ pshufd STATE1, STATE1, 0x1B ; EFGH
+ movdqa MSGTMP4, STATE0
+ pshufd STATE0b, STATE0b, 0xB1 ; CDAB
+ pshufd STATE1b, STATE1b, 0x1B ; EFGH
+ movdqa MSGTMP, STATE0b
+ palignr STATE0, STATE1, 8 ; ABEF
+ palignr STATE0b, STATE1b, 8 ; ABEF
+ pblendw STATE1, MSGTMP4, 0xF0 ; CDGH
+ pblendw STATE1b, MSGTMP, 0xF0 ; CDGH
+
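+        ;; The shuffles above put the working state into the layout required
+        ;; by sha256rnds2.  In C intrinsics (an illustrative sketch, element
+        ;; order listed high dword first; wk holds two message+constant words):
+        ;;
+        ;;     __m128i abef = _mm_set_epi32(a, b, e, f);        /* STATE0 */
+        ;;     __m128i cdgh = _mm_set_epi32(c, d, g, h);        /* STATE1 */
+        ;;     cdgh = _mm_sha256rnds2_epu32(cdgh, abef, wk);    /* 2 rounds */
+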
+ lea SHA256CONSTANTS,[rel K256]
+ movdqa SHUF_MASK, [rel PSHUFFLE_BYTE_FLIP_MASK]
+
+%ifdef DO_DBGPRINT
+	;; print buffer A
+ push r10
+ push NUM_BLKS
+ DBGPRINTL "jobA data:"
+ xor r10, r10
+ sub NUM_BLKS, INP
+.loop_dbgA:
+ movdqu MSG, [INP + r10 + 0*16]
+ DBGPRINT_XMM MSG
+ movdqu MSG, [INP + r10 + 1*16]
+ DBGPRINT_XMM MSG
+ movdqu MSG, [INP + r10 + 2*16]
+ DBGPRINT_XMM MSG
+ movdqu MSG, [INP + r10 + 3*16]
+ DBGPRINT_XMM MSG
+ add r10, 64
+ cmp NUM_BLKS, r10
+ jne .loop_dbgA
+ pop NUM_BLKS
+ pop r10
+%endif
+
+%ifdef DO_DBGPRINT
+	;; print buffer B
+ push r10
+ push NUM_BLKS
+ DBGPRINTL "jobB data:"
+ xor r10, r10
+ sub NUM_BLKS, INP
+.loop_dbgB:
+ movdqu MSG, [INPb + r10 + 0*16]
+ DBGPRINT_XMM MSG
+ movdqu MSG, [INPb + r10 + 1*16]
+ DBGPRINT_XMM MSG
+ movdqu MSG, [INPb + r10 + 2*16]
+ DBGPRINT_XMM MSG
+ movdqu MSG, [INPb + r10 + 3*16]
+ DBGPRINT_XMM MSG
+ add r10, 64
+ cmp NUM_BLKS, r10
+ jne .loop_dbgB
+ pop NUM_BLKS
+ pop r10
+%endif
+
+.loop0:
+ ;; Save digests
+ movdqa [rsp + frame.ABEF_SAVE], STATE0
+ movdqa [rsp + frame.CDGH_SAVE], STATE1
+ movdqa [rsp + frame.ABEF_SAVEb], STATE0b
+ movdqa [rsp + frame.CDGH_SAVEb], STATE1b
+
+ ;; Rounds 0-3
+ movdqu MSG, [INP + 0*16]
+ pshufb MSG, SHUF_MASK
+ movdqa MSGTMP0, MSG
+ paddd MSG, [SHA256CONSTANTS + 0*16]
+ sha256rnds2 STATE1, STATE0, MSG ; MSG is implicit argument
+ pshufd MSG, MSG, 0x0E
+ sha256rnds2 STATE0, STATE1, MSG ; MSG is implicit argument
+ movdqu MSG, [INPb + 0*16]
+ pshufb MSG, SHUF_MASK
+ movdqa MSGTMP0b, MSG
+ paddd MSG, [SHA256CONSTANTS + 0*16]
+ sha256rnds2 STATE1b, STATE0b, MSG ; MSG is implicit argument
+ pshufd MSG, MSG, 0x0E
+ sha256rnds2 STATE0b, STATE1b, MSG ; MSG is implicit argument
+
+ ;; Rounds 4-7
+ movdqu MSG, [INP + 1*16]
+ pshufb MSG, SHUF_MASK
+ movdqa MSGTMP1, MSG
+ paddd MSG, [SHA256CONSTANTS + 1*16]
+ sha256rnds2 STATE1, STATE0, MSG ; MSG is implicit argument
+ pshufd MSG, MSG, 0x0E
+ sha256rnds2 STATE0, STATE1, MSG ; MSG is implicit argument
+ movdqu MSG, [INPb + 1*16]
+ pshufb MSG, SHUF_MASK
+ movdqa MSGTMP1b, MSG
+ paddd MSG, [SHA256CONSTANTS + 1*16]
+ sha256rnds2 STATE1b, STATE0b, MSG ; MSG is implicit argument
+ pshufd MSG, MSG, 0x0E
+ sha256rnds2 STATE0b, STATE1b, MSG ; MSG is implicit argument
+ sha256msg1 MSGTMP0, MSGTMP1
+ sha256msg1 MSGTMP0b, MSGTMP1b
+
+ ;; Rounds 8-11
+ movdqu MSG, [INP + 2*16]
+ pshufb MSG, SHUF_MASK
+ movdqa MSGTMP2, MSG
+ paddd MSG, [SHA256CONSTANTS + 2*16]
+ sha256rnds2 STATE1, STATE0, MSG ; MSG is implicit argument
+ pshufd MSG, MSG, 0x0E
+ sha256rnds2 STATE0, STATE1, MSG ; MSG is implicit argument
+ movdqu MSG, [INPb + 2*16]
+ pshufb MSG, SHUF_MASK
+ movdqa MSGTMP2b, MSG
+ paddd MSG, [SHA256CONSTANTS + 2*16]
+ sha256rnds2 STATE1b, STATE0b, MSG ; MSG is implicit argument
+ pshufd MSG, MSG, 0x0E
+ sha256rnds2 STATE0b, STATE1b, MSG ; MSG is implicit argument
+ sha256msg1 MSGTMP1, MSGTMP2
+ sha256msg1 MSGTMP1b, MSGTMP2b
+
+ ;; Rounds 12-15
+ movdqu MSG, [INP + 3*16]
+ pshufb MSG, SHUF_MASK
+ movdqa MSGTMP3, MSG
+ paddd MSG, [SHA256CONSTANTS + 3*16]
+ sha256rnds2 STATE1, STATE0, MSG ; MSG is implicit argument
+ movdqa MSGTMP, MSGTMP3
+ palignr MSGTMP, MSGTMP2, 4
+ paddd MSGTMP0, MSGTMP
+ sha256msg2 MSGTMP0, MSGTMP3
+ pshufd MSG, MSG, 0x0E
+ sha256rnds2 STATE0, STATE1, MSG ; MSG is implicit argument
+ movdqu MSG, [INPb + 3*16]
+ pshufb MSG, SHUF_MASK
+ movdqa MSGTMP3b, MSG
+ paddd MSG, [SHA256CONSTANTS + 3*16]
+ sha256rnds2 STATE1b, STATE0b, MSG ; MSG is implicit argument
+ movdqa MSGTMP, MSGTMP3b
+ palignr MSGTMP, MSGTMP2b, 4
+ paddd MSGTMP0b, MSGTMP
+ sha256msg2 MSGTMP0b, MSGTMP3b
+ pshufd MSG, MSG, 0x0E
+ sha256rnds2 STATE0b, STATE1b, MSG ; MSG is implicit argument
+ sha256msg1 MSGTMP2, MSGTMP3
+ sha256msg1 MSGTMP2b, MSGTMP3b
+
+ ;; Rounds 16-19
+ movdqa MSG, MSGTMP0
+ paddd MSG, [SHA256CONSTANTS + 4*16]
+ sha256rnds2 STATE1, STATE0, MSG ; MSG is implicit argument
+ movdqa MSGTMP, MSGTMP0
+ palignr MSGTMP, MSGTMP3, 4
+ paddd MSGTMP1, MSGTMP
+ sha256msg2 MSGTMP1, MSGTMP0
+ pshufd MSG, MSG, 0x0E
+ sha256rnds2 STATE0, STATE1, MSG ; MSG is implicit argument
+ movdqa MSG, MSGTMP0b
+ paddd MSG, [SHA256CONSTANTS + 4*16]
+ sha256rnds2 STATE1b, STATE0b, MSG ; MSG is implicit argument
+ movdqa MSGTMP, MSGTMP0b
+ palignr MSGTMP, MSGTMP3b, 4
+ paddd MSGTMP1b, MSGTMP
+ sha256msg2 MSGTMP1b, MSGTMP0b
+ pshufd MSG, MSG, 0x0E
+ sha256rnds2 STATE0b, STATE1b, MSG ; MSG is implicit argument
+ sha256msg1 MSGTMP3, MSGTMP0
+ sha256msg1 MSGTMP3b, MSGTMP0b
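+
+        ;; Each scheduled 4-round group (such as rounds 16-19 above) maps, for
+        ;; a single lane, to the intrinsic sequence below (an illustrative
+        ;; sketch, not part of this file; it assumes K256 is visible as a
+        ;; uint32_t[64] array):
+        ;;
+        ;;     __m128i msg = _mm_add_epi32(msgtmp0,
+        ;;                       _mm_load_si128((const __m128i *)&K256[16]));
+        ;;     cdgh = _mm_sha256rnds2_epu32(cdgh, abef, msg);   /* rounds 16,17 */
+        ;;     __m128i t = _mm_alignr_epi8(msgtmp0, msgtmp3, 4);
+        ;;     msgtmp1 = _mm_sha256msg2_epu32(_mm_add_epi32(msgtmp1, t), msgtmp0);
+        ;;     msg  = _mm_shuffle_epi32(msg, 0x0E);
+        ;;     abef = _mm_sha256rnds2_epu32(abef, cdgh, msg);   /* rounds 18,19 */
+        ;;     msgtmp3 = _mm_sha256msg1_epu32(msgtmp3, msgtmp0);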
+
+ ;; Rounds 20-23
+ movdqa MSG, MSGTMP1
+ paddd MSG, [SHA256CONSTANTS + 5*16]
+ sha256rnds2 STATE1, STATE0, MSG ; MSG is implicit argument
+ movdqa MSGTMP, MSGTMP1
+ palignr MSGTMP, MSGTMP0, 4
+ paddd MSGTMP2, MSGTMP
+ sha256msg2 MSGTMP2, MSGTMP1
+ pshufd MSG, MSG, 0x0E
+ sha256rnds2 STATE0, STATE1, MSG ; MSG is implicit argument
+ movdqa MSG, MSGTMP1b
+ paddd MSG, [SHA256CONSTANTS + 5*16]
+ sha256rnds2 STATE1b, STATE0b, MSG ; MSG is implicit argument
+ movdqa MSGTMP, MSGTMP1b
+ palignr MSGTMP, MSGTMP0b, 4
+ paddd MSGTMP2b, MSGTMP
+ sha256msg2 MSGTMP2b, MSGTMP1b
+ pshufd MSG, MSG, 0x0E
+ sha256rnds2 STATE0b, STATE1b, MSG ; MSG is implicit argument
+ sha256msg1 MSGTMP0, MSGTMP1
+ sha256msg1 MSGTMP0b, MSGTMP1b
+
+ ;; Rounds 24-27
+ movdqa MSG, MSGTMP2
+ paddd MSG, [SHA256CONSTANTS + 6*16]
+ sha256rnds2 STATE1, STATE0, MSG ; MSG is implicit argument
+ movdqa MSGTMP, MSGTMP2
+ palignr MSGTMP, MSGTMP1, 4
+ paddd MSGTMP3, MSGTMP
+ sha256msg2 MSGTMP3, MSGTMP2
+ pshufd MSG, MSG, 0x0E
+ sha256rnds2 STATE0, STATE1, MSG ; MSG is implicit argument
+ movdqa MSG, MSGTMP2b
+ paddd MSG, [SHA256CONSTANTS + 6*16]
+ sha256rnds2 STATE1b, STATE0b, MSG ; MSG is implicit argument
+ movdqa MSGTMP, MSGTMP2b
+ palignr MSGTMP, MSGTMP1b, 4
+ paddd MSGTMP3b, MSGTMP
+ sha256msg2 MSGTMP3b, MSGTMP2b
+ pshufd MSG, MSG, 0x0E
+ sha256rnds2 STATE0b, STATE1b, MSG ; MSG is implicit argument
+ sha256msg1 MSGTMP1, MSGTMP2
+ sha256msg1 MSGTMP1b, MSGTMP2b
+
+ ;; Rounds 28-31
+ movdqa MSG, MSGTMP3
+ paddd MSG, [SHA256CONSTANTS + 7*16]
+ sha256rnds2 STATE1, STATE0, MSG ; MSG is implicit argument
+ movdqa MSGTMP, MSGTMP3
+ palignr MSGTMP, MSGTMP2, 4
+ paddd MSGTMP0, MSGTMP
+ sha256msg2 MSGTMP0, MSGTMP3
+ pshufd MSG, MSG, 0x0E
+ sha256rnds2 STATE0, STATE1, MSG ; MSG is implicit argument
+ movdqa MSG, MSGTMP3b
+ paddd MSG, [SHA256CONSTANTS + 7*16]
+ sha256rnds2 STATE1b, STATE0b, MSG ; MSG is implicit argument
+ movdqa MSGTMP, MSGTMP3b
+ palignr MSGTMP, MSGTMP2b, 4
+ paddd MSGTMP0b, MSGTMP
+ sha256msg2 MSGTMP0b, MSGTMP3b
+ pshufd MSG, MSG, 0x0E
+ sha256rnds2 STATE0b, STATE1b, MSG ; MSG is implicit argument
+ sha256msg1 MSGTMP2, MSGTMP3
+ sha256msg1 MSGTMP2b, MSGTMP3b
+
+ ;; Rounds 32-35
+ movdqa MSG, MSGTMP0
+ paddd MSG, [SHA256CONSTANTS + 8*16]
+ sha256rnds2 STATE1, STATE0, MSG ; MSG is implicit argument
+ movdqa MSGTMP, MSGTMP0
+ palignr MSGTMP, MSGTMP3, 4
+ paddd MSGTMP1, MSGTMP
+ sha256msg2 MSGTMP1, MSGTMP0
+ pshufd MSG, MSG, 0x0E
+ sha256rnds2 STATE0, STATE1, MSG ; MSG is implicit argument
+ movdqa MSG, MSGTMP0b
+ paddd MSG, [SHA256CONSTANTS + 8*16]
+ sha256rnds2 STATE1b, STATE0b, MSG ; MSG is implicit argument
+ movdqa MSGTMP, MSGTMP0b
+ palignr MSGTMP, MSGTMP3b, 4
+ paddd MSGTMP1b, MSGTMP
+ sha256msg2 MSGTMP1b, MSGTMP0b
+ pshufd MSG, MSG, 0x0E
+ sha256rnds2 STATE0b, STATE1b, MSG ; MSG is implicit argument
+ sha256msg1 MSGTMP3, MSGTMP0
+ sha256msg1 MSGTMP3b, MSGTMP0b
+
+ ;; Rounds 36-39
+ movdqa MSG, MSGTMP1
+ paddd MSG, [SHA256CONSTANTS + 9*16]
+ sha256rnds2 STATE1, STATE0, MSG ; MSG is implicit argument
+ movdqa MSGTMP, MSGTMP1
+ palignr MSGTMP, MSGTMP0, 4
+ paddd MSGTMP2, MSGTMP
+ sha256msg2 MSGTMP2, MSGTMP1
+ pshufd MSG, MSG, 0x0E
+ sha256rnds2 STATE0, STATE1, MSG ; MSG is implicit argument
+ movdqa MSG, MSGTMP1b
+ paddd MSG, [SHA256CONSTANTS + 9*16]
+ sha256rnds2 STATE1b, STATE0b, MSG ; MSG is implicit argument
+ movdqa MSGTMP, MSGTMP1b
+ palignr MSGTMP, MSGTMP0b, 4
+ paddd MSGTMP2b, MSGTMP
+ sha256msg2 MSGTMP2b, MSGTMP1b
+ pshufd MSG, MSG, 0x0E
+ sha256rnds2 STATE0b, STATE1b, MSG ; MSG is implicit argument
+ sha256msg1 MSGTMP0, MSGTMP1
+ sha256msg1 MSGTMP0b, MSGTMP1b
+
+ ;; Rounds 40-43
+ movdqa MSG, MSGTMP2
+ paddd MSG, [SHA256CONSTANTS + 10*16]
+ sha256rnds2 STATE1, STATE0, MSG ; MSG is implicit argument
+ movdqa MSGTMP, MSGTMP2
+ palignr MSGTMP, MSGTMP1, 4
+ paddd MSGTMP3, MSGTMP
+ sha256msg2 MSGTMP3, MSGTMP2
+ pshufd MSG, MSG, 0x0E
+ sha256rnds2 STATE0, STATE1, MSG ; MSG is implicit argument
+ movdqa MSG, MSGTMP2b
+ paddd MSG, [SHA256CONSTANTS + 10*16]
+ sha256rnds2 STATE1b, STATE0b, MSG ; MSG is implicit argument
+ movdqa MSGTMP, MSGTMP2b
+ palignr MSGTMP, MSGTMP1b, 4
+ paddd MSGTMP3b, MSGTMP
+ sha256msg2 MSGTMP3b, MSGTMP2b
+ pshufd MSG, MSG, 0x0E
+ sha256rnds2 STATE0b, STATE1b, MSG ; MSG is implicit argument
+ sha256msg1 MSGTMP1, MSGTMP2
+ sha256msg1 MSGTMP1b, MSGTMP2b
+
+ ;; Rounds 44-47
+ movdqa MSG, MSGTMP3
+ paddd MSG, [SHA256CONSTANTS + 11*16]
+ sha256rnds2 STATE1, STATE0, MSG ; MSG is implicit argument
+ movdqa MSGTMP, MSGTMP3
+ palignr MSGTMP, MSGTMP2, 4
+ paddd MSGTMP0, MSGTMP
+ sha256msg2 MSGTMP0, MSGTMP3
+ pshufd MSG, MSG, 0x0E
+ sha256rnds2 STATE0, STATE1, MSG ; MSG is implicit argument
+ movdqa MSG, MSGTMP3b
+ paddd MSG, [SHA256CONSTANTS + 11*16]
+ sha256rnds2 STATE1b, STATE0b, MSG ; MSG is implicit argument
+ movdqa MSGTMP, MSGTMP3b
+ palignr MSGTMP, MSGTMP2b, 4
+ paddd MSGTMP0b, MSGTMP
+ sha256msg2 MSGTMP0b, MSGTMP3b
+ pshufd MSG, MSG, 0x0E
+ sha256rnds2 STATE0b, STATE1b, MSG ; MSG is implicit argument
+ sha256msg1 MSGTMP2, MSGTMP3
+ sha256msg1 MSGTMP2b, MSGTMP3b
+
+ ;; Rounds 48-51
+ movdqa MSG, MSGTMP0
+ paddd MSG, [SHA256CONSTANTS + 12*16]
+ sha256rnds2 STATE1, STATE0, MSG ; MSG is implicit argument
+ movdqa MSGTMP, MSGTMP0
+ palignr MSGTMP, MSGTMP3, 4
+ paddd MSGTMP1, MSGTMP
+ sha256msg2 MSGTMP1, MSGTMP0
+ pshufd MSG, MSG, 0x0E
+ sha256rnds2 STATE0, STATE1, MSG ; MSG is implicit argument
+ movdqa MSG, MSGTMP0b
+ paddd MSG, [SHA256CONSTANTS + 12*16]
+ sha256rnds2 STATE1b, STATE0b, MSG ; MSG is implicit argument
+ movdqa MSGTMP, MSGTMP0b
+ palignr MSGTMP, MSGTMP3b, 4
+ paddd MSGTMP1b, MSGTMP
+ sha256msg2 MSGTMP1b, MSGTMP0b
+ pshufd MSG, MSG, 0x0E
+ sha256rnds2 STATE0b, STATE1b, MSG ; MSG is implicit argument
+ sha256msg1 MSGTMP3, MSGTMP0
+ sha256msg1 MSGTMP3b, MSGTMP0b
+
+ ;; Rounds 52-55
+ movdqa MSG, MSGTMP1
+ paddd MSG, [SHA256CONSTANTS + 13*16]
+ sha256rnds2 STATE1, STATE0, MSG ; MSG is implicit argument
+ movdqa MSGTMP, MSGTMP1
+ palignr MSGTMP, MSGTMP0, 4
+ paddd MSGTMP2, MSGTMP
+ sha256msg2 MSGTMP2, MSGTMP1
+ pshufd MSG, MSG, 0x0E
+ sha256rnds2 STATE0, STATE1, MSG ; MSG is implicit argument
+ movdqa MSG, MSGTMP1b
+ paddd MSG, [SHA256CONSTANTS + 13*16]
+ sha256rnds2 STATE1b, STATE0b, MSG ; MSG is implicit argument
+ movdqa MSGTMP, MSGTMP1b
+ palignr MSGTMP, MSGTMP0b, 4
+ paddd MSGTMP2b, MSGTMP
+ sha256msg2 MSGTMP2b, MSGTMP1b
+ pshufd MSG, MSG, 0x0E
+ sha256rnds2 STATE0b, STATE1b, MSG ; MSG is implicit argument
+
+ ;; Rounds 56-59
+ movdqa MSG, MSGTMP2
+ paddd MSG, [SHA256CONSTANTS + 14*16]
+ sha256rnds2 STATE1, STATE0, MSG ; MSG is implicit argument
+ movdqa MSGTMP, MSGTMP2
+ palignr MSGTMP, MSGTMP1, 4
+ paddd MSGTMP3, MSGTMP
+ sha256msg2 MSGTMP3, MSGTMP2
+ pshufd MSG, MSG, 0x0E
+ sha256rnds2 STATE0, STATE1, MSG ; MSG is implicit argument
+ movdqa MSG, MSGTMP2b
+ paddd MSG, [SHA256CONSTANTS + 14*16]
+ sha256rnds2 STATE1b, STATE0b, MSG ; MSG is implicit argument
+ movdqa MSGTMP, MSGTMP2b
+ palignr MSGTMP, MSGTMP1b, 4
+ paddd MSGTMP3b, MSGTMP
+ sha256msg2 MSGTMP3b, MSGTMP2b
+ pshufd MSG, MSG, 0x0E
+ sha256rnds2 STATE0b, STATE1b, MSG ; MSG is implicit argument
+
+ ;; Rounds 60-63
+ movdqa MSG, MSGTMP3
+ paddd MSG, [SHA256CONSTANTS + 15*16]
+ sha256rnds2 STATE1, STATE0, MSG ; MSG is implicit argument
+ pshufd MSG, MSG, 0x0E
+ sha256rnds2 STATE0, STATE1, MSG ; MSG is implicit argument
+ movdqa MSG, MSGTMP3b
+ paddd MSG, [SHA256CONSTANTS + 15*16]
+ sha256rnds2 STATE1b, STATE0b, MSG ; MSG is implicit argument
+ pshufd MSG, MSG, 0x0E
+ sha256rnds2 STATE0b, STATE1b, MSG ; MSG is implicit argument
+
+ paddd STATE0, [rsp + frame.ABEF_SAVE]
+ paddd STATE1, [rsp + frame.CDGH_SAVE]
+ paddd STATE0b, [rsp + frame.ABEF_SAVEb]
+ paddd STATE1b, [rsp + frame.CDGH_SAVEb]
+
+ add INP, 64
+ add INPb, 64
+ cmp INP, NUM_BLKS
+ jne .loop0
+
+ ;; update data pointers
+ mov [args + _data_ptr_sha256 + 0*PTR_SZ], INP
+ mov [args + _data_ptr_sha256 + 1*PTR_SZ], INPb
+
+ ; Reorder for writeback
+ pshufd STATE0, STATE0, 0x1B ; FEBA
+ pshufd STATE1, STATE1, 0xB1 ; DCHG
+ movdqa MSGTMP4, STATE0
+ pshufd STATE0b, STATE0b, 0x1B ; FEBA
+ pshufd STATE1b, STATE1b, 0xB1 ; DCHG
+ movdqa MSGTMP, STATE0b
+ pblendw STATE0, STATE1, 0xF0 ; DCBA
+ pblendw STATE0b, STATE1b, 0xF0 ; DCBA
+ palignr STATE1, MSGTMP4, 8 ; HGFE
+ palignr STATE1b, MSGTMP, 8 ; HGFE
+
+ ;; update digests
+ movdqu [args + 0*SHA256NI_DIGEST_ROW_SIZE + 0*16], STATE0
+ movdqu [args + 0*SHA256NI_DIGEST_ROW_SIZE + 1*16], STATE1
+ movdqu [args + 1*SHA256NI_DIGEST_ROW_SIZE + 0*16], STATE0b
+ movdqu [args + 1*SHA256NI_DIGEST_ROW_SIZE + 1*16], STATE1b
+
+ DBGPRINTL "jobA digest out:"
+ DBGPRINT_XMM STATE0
+ DBGPRINT_XMM STATE1
+ DBGPRINTL "jobB digest out:"
+ DBGPRINT_XMM STATE0b
+ DBGPRINT_XMM STATE1b
+
+done_hash:
+ DBGPRINTL "exit sha256-ni-x2"
+
+ ;; Clear stack frame (4*16 bytes)
+%ifdef SAFE_DATA
+ pxor xmm0, xmm0
+%assign i 0
+%rep 4
+ movdqa [rsp + i*16], xmm0
+%assign i (i+1)
+%endrep
+%endif
+
+ add rsp, frame_size
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/sse/sha256_one_block_sse.asm b/src/spdk/intel-ipsec-mb/sse/sha256_one_block_sse.asm
new file mode 100644
index 000000000..8869c14ef
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/sse/sha256_one_block_sse.asm
@@ -0,0 +1,512 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+; This code schedules 1 block at a time, with 4 lanes per block
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+%include "include/os.asm"
+
+section .data
+default rel
+align 64
+K256:
+ dd 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
+ dd 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
+ dd 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
+ dd 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
+ dd 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
+ dd 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
+ dd 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
+ dd 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
+ dd 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
+ dd 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
+ dd 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
+ dd 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
+ dd 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
+ dd 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
+ dd 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
+ dd 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
+
+PSHUFFLE_BYTE_FLIP_MASK: ;ddq 0x0c0d0e0f08090a0b0405060700010203
+ dq 0x0405060700010203, 0x0c0d0e0f08090a0b
+
+; shuffle xBxA -> 00BA
+_SHUF_00BA: ;ddq 0xFFFFFFFFFFFFFFFF0b0a090803020100
+ dq 0x0b0a090803020100, 0xFFFFFFFFFFFFFFFF
+; shuffle xDxC -> DC00
+_SHUF_DC00: ;ddq 0x0b0a090803020100FFFFFFFFFFFFFFFF
+ dq 0xFFFFFFFFFFFFFFFF, 0x0b0a090803020100
+
+section .text
+
+
+%define MOVDQ movdqu ;; assume buffers not aligned
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; Define Macros
+
+; COPY_XMM_AND_BSWAP xmm, [mem], byte_flip_mask
+; Load xmm with mem and byte swap each dword
+%macro COPY_XMM_AND_BSWAP 3
+ MOVDQ %1, %2
+ pshufb %1, %3
+%endmacro
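+
+; Per dword, the macro above is equivalent to loading the message word as a
+; big-endian value (an illustrative C sketch, not part of this file):
+;
+;     uint32_t w = __builtin_bswap32(((const uint32_t *)input)[i]);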
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+%define X0 xmm4
+%define X1 xmm5
+%define X2 xmm6
+%define X3 xmm7
+
+%define XTMP0 xmm0
+%define XTMP1 xmm1
+%define XTMP2 xmm2
+%define XTMP3 xmm3
+%define XTMP4 xmm8
+%define XFER xmm9
+
+%define SHUF_00BA xmm10 ; shuffle xBxA -> 00BA
+%define SHUF_DC00 xmm11 ; shuffle xDxC -> DC00
+%define BYTE_FLIP_MASK xmm12
+
+%ifdef LINUX
+%define CTX rsi ; 2nd arg
+%define INP rdi ; 1st arg
+
+%define SRND rdi ; clobbers INP
+%define c ecx
+%define d r8d
+%define e edx
+%else
+%define CTX rdx ; 2nd arg
+%define INP rcx ; 1st arg
+
+%define SRND rcx ; clobbers INP
+%define c edi
+%define d esi
+%define e r8d
+
+%endif
+%define TBL rbp
+%define a eax
+%define b ebx
+
+%define f r9d
+%define g r10d
+%define h r11d
+
+%define y0 r13d
+%define y1 r14d
+%define y2 r15d
+
+
+struc STACK
+%ifndef LINUX
+_XMM_SAVE: reso 7
+%endif
+_XFER: reso 1
+endstruc
+
+%ifndef FUNC
+%define FUNC sha256_block_sse
+%endif
+
+; rotate_Xs
+; Rotate values of symbols X0...X3
+%macro rotate_Xs 0
+%xdefine X_ X0
+%xdefine X0 X1
+%xdefine X1 X2
+%xdefine X2 X3
+%xdefine X3 X_
+%endm
+
+; ROTATE_ARGS
+; Rotate values of symbols a...h
+%macro ROTATE_ARGS 0
+%xdefine TMP_ h
+%xdefine h g
+%xdefine g f
+%xdefine f e
+%xdefine e d
+%xdefine d c
+%xdefine c b
+%xdefine b a
+%xdefine a TMP_
+%endm
+
+%macro FOUR_ROUNDS_AND_SCHED 0
+ ;; compute s0 four at a time and s1 two at a time
+ ;; compute W[-16] + W[-7] 4 at a time
+ movdqa XTMP0, X3
+ mov y0, e ; y0 = e
+ ror y0, (25-11) ; y0 = e >> (25-11)
+ mov y1, a ; y1 = a
+ palignr XTMP0, X2, 4 ; XTMP0 = W[-7]
+ ror y1, (22-13) ; y1 = a >> (22-13)
+ xor y0, e ; y0 = e ^ (e >> (25-11))
+ mov y2, f ; y2 = f
+ ror y0, (11-6) ; y0 = (e >> (11-6)) ^ (e >> (25-6))
+ movdqa XTMP1, X1
+	xor	y1, a		; y1 = a ^ (a >> (22-13))
+ xor y2, g ; y2 = f^g
+ paddd XTMP0, X0 ; XTMP0 = W[-7] + W[-16]
+ xor y0, e ; y0 = e ^ (e >> (11-6)) ^ (e >> (25-6))
+ and y2, e ; y2 = (f^g)&e
+ ror y1, (13-2) ; y1 = (a >> (13-2)) ^ (a >> (22-2))
+ ;; compute s0
+ palignr XTMP1, X0, 4 ; XTMP1 = W[-15]
+ xor y1, a ; y1 = a ^ (a >> (13-2)) ^ (a >> (22-2))
+	ror	y0, 6		; y0 = S1 = (e>>6) ^ (e>>11) ^ (e>>25)
+ xor y2, g ; y2 = CH = ((f^g)&e)^g
+ movdqa XTMP2, XTMP1 ; XTMP2 = W[-15]
+ ror y1, 2 ; y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22)
+ add y2, y0 ; y2 = S1 + CH
+ add y2, [rsp + _XFER + 0*4] ; y2 = k + w + S1 + CH
+ movdqa XTMP3, XTMP1 ; XTMP3 = W[-15]
+ mov y0, a ; y0 = a
+ add h, y2 ; h = h + S1 + CH + k + w
+ mov y2, a ; y2 = a
+ pslld XTMP1, (32-7)
+ or y0, c ; y0 = a|c
+ add d, h ; d = d + h + S1 + CH + k + w
+ and y2, c ; y2 = a&c
+ psrld XTMP2, 7
+ and y0, b ; y0 = (a|c)&b
+ add h, y1 ; h = h + S1 + CH + k + w + S0
+ por XTMP1, XTMP2 ; XTMP1 = W[-15] ror 7
+	or	y0, y2		; y0 = MAJ = ((a|c)&b)|(a&c)
+ add h, y0 ; h = h + S1 + CH + k + w + S0 + MAJ
+
+ROTATE_ARGS
+ movdqa XTMP2, XTMP3 ; XTMP2 = W[-15]
+ mov y0, e ; y0 = e
+ mov y1, a ; y1 = a
+ movdqa XTMP4, XTMP3 ; XTMP4 = W[-15]
+ ror y0, (25-11) ; y0 = e >> (25-11)
+ xor y0, e ; y0 = e ^ (e >> (25-11))
+ mov y2, f ; y2 = f
+ ror y1, (22-13) ; y1 = a >> (22-13)
+ pslld XTMP3, (32-18)
+	xor	y1, a		; y1 = a ^ (a >> (22-13))
+ ror y0, (11-6) ; y0 = (e >> (11-6)) ^ (e >> (25-6))
+ xor y2, g ; y2 = f^g
+ psrld XTMP2, 18
+ ror y1, (13-2) ; y1 = (a >> (13-2)) ^ (a >> (22-2))
+ xor y0, e ; y0 = e ^ (e >> (11-6)) ^ (e >> (25-6))
+ and y2, e ; y2 = (f^g)&e
+	ror	y0, 6		; y0 = S1 = (e>>6) ^ (e>>11) ^ (e>>25)
+ pxor XTMP1, XTMP3
+ xor y1, a ; y1 = a ^ (a >> (13-2)) ^ (a >> (22-2))
+ xor y2, g ; y2 = CH = ((f^g)&e)^g
+ psrld XTMP4, 3 ; XTMP4 = W[-15] >> 3
+ add y2, y0 ; y2 = S1 + CH
+ add y2, [rsp + _XFER + 1*4] ; y2 = k + w + S1 + CH
+ ror y1, 2 ; y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22)
+ pxor XTMP1, XTMP2 ; XTMP1 = W[-15] ror 7 ^ W[-15] ror 18
+ mov y0, a ; y0 = a
+ add h, y2 ; h = h + S1 + CH + k + w
+ mov y2, a ; y2 = a
+ pxor XTMP1, XTMP4 ; XTMP1 = s0
+ or y0, c ; y0 = a|c
+ add d, h ; d = d + h + S1 + CH + k + w
+ and y2, c ; y2 = a&c
+ ;; compute low s1
+ pshufd XTMP2, X3, 11111010b ; XTMP2 = W[-2] {BBAA}
+ and y0, b ; y0 = (a|c)&b
+ add h, y1 ; h = h + S1 + CH + k + w + S0
+ paddd XTMP0, XTMP1 ; XTMP0 = W[-16] + W[-7] + s0
+	or	y0, y2		; y0 = MAJ = ((a|c)&b)|(a&c)
+ add h, y0 ; h = h + S1 + CH + k + w + S0 + MAJ
+
+ROTATE_ARGS
+ movdqa XTMP3, XTMP2 ; XTMP3 = W[-2] {BBAA}
+ mov y0, e ; y0 = e
+ mov y1, a ; y1 = a
+ ror y0, (25-11) ; y0 = e >> (25-11)
+ movdqa XTMP4, XTMP2 ; XTMP4 = W[-2] {BBAA}
+ xor y0, e ; y0 = e ^ (e >> (25-11))
+ ror y1, (22-13) ; y1 = a >> (22-13)
+ mov y2, f ; y2 = f
+	xor	y1, a		; y1 = a ^ (a >> (22-13))
+ ror y0, (11-6) ; y0 = (e >> (11-6)) ^ (e >> (25-6))
+ psrlq XTMP2, 17 ; XTMP2 = W[-2] ror 17 {xBxA}
+ xor y2, g ; y2 = f^g
+ psrlq XTMP3, 19 ; XTMP3 = W[-2] ror 19 {xBxA}
+ xor y0, e ; y0 = e ^ (e >> (11-6)) ^ (e >> (25-6))
+ and y2, e ; y2 = (f^g)&e
+ psrld XTMP4, 10 ; XTMP4 = W[-2] >> 10 {BBAA}
+ ror y1, (13-2) ; y1 = (a >> (13-2)) ^ (a >> (22-2))
+ xor y1, a ; y1 = a ^ (a >> (13-2)) ^ (a >> (22-2))
+ xor y2, g ; y2 = CH = ((f^g)&e)^g
+	ror	y0, 6		; y0 = S1 = (e>>6) ^ (e>>11) ^ (e>>25)
+ pxor XTMP2, XTMP3
+ add y2, y0 ; y2 = S1 + CH
+ ror y1, 2 ; y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22)
+ add y2, [rsp + _XFER + 2*4] ; y2 = k + w + S1 + CH
+ pxor XTMP4, XTMP2 ; XTMP4 = s1 {xBxA}
+ mov y0, a ; y0 = a
+ add h, y2 ; h = h + S1 + CH + k + w
+ mov y2, a ; y2 = a
+ pshufb XTMP4, SHUF_00BA ; XTMP4 = s1 {00BA}
+ or y0, c ; y0 = a|c
+ add d, h ; d = d + h + S1 + CH + k + w
+ and y2, c ; y2 = a&c
+ paddd XTMP0, XTMP4 ; XTMP0 = {..., ..., W[1], W[0]}
+ and y0, b ; y0 = (a|c)&b
+ add h, y1 ; h = h + S1 + CH + k + w + S0
+ ;; compute high s1
+ pshufd XTMP2, XTMP0, 01010000b ; XTMP2 = W[-2] {DDCC}
+	or	y0, y2		; y0 = MAJ = ((a|c)&b)|(a&c)
+ add h, y0 ; h = h + S1 + CH + k + w + S0 + MAJ
+
+ROTATE_ARGS
+ movdqa XTMP3, XTMP2 ; XTMP3 = W[-2] {DDCC}
+ mov y0, e ; y0 = e
+ ror y0, (25-11) ; y0 = e >> (25-11)
+ mov y1, a ; y1 = a
+ movdqa X0, XTMP2 ; X0 = W[-2] {DDCC}
+ ror y1, (22-13) ; y1 = a >> (22-13)
+ xor y0, e ; y0 = e ^ (e >> (25-11))
+ mov y2, f ; y2 = f
+ ror y0, (11-6) ; y0 = (e >> (11-6)) ^ (e >> (25-6))
+ psrlq XTMP2, 17 ; XTMP2 = W[-2] ror 17 {xDxC}
+	xor	y1, a		; y1 = a ^ (a >> (22-13))
+ xor y2, g ; y2 = f^g
+ psrlq XTMP3, 19 ; XTMP3 = W[-2] ror 19 {xDxC}
+ xor y0, e ; y0 = e ^ (e >> (11-6)) ^ (e >> (25-6))
+ and y2, e ; y2 = (f^g)&e
+ ror y1, (13-2) ; y1 = (a >> (13-2)) ^ (a >> (22-2))
+ psrld X0, 10 ; X0 = W[-2] >> 10 {DDCC}
+ xor y1, a ; y1 = a ^ (a >> (13-2)) ^ (a >> (22-2))
+	ror	y0, 6		; y0 = S1 = (e>>6) ^ (e>>11) ^ (e>>25)
+ xor y2, g ; y2 = CH = ((f^g)&e)^g
+ pxor XTMP2, XTMP3
+ ror y1, 2 ; y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22)
+ add y2, y0 ; y2 = S1 + CH
+ add y2, [rsp + _XFER + 3*4] ; y2 = k + w + S1 + CH
+ pxor X0, XTMP2 ; X0 = s1 {xDxC}
+ mov y0, a ; y0 = a
+ add h, y2 ; h = h + S1 + CH + k + w
+ mov y2, a ; y2 = a
+ pshufb X0, SHUF_DC00 ; X0 = s1 {DC00}
+ or y0, c ; y0 = a|c
+ add d, h ; d = d + h + S1 + CH + k + w
+ and y2, c ; y2 = a&c
+ paddd X0, XTMP0 ; X0 = {W[3], W[2], W[1], W[0]}
+ and y0, b ; y0 = (a|c)&b
+ add h, y1 ; h = h + S1 + CH + k + w + S0
+	or	y0, y2		; y0 = MAJ = ((a|c)&b)|(a&c)
+ add h, y0 ; h = h + S1 + CH + k + w + S0 + MAJ
+
+ROTATE_ARGS
+rotate_Xs
+%endm
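+
+;; For reference, each new message word produced by the macro above follows
+;; the scalar SHA-256 schedule (an illustrative C sketch, not part of this
+;; file):
+;;
+;;     #define ROR32(x, n) (((x) >> (n)) | ((x) << (32 - (n))))
+;;     uint32_t s0 = ROR32(W[i-15],  7) ^ ROR32(W[i-15], 18) ^ (W[i-15] >>  3);
+;;     uint32_t s1 = ROR32(W[i-2],  17) ^ ROR32(W[i-2],  19) ^ (W[i-2]  >> 10);
+;;     W[i] = W[i-16] + s0 + W[i-7] + s1;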
+
+;; input is [rsp + _XFER + %1 * 4]
+%macro DO_ROUND 1
+ mov y0, e ; y0 = e
+ ror y0, (25-11) ; y0 = e >> (25-11)
+ mov y1, a ; y1 = a
+ xor y0, e ; y0 = e ^ (e >> (25-11))
+ ror y1, (22-13) ; y1 = a >> (22-13)
+ mov y2, f ; y2 = f
+	xor	y1, a		; y1 = a ^ (a >> (22-13))
+ ror y0, (11-6) ; y0 = (e >> (11-6)) ^ (e >> (25-6))
+ xor y2, g ; y2 = f^g
+ xor y0, e ; y0 = e ^ (e >> (11-6)) ^ (e >> (25-6))
+ ror y1, (13-2) ; y1 = (a >> (13-2)) ^ (a >> (22-2))
+ and y2, e ; y2 = (f^g)&e
+ xor y1, a ; y1 = a ^ (a >> (13-2)) ^ (a >> (22-2))
+	ror	y0, 6		; y0 = S1 = (e>>6) ^ (e>>11) ^ (e>>25)
+ xor y2, g ; y2 = CH = ((f^g)&e)^g
+ add y2, y0 ; y2 = S1 + CH
+ ror y1, 2 ; y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22)
+ add y2, [rsp + _XFER + %1 * 4] ; y2 = k + w + S1 + CH
+ mov y0, a ; y0 = a
+ add h, y2 ; h = h + S1 + CH + k + w
+ mov y2, a ; y2 = a
+ or y0, c ; y0 = a|c
+ add d, h ; d = d + h + S1 + CH + k + w
+ and y2, c ; y2 = a&c
+ and y0, b ; y0 = (a|c)&b
+ add h, y1 ; h = h + S1 + CH + k + w + S0
+	or	y0, y2		; y0 = MAJ = ((a|c)&b)|(a&c)
+ add h, y0 ; h = h + S1 + CH + k + w + S0 + MAJ
+ ROTATE_ARGS
+%endm
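+
+;; The macro above is one standard SHA-256 compression round.  In scalar C
+;; (an illustrative sketch; ROR32 as in the schedule note above, K256[i] and
+;; W[i] are the round constant and message word pre-added into _XFER):
+;;
+;;     uint32_t S1  = ROR32(e, 6) ^ ROR32(e, 11) ^ ROR32(e, 25);
+;;     uint32_t ch  = ((f ^ g) & e) ^ g;
+;;     uint32_t t1  = h + S1 + ch + K256[i] + W[i];
+;;     uint32_t S0  = ROR32(a, 2) ^ ROR32(a, 13) ^ ROR32(a, 22);
+;;     uint32_t maj = ((a | c) & b) | (a & c);
+;;     h = g; g = f; f = e; e = d + t1;
+;;     d = c; c = b; b = a; a = t1 + S0 + maj;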
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; void FUNC(void *input_data, UINT32 digest[8])
+;; arg 1 : pointer to input data
+;; arg 2 : pointer to digest
+section .text
+MKGLOBAL(FUNC,function,internal)
+align 32
+FUNC:
+ push rbx
+%ifndef LINUX
+ push rsi
+ push rdi
+%endif
+ push rbp
+ push r13
+ push r14
+ push r15
+
+ sub rsp,STACK_size
+%ifndef LINUX
+ movdqa [rsp + _XMM_SAVE + 0*16],xmm6
+ movdqa [rsp + _XMM_SAVE + 1*16],xmm7
+ movdqa [rsp + _XMM_SAVE + 2*16],xmm8
+ movdqa [rsp + _XMM_SAVE + 3*16],xmm9
+ movdqa [rsp + _XMM_SAVE + 4*16],xmm10
+ movdqa [rsp + _XMM_SAVE + 5*16],xmm11
+ movdqa [rsp + _XMM_SAVE + 6*16],xmm12
+%endif
+
+ ;; load initial digest
+ mov a, [4*0 + CTX]
+ mov b, [4*1 + CTX]
+ mov c, [4*2 + CTX]
+ mov d, [4*3 + CTX]
+ mov e, [4*4 + CTX]
+ mov f, [4*5 + CTX]
+ mov g, [4*6 + CTX]
+ mov h, [4*7 + CTX]
+
+ movdqa BYTE_FLIP_MASK, [rel PSHUFFLE_BYTE_FLIP_MASK]
+ movdqa SHUF_00BA, [rel _SHUF_00BA]
+ movdqa SHUF_DC00, [rel _SHUF_DC00]
+
+ lea TBL,[rel K256]
+
+ ;; byte swap first 16 dwords
+ COPY_XMM_AND_BSWAP X0, [INP + 0*16], BYTE_FLIP_MASK
+ COPY_XMM_AND_BSWAP X1, [INP + 1*16], BYTE_FLIP_MASK
+ COPY_XMM_AND_BSWAP X2, [INP + 2*16], BYTE_FLIP_MASK
+ COPY_XMM_AND_BSWAP X3, [INP + 3*16], BYTE_FLIP_MASK
+
+	;; schedule 48 input dwords, by doing 3 iterations of 16 rounds each
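+	;; Overall round structure (illustrative; schedule()/round() stand for
+	;; the macros defined above):
+	;;
+	;;     for (i = 0; i < 48; i++) { W[i + 16] = schedule(W, i + 16); round(i); }
+	;;     for (     ; i < 64; i++) { round(i); }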
+ mov SRND, 3
+align 16
+loop1:
+ movdqa XFER, [TBL + 0*16]
+ paddd XFER, X0
+ movdqa [rsp + _XFER], XFER
+ FOUR_ROUNDS_AND_SCHED
+
+ movdqa XFER, [TBL + 1*16]
+ paddd XFER, X0
+ movdqa [rsp + _XFER], XFER
+ FOUR_ROUNDS_AND_SCHED
+
+ movdqa XFER, [TBL + 2*16]
+ paddd XFER, X0
+ movdqa [rsp + _XFER], XFER
+ FOUR_ROUNDS_AND_SCHED
+
+ movdqa XFER, [TBL + 3*16]
+ paddd XFER, X0
+ movdqa [rsp + _XFER], XFER
+ add TBL, 4*16
+ FOUR_ROUNDS_AND_SCHED
+
+ sub SRND, 1
+ jne loop1
+
+ mov SRND, 2
+loop2:
+ paddd X0, [TBL + 0*16]
+ movdqa [rsp + _XFER], X0
+ DO_ROUND 0
+ DO_ROUND 1
+ DO_ROUND 2
+ DO_ROUND 3
+ paddd X1, [TBL + 1*16]
+ movdqa [rsp + _XFER], X1
+ add TBL, 2*16
+ DO_ROUND 0
+ DO_ROUND 1
+ DO_ROUND 2
+ DO_ROUND 3
+
+ movdqa X0, X2
+ movdqa X1, X3
+
+ sub SRND, 1
+ jne loop2
+
+ add [4*0 + CTX], a
+ add [4*1 + CTX], b
+ add [4*2 + CTX], c
+ add [4*3 + CTX], d
+ add [4*4 + CTX], e
+ add [4*5 + CTX], f
+ add [4*6 + CTX], g
+ add [4*7 + CTX], h
+
+done_hash:
+%ifndef LINUX
+ movdqa xmm6,[rsp + _XMM_SAVE + 0*16]
+ movdqa xmm7,[rsp + _XMM_SAVE + 1*16]
+ movdqa xmm8,[rsp + _XMM_SAVE + 2*16]
+ movdqa xmm9,[rsp + _XMM_SAVE + 3*16]
+ movdqa xmm10,[rsp + _XMM_SAVE + 4*16]
+ movdqa xmm11,[rsp + _XMM_SAVE + 5*16]
+ movdqa xmm12,[rsp + _XMM_SAVE + 6*16]
+%ifdef SAFE_DATA
+ ;; Clear potential sensitive data stored in stack
+ pxor xmm0, xmm0
+ movdqa [rsp + _XMM_SAVE + 0 * 16], xmm0
+ movdqa [rsp + _XMM_SAVE + 1 * 16], xmm0
+ movdqa [rsp + _XMM_SAVE + 2 * 16], xmm0
+ movdqa [rsp + _XMM_SAVE + 3 * 16], xmm0
+ movdqa [rsp + _XMM_SAVE + 4 * 16], xmm0
+ movdqa [rsp + _XMM_SAVE + 5 * 16], xmm0
+ movdqa [rsp + _XMM_SAVE + 6 * 16], xmm0
+%endif
+%endif ;; !LINUX
+
+ add rsp, STACK_size
+
+ pop r15
+ pop r14
+ pop r13
+ pop rbp
+%ifndef LINUX
+ pop rdi
+ pop rsi
+%endif
+ pop rbx
+
+ ret
+
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/sse/sha384_one_block_sse.asm b/src/spdk/intel-ipsec-mb/sse/sha384_one_block_sse.asm
new file mode 100644
index 000000000..c95f89d8f
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/sse/sha384_one_block_sse.asm
@@ -0,0 +1,33 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+; This code schedules 1 block at a time, with 4 lanes per block
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+%define FUNC sha384_block_sse
+
+%include "sse/sha512_one_block_sse.asm"
diff --git a/src/spdk/intel-ipsec-mb/sse/sha512_one_block_sse.asm b/src/spdk/intel-ipsec-mb/sse/sha512_one_block_sse.asm
new file mode 100644
index 000000000..534cfbfd8
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/sse/sha512_one_block_sse.asm
@@ -0,0 +1,480 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+; This code schedules 1 block at a time, with 4 lanes per block
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+%include "include/os.asm"
+
+%define MOVDQ movdqu ;; assume buffers not aligned
+
+%ifndef FUNC
+%define FUNC sha512_block_sse
+%endif
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; Define Macros
+
+; COPY_XMM_AND_BSWAP xmm, [mem], byte_flip_mask
+; Load xmm with mem and byte swap each qword
+%macro COPY_XMM_AND_BSWAP 3
+ MOVDQ %1, %2
+ pshufb %1, %3
+%endmacro
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+%define X0 xmm4
+%define X1 xmm5
+%define X2 xmm6
+%define X3 xmm7
+%define X4 xmm8
+%define X5 xmm9
+%define X6 xmm10
+%define X7 xmm11
+
+%define XTMP0 xmm0
+%define XTMP1 xmm1
+%define XTMP2 xmm2
+%define XTMP3 xmm3
+%define XFER xmm13
+
+%define BYTE_FLIP_MASK xmm12
+
+%ifdef LINUX
+%define CTX rsi ; 2nd arg
+%define INP rdi ; 1st arg
+
+%define SRND rdi ; clobbers INP
+%define c rcx
+%define d r8
+%define e rdx
+%else
+%define CTX rdx ; 2nd arg
+%define INP rcx ; 1st arg
+
+%define SRND rcx ; clobbers INP
+%define c rdi
+%define d rsi
+%define e r8
+
+%endif
+%define TBL rbp
+%define a rax
+%define b rbx
+
+%define f r9
+%define g r10
+%define h r11
+
+%define y0 r13
+%define y1 r14
+%define y2 r15
+
+
+struc STACK
+%ifndef LINUX
+_XMM_SAVE: reso 8
+%endif
+_XFER: reso 1
+endstruc
+
+; rotate_Xs
+; Rotate values of symbols X0...X7
+%macro rotate_Xs 0
+%xdefine X_ X0
+%xdefine X0 X1
+%xdefine X1 X2
+%xdefine X2 X3
+%xdefine X3 X4
+%xdefine X4 X5
+%xdefine X5 X6
+%xdefine X6 X7
+%xdefine X7 X_
+%endm
+
+; ROTATE_ARGS
+; Rotate values of symbols a...h
+%macro ROTATE_ARGS 0
+%xdefine TMP_ h
+%xdefine h g
+%xdefine g f
+%xdefine f e
+%xdefine e d
+%xdefine d c
+%xdefine c b
+%xdefine b a
+%xdefine a TMP_
+%endm
+
+%macro TWO_ROUNDS_AND_SCHED 0
+
+	;; compute s0 and s1 two at a time
+	;; compute W[-16] + W[-7] 2 at a time
+ movdqa XTMP0, X5
+ mov y0, e ; y0 = e
+ mov y1, a ; y1 = a
+ ror y0, (41-18) ; y0 = e >> (41-18)
+ palignr XTMP0, X4, 8 ; XTMP0 = W[-7]
+ xor y0, e ; y0 = e ^ (e >> (41-18))
+ mov y2, f ; y2 = f
+ ror y1, (39-34) ; y1 = a >> (39-34)
+	xor	y1, a		; y1 = a ^ (a >> (39-34))
+ movdqa XTMP1, X1
+ ror y0, (18-14) ; y0 = (e >> (18-14)) ^ (e >> (41-14))
+ xor y2, g ; y2 = f^g
+ paddq XTMP0, X0 ; XTMP0 = W[-7] + W[-16]
+ ror y1, (34-28) ; y1 = (a >> (34-28)) ^ (a >> (39-28))
+ xor y0, e ; y0 = e ^ (e >> (18-14)) ^ (e >> (41-14))
+ and y2, e ; y2 = (f^g)&e
+ ;; compute s0
+ palignr XTMP1, X0, 8 ; XTMP1 = W[-15]
+ xor y1, a ; y1 = a ^ (a >> (34-28)) ^ (a >> (39-28))
+ xor y2, g ; y2 = CH = ((f^g)&e)^g
+ movdqa XTMP2, XTMP1 ; XTMP2 = W[-15]
+	ror	y0, 14		; y0 = S1 = (e>>14) ^ (e>>18) ^ (e>>41)
+ add y2, y0 ; y2 = S1 + CH
+ add y2, [rsp + _XFER + 0*8] ; y2 = k + w + S1 + CH
+ ror y1, 28 ; y1 = S0 = (a>>28) ^ (a>>34) ^ (a>>39)
+ movdqa XTMP3, XTMP1 ; XTMP3 = W[-15]
+ mov y0, a ; y0 = a
+ add h, y2 ; h = h + S1 + CH + k + w
+ psllq XTMP1, (64-1)
+ mov y2, a ; y2 = a
+ or y0, c ; y0 = a|c
+ psrlq XTMP2, 1
+ add d, h ; d = d + t1
+ and y2, c ; y2 = a&c
+ por XTMP1, XTMP2 ; XTMP1 = W[-15] ror 1
+ and y0, b ; y0 = (a|c)&b
+ add h, y1 ; h = t1 + S0
+ movdqa XTMP2, XTMP3 ; XTMP2 = W[-15]
+ psrlq XTMP2, 8
+	or	y0, y2		; y0 = MAJ = ((a|c)&b)|(a&c)
+ add h, y0 ; h = t1 + S0 + MAJ
+ movdqa X0, XTMP3 ; X0 = W[-15]
+ psllq XTMP3, (64-8)
+
+
+ROTATE_ARGS
+ pxor XTMP1, XTMP3
+ psrlq X0, 7 ; X0 = W[-15] >> 7
+ mov y0, e ; y0 = e
+ mov y1, a ; y1 = a
+ pxor XTMP1, XTMP2 ; XTMP1 = W[-15] ror 1 ^ W[-15] ror 8
+ ror y0, (41-18) ; y0 = e >> (41-18)
+ xor y0, e ; y0 = e ^ (e >> (41-18))
+ mov y2, f ; y2 = f
+ pxor XTMP1, X0 ; XTMP1 = s0
+ ror y1, (39-34) ; y1 = a >> (39-34)
+	xor	y1, a		; y1 = a ^ (a >> (39-34))
+ ;; compute s1
+ movdqa XTMP2, X7 ; XTMP2 = W[-2]
+ ror y0, (18-14) ; y0 = (e >> (18-14)) ^ (e >> (41-14))
+ xor y2, g ; y2 = f^g
+ paddq XTMP0, XTMP1 ; XTMP0 = W[-16] + W[-7] + s0
+ ror y1, (34-28) ; y1 = (a >> (34-28)) ^ (a >> (39-28))
+ xor y0, e ; y0 = e ^ (e >> (18-14)) ^ (e >> (41-14))
+ movdqa XTMP3, XTMP2 ; XTMP3 = W[-2]
+ movdqa X0, XTMP2 ; X0 = W[-2]
+ and y2, e ; y2 = (f^g)&e
+	ror	y0, 14		; y0 = S1 = (e>>14) ^ (e>>18) ^ (e>>41)
+ xor y1, a ; y1 = a ^ (a >> (34-28)) ^ (a >> (39-28))
+ psllq XTMP3, (64-19)
+ xor y2, g ; y2 = CH = ((f^g)&e)^g
+ add y2, y0 ; y2 = S1 + CH
+ add y2, [rsp + _XFER + 1*8] ; y2 = k + w + S1 + CH
+ psrlq X0, 19
+ ror y1, 28 ; y1 = S0 = (a>>28) ^ (a>>34) ^ (a>>39)
+ mov y0, a ; y0 = a
+ add h, y2 ; h = h + S1 + CH + k + w
+ por XTMP3, X0 ; XTMP3 = W[-2] ror 19
+ mov y2, a ; y2 = a
+ or y0, c ; y0 = a|c
+ movdqa X0, XTMP2 ; X0 = W[-2]
+ movdqa XTMP1, XTMP2 ; XTMP1 = W[-2]
+ add d, h ; d = d + t1
+ and y2, c ; y2 = a&c
+ psllq X0, (64-61)
+ and y0, b ; y0 = (a|c)&b
+ add h, y1 ; h = t1 + S0
+ psrlq XTMP1, 61
+	or	y0, y2		; y0 = MAJ = ((a|c)&b)|(a&c)
+ add h, y0 ; h = t1 + S0 + MAJ
+ por X0, XTMP1 ; X0 = W[-2] ror 61
+ psrlq XTMP2, 6 ; XTMP2 = W[-2] >> 6
+ pxor XTMP2, XTMP3
+ pxor X0, XTMP2 ; X0 = s1
+ paddq X0, XTMP0 ; X0 = {W[1], W[0]}
+
+ROTATE_ARGS
+rotate_Xs
+%endm
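+
+;; For reference, each new message qword produced by the macro above follows
+;; the scalar SHA-512 schedule (an illustrative C sketch, not part of this
+;; file):
+;;
+;;     #define ROR64(x, n) (((x) >> (n)) | ((x) << (64 - (n))))
+;;     uint64_t s0 = ROR64(W[i-15],  1) ^ ROR64(W[i-15],  8) ^ (W[i-15] >> 7);
+;;     uint64_t s1 = ROR64(W[i-2],  19) ^ ROR64(W[i-2],  61) ^ (W[i-2]  >> 6);
+;;     W[i] = W[i-16] + s0 + W[i-7] + s1;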
+
+;; input is [rsp + _XFER + %1 * 8]
+%macro DO_ROUND 1
+ mov y0, e ; y0 = e
+ ror y0, (41-18) ; y0 = e >> (41-18)
+ mov y1, a ; y1 = a
+ xor y0, e ; y0 = e ^ (e >> (41-18))
+ ror y1, (39-34) ; y1 = a >> (39-34)
+ mov y2, f ; y2 = f
+	xor	y1, a		; y1 = a ^ (a >> (39-34))
+ ror y0, (18-14) ; y0 = (e >> (18-14)) ^ (e >> (41-14))
+ xor y2, g ; y2 = f^g
+	xor	y0, e		; y0 = e ^ (e >> (18-14)) ^ (e >> (41-14))
+ ror y1, (34-28) ; y1 = (a >> (34-28)) ^ (a >> (39-28))
+ and y2, e ; y2 = (f^g)&e
+ xor y1, a ; y1 = a ^ (a >> (34-28)) ^ (a >> (39-28))
+	ror	y0, 14		; y0 = S1 = (e>>14) ^ (e>>18) ^ (e>>41)
+ xor y2, g ; y2 = CH = ((f^g)&e)^g
+ add y2, y0 ; y2 = S1 + CH
+ ror y1, 28 ; y1 = S0 = (a>>28) ^ (a>>34) ^ (a>>39)
+ add y2, [rsp + _XFER + %1*8] ; y2 = k + w + S1 + CH
+ mov y0, a ; y0 = a
+ add h, y2 ; h = h + S1 + CH + k + w
+ mov y2, a ; y2 = a
+ or y0, c ; y0 = a|c
+ add d, h ; d = d + t1
+ and y2, c ; y2 = a&c
+ and y0, b ; y0 = (a|c)&b
+ add h, y1 ; h = t1 + S0
+	or	y0, y2		; y0 = MAJ = ((a|c)&b)|(a&c)
+ add h, y0 ; h = t1 + S0 + MAJ
+ ROTATE_ARGS
+%endm
+
+section .data
+default rel
+align 64
+K512:
+ dq 0x428a2f98d728ae22,0x7137449123ef65cd
+ dq 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
+ dq 0x3956c25bf348b538,0x59f111f1b605d019
+ dq 0x923f82a4af194f9b,0xab1c5ed5da6d8118
+ dq 0xd807aa98a3030242,0x12835b0145706fbe
+ dq 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
+ dq 0x72be5d74f27b896f,0x80deb1fe3b1696b1
+ dq 0x9bdc06a725c71235,0xc19bf174cf692694
+ dq 0xe49b69c19ef14ad2,0xefbe4786384f25e3
+ dq 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
+ dq 0x2de92c6f592b0275,0x4a7484aa6ea6e483
+ dq 0x5cb0a9dcbd41fbd4,0x76f988da831153b5
+ dq 0x983e5152ee66dfab,0xa831c66d2db43210
+ dq 0xb00327c898fb213f,0xbf597fc7beef0ee4
+ dq 0xc6e00bf33da88fc2,0xd5a79147930aa725
+ dq 0x06ca6351e003826f,0x142929670a0e6e70
+ dq 0x27b70a8546d22ffc,0x2e1b21385c26c926
+ dq 0x4d2c6dfc5ac42aed,0x53380d139d95b3df
+ dq 0x650a73548baf63de,0x766a0abb3c77b2a8
+ dq 0x81c2c92e47edaee6,0x92722c851482353b
+ dq 0xa2bfe8a14cf10364,0xa81a664bbc423001
+ dq 0xc24b8b70d0f89791,0xc76c51a30654be30
+ dq 0xd192e819d6ef5218,0xd69906245565a910
+ dq 0xf40e35855771202a,0x106aa07032bbd1b8
+ dq 0x19a4c116b8d2d0c8,0x1e376c085141ab53
+ dq 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
+ dq 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
+ dq 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
+ dq 0x748f82ee5defb2fc,0x78a5636f43172f60
+ dq 0x84c87814a1f0ab72,0x8cc702081a6439ec
+ dq 0x90befffa23631e28,0xa4506cebde82bde9
+ dq 0xbef9a3f7b2c67915,0xc67178f2e372532b
+ dq 0xca273eceea26619c,0xd186b8c721c0c207
+ dq 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
+ dq 0x06f067aa72176fba,0x0a637dc5a2c898a6
+ dq 0x113f9804bef90dae,0x1b710b35131c471b
+ dq 0x28db77f523047d84,0x32caab7b40c72493
+ dq 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
+ dq 0x4cc5d4becb3e42b6,0x597f299cfc657e2a
+ dq 0x5fcb6fab3ad6faec,0x6c44198c4a475817
+
+align 16
+PSHUFFLE_BYTE_FLIP_MASK: ;ddq 0x08090a0b0c0d0e0f0001020304050607
+ dq 0x0001020304050607, 0x08090a0b0c0d0e0f
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; void FUNC(void *input_data, UINT64 digest[8])
+;; arg 1 : pointer to input data
+;; arg 2 : pointer to digest
+section .text
+MKGLOBAL(FUNC,function,internal)
+align 32
+FUNC:
+ push rbx
+%ifndef LINUX
+ push rsi
+ push rdi
+%endif
+ push rbp
+ push r13
+ push r14
+ push r15
+
+ sub rsp,STACK_size
+%ifndef LINUX
+ movdqa [rsp + _XMM_SAVE + 0*16],xmm6
+ movdqa [rsp + _XMM_SAVE + 1*16],xmm7
+ movdqa [rsp + _XMM_SAVE + 2*16],xmm8
+ movdqa [rsp + _XMM_SAVE + 3*16],xmm9
+ movdqa [rsp + _XMM_SAVE + 4*16],xmm10
+ movdqa [rsp + _XMM_SAVE + 5*16],xmm11
+ movdqa [rsp + _XMM_SAVE + 6*16],xmm12
+ movdqa [rsp + _XMM_SAVE + 7*16],xmm13
+%endif
+
+ ;; load initial digest
+ mov a, [8*0 + CTX]
+ mov b, [8*1 + CTX]
+ mov c, [8*2 + CTX]
+ mov d, [8*3 + CTX]
+ mov e, [8*4 + CTX]
+ mov f, [8*5 + CTX]
+ mov g, [8*6 + CTX]
+ mov h, [8*7 + CTX]
+
+ movdqa BYTE_FLIP_MASK, [rel PSHUFFLE_BYTE_FLIP_MASK]
+
+ lea TBL,[rel K512]
+
+ ;; byte swap first 16 qwords
+ COPY_XMM_AND_BSWAP X0, [INP + 0*16], BYTE_FLIP_MASK
+ COPY_XMM_AND_BSWAP X1, [INP + 1*16], BYTE_FLIP_MASK
+ COPY_XMM_AND_BSWAP X2, [INP + 2*16], BYTE_FLIP_MASK
+ COPY_XMM_AND_BSWAP X3, [INP + 3*16], BYTE_FLIP_MASK
+ COPY_XMM_AND_BSWAP X4, [INP + 4*16], BYTE_FLIP_MASK
+ COPY_XMM_AND_BSWAP X5, [INP + 5*16], BYTE_FLIP_MASK
+ COPY_XMM_AND_BSWAP X6, [INP + 6*16], BYTE_FLIP_MASK
+ COPY_XMM_AND_BSWAP X7, [INP + 7*16], BYTE_FLIP_MASK
+
+ ;; schedule 64 input qwords, by doing 4 iterations of 16 rounds
+ mov SRND, 4
+align 16
+loop1:
+
+%assign i 0
+%rep 7
+ movdqa XFER, X0
+ paddq XFER, [TBL + i*16]
+ movdqa [rsp + _XFER], XFER
+ TWO_ROUNDS_AND_SCHED
+%assign i (i+1)
+%endrep
+
+ movdqa XFER, X0
+ paddq XFER, [TBL + 7*16]
+ movdqa [rsp + _XFER], XFER
+ add TBL, 8*16
+ TWO_ROUNDS_AND_SCHED
+
+ sub SRND, 1
+ jne loop1
+
+ mov SRND, 2
+ jmp loop2a
+loop2:
+ movdqa X0, X4
+ movdqa X1, X5
+ movdqa X2, X6
+ movdqa X3, X7
+
+loop2a:
+ paddq X0, [TBL + 0*16]
+ movdqa [rsp + _XFER], X0
+ DO_ROUND 0
+ DO_ROUND 1
+
+ paddq X1, [TBL + 1*16]
+ movdqa [rsp + _XFER], X1
+ DO_ROUND 0
+ DO_ROUND 1
+
+ paddq X2, [TBL + 2*16]
+ movdqa [rsp + _XFER], X2
+ DO_ROUND 0
+ DO_ROUND 1
+
+ paddq X3, [TBL + 3*16]
+ movdqa [rsp + _XFER], X3
+ add TBL, 4*16
+ DO_ROUND 0
+ DO_ROUND 1
+
+ sub SRND, 1
+ jne loop2
+
+ add [8*0 + CTX], a
+ add [8*1 + CTX], b
+ add [8*2 + CTX], c
+ add [8*3 + CTX], d
+ add [8*4 + CTX], e
+ add [8*5 + CTX], f
+ add [8*6 + CTX], g
+ add [8*7 + CTX], h
+
+done_hash:
+%ifndef LINUX
+ movdqa xmm6,[rsp + _XMM_SAVE + 0*16]
+ movdqa xmm7,[rsp + _XMM_SAVE + 1*16]
+ movdqa xmm8,[rsp + _XMM_SAVE + 2*16]
+ movdqa xmm9,[rsp + _XMM_SAVE + 3*16]
+ movdqa xmm10,[rsp + _XMM_SAVE + 4*16]
+ movdqa xmm11,[rsp + _XMM_SAVE + 5*16]
+ movdqa xmm12,[rsp + _XMM_SAVE + 6*16]
+ movdqa xmm13,[rsp + _XMM_SAVE + 7*16]
+
+%ifdef SAFE_DATA
+ ;; Clear potential sensitive data stored in stack
+ pxor xmm0, xmm0
+ movdqa [rsp + _XMM_SAVE + 0 * 16], xmm0
+ movdqa [rsp + _XMM_SAVE + 1 * 16], xmm0
+ movdqa [rsp + _XMM_SAVE + 2 * 16], xmm0
+ movdqa [rsp + _XMM_SAVE + 3 * 16], xmm0
+ movdqa [rsp + _XMM_SAVE + 4 * 16], xmm0
+ movdqa [rsp + _XMM_SAVE + 5 * 16], xmm0
+ movdqa [rsp + _XMM_SAVE + 6 * 16], xmm0
+ movdqa [rsp + _XMM_SAVE + 7 * 16], xmm0
+%endif
+%endif ;; !LINUX
+
+ add rsp, STACK_size
+
+ pop r15
+ pop r14
+ pop r13
+ pop rbp
+%ifndef LINUX
+ pop rdi
+ pop rsi
+%endif
+ pop rbx
+
+ ret
+
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/sse/sha512_x2_sse.asm b/src/spdk/intel-ipsec-mb/sse/sha512_x2_sse.asm
new file mode 100644
index 000000000..77043f29f
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/sse/sha512_x2_sse.asm
@@ -0,0 +1,449 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+;; code to compute SHA512 by-2 using SSE
+;; outer calling routine takes care of save and restore of XMM registers
+;; Logic designed/laid out by JDG
+
+;; Function clobbers: rax, rcx, rdx, rbx, rsi, rdi, r9-r15; xmm0-15
+;; Stack must be aligned to 16 bytes before call
+;; Windows clobbers: rax rdx r8 r9 r10 r11
+;; Windows preserves: rbx rcx rsi rdi rbp r12 r13 r14 r15
+;;
+;; Linux clobbers: rax rsi r8 r9 r10 r11
+;; Linux preserves: rbx rcx rdx rdi rbp r12 r13 r14 r15
+;;
+;; clobbers xmm0-15
+
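+;; Data layout used below (illustrative): every SHA-512 working variable and
+;; message word is kept once per lane inside one xmm register (one qword per
+;; lane), so a single paddq/pxor/PRORQ advances both hash computations at
+;; once.  A C sketch of the idea (not part of this library):
+;;
+;;     typedef struct { uint64_t lane[2]; } u64x2;      /* one xmm register */
+;;     static inline u64x2 add2(u64x2 x, u64x2 y) {     /* models paddq */
+;;         return (u64x2){ { x.lane[0] + y.lane[0], x.lane[1] + y.lane[1] } };
+;;     }
+;;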
+%include "include/os.asm"
+%include "mb_mgr_datastruct.asm"
+
+;%define DO_DBGPRINT
+%include "include/dbgprint.asm"
+
+section .data
+default rel
+align 64
+MKGLOBAL(K512_2,data,internal)
+K512_2:
+ dq 0x428a2f98d728ae22, 0x428a2f98d728ae22
+ dq 0x7137449123ef65cd, 0x7137449123ef65cd
+ dq 0xb5c0fbcfec4d3b2f, 0xb5c0fbcfec4d3b2f
+ dq 0xe9b5dba58189dbbc, 0xe9b5dba58189dbbc
+ dq 0x3956c25bf348b538, 0x3956c25bf348b538
+ dq 0x59f111f1b605d019, 0x59f111f1b605d019
+ dq 0x923f82a4af194f9b, 0x923f82a4af194f9b
+ dq 0xab1c5ed5da6d8118, 0xab1c5ed5da6d8118
+ dq 0xd807aa98a3030242, 0xd807aa98a3030242
+ dq 0x12835b0145706fbe, 0x12835b0145706fbe
+ dq 0x243185be4ee4b28c, 0x243185be4ee4b28c
+ dq 0x550c7dc3d5ffb4e2, 0x550c7dc3d5ffb4e2
+ dq 0x72be5d74f27b896f, 0x72be5d74f27b896f
+ dq 0x80deb1fe3b1696b1, 0x80deb1fe3b1696b1
+ dq 0x9bdc06a725c71235, 0x9bdc06a725c71235
+ dq 0xc19bf174cf692694, 0xc19bf174cf692694
+ dq 0xe49b69c19ef14ad2, 0xe49b69c19ef14ad2
+ dq 0xefbe4786384f25e3, 0xefbe4786384f25e3
+ dq 0x0fc19dc68b8cd5b5, 0x0fc19dc68b8cd5b5
+ dq 0x240ca1cc77ac9c65, 0x240ca1cc77ac9c65
+ dq 0x2de92c6f592b0275, 0x2de92c6f592b0275
+ dq 0x4a7484aa6ea6e483, 0x4a7484aa6ea6e483
+ dq 0x5cb0a9dcbd41fbd4, 0x5cb0a9dcbd41fbd4
+ dq 0x76f988da831153b5, 0x76f988da831153b5
+ dq 0x983e5152ee66dfab, 0x983e5152ee66dfab
+ dq 0xa831c66d2db43210, 0xa831c66d2db43210
+ dq 0xb00327c898fb213f, 0xb00327c898fb213f
+ dq 0xbf597fc7beef0ee4, 0xbf597fc7beef0ee4
+ dq 0xc6e00bf33da88fc2, 0xc6e00bf33da88fc2
+ dq 0xd5a79147930aa725, 0xd5a79147930aa725
+ dq 0x06ca6351e003826f, 0x06ca6351e003826f
+ dq 0x142929670a0e6e70, 0x142929670a0e6e70
+ dq 0x27b70a8546d22ffc, 0x27b70a8546d22ffc
+ dq 0x2e1b21385c26c926, 0x2e1b21385c26c926
+ dq 0x4d2c6dfc5ac42aed, 0x4d2c6dfc5ac42aed
+ dq 0x53380d139d95b3df, 0x53380d139d95b3df
+ dq 0x650a73548baf63de, 0x650a73548baf63de
+ dq 0x766a0abb3c77b2a8, 0x766a0abb3c77b2a8
+ dq 0x81c2c92e47edaee6, 0x81c2c92e47edaee6
+ dq 0x92722c851482353b, 0x92722c851482353b
+ dq 0xa2bfe8a14cf10364, 0xa2bfe8a14cf10364
+ dq 0xa81a664bbc423001, 0xa81a664bbc423001
+ dq 0xc24b8b70d0f89791, 0xc24b8b70d0f89791
+ dq 0xc76c51a30654be30, 0xc76c51a30654be30
+ dq 0xd192e819d6ef5218, 0xd192e819d6ef5218
+ dq 0xd69906245565a910, 0xd69906245565a910
+ dq 0xf40e35855771202a, 0xf40e35855771202a
+ dq 0x106aa07032bbd1b8, 0x106aa07032bbd1b8
+ dq 0x19a4c116b8d2d0c8, 0x19a4c116b8d2d0c8
+ dq 0x1e376c085141ab53, 0x1e376c085141ab53
+ dq 0x2748774cdf8eeb99, 0x2748774cdf8eeb99
+ dq 0x34b0bcb5e19b48a8, 0x34b0bcb5e19b48a8
+ dq 0x391c0cb3c5c95a63, 0x391c0cb3c5c95a63
+ dq 0x4ed8aa4ae3418acb, 0x4ed8aa4ae3418acb
+ dq 0x5b9cca4f7763e373, 0x5b9cca4f7763e373
+ dq 0x682e6ff3d6b2b8a3, 0x682e6ff3d6b2b8a3
+ dq 0x748f82ee5defb2fc, 0x748f82ee5defb2fc
+ dq 0x78a5636f43172f60, 0x78a5636f43172f60
+ dq 0x84c87814a1f0ab72, 0x84c87814a1f0ab72
+ dq 0x8cc702081a6439ec, 0x8cc702081a6439ec
+ dq 0x90befffa23631e28, 0x90befffa23631e28
+ dq 0xa4506cebde82bde9, 0xa4506cebde82bde9
+ dq 0xbef9a3f7b2c67915, 0xbef9a3f7b2c67915
+ dq 0xc67178f2e372532b, 0xc67178f2e372532b
+ dq 0xca273eceea26619c, 0xca273eceea26619c
+ dq 0xd186b8c721c0c207, 0xd186b8c721c0c207
+ dq 0xeada7dd6cde0eb1e, 0xeada7dd6cde0eb1e
+ dq 0xf57d4f7fee6ed178, 0xf57d4f7fee6ed178
+ dq 0x06f067aa72176fba, 0x06f067aa72176fba
+ dq 0x0a637dc5a2c898a6, 0x0a637dc5a2c898a6
+ dq 0x113f9804bef90dae, 0x113f9804bef90dae
+ dq 0x1b710b35131c471b, 0x1b710b35131c471b
+ dq 0x28db77f523047d84, 0x28db77f523047d84
+ dq 0x32caab7b40c72493, 0x32caab7b40c72493
+ dq 0x3c9ebe0a15c9bebc, 0x3c9ebe0a15c9bebc
+ dq 0x431d67c49c100d4c, 0x431d67c49c100d4c
+ dq 0x4cc5d4becb3e42b6, 0x4cc5d4becb3e42b6
+ dq 0x597f299cfc657e2a, 0x597f299cfc657e2a
+ dq 0x5fcb6fab3ad6faec, 0x5fcb6fab3ad6faec
+ dq 0x6c44198c4a475817, 0x6c44198c4a475817
+
+PSHUFFLE_BYTE_FLIP_MASK: ;ddq 0x08090a0b0c0d0e0f0001020304050607
+ dq 0x0001020304050607, 0x08090a0b0c0d0e0f
+
+section .text
+
+%ifdef LINUX ; Linux definitions
+ %define arg1 rdi
+ %define arg2 rsi
+%else ; Windows definitions
+ %define arg1 rcx
+ %define arg2 rdx
+%endif
+
+; Common definitions
+%define STATE arg1
+%define INP_SIZE arg2
+
+%define IDX rax
+%define ROUND r8
+%define TBL r11
+
+%define inp0 r9
+%define inp1 r10
+
+%define a xmm0
+%define b xmm1
+%define c xmm2
+%define d xmm3
+%define e xmm4
+%define f xmm5
+%define g xmm6
+%define h xmm7
+
+%define a0 xmm8
+%define a1 xmm9
+%define a2 xmm10
+
+%define TT0 xmm14
+%define TT1 xmm13
+%define TT2 xmm12
+%define TT3 xmm11
+%define TT4 xmm10
+%define TT5 xmm9
+
+%define T1 xmm14
+%define TMP xmm15
+
+
+
+%define SZ2 2*SHA512_DIGEST_WORD_SIZE ; Size of one vector register
+%define ROUNDS 80*SZ2
+
+; Define stack usage
+
+struc STACK
+_DATA: resb SZ2 * 16
+_DIGEST: resb SZ2 * NUM_SHA512_DIGEST_WORDS
+ resb 8 ; for alignment, must be odd multiple of 8
+endstruc
+
+%define MOVPD movupd
+
+; transpose r0, r1, t0
+; Input looks like {r0 r1}
+; r0 = {a1 a0}
+; r1 = {b1 b0}
+;
+; output looks like
+; r0 = {b0, a0}
+; t0 = {b1, a1}
+
+%macro TRANSPOSE 3
+%define %%r0 %1
+%define %%r1 %2
+%define %%t0 %3
+ movapd %%t0, %%r0 ; t0 = a1 a0
+ shufpd %%r0, %%r1, 00b ; r0 = b0 a0
+ shufpd %%t0, %%r1, 11b ; t0 = b1 a1
+%endm
+
+
+%macro ROTATE_ARGS 0
+%xdefine TMP_ h
+%xdefine h g
+%xdefine g f
+%xdefine f e
+%xdefine e d
+%xdefine d c
+%xdefine c b
+%xdefine b a
+%xdefine a TMP_
+%endm
+
+; PRORQ reg, imm, tmp
+; packed-rotate-right-double
+; does a rotate by doing two shifts and an or
+%macro PRORQ 3
+%define %%reg %1
+%define %%imm %2
+%define %%tmp %3
+ movdqa %%tmp, %%reg
+ psllq %%tmp, (64-(%%imm))
+ psrlq %%reg, %%imm
+ por %%reg, %%tmp
+%endmacro
+
+; PRORQ dst/src, amt
+%macro PRORQ 2
+ PRORQ %1, %2, TMP
+%endmacro
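+
+;; For reference, a scalar C sketch of the rotate implemented by PRORQ above
+;; (illustrative only, not part of the library; rotr64() is an assumed name,
+;; valid for 0 < n < 64):
+;;
+;;     static inline uint64_t rotr64(uint64_t x, unsigned n)
+;;     {
+;;             return (x >> n) | (x << (64 - n));   /* two shifts and an OR */
+;;     }
+;;
+;; The 2-operand form of the macro simply supplies TMP as the scratch register.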
+
+
+;; arguments passed implicitly in preprocessor symbols i, a...h
+%macro ROUND_00_15 2
+%define %%T1 %1
+%define %%i %2
+ movdqa a0, e ; sig1: a0 = e
+	movdqa	a1, e		; sig1: a1 = e
+ PRORQ a0, (18-14) ; sig1: a0 = (e >> 4)
+
+ movdqa a2, f ; ch: a2 = f
+ pxor a2, g ; ch: a2 = f^g
+ pand a2, e ; ch: a2 = (f^g)&e
+ pxor a2, g ; a2 = ch
+
+ PRORQ a1, 41 ; sig1: a1 = (e >> 41)
+ movdqa [SZ2*(%%i&0xf) + rsp],%%T1
+ paddq %%T1,[TBL + ROUND] ; T1 = W + K
+	pxor	a0, e		; sig1: a0 = e ^ (e >> 4)
+ PRORQ a0, 14 ; sig1: a0 = (e >> 14) ^ (e >> 18)
+ paddq h, a2 ; h = h + ch
+ movdqa a2, a ; sig0: a2 = a
+ PRORQ a2, (34-28) ; sig0: a2 = (a >> 6)
+ paddq h, %%T1 ; h = h + ch + W + K
+ pxor a0, a1 ; a0 = sigma1
+ movdqa a1, a ; sig0: a1 = a
+ movdqa %%T1, a ; maj: T1 = a
+ PRORQ a1, 39 ; sig0: a1 = (a >> 39)
+ pxor %%T1, c ; maj: T1 = a^c
+ add ROUND, SZ2 ; ROUND++
+ pand %%T1, b ; maj: T1 = (a^c)&b
+ paddq h, a0
+
+ paddq d, h
+
+	pxor	a2, a		; sig0: a2 = a ^ (a >> 6)
+ PRORQ a2, 28 ; sig0: a2 = (a >> 28) ^ (a >> 34)
+ pxor a2, a1 ; a2 = sig0
+ movdqa a1, a ; maj: a1 = a
+ pand a1, c ; maj: a1 = a&c
+ por a1, %%T1 ; a1 = maj
+ paddq h, a1 ; h = h + ch + W + K + maj
+ paddq h, a2 ; h = h + ch + W + K + maj + sigma0
+
+ ROTATE_ARGS
+%endm
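+
+;; For reference, a scalar C sketch of what one ROUND_00_15 iteration computes
+;; per lane (illustrative only; rotr64() and the plain variable names are
+;; assumptions, not library code):
+;;
+;;     uint64_t sigma1 = rotr64(e, 14) ^ rotr64(e, 18) ^ rotr64(e, 41);
+;;     uint64_t sigma0 = rotr64(a, 28) ^ rotr64(a, 34) ^ rotr64(a, 39);
+;;     uint64_t ch     = ((f ^ g) & e) ^ g;           /* Ch(e,f,g)  */
+;;     uint64_t maj    = ((a ^ c) & b) | (a & c);     /* Maj(a,b,c) */
+;;     uint64_t t1     = h + sigma1 + ch + W[i] + K512[i];
+;;     d += t1;
+;;     h  = t1 + sigma0 + maj;
+;;     /* ROTATE_ARGS then renames h,g,...,a for the next round */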
+
+
+;; arguments passed implicitly in preprocessor symbols i, a...h
+%macro ROUND_16_XX 2
+%define %%T1 %1
+%define %%i %2
+ movdqa %%T1, [SZ2*((%%i-15)&0xf) + rsp]
+ movdqa a1, [SZ2*((%%i-2)&0xf) + rsp]
+ movdqa a0, %%T1
+ PRORQ %%T1, 8-1
+ movdqa a2, a1
+ PRORQ a1, 61-19
+ pxor %%T1, a0
+ PRORQ %%T1, 1
+ pxor a1, a2
+ PRORQ a1, 19
+ psrlq a0, 7
+ pxor %%T1, a0
+ psrlq a2, 6
+ pxor a1, a2
+ paddq %%T1, [SZ2*((%%i-16)&0xf) + rsp]
+ paddq a1, [SZ2*((%%i-7)&0xf) + rsp]
+ paddq %%T1, a1
+
+ ROUND_00_15 %%T1, %%i
+%endm
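+
+;; For reference, the SHA-512 message schedule computed above, as scalar C
+;; (illustrative only; rotr64() and W[] are assumed names; the stack holds the
+;; last 16 schedule words as a circular buffer):
+;;
+;;     uint64_t s0 = rotr64(W[i-15], 1) ^ rotr64(W[i-15], 8) ^ (W[i-15] >> 7);
+;;     uint64_t s1 = rotr64(W[i-2], 19) ^ rotr64(W[i-2], 61) ^ (W[i-2] >> 6);
+;;     W[i & 0xf]  = W[i-16] + s0 + W[i-7] + s1;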
+
+
+
+;; SHA512_ARGS:
+;; UINT128 digest[8]; // transposed digests
+;; UINT8 *data_ptr[2];
+;;
+
+;; void sha512_x2_sse(SHA512_ARGS *args, UINT64 num_blocks);
+;; arg 1 : STATE : pointer args
+;; arg 2 : INP_SIZE : size of data in blocks (assumed >= 1)
+;;
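+
+;; As a hedged C-style sketch of the addressing used below (the authoritative
+;; layout lives in mb_mgr_datastruct.asm): the digest is stored row-major by
+;; digest word, one 64-bit column per lane, so
+;;
+;;     /* digest word j of lane l */
+;;     word(j, l) = *(uint64_t *)(STATE + j * SHA512_DIGEST_ROW_SIZE + l * 8);
+;;
+;; which is why a single 16-byte movdqa per row loads word j for both lanes.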
+MKGLOBAL(sha512_x2_sse,function,internal)
+align 32
+sha512_x2_sse:
+ ; general registers preserved in outer calling routine
+ ; outer calling routine saves all the XMM registers
+ sub rsp, STACK_size
+
+ ;; Load the pre-transposed incoming digest.
+ movdqa a,[STATE + 0 * SHA512_DIGEST_ROW_SIZE]
+ movdqa b,[STATE + 1 * SHA512_DIGEST_ROW_SIZE]
+ movdqa c,[STATE + 2 * SHA512_DIGEST_ROW_SIZE]
+ movdqa d,[STATE + 3 * SHA512_DIGEST_ROW_SIZE]
+ movdqa e,[STATE + 4 * SHA512_DIGEST_ROW_SIZE]
+ movdqa f,[STATE + 5 * SHA512_DIGEST_ROW_SIZE]
+ movdqa g,[STATE + 6 * SHA512_DIGEST_ROW_SIZE]
+ movdqa h,[STATE + 7 * SHA512_DIGEST_ROW_SIZE]
+
+ DBGPRINTL_XMM "incoming transposed sha512 digest", a, b, c, d, e, f, g, h
+ lea TBL,[rel K512_2]
+
+ ;; load the address of each of the 2 message lanes
+ ;; getting ready to transpose input onto stack
+ mov inp0,[STATE + _data_ptr_sha512 +0*PTR_SZ]
+ mov inp1,[STATE + _data_ptr_sha512 +1*PTR_SZ]
+
+ xor IDX, IDX
+lloop:
+ xor ROUND, ROUND
+ DBGPRINTL64 "lloop enter INP_SIZE ", INP_SIZE
+ DBGPRINTL64 " IDX = ", IDX
+ ;; save old digest
+ movdqa [rsp + _DIGEST + 0*SZ2], a
+ movdqa [rsp + _DIGEST + 1*SZ2], b
+ movdqa [rsp + _DIGEST + 2*SZ2], c
+ movdqa [rsp + _DIGEST + 3*SZ2], d
+ movdqa [rsp + _DIGEST + 4*SZ2], e
+ movdqa [rsp + _DIGEST + 5*SZ2], f
+ movdqa [rsp + _DIGEST + 6*SZ2], g
+ movdqa [rsp + _DIGEST + 7*SZ2], h
+
+ DBGPRINTL "incoming data["
+%assign i 0
+%rep 8
+ ;; load up the shuffler for little-endian to big-endian format
+ movdqa TMP, [rel PSHUFFLE_BYTE_FLIP_MASK]
+ MOVPD TT0,[inp0+IDX+i*16] ;; double precision is 64 bits
+ MOVPD TT2,[inp1+IDX+i*16]
+ DBGPRINTL_XMM "input message block", TT0
+ TRANSPOSE TT0, TT2, TT1
+ pshufb TT0, TMP
+ pshufb TT1, TMP
+ ROUND_00_15 TT0,(i*2+0)
+ ROUND_00_15 TT1,(i*2+1)
+%assign i (i+1)
+%endrep
+ DBGPRINTL "]"
+ add IDX, 8 * 16 ;; increment by a message block
+
+
+%assign i (i*4)
+
+ jmp Lrounds_16_xx
+align 16
+Lrounds_16_xx:
+%rep 16
+ ROUND_16_XX T1, i
+%assign i (i+1)
+%endrep
+
+ cmp ROUND,ROUNDS
+ jb Lrounds_16_xx
+
+ ;; add old digest
+ paddq a, [rsp + _DIGEST + 0*SZ2]
+ paddq b, [rsp + _DIGEST + 1*SZ2]
+ paddq c, [rsp + _DIGEST + 2*SZ2]
+ paddq d, [rsp + _DIGEST + 3*SZ2]
+ paddq e, [rsp + _DIGEST + 4*SZ2]
+ paddq f, [rsp + _DIGEST + 5*SZ2]
+ paddq g, [rsp + _DIGEST + 6*SZ2]
+ paddq h, [rsp + _DIGEST + 7*SZ2]
+
+ sub INP_SIZE, 1 ;; unit is blocks
+ jne lloop
+
+ ; write back to memory (state object) the transposed digest
+ movdqa [STATE + 0*SHA512_DIGEST_ROW_SIZE],a
+ movdqa [STATE + 1*SHA512_DIGEST_ROW_SIZE],b
+ movdqa [STATE + 2*SHA512_DIGEST_ROW_SIZE],c
+ movdqa [STATE + 3*SHA512_DIGEST_ROW_SIZE],d
+ movdqa [STATE + 4*SHA512_DIGEST_ROW_SIZE],e
+ movdqa [STATE + 5*SHA512_DIGEST_ROW_SIZE],f
+ movdqa [STATE + 6*SHA512_DIGEST_ROW_SIZE],g
+ movdqa [STATE + 7*SHA512_DIGEST_ROW_SIZE],h
+ DBGPRINTL_XMM "exit transposed digest ", a, b, c, d, e, f, g, h
+
+ ; update input pointers
+ add inp0, IDX
+ mov [STATE + _data_ptr_sha512 + 0*PTR_SZ], inp0
+ add inp1, IDX
+ mov [STATE + _data_ptr_sha512 + 1*PTR_SZ], inp1
+
+ ;;;;;;;;;;;;;;;;
+ ;; Postamble
+
+ ;; Clear stack frame ((16 + 8)*16 bytes)
+%ifdef SAFE_DATA
+ pxor xmm0, xmm0
+%assign i 0
+%rep (16+NUM_SHA512_DIGEST_WORDS)
+ movdqa [rsp + i*SZ2], xmm0
+%assign i (i+1)
+%endrep
+%endif
+
+ add rsp, STACK_size
+DBGPRINTL "====================== exit sha512_x2_sse code =====================\n"
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/sse/sha_256_mult_sse.asm b/src/spdk/intel-ipsec-mb/sse/sha_256_mult_sse.asm
new file mode 100644
index 000000000..954d6597e
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/sse/sha_256_mult_sse.asm
@@ -0,0 +1,457 @@
+;;
+;; Copyright (c) 2012-2018, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+;; code to compute quad SHA256 using SSE
+;; outer calling routine takes care of save and restore of XMM registers
+;; Logic designed/laid out by JDG
+
+;; Stack must be aligned to 16 bytes before call
+;; Windows clobbers: rax rbx rdx r8 r9 r10 r11 r12
+;; Windows preserves: rcx rsi rdi rbp r13 r14 r15
+;;
+;; Linux clobbers: rax rbx rsi r8 r9 r10 r11 r12
+;; Linux preserves: rcx rdx rdi rbp r13 r14 r15
+;;
+;; clobbers xmm0-15
+
+%include "include/os.asm"
+%include "mb_mgr_datastruct.asm"
+
+;%define DO_DBGPRINT
+%include "include/dbgprint.asm"
+
+section .data
+default rel
+align 64
+MKGLOBAL(K256_4,data,internal)
+K256_4:
+ dq 0x428a2f98428a2f98, 0x428a2f98428a2f98
+ dq 0x7137449171374491, 0x7137449171374491
+ dq 0xb5c0fbcfb5c0fbcf, 0xb5c0fbcfb5c0fbcf
+ dq 0xe9b5dba5e9b5dba5, 0xe9b5dba5e9b5dba5
+ dq 0x3956c25b3956c25b, 0x3956c25b3956c25b
+ dq 0x59f111f159f111f1, 0x59f111f159f111f1
+ dq 0x923f82a4923f82a4, 0x923f82a4923f82a4
+ dq 0xab1c5ed5ab1c5ed5, 0xab1c5ed5ab1c5ed5
+ dq 0xd807aa98d807aa98, 0xd807aa98d807aa98
+ dq 0x12835b0112835b01, 0x12835b0112835b01
+ dq 0x243185be243185be, 0x243185be243185be
+ dq 0x550c7dc3550c7dc3, 0x550c7dc3550c7dc3
+ dq 0x72be5d7472be5d74, 0x72be5d7472be5d74
+ dq 0x80deb1fe80deb1fe, 0x80deb1fe80deb1fe
+ dq 0x9bdc06a79bdc06a7, 0x9bdc06a79bdc06a7
+ dq 0xc19bf174c19bf174, 0xc19bf174c19bf174
+ dq 0xe49b69c1e49b69c1, 0xe49b69c1e49b69c1
+ dq 0xefbe4786efbe4786, 0xefbe4786efbe4786
+ dq 0x0fc19dc60fc19dc6, 0x0fc19dc60fc19dc6
+ dq 0x240ca1cc240ca1cc, 0x240ca1cc240ca1cc
+ dq 0x2de92c6f2de92c6f, 0x2de92c6f2de92c6f
+ dq 0x4a7484aa4a7484aa, 0x4a7484aa4a7484aa
+ dq 0x5cb0a9dc5cb0a9dc, 0x5cb0a9dc5cb0a9dc
+ dq 0x76f988da76f988da, 0x76f988da76f988da
+ dq 0x983e5152983e5152, 0x983e5152983e5152
+ dq 0xa831c66da831c66d, 0xa831c66da831c66d
+ dq 0xb00327c8b00327c8, 0xb00327c8b00327c8
+ dq 0xbf597fc7bf597fc7, 0xbf597fc7bf597fc7
+ dq 0xc6e00bf3c6e00bf3, 0xc6e00bf3c6e00bf3
+ dq 0xd5a79147d5a79147, 0xd5a79147d5a79147
+ dq 0x06ca635106ca6351, 0x06ca635106ca6351
+ dq 0x1429296714292967, 0x1429296714292967
+ dq 0x27b70a8527b70a85, 0x27b70a8527b70a85
+ dq 0x2e1b21382e1b2138, 0x2e1b21382e1b2138
+ dq 0x4d2c6dfc4d2c6dfc, 0x4d2c6dfc4d2c6dfc
+ dq 0x53380d1353380d13, 0x53380d1353380d13
+ dq 0x650a7354650a7354, 0x650a7354650a7354
+ dq 0x766a0abb766a0abb, 0x766a0abb766a0abb
+ dq 0x81c2c92e81c2c92e, 0x81c2c92e81c2c92e
+ dq 0x92722c8592722c85, 0x92722c8592722c85
+ dq 0xa2bfe8a1a2bfe8a1, 0xa2bfe8a1a2bfe8a1
+ dq 0xa81a664ba81a664b, 0xa81a664ba81a664b
+ dq 0xc24b8b70c24b8b70, 0xc24b8b70c24b8b70
+ dq 0xc76c51a3c76c51a3, 0xc76c51a3c76c51a3
+ dq 0xd192e819d192e819, 0xd192e819d192e819
+ dq 0xd6990624d6990624, 0xd6990624d6990624
+ dq 0xf40e3585f40e3585, 0xf40e3585f40e3585
+ dq 0x106aa070106aa070, 0x106aa070106aa070
+ dq 0x19a4c11619a4c116, 0x19a4c11619a4c116
+ dq 0x1e376c081e376c08, 0x1e376c081e376c08
+ dq 0x2748774c2748774c, 0x2748774c2748774c
+ dq 0x34b0bcb534b0bcb5, 0x34b0bcb534b0bcb5
+ dq 0x391c0cb3391c0cb3, 0x391c0cb3391c0cb3
+ dq 0x4ed8aa4a4ed8aa4a, 0x4ed8aa4a4ed8aa4a
+ dq 0x5b9cca4f5b9cca4f, 0x5b9cca4f5b9cca4f
+ dq 0x682e6ff3682e6ff3, 0x682e6ff3682e6ff3
+ dq 0x748f82ee748f82ee, 0x748f82ee748f82ee
+ dq 0x78a5636f78a5636f, 0x78a5636f78a5636f
+ dq 0x84c8781484c87814, 0x84c8781484c87814
+ dq 0x8cc702088cc70208, 0x8cc702088cc70208
+ dq 0x90befffa90befffa, 0x90befffa90befffa
+ dq 0xa4506ceba4506ceb, 0xa4506ceba4506ceb
+ dq 0xbef9a3f7bef9a3f7, 0xbef9a3f7bef9a3f7
+ dq 0xc67178f2c67178f2, 0xc67178f2c67178f2
+PSHUFFLE_BYTE_FLIP_MASK: ;ddq 0x0c0d0e0f08090a0b0405060700010203
+ dq 0x0405060700010203, 0x0c0d0e0f08090a0b
+
+section .text
+
+%ifdef LINUX ; Linux definitions
+ %define arg1 rdi
+ %define arg2 rsi
+%else ; Windows definitions
+ %define arg1 rcx
+ %define arg2 rdx
+%endif
+
+; Common definitions
+%define STATE arg1
+%define INP_SIZE arg2
+
+%define IDX rax
+%define ROUND rbx
+%define TBL r12
+
+%define inp0 r8
+%define inp1 r9
+%define inp2 r10
+%define inp3 r11
+
+%define a xmm0
+%define b xmm1
+%define c xmm2
+%define d xmm3
+%define e xmm4
+%define f xmm5
+%define g xmm6
+%define h xmm7
+
+%define a0 xmm8
+%define a1 xmm9
+%define a2 xmm10
+
+%define TT0 xmm14
+%define TT1 xmm13
+%define TT2 xmm12
+%define TT3 xmm11
+%define TT4 xmm10
+%define TT5 xmm9
+
+%define T1 xmm14
+%define TMP xmm15
+
+%define SZ4 4*SHA256_DIGEST_WORD_SIZE ; Size of one vector register
+%define ROUNDS 64*SZ4
+
+; Define stack usage
+struc STACK
+_DATA: resb SZ4 * 16
+_DIGEST: resb SZ4 * NUM_SHA256_DIGEST_WORDS
+ resb 8 ; for alignment, must be odd multiple of 8
+endstruc
+
+%define MOVPS movups
+
+; transpose r0, r1, r2, r3, t0, t1
+; "transpose" data in {r0..r3} using temps {t0..t3}
+; Input looks like: {r0 r1 r2 r3}
+; r0 = {a3 a2 a1 a0}
+; r1 = {b3 b2 b1 b0}
+; r2 = {c3 c2 c1 c0}
+; r3 = {d3 d2 d1 d0}
+;
+; output looks like: {t0 r1 r0 r3}
+; t0 = {d0 c0 b0 a0}
+; r1 = {d1 c1 b1 a1}
+; r0 = {d2 c2 b2 a2}
+; r3 = {d3 c3 b3 a3}
+;
+%macro TRANSPOSE 6
+%define %%r0 %1
+%define %%r1 %2
+%define %%r2 %3
+%define %%r3 %4
+%define %%t0 %5
+%define %%t1 %6
+ movaps %%t0, %%r0 ; t0 = {a3 a2 a1 a0}
+ shufps %%t0, %%r1, 0x44 ; t0 = {b1 b0 a1 a0}
+ shufps %%r0, %%r1, 0xEE ; r0 = {b3 b2 a3 a2}
+
+ movaps %%t1, %%r2 ; t1 = {c3 c2 c1 c0}
+ shufps %%t1, %%r3, 0x44 ; t1 = {d1 d0 c1 c0}
+ shufps %%r2, %%r3, 0xEE ; r2 = {d3 d2 c3 c2}
+
+ movaps %%r1, %%t0 ; r1 = {b1 b0 a1 a0}
+ shufps %%r1, %%t1, 0xDD ; r1 = {d1 c1 b1 a1}
+
+ movaps %%r3, %%r0 ; r3 = {b3 b2 a3 a2}
+ shufps %%r3, %%r2, 0xDD ; r3 = {d3 c3 b3 a3}
+
+ shufps %%r0, %%r2, 0x88 ; r0 = {d2 c2 b2 a2}
+ shufps %%t0, %%t1, 0x88 ; t0 = {d0 c0 b0 a0}
+%endmacro
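+
+;; For reference, a plain C model of the net effect of the 4x4 dword transpose
+;; above (illustrative only; the shufps sequence is just a register-level way
+;; of doing this): afterwards one register holds the same message word from all
+;; four lanes.
+;;
+;;     uint32_t in[4][4], out[4][4];   /* in[lane][word] -> out[word][lane] */
+;;     for (int lane = 0; lane < 4; lane++)
+;;             for (int word = 0; word < 4; word++)
+;;                     out[word][lane] = in[lane][word];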
+
+
+
+%macro ROTATE_ARGS 0
+%xdefine TMP_ h
+%xdefine h g
+%xdefine g f
+%xdefine f e
+%xdefine e d
+%xdefine d c
+%xdefine c b
+%xdefine b a
+%xdefine a TMP_
+%endm
+
+; PRORD reg, imm, tmp
+%macro PRORD 3
+%define %%reg %1
+%define %%imm %2
+%define %%tmp %3
+ movdqa %%tmp, %%reg
+ psrld %%reg, %%imm
+ pslld %%tmp, (32-(%%imm))
+ por %%reg, %%tmp
+%endmacro
+
+%macro PRORD 2
+ PRORD %1, %2, TMP
+%endmacro
+
+;; arguments passed implicitly in preprocessor symbols i, a...h
+%macro ROUND_00_15 2
+%define %%T1 %1
+%define %%i %2
+ movdqa a0, e ; sig1: a0 = e
+	movdqa	a1, e		; sig1: a1 = e
+ PRORD a0, (11-6) ; sig1: a0 = (e >> 5)
+
+ movdqa a2, f ; ch: a2 = f
+ pxor a2, g ; ch: a2 = f^g
+ pand a2, e ; ch: a2 = (f^g)&e
+ pxor a2, g ; a2 = ch
+
+ PRORD a1, 25 ; sig1: a1 = (e >> 25)
+ movdqa [SZ4*(%%i&0xf) + rsp],%%T1
+ paddd %%T1,[TBL + ROUND] ; T1 = W + K
+ pxor a0, e ; sig1: a0 = e ^ (e >> 5)
+ PRORD a0, 6 ; sig1: a0 = (e >> 6) ^ (e >> 11)
+ paddd h, a2 ; h = h + ch
+ movdqa a2, a ; sig0: a2 = a
+ PRORD a2, (13-2) ; sig0: a2 = (a >> 11)
+ paddd h, %%T1 ; h = h + ch + W + K
+ pxor a0, a1 ; a0 = sigma1
+ movdqa a1, a ; sig0: a1 = a
+ movdqa %%T1, a ; maj: T1 = a
+ PRORD a1, 22 ; sig0: a1 = (a >> 22)
+ pxor %%T1, c ; maj: T1 = a^c
+ add ROUND, SZ4 ; ROUND++
+ pand %%T1, b ; maj: T1 = (a^c)&b
+ paddd h, a0
+
+ paddd d, h
+
+ pxor a2, a ; sig0: a2 = a ^ (a >> 11)
+ PRORD a2, 2 ; sig0: a2 = (a >> 2) ^ (a >> 13)
+ pxor a2, a1 ; a2 = sig0
+ movdqa a1, a ; maj: a1 = a
+ pand a1, c ; maj: a1 = a&c
+ por a1, %%T1 ; a1 = maj
+ paddd h, a1 ; h = h + ch + W + K + maj
+ paddd h, a2 ; h = h + ch + W + K + maj + sigma0
+
+ ROTATE_ARGS
+%endm
+
+
+;; arguments passed implicitly in preprocessor symbols i, a...h
+%macro ROUND_16_XX 2
+%define %%T1 %1
+%define %%i %2
+ movdqa %%T1, [SZ4*((%%i-15)&0xf) + rsp]
+ movdqa a1, [SZ4*((%%i-2)&0xf) + rsp]
+ movdqa a0, %%T1
+ PRORD %%T1, 18-7
+ movdqa a2, a1
+ PRORD a1, 19-17
+ pxor %%T1, a0
+ PRORD %%T1, 7
+ pxor a1, a2
+ PRORD a1, 17
+ psrld a0, 3
+ pxor %%T1, a0
+ psrld a2, 10
+ pxor a1, a2
+ paddd %%T1, [SZ4*((%%i-16)&0xf) + rsp]
+ paddd a1, [SZ4*((%%i-7)&0xf) + rsp]
+ paddd %%T1, a1
+
+ ROUND_00_15 %%T1, %%i
+%endm
+
+
+
+;; SHA256_ARGS:
+;; UINT128 digest[8]; // transposed digests
+;; UINT8 *data_ptr[4];
+;;
+
+;; void sha_256_mult_sse(SHA256_ARGS *args, UINT64 num_blocks);
+;; arg 1 : STATE : pointer args
+;; arg 2 : INP_SIZE : size of data in blocks (assumed >= 1)
+;;
+MKGLOBAL(sha_256_mult_sse,function,internal)
+align 32
+sha_256_mult_sse:
+ ; general registers preserved in outer calling routine
+ ; outer calling routine saves all the XMM registers
+ sub rsp, STACK_size
+
+ ;; Load the pre-transposed incoming digest.
+ movdqa a,[STATE + 0 * SHA256_DIGEST_ROW_SIZE ]
+ movdqa b,[STATE + 1 * SHA256_DIGEST_ROW_SIZE ]
+ movdqa c,[STATE + 2 * SHA256_DIGEST_ROW_SIZE ]
+ movdqa d,[STATE + 3 * SHA256_DIGEST_ROW_SIZE ]
+ movdqa e,[STATE + 4 * SHA256_DIGEST_ROW_SIZE ]
+ movdqa f,[STATE + 5 * SHA256_DIGEST_ROW_SIZE ]
+ movdqa g,[STATE + 6 * SHA256_DIGEST_ROW_SIZE ]
+ movdqa h,[STATE + 7 * SHA256_DIGEST_ROW_SIZE ]
+
+ DBGPRINTL_XMM "incoming transposed sha256 digest", a, b, c, d, e, f, g, h
+ lea TBL,[rel K256_4]
+
+ ;; load the address of each of the 4 message lanes
+ ;; getting ready to transpose input onto stack
+ mov inp0,[STATE + _data_ptr_sha256 + 0*PTR_SZ]
+ mov inp1,[STATE + _data_ptr_sha256 + 1*PTR_SZ]
+ mov inp2,[STATE + _data_ptr_sha256 + 2*PTR_SZ]
+ mov inp3,[STATE + _data_ptr_sha256 + 3*PTR_SZ]
+ DBGPRINTL64 "incoming input data ptrs ", inp0, inp1, inp2, inp3
+ xor IDX, IDX
+lloop:
+ xor ROUND, ROUND
+
+ ;; save old digest
+ movdqa [rsp + _DIGEST + 0*SZ4], a
+ movdqa [rsp + _DIGEST + 1*SZ4], b
+ movdqa [rsp + _DIGEST + 2*SZ4], c
+ movdqa [rsp + _DIGEST + 3*SZ4], d
+ movdqa [rsp + _DIGEST + 4*SZ4], e
+ movdqa [rsp + _DIGEST + 5*SZ4], f
+ movdqa [rsp + _DIGEST + 6*SZ4], g
+ movdqa [rsp + _DIGEST + 7*SZ4], h
+
+%assign i 0
+%rep 4
+ movdqa TMP, [rel PSHUFFLE_BYTE_FLIP_MASK]
+ MOVPS TT2,[inp0+IDX+i*16]
+ MOVPS TT1,[inp1+IDX+i*16]
+ MOVPS TT4,[inp2+IDX+i*16]
+ MOVPS TT3,[inp3+IDX+i*16]
+ TRANSPOSE TT2, TT1, TT4, TT3, TT0, TT5
+ pshufb TT0, TMP
+ pshufb TT1, TMP
+ pshufb TT2, TMP
+ pshufb TT3, TMP
+ ROUND_00_15 TT0,(i*4+0)
+ ROUND_00_15 TT1,(i*4+1)
+ ROUND_00_15 TT2,(i*4+2)
+ ROUND_00_15 TT3,(i*4+3)
+%assign i (i+1)
+%endrep
+ add IDX, 4*4*4
+
+
+%assign i (i*4)
+
+ jmp Lrounds_16_xx
+align 16
+Lrounds_16_xx:
+%rep 16
+ ROUND_16_XX T1, i
+%assign i (i+1)
+%endrep
+
+ cmp ROUND,ROUNDS
+ jb Lrounds_16_xx
+
+ ;; add old digest
+ paddd a, [rsp + _DIGEST + 0*SZ4]
+ paddd b, [rsp + _DIGEST + 1*SZ4]
+ paddd c, [rsp + _DIGEST + 2*SZ4]
+ paddd d, [rsp + _DIGEST + 3*SZ4]
+ paddd e, [rsp + _DIGEST + 4*SZ4]
+ paddd f, [rsp + _DIGEST + 5*SZ4]
+ paddd g, [rsp + _DIGEST + 6*SZ4]
+ paddd h, [rsp + _DIGEST + 7*SZ4]
+
+ sub INP_SIZE, 1 ;; unit is blocks
+ jne lloop
+
+ ; write back to memory (state object) the transposed digest
+ movdqa [STATE+0*SHA256_DIGEST_ROW_SIZE ],a
+ movdqa [STATE+1*SHA256_DIGEST_ROW_SIZE ],b
+ movdqa [STATE+2*SHA256_DIGEST_ROW_SIZE ],c
+ movdqa [STATE+3*SHA256_DIGEST_ROW_SIZE ],d
+ movdqa [STATE+4*SHA256_DIGEST_ROW_SIZE ],e
+ movdqa [STATE+5*SHA256_DIGEST_ROW_SIZE ],f
+ movdqa [STATE+6*SHA256_DIGEST_ROW_SIZE ],g
+ movdqa [STATE+7*SHA256_DIGEST_ROW_SIZE ],h
+ DBGPRINTL_XMM "updated transposed sha256 digest", a, b, c, d, e, f, g, h
+
+ ; update input pointers
+ add inp0, IDX
+ mov [STATE + _data_ptr_sha256 + 0*8], inp0
+ add inp1, IDX
+ mov [STATE + _data_ptr_sha256 + 1*8], inp1
+ add inp2, IDX
+ mov [STATE + _data_ptr_sha256 + 2*8], inp2
+ add inp3, IDX
+ mov [STATE + _data_ptr_sha256 + 3*8], inp3
+
+ DBGPRINTL64 "updated input data ptrs ", inp0, inp1, inp2, inp3
+
+ ;;;;;;;;;;;;;;;;
+ ;; Postamble
+
+%ifdef SAFE_DATA
+ ;; Clear stack frame ((16 + 8)*16 bytes)
+ pxor xmm0, xmm0
+%assign i 0
+%rep (16+NUM_SHA256_DIGEST_WORDS)
+ movdqa [rsp + i*SZ4], xmm0
+%assign i (i+1)
+%endrep
+%endif
+
+ add rsp, STACK_size
+ ; outer calling routine restores XMM and other GP registers
+ ret
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/sse/snow3g_sse.c b/src/spdk/intel-ipsec-mb/sse/snow3g_sse.c
new file mode 100644
index 000000000..aadd85633
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/sse/snow3g_sse.c
@@ -0,0 +1,42 @@
+/*******************************************************************************
+ Copyright (c) 2019, Intel Corporation
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*******************************************************************************/
+
+#define SSE
+#define SNOW3G_F8_1_BUFFER_BIT snow3g_f8_1_buffer_bit_sse
+#define SNOW3G_F8_1_BUFFER snow3g_f8_1_buffer_sse
+#define SNOW3G_F8_2_BUFFER snow3g_f8_2_buffer_sse
+#define SNOW3G_F8_4_BUFFER snow3g_f8_4_buffer_sse
+#define SNOW3G_F8_8_BUFFER snow3g_f8_8_buffer_sse
+#define SNOW3G_F8_N_BUFFER snow3g_f8_n_buffer_sse
+#define SNOW3G_F8_8_BUFFER_MULTIKEY snow3g_f8_8_buffer_multikey_sse
+#define SNOW3G_F8_N_BUFFER_MULTIKEY snow3g_f8_n_buffer_multikey_sse
+#define SNOW3G_F9_1_BUFFER snow3g_f9_1_buffer_sse
+#define SNOW3G_INIT_KEY_SCHED snow3g_init_key_sched_sse
+#define SNOW3G_KEY_SCHED_SIZE snow3g_key_sched_size_sse
+#define CLEAR_SCRATCH_SIMD_REGS clear_scratch_xmms_sse
+
+#include "include/snow3g_common.h"
diff --git a/src/spdk/intel-ipsec-mb/sse/zuc_sse.asm b/src/spdk/intel-ipsec-mb/sse/zuc_sse.asm
new file mode 100755
index 000000000..0f4e490f9
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/sse/zuc_sse.asm
@@ -0,0 +1,1152 @@
+;;
+;; Copyright (c) 2009-2019, Intel Corporation
+;;
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are met:
+;;
+;; * Redistributions of source code must retain the above copyright notice,
+;; this list of conditions and the following disclaimer.
+;; * Redistributions in binary form must reproduce the above copyright
+;; notice, this list of conditions and the following disclaimer in the
+;; documentation and/or other materials provided with the distribution.
+;; * Neither the name of Intel Corporation nor the names of its contributors
+;; may be used to endorse or promote products derived from this software
+;; without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;
+
+%include "include/os.asm"
+%include "include/reg_sizes.asm"
+
+extern lookup_8bit_sse
+
+section .data
+default rel
+align 64
+S0:
+db 0x3e,0x72,0x5b,0x47,0xca,0xe0,0x00,0x33,0x04,0xd1,0x54,0x98,0x09,0xb9,0x6d,0xcb
+db 0x7b,0x1b,0xf9,0x32,0xaf,0x9d,0x6a,0xa5,0xb8,0x2d,0xfc,0x1d,0x08,0x53,0x03,0x90
+db 0x4d,0x4e,0x84,0x99,0xe4,0xce,0xd9,0x91,0xdd,0xb6,0x85,0x48,0x8b,0x29,0x6e,0xac
+db 0xcd,0xc1,0xf8,0x1e,0x73,0x43,0x69,0xc6,0xb5,0xbd,0xfd,0x39,0x63,0x20,0xd4,0x38
+db 0x76,0x7d,0xb2,0xa7,0xcf,0xed,0x57,0xc5,0xf3,0x2c,0xbb,0x14,0x21,0x06,0x55,0x9b
+db 0xe3,0xef,0x5e,0x31,0x4f,0x7f,0x5a,0xa4,0x0d,0x82,0x51,0x49,0x5f,0xba,0x58,0x1c
+db 0x4a,0x16,0xd5,0x17,0xa8,0x92,0x24,0x1f,0x8c,0xff,0xd8,0xae,0x2e,0x01,0xd3,0xad
+db 0x3b,0x4b,0xda,0x46,0xeb,0xc9,0xde,0x9a,0x8f,0x87,0xd7,0x3a,0x80,0x6f,0x2f,0xc8
+db 0xb1,0xb4,0x37,0xf7,0x0a,0x22,0x13,0x28,0x7c,0xcc,0x3c,0x89,0xc7,0xc3,0x96,0x56
+db 0x07,0xbf,0x7e,0xf0,0x0b,0x2b,0x97,0x52,0x35,0x41,0x79,0x61,0xa6,0x4c,0x10,0xfe
+db 0xbc,0x26,0x95,0x88,0x8a,0xb0,0xa3,0xfb,0xc0,0x18,0x94,0xf2,0xe1,0xe5,0xe9,0x5d
+db 0xd0,0xdc,0x11,0x66,0x64,0x5c,0xec,0x59,0x42,0x75,0x12,0xf5,0x74,0x9c,0xaa,0x23
+db 0x0e,0x86,0xab,0xbe,0x2a,0x02,0xe7,0x67,0xe6,0x44,0xa2,0x6c,0xc2,0x93,0x9f,0xf1
+db 0xf6,0xfa,0x36,0xd2,0x50,0x68,0x9e,0x62,0x71,0x15,0x3d,0xd6,0x40,0xc4,0xe2,0x0f
+db 0x8e,0x83,0x77,0x6b,0x25,0x05,0x3f,0x0c,0x30,0xea,0x70,0xb7,0xa1,0xe8,0xa9,0x65
+db 0x8d,0x27,0x1a,0xdb,0x81,0xb3,0xa0,0xf4,0x45,0x7a,0x19,0xdf,0xee,0x78,0x34,0x60
+
+S1:
+db 0x55,0xc2,0x63,0x71,0x3b,0xc8,0x47,0x86,0x9f,0x3c,0xda,0x5b,0x29,0xaa,0xfd,0x77
+db 0x8c,0xc5,0x94,0x0c,0xa6,0x1a,0x13,0x00,0xe3,0xa8,0x16,0x72,0x40,0xf9,0xf8,0x42
+db 0x44,0x26,0x68,0x96,0x81,0xd9,0x45,0x3e,0x10,0x76,0xc6,0xa7,0x8b,0x39,0x43,0xe1
+db 0x3a,0xb5,0x56,0x2a,0xc0,0x6d,0xb3,0x05,0x22,0x66,0xbf,0xdc,0x0b,0xfa,0x62,0x48
+db 0xdd,0x20,0x11,0x06,0x36,0xc9,0xc1,0xcf,0xf6,0x27,0x52,0xbb,0x69,0xf5,0xd4,0x87
+db 0x7f,0x84,0x4c,0xd2,0x9c,0x57,0xa4,0xbc,0x4f,0x9a,0xdf,0xfe,0xd6,0x8d,0x7a,0xeb
+db 0x2b,0x53,0xd8,0x5c,0xa1,0x14,0x17,0xfb,0x23,0xd5,0x7d,0x30,0x67,0x73,0x08,0x09
+db 0xee,0xb7,0x70,0x3f,0x61,0xb2,0x19,0x8e,0x4e,0xe5,0x4b,0x93,0x8f,0x5d,0xdb,0xa9
+db 0xad,0xf1,0xae,0x2e,0xcb,0x0d,0xfc,0xf4,0x2d,0x46,0x6e,0x1d,0x97,0xe8,0xd1,0xe9
+db 0x4d,0x37,0xa5,0x75,0x5e,0x83,0x9e,0xab,0x82,0x9d,0xb9,0x1c,0xe0,0xcd,0x49,0x89
+db 0x01,0xb6,0xbd,0x58,0x24,0xa2,0x5f,0x38,0x78,0x99,0x15,0x90,0x50,0xb8,0x95,0xe4
+db 0xd0,0x91,0xc7,0xce,0xed,0x0f,0xb4,0x6f,0xa0,0xcc,0xf0,0x02,0x4a,0x79,0xc3,0xde
+db 0xa3,0xef,0xea,0x51,0xe6,0x6b,0x18,0xec,0x1b,0x2c,0x80,0xf7,0x74,0xe7,0xff,0x21
+db 0x5a,0x6a,0x54,0x1e,0x41,0x31,0x92,0x35,0xc4,0x33,0x07,0x0a,0xba,0x7e,0x0e,0x34
+db 0x88,0xb1,0x98,0x7c,0xf3,0x3d,0x60,0x6c,0x7b,0xca,0xd3,0x1f,0x32,0x65,0x04,0x28
+db 0x64,0xbe,0x85,0x9b,0x2f,0x59,0x8a,0xd7,0xb0,0x25,0xac,0xaf,0x12,0x03,0xe2,0xf2
+
+EK_d:
+dw 0x44D7, 0x26BC, 0x626B, 0x135E, 0x5789, 0x35E2, 0x7135, 0x09AF,
+dw 0x4D78, 0x2F13, 0x6BC4, 0x1AF1, 0x5E26, 0x3C4D, 0x789A, 0x47AC
+
+mask31:
+dd 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF
+
+align 16
+bit_reverse_table_l:
+db 0x00, 0x08, 0x04, 0x0c, 0x02, 0x0a, 0x06, 0x0e, 0x01, 0x09, 0x05, 0x0d, 0x03, 0x0b, 0x07, 0x0f
+
+align 16
+bit_reverse_table_h:
+db 0x00, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, 0x10, 0x90, 0x50, 0xd0, 0x30, 0xb0, 0x70, 0xf0
+
+align 16
+bit_reverse_and_table:
+db 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f
+
+align 16
+data_mask_64bits:
+dd 0xffffffff, 0xffffffff, 0x00000000, 0x00000000
+
+bit_mask_table:
+db 0x00, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe
+
+
+section .text
+
+%define MASK31 xmm12
+
+%define OFS_R1 (16*(4*4))
+%define OFS_R2 (OFS_R1 + (4*4))
+%define OFS_X0 (OFS_R2 + (4*4))
+%define OFS_X1 (OFS_X0 + (4*4))
+%define OFS_X2 (OFS_X1 + (4*4))
+%define OFS_X3 (OFS_X2 + (4*4))
+
+%ifidn __OUTPUT_FORMAT__, win64
+ %define XMM_STORAGE 16*10
+%else
+ %define XMM_STORAGE 0
+%endif
+
+%define VARIABLE_OFFSET XMM_STORAGE
+
+%macro FUNC_SAVE 0
+ push r12
+ push r13
+ push r14
+ push r15
+%ifidn __OUTPUT_FORMAT__, win64
+ push rdi
+ push rsi
+%endif
+ mov r14, rsp
+
+ sub rsp, VARIABLE_OFFSET
+ and rsp, ~63
+
+%ifidn __OUTPUT_FORMAT__, win64
+ ; xmm6:xmm15 need to be maintained for Windows
+ movdqu [rsp + 0*16],xmm6
+ movdqu [rsp + 1*16],xmm7
+ movdqu [rsp + 2*16],xmm8
+ movdqu [rsp + 3*16],xmm9
+ movdqu [rsp + 4*16],xmm10
+ movdqu [rsp + 5*16],xmm11
+ movdqu [rsp + 6*16],xmm12
+ movdqu [rsp + 7*16],xmm13
+ movdqu [rsp + 8*16],xmm14
+ movdqu [rsp + 9*16],xmm15
+%endif
+%endmacro
+
+
+%macro FUNC_RESTORE 0
+
+%ifidn __OUTPUT_FORMAT__, win64
+ movdqu xmm15, [rsp + 9*16]
+ movdqu xmm14, [rsp + 8*16]
+ movdqu xmm13, [rsp + 7*16]
+ movdqu xmm12, [rsp + 6*16]
+ movdqu xmm11, [rsp + 5*16]
+ movdqu xmm10, [rsp + 4*16]
+ movdqu xmm9, [rsp + 3*16]
+ movdqu xmm8, [rsp + 2*16]
+ movdqu xmm7, [rsp + 1*16]
+ movdqu xmm6, [rsp + 0*16]
+%endif
+ mov rsp, r14
+%ifidn __OUTPUT_FORMAT__, win64
+ pop rsi
+ pop rdi
+%endif
+ pop r15
+ pop r14
+ pop r13
+ pop r12
+%endmacro
+
+
+;
+; make_u31()
+;
+%macro make_u31 4
+
+%define %%Rt %1
+%define %%Ke %2
+%define %%Ek %3
+%define %%Iv %4
+ xor %%Rt, %%Rt
+ shrd %%Rt, %%Iv, 8
+ shrd %%Rt, %%Ek, 15
+ shrd %%Rt, %%Ke, 9
+%endmacro
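+
+;
+; For reference, the value built by the three SHRDs above, as a C expression
+; (illustrative only): this is the standard ZUC LFSR init word
+; s = key byte (8 bits) || D constant (15 bits) || IV byte (8 bits).
+;
+;     uint32_t s = ((uint32_t)Ke << 23) | ((uint32_t)Ek << 8) | (uint32_t)Iv;
+;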
+
+
+;
+; bits_reorg4()
+;
+; params
+; %1 - round number
+; rax - LFSR pointer
+; uses
+;
+; return
+;
+%macro bits_reorg4 1
+ ;
+ ; xmm15 = LFSR_S15
+ ; xmm14 = LFSR_S14
+ ; xmm11 = LFSR_S11
+ ; xmm9 = LFSR_S9
+ ; xmm7 = LFSR_S7
+ ; xmm5 = LFSR_S5
+ ; xmm2 = LFSR_S2
+ ; xmm0 = LFSR_S0
+ ;
+ movdqa xmm15, [rax + ((15 + %1) % 16)*16]
+ movdqa xmm14, [rax + ((14 + %1) % 16)*16]
+ movdqa xmm11, [rax + ((11 + %1) % 16)*16]
+ movdqa xmm9, [rax + (( 9 + %1) % 16)*16]
+ movdqa xmm7, [rax + (( 7 + %1) % 16)*16]
+ movdqa xmm5, [rax + (( 5 + %1) % 16)*16]
+ movdqa xmm2, [rax + (( 2 + %1) % 16)*16]
+ movdqa xmm0, [rax + (( 0 + %1) % 16)*16]
+
+ pxor xmm1, xmm1
+ pslld xmm15, 1
+ movdqa xmm3, xmm14
+ pblendw xmm3, xmm1, 0xAA
+ pblendw xmm15, xmm3, 0x55
+
+ movdqa [rax + OFS_X0], xmm15 ; BRC_X0
+ pslld xmm11, 16
+ psrld xmm9, 15
+ por xmm11, xmm9
+ movdqa [rax + OFS_X1], xmm11 ; BRC_X1
+ pslld xmm7, 16
+ psrld xmm5, 15
+ por xmm7, xmm5
+ movdqa [rax + OFS_X2], xmm7 ; BRC_X2
+ pslld xmm2, 16
+ psrld xmm0, 15
+ por xmm2, xmm0
+ movdqa [rax + OFS_X3], xmm2 ; BRC_X3
+%endmacro
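+
+;
+; For reference, a scalar C sketch of the standard ZUC bit-reorganization done
+; above for each lane (illustrative only; s[] are the 31-bit LFSR cells):
+;
+;     X0 = ((s[15] & 0x7FFF8000) << 1) | (s[14] & 0xFFFF);
+;     X1 = ((s[11] & 0xFFFF) << 16)    | (s[9]  >> 15);
+;     X2 = ((s[7]  & 0xFFFF) << 16)    | (s[5]  >> 15);
+;     X3 = ((s[2]  & 0xFFFF) << 16)    | (s[0]  >> 15);
+;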
+
+%macro lookup_single_sbox 2
+%define %%table %1 ; [in] Pointer to table to look up
+%define %%idx_val %2 ; [in/out] Index to look up and returned value (rcx, rdx, r8, r9)
+
+%ifdef SAFE_LOOKUP
+ ;; Save all registers used in lookup_8bit (xmm0-5, r9,r10)
+ ;; and registers for param passing and return (4 regs, OS dependent)
+ ;; (6*16 + 6*8 = 144 bytes)
+ sub rsp, 144
+
+ movdqu [rsp], xmm0
+ movdqu [rsp + 16], xmm1
+ movdqu [rsp + 32], xmm2
+ movdqu [rsp + 48], xmm3
+ movdqu [rsp + 64], xmm4
+ movdqu [rsp + 80], xmm5
+ mov [rsp + 96], r9
+ mov [rsp + 104], r10
+
+%ifdef LINUX
+ mov [rsp + 112], rdi
+ mov [rsp + 120], rsi
+ mov [rsp + 128], rdx
+ mov rdi, %%table
+ mov rsi, %%idx_val
+ mov rdx, 256
+%else
+%ifnidni %%idx_val, rcx
+ mov [rsp + 112], rcx
+%endif
+%ifnidni %%idx_val, rdx
+ mov [rsp + 120], rdx
+%endif
+%ifnidni %%idx_val, r8
+ mov [rsp + 128], r8
+%endif
+
+ mov rdx, %%idx_val
+ mov rcx, %%table
+ mov r8, 256
+%endif
+ mov [rsp + 136], rax
+
+ call lookup_8bit_sse
+
+ ;; Restore all registers
+ movdqu xmm0, [rsp]
+ movdqu xmm1, [rsp + 16]
+ movdqu xmm2, [rsp + 32]
+ movdqu xmm3, [rsp + 48]
+ movdqu xmm4, [rsp + 64]
+ movdqu xmm5, [rsp + 80]
+ mov r9, [rsp + 96]
+ mov r10, [rsp + 104]
+
+%ifdef LINUX
+ mov rdi, [rsp + 112]
+ mov rsi, [rsp + 120]
+ mov rdx, [rsp + 128]
+%else
+%ifnidni %%idx_val, rcx
+ mov rcx, [rsp + 112]
+%endif
+%ifnidni %%idx_val, rdx
+ mov rdx, [rsp + 120]
+%endif
+%ifnidni %%idx_val, r8
+ mov r8, [rsp + 128]
+%endif
+%endif
+
+ ;; Move returned value from lookup function, before restoring rax
+ mov DWORD(%%idx_val), eax
+ mov rax, [rsp + 136]
+
+ add rsp, 144
+
+%else ;; SAFE_LOOKUP
+
+ movzx DWORD(%%idx_val), BYTE [%%table + %%idx_val]
+
+%endif ;; SAFE_LOOKUP
+%endmacro
+
+;
+; sbox_lkup()
+;
+; params
+; %1 R1/R2 table offset
+; %2 R1/R2 entry offset
+; %3 xmm reg name
+; uses
+; rcx,rdx,r8,r9,r10,rsi
+; return
+;
+%macro sbox_lkup 3
+ pextrb rcx, %3, (0 + (%2 * 4))
+ lookup_single_sbox rsi, rcx
+
+ pextrb rdx, %3, (1 + (%2 * 4))
+ lookup_single_sbox rdi, rdx
+
+ xor r10, r10
+ pextrb r8, %3, (2 + (%2 * 4))
+ lookup_single_sbox rsi, r8
+ pextrb r9, %3, (3 + (%2 * 4))
+ lookup_single_sbox rdi, r9
+
+ shrd r10d, ecx, 8
+ shrd r10d, edx, 8
+ shrd r10d, r8d, 8
+ shrd r10d, r9d, 8
+ mov [rax + %1 + (%2 * 4)], r10d
+%endmacro
+
+
+;
+; rot_mod32()
+;
+; uses xmm7
+;
+%macro rot_mod32 3
+ movdqa %1, %2
+ pslld %1, %3
+ movdqa xmm7, %2
+ psrld xmm7, (32 - %3)
+
+ por %1, xmm7
+%endmacro
+
+
+;
+; nonlin_fun4()
+;
+; params
+; %1 == 1, then calculate W
+; uses
+;
+; return
+; xmm0 = W value, updates F_R1[] / F_R2[]
+;
+%macro nonlin_fun4 1
+
+%if (%1 == 1)
+ movdqa xmm0, [rax + OFS_X0]
+ pxor xmm0, [rax + OFS_R1]
+ paddd xmm0, [rax + OFS_R2] ; W = (BRC_X0 ^ F_R1) + F_R2
+%endif
+ ;
+ movdqa xmm1, [rax + OFS_R1]
+ movdqa xmm2, [rax + OFS_R2]
+ paddd xmm1, [rax + OFS_X1] ; W1 = F_R1 + BRC_X1
+ pxor xmm2, [rax + OFS_X2] ; W2 = F_R2 ^ BRC_X2
+ ;
+
+ movdqa xmm3, xmm1
+ movdqa xmm4, xmm1
+ movdqa xmm5, xmm2
+ movdqa xmm6, xmm2
+ pslld xmm3, 16
+ psrld xmm4, 16
+ pslld xmm5, 16
+ psrld xmm6, 16
+ movdqa xmm1, xmm3
+ movdqa xmm2, xmm4
+ por xmm1, xmm6
+ por xmm2, xmm5
+
+ ;
+ rot_mod32 xmm3, xmm1, 2
+ rot_mod32 xmm4, xmm1, 10
+ rot_mod32 xmm5, xmm1, 18
+ rot_mod32 xmm6, xmm1, 24
+ pxor xmm1, xmm3
+ pxor xmm1, xmm4
+ pxor xmm1, xmm5
+ pxor xmm1, xmm6 ; XMM1 = U = L1(P)
+
+ sbox_lkup OFS_R1, 0, xmm1 ; F_R1[0]
+ sbox_lkup OFS_R1, 1, xmm1 ; F_R1[1]
+ sbox_lkup OFS_R1, 2, xmm1 ; F_R1[2]
+ sbox_lkup OFS_R1, 3, xmm1 ; F_R1[3]
+ ;
+ rot_mod32 xmm3, xmm2, 8
+ rot_mod32 xmm4, xmm2, 14
+ rot_mod32 xmm5, xmm2, 22
+ rot_mod32 xmm6, xmm2, 30
+ pxor xmm2, xmm3
+ pxor xmm2, xmm4
+ pxor xmm2, xmm5
+ pxor xmm2, xmm6 ; XMM2 = V = L2(Q)
+ ;
+
+ sbox_lkup OFS_R2, 0, xmm2 ; F_R2[0]
+ sbox_lkup OFS_R2, 1, xmm2 ; F_R2[1]
+ sbox_lkup OFS_R2, 2, xmm2 ; F_R2[2]
+ sbox_lkup OFS_R2, 3, xmm2 ; F_R2[3]
+%endmacro
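+
+;
+; For reference, a scalar C sketch of the ZUC nonlinear function F computed
+; above per lane (illustrative only; rotl32(), L1(), L2() and S() are assumed
+; helper names; S() is the byte-wise S0/S1 substitution done by sbox_lkup):
+;
+;     W  = (X0 ^ R1) + R2;                  /* returned in xmm0 when %1 == 1 */
+;     W1 = R1 + X1;
+;     W2 = R2 ^ X2;
+;     R1 = S(L1((W1 << 16) | (W2 >> 16)));  /* L1(x) = x ^ rotl32(x,2) ^ rotl32(x,10) ^ rotl32(x,18) ^ rotl32(x,24) */
+;     R2 = S(L2((W2 << 16) | (W1 >> 16)));  /* L2(x) = x ^ rotl32(x,8) ^ rotl32(x,14) ^ rotl32(x,22) ^ rotl32(x,30) */
+;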
+
+
+;
+; store_kstr4()
+;
+; params
+;
+; uses
+; xmm0 as input
+; return
+;
+%macro store_kstr4 0
+ pxor xmm0, [rax + OFS_X3]
+ pextrd r15d, xmm0, 3
+ pop r9 ; *pKeyStr4
+ pextrd r14d, xmm0, 2
+ pop r8 ; *pKeyStr3
+ pextrd r13d, xmm0, 1
+ pop rdx ; *pKeyStr2
+ pextrd r12d, xmm0, 0
+ pop rcx ; *pKeyStr1
+ mov [r9], r15d
+ mov [r8], r14d
+ mov [rdx], r13d
+ mov [rcx], r12d
+ add rcx, 4
+ add rdx, 4
+ add r8, 4
+ add r9, 4
+ push rcx
+ push rdx
+ push r8
+ push r9
+%endmacro
+
+
+;
+; add_mod31()
+; add two 32-bit args and reduce mod (2^31-1)
+; params
+; %1 - arg1/res
+; %2 - arg2
+; uses
+; xmm2
+; return
+; %1
+%macro add_mod31 2
+ paddd %1, %2
+ movdqa xmm2, %1
+ psrld xmm2, 31
+ pand %1, MASK31
+ paddd %1, xmm2
+%endmacro
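+
+;
+; For reference, as scalar C (illustrative only): the carry out of bit 30 is
+; folded back into bit 0, keeping the result below 2^31.
+;
+;     uint32_t sum = a + b;                    /* a, b < 2^31 */
+;     sum = (sum & 0x7FFFFFFF) + (sum >> 31);  /* sum mod (2^31 - 1), with 0x7FFFFFFF == 0 */
+;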
+
+
+;
+; rot_mod31()
+; rotate (mult by pow of 2) 32-bit arg and reduce mod (2^31-1)
+; params
+; %1 - arg
+; %2 - # of bits
+; uses
+; xmm2
+; return
+; %1
+%macro rot_mod31 2
+
+ movdqa xmm2, %1
+ pslld xmm2, %2
+ psrld %1, (31 - %2)
+
+ por %1, xmm2
+ pand %1, MASK31
+%endmacro
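+
+;
+; For reference, as scalar C (illustrative only): a left rotation inside a
+; 31-bit word, i.e. multiplication by 2^n modulo (2^31 - 1), for 0 < n < 31.
+;
+;     uint32_t r = ((x << n) | (x >> (31 - n))) & 0x7FFFFFFF;
+;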
+
+
+;
+; lfsr_updt4()
+;
+; params
+; %1 - round number
+; uses
+; xmm0 as input (ZERO or W)
+; return
+;
+%macro lfsr_updt4 1
+ ;
+ ; xmm1 = LFSR_S0
+ ; xmm4 = LFSR_S4
+ ; xmm10 = LFSR_S10
+ ; xmm13 = LFSR_S13
+ ; xmm15 = LFSR_S15
+ ;
+ pxor xmm3, xmm3
+ movdqa xmm1, [rax + (( 0 + %1) % 16)*16]
+ movdqa xmm4, [rax + (( 4 + %1) % 16)*16]
+ movdqa xmm10, [rax + ((10 + %1) % 16)*16]
+ movdqa xmm13, [rax + ((13 + %1) % 16)*16]
+ movdqa xmm15, [rax + ((15 + %1) % 16)*16]
+
+ ; Calculate LFSR feedback
+ add_mod31 xmm0, xmm1
+ rot_mod31 xmm1, 8
+ add_mod31 xmm0, xmm1
+ rot_mod31 xmm4, 20
+ add_mod31 xmm0, xmm4
+ rot_mod31 xmm10, 21
+ add_mod31 xmm0, xmm10
+ rot_mod31 xmm13, 17
+ add_mod31 xmm0, xmm13
+ rot_mod31 xmm15, 15
+ add_mod31 xmm0, xmm15
+
+
+
+ movdqa [rax + (( 0 + %1) % 16)*16], xmm0
+
+ ; LFSR_S16 = (LFSR_S15++) = eax
+%endmacro
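+
+;
+; For reference, the standard ZUC LFSR feedback computed above, as scalar C
+; (illustrative only; add31()/rot31() stand for the add_mod31/rot_mod31
+; operations; W is xmm0 - zero in keystream mode, (W >> 1) during init):
+;
+;     uint32_t f = s0;
+;     f = add31(f, rot31(s0,  8));
+;     f = add31(f, rot31(s4,  20));
+;     f = add31(f, rot31(s10, 21));
+;     f = add31(f, rot31(s13, 17));
+;     f = add31(f, rot31(s15, 15));
+;     s16 = add31(f, W);    /* s16 overwrites the s0 slot of the circular buffer */
+;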
+
+
+;
+; key_expand_4()
+;
+%macro key_expand_4 2
+ movzx r8d, byte [rdi + (%1 + 0)]
+ movzx r9d, word [rbx + ((%1 + 0)*2)]
+ movzx r10d, byte [rsi + (%1 + 0)]
+ make_u31 r11d, r8d, r9d, r10d
+ mov [rax + (((%1 + 0)*16)+(%2*4))], r11d
+
+ movzx r12d, byte [rdi + (%1 + 1)]
+ movzx r13d, word [rbx + ((%1 + 1)*2)]
+ movzx r14d, byte [rsi + (%1 + 1)]
+ make_u31 r15d, r12d, r13d, r14d
+ mov [rax + (((%1 + 1)*16)+(%2*4))], r15d
+%endmacro
+
+MKGLOBAL(asm_ZucInitialization_4_sse,function,internal)
+asm_ZucInitialization_4_sse:
+
+%ifdef LINUX
+ %define pKe rdi
+ %define pIv rsi
+ %define pState rdx
+%else
+ %define pKe rcx
+ %define pIv rdx
+ %define pState r8
+%endif
+
+ ; Save non-volatile registers
+ push rbx
+ push rdi
+ push rsi
+ push r12
+ push r13
+ push r14
+ push r15
+ push rdx
+
+ lea rax, [pState] ; load pointer to LFSR
+ push pState ; Save LFSR Pointer to stack
+
+ ; setup the key pointer for first buffer key expand
+ mov rbx, [pKe] ; load the pointer to the array of keys into rbx
+
+ push pKe ; save rdi (key pointer) to the stack
+ lea rdi, [rbx] ; load the pointer to the first key into rdi
+
+
+ ; setup the IV pointer for first buffer key expand
+ mov rcx, [pIv] ; load the pointer to the array of IV's
+ push pIv ; save the IV pointer to the stack
+ lea rsi, [rcx] ; load the first IV pointer
+
+ lea rbx, [EK_d] ; load D variables
+
+ ; Expand key packet 1
+ key_expand_4 0, 0
+ key_expand_4 2, 0
+ key_expand_4 4, 0
+ key_expand_4 6, 0
+ key_expand_4 8, 0
+ key_expand_4 10, 0
+ key_expand_4 12, 0
+ key_expand_4 14, 0
+
+
+ ;second packet key expand here - reset pointers
+ pop rdx ; get IV array pointer from Stack
+ mov rcx, [rdx+8] ; load offset to IV 2 in array
+ lea rsi, [rcx] ; load pointer to IV2
+
+ pop rbx ; get Key array pointer from Stack
+ mov rcx, [rbx+8] ; load offset to key 2 in array
+ lea rdi, [rcx] ; load pointer to Key 2
+
+ push rbx ; save Key pointer
+ push rdx ; save IV pointer
+
+ lea rbx, [EK_d]
+
+ ; Expand key packet 2
+ key_expand_4 0, 1
+ key_expand_4 2, 1
+ key_expand_4 4, 1
+ key_expand_4 6, 1
+ key_expand_4 8, 1
+ key_expand_4 10, 1
+ key_expand_4 12, 1
+ key_expand_4 14, 1
+
+
+
+ ;Third packet key expand here - reset pointers
+ pop rdx ; get IV array pointer from Stack
+ mov rcx, [rdx+16] ; load offset to IV 3 in array
+ lea rsi, [rcx] ; load pointer to IV3
+
+ pop rbx ; get Key array pointer from Stack
+ mov rcx, [rbx+16] ; load offset to key 3 in array
+ lea rdi, [rcx] ; load pointer to Key 3
+
+ push rbx ; save Key pointer
+ push rdx ; save IV pointer
+ lea rbx, [EK_d]
+ ; Expand key packet 3
+ key_expand_4 0, 2
+ key_expand_4 2, 2
+ key_expand_4 4, 2
+ key_expand_4 6, 2
+ key_expand_4 8, 2
+ key_expand_4 10, 2
+ key_expand_4 12, 2
+ key_expand_4 14, 2
+
+
+
+ ;fourth packet key expand here - reset pointers
+ pop rdx ; get IV array pointer from Stack
+ mov rcx, [rdx+24] ; load offset to IV 4 in array
+ lea rsi, [rcx] ; load pointer to IV4
+
+ pop rbx ; get Key array pointer from Stack
+	mov	rcx, [rbx+24]      ; load offset to key 4 in array
+	lea	rdi, [rcx]         ; load pointer to Key 4
+ lea rbx, [EK_d]
+ ; Expand key packet 4
+ key_expand_4 0, 3
+ key_expand_4 2, 3
+ key_expand_4 4, 3
+ key_expand_4 6, 3
+ key_expand_4 8, 3
+ key_expand_4 10, 3
+ key_expand_4 12, 3
+ key_expand_4 14, 3
+
+ ; Set R1 and R2 to zero
+ ;xor r10, r10
+ ;xor r11, r11
+
+
+
+ ; Load read-only registers
+ lea rdi, [S0] ; used by sbox_lkup() macro
+ lea rsi, [S1]
+ movdqa xmm12, [mask31]
+
+ ; Shift LFSR 32-times, update state variables
+%assign N 0
+%rep 32
+ pop rdx
+ lea rax, [rdx]
+ push rdx
+
+ bits_reorg4 N
+ nonlin_fun4 1
+ psrld xmm0,1 ; Shift out LSB of W
+
+ pop rdx
+ lea rax, [rdx]
+ push rdx
+
+ lfsr_updt4 N ; W (xmm0) used in LFSR update - not set to zero
+%assign N N+1
+%endrep
+
+ ; And once more, initial round from keygen phase = 33 times
+ pop rdx
+ lea rax, [rdx]
+ push rdx
+
+ bits_reorg4 0
+ nonlin_fun4 0
+
+ pop rdx
+ lea rax, [rdx]
+
+ pxor xmm0, xmm0
+ lfsr_updt4 0
+
+
+
+ ; Restore non-volatile registers
+ pop rdx
+ pop r15
+ pop r14
+ pop r13
+ pop r12
+ pop rsi
+ pop rdi
+ pop rbx
+
+ ret
+;
+;
+;
+;;
+;; void asm_ZucGenKeystream64B_4_sse(state4_t *pSta, u32* pKeyStr1, u32* pKeyStr2, u32* pKeyStr3, u32* pKeyStr4);
+;;
+;; WIN64
+;; RCX - pSta
+;; RDX - pKeyStr1
+;; R8 - pKeyStr2
+;; R9 - pKeyStr3
+;; Stack - pKeyStr4
+;;
+;; LIN64
+;; RDI - pSta
+;; RSI - pKeyStr1
+;; RDX - pKeyStr2
+;; RCX - pKeyStr3
+;; R8 - pKeyStr4
+;;
+MKGLOBAL(asm_ZucGenKeystream64B_4_sse,function,internal)
+asm_ZucGenKeystream64B_4_sse:
+
+%ifdef LINUX
+ %define pState rdi
+ %define pKS1 rsi
+ %define pKS2 rdx
+ %define pKS3 rcx
+ %define pKS4 r8
+%else
+ %define pState rcx
+ %define pKS1 rdx
+ %define pKS2 r8
+ %define pKS3 r9
+ %define pKS4 rax
+%endif
+
+%ifndef LINUX
+ mov rax, [rsp + 8*5] ; 5th parameter from stack
+%endif
+
+ ; Save non-volatile registers
+ push rbx
+ push r12
+ push r13
+ push r14
+ push r15
+
+%ifndef LINUX
+ push rdi
+ push rsi
+%endif
+ ; Store 4 keystream pointers on the stack
+
+ push pKS1
+ push pKS2
+ push pKS3
+ push pKS4
+
+
+ ; Load state pointer in RAX
+ mov rax, pState
+
+
+ ; Load read-only registers
+ lea rdi, [S0] ; used by sbox_lkup() macro
+ lea rsi, [S1]
+ movdqa xmm12, [mask31]
+
+ ; Generate 64B of keystream in 16 rounds
+%assign N 1
+%rep 16
+ bits_reorg4 N
+ nonlin_fun4 1
+ store_kstr4
+ pxor xmm0, xmm0
+ lfsr_updt4 N
+%assign N N+1
+%endrep
+
+ ; Take keystream pointers off (#push = #pops)
+ pop rax
+ pop rax
+ pop rax
+ pop rax
+
+%ifndef LINUX
+ pop rsi
+ pop rdi
+%endif
+
+ ; Restore non-volatile registers
+ pop r15
+ pop r14
+ pop r13
+ pop r12
+ pop rbx
+ ret
+
+;;
+;; extern uint32_t Zuc_Eia3_Remainder_sse(const void *ks, const void *data, uint64_t n_bits)
+;;
+;; Returns authentication update value to be XOR'ed with current authentication tag
+;;
+;; WIN64
+;; RCX - KS (key stream pointer)
+;; RDX - DATA (data pointer)
+;; R8 - N_BITS (number data bits to process)
+;; LIN64
+;; RDI - KS (key stream pointer)
+;; RSI - DATA (data pointer)
+;; RDX - N_BITS (number data bits to process)
+;;
+align 16
+MKGLOBAL(asm_Eia3RemainderSSE,function,internal)
+asm_Eia3RemainderSSE:
+%ifdef LINUX
+ %define KS rdi
+ %define DATA rsi
+ %define N_BITS rdx
+%else
+ %define KS rcx
+ %define DATA rdx
+ %define N_BITS r8
+%endif
+
+ FUNC_SAVE
+
+ movdqa xmm5, [bit_reverse_table_l]
+ movdqa xmm6, [bit_reverse_table_h]
+ movdqa xmm7, [bit_reverse_and_table]
+ movdqa xmm10, [data_mask_64bits]
+
+ pxor xmm9, xmm9
+
+%rep 3
+ cmp N_BITS, 128
+ jb Eia3RoundsSSE_dq_end
+
+ ;; read 16 bytes and reverse bits
+ movdqu xmm0, [DATA]
+ movdqa xmm1, xmm0
+ pand xmm1, xmm7
+
+ movdqa xmm2, xmm7
+ pandn xmm2, xmm0
+ psrld xmm2, 4
+
+ movdqa xmm8, xmm6 ; bit reverse low nibbles (use high table)
+ pshufb xmm8, xmm1
+
+ movdqa xmm4, xmm5 ; bit reverse high nibbles (use low table)
+ pshufb xmm4, xmm2
+
+ por xmm8, xmm4
+ ; xmm8 - bit reversed data bytes
+
+ ;; ZUC authentication part
+ ;; - 4x32 data bits
+ ;; - set up KS
+ movdqu xmm3, [KS + (0*4)]
+ movdqu xmm4, [KS + (2*4)]
+ pshufd xmm0, xmm3, 0x61
+ pshufd xmm1, xmm4, 0x61
+
+ ;; - set up DATA
+ movdqa xmm2, xmm8
+ pand xmm2, xmm10
+ pshufd xmm3, xmm2, 0xdc
+ movdqa xmm4, xmm3
+
+ psrldq xmm8, 8
+ pshufd xmm13, xmm8, 0xdc
+ movdqa xmm14, xmm13
+
+ ;; - clmul
+ ;; - xor the results from 4 32-bit words together
+ pclmulqdq xmm3, xmm0, 0x00
+ pclmulqdq xmm4, xmm0, 0x11
+ pclmulqdq xmm13, xmm1, 0x00
+ pclmulqdq xmm14, xmm1, 0x11
+
+ pxor xmm3, xmm4
+ pxor xmm13, xmm14
+ pxor xmm9, xmm3
+ pxor xmm9, xmm13
+ lea DATA, [DATA + 16]
+ lea KS, [KS + 16]
+ sub N_BITS, 128
+%endrep
+Eia3RoundsSSE_dq_end:
+
+%rep 3
+ cmp N_BITS, 32
+ jb Eia3RoundsSSE_dw_end
+
+ ;; swap dwords in KS
+ movq xmm1, [KS]
+ pshufd xmm4, xmm1, 0xf1
+
+ ;; bit-reverse 4 bytes of data
+ movdqa xmm2, xmm7
+ movd xmm0, [DATA]
+ movdqa xmm1, xmm0
+ pand xmm1, xmm2
+
+ pandn xmm2, xmm0
+ psrld xmm2, 4
+
+ movdqa xmm0, xmm6 ; bit reverse low nibbles (use high table)
+ pshufb xmm0, xmm1
+
+ movdqa xmm3, xmm5 ; bit reverse high nibbles (use low table)
+ pshufb xmm3, xmm2
+
+ por xmm0, xmm3
+
+ ;; rol & xor
+ pclmulqdq xmm0, xmm4, 0
+ pxor xmm9, xmm0
+
+ lea DATA, [DATA + 4]
+ lea KS, [KS + 4]
+ sub N_BITS, 32
+%endrep
+
+Eia3RoundsSSE_dw_end:
+ movq rax, xmm9
+ shr rax, 32
+
+ or N_BITS, N_BITS
+ jz Eia3RoundsSSE_byte_loop_end
+
+ ;; get 64-bit key stream for the last data bits (less than 32)
+ mov KS, [KS]
+
+; ;; process remaining data bytes and bits
+Eia3RoundsSSE_byte_loop:
+ or N_BITS, N_BITS
+ jz Eia3RoundsSSE_byte_loop_end
+
+ cmp N_BITS, 8
+ jb Eia3RoundsSSE_byte_partial
+
+ movzx r11, byte [DATA]
+ sub N_BITS, 8
+ jmp Eia3RoundsSSE_byte_read
+
+Eia3RoundsSSE_byte_partial:
+ ;; process remaining bits (up to 7)
+ lea r11, [bit_mask_table]
+ movzx r10, byte [r11 + N_BITS]
+ movzx r11, byte [DATA]
+ and r11, r10
+ xor N_BITS, N_BITS
+Eia3RoundsSSE_byte_read:
+
+%assign DATATEST 0x80
+%rep 8
+ xor r10, r10
+ test r11, DATATEST
+ cmovne r10, KS
+ xor rax, r10
+ rol KS, 1
+%assign DATATEST (DATATEST >> 1)
+%endrep ; byte boundary
+ lea DATA, [DATA + 1]
+ jmp Eia3RoundsSSE_byte_loop
+
+Eia3RoundsSSE_byte_loop_end:
+
+ ;; eax - holds the return value at this stage
+
+ FUNC_RESTORE
+
+ ret
+
+;;
+;;extern uint32_t Zuc_Eia3_Round64B_sse(uint32_t T, const void *KS, const void *DATA)
+;;
+;; Updates authentication tag T based on keystream KS and DATA.
+;; - it processes 64 bytes of DATA
+;; - reads data in 16 byte chunks and bit reverses them
+;; - reads and re-arranges KS
+;; - employs clmul for the XOR & ROL part
+;; - copies top 64 bytes of KS to bottom (for the next round)
+;;
+;; WIN64
+;; RCX - T
+;; RDX - KS pointer to key stream (2 x 64 bytes)
+;; R8  - DATA pointer to data
+;; LIN64
+;; RDI - T
+;; RSI - KS pointer to key stream (2 x 64 bytes)
+;; RDX - DATA pointer to data
+;;
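+
+;; For reference, a bit-level C sketch of what one 64-byte EIA-3 round adds to
+;; the tag (illustrative only; get_ks_word() is an assumed helper returning the
+;; 32 keystream bits starting at bit offset i):
+;;
+;;     for (int i = 0; i < 64 * 8; i++)
+;;             if (data[i / 8] & (0x80 >> (i % 8)))   /* data bit i, MSB first */
+;;                     T ^= get_ks_word(ks, i);       /* ks bits i .. i+31 */
+;;
+;; The clmul path below computes the same XOR-and-shift sums 32 data bits at a
+;; time on bit-reversed input.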
+align 16
+MKGLOBAL(asm_Eia3Round64BSSE,function,internal)
+asm_Eia3Round64BSSE:
+
+%ifdef LINUX
+ %define T edi
+ %define KS rsi
+ %define DATA rdx
+%else
+ %define T ecx
+ %define KS rdx
+ %define DATA r8
+%endif
+
+ FUNC_SAVE
+
+ movdqa xmm5, [bit_reverse_table_l]
+ movdqa xmm6, [bit_reverse_table_h]
+ movdqa xmm7, [bit_reverse_and_table]
+ movdqa xmm10, [data_mask_64bits]
+
+ pxor xmm9, xmm9
+
+%assign I 0
+%rep 4
+ ;; read 16 bytes and reverse bits
+ movdqu xmm0, [DATA + 16*I]
+ movdqa xmm1, xmm0
+ pand xmm1, xmm7
+
+ movdqa xmm2, xmm7
+ pandn xmm2, xmm0
+ psrld xmm2, 4
+
+ movdqa xmm8, xmm6 ; bit reverse low nibbles (use high table)
+ pshufb xmm8, xmm1
+
+ movdqa xmm4, xmm5 ; bit reverse high nibbles (use low table)
+ pshufb xmm4, xmm2
+
+ por xmm8, xmm4
+ ; xmm8 - bit reversed data bytes
+
+ ;; ZUC authentication part
+ ;; - 4x32 data bits
+ ;; - set up KS
+%if I != 0
+ movdqa xmm0, xmm12
+ movdqu xmm2, [KS + (I*16) + (4*4)]
+ movdqa xmm12, xmm2
+ palignr xmm2, xmm0, 8
+ pshufd xmm1, xmm0, 0x61
+ pshufd xmm11, xmm2, 0x61
+%else
+ movdqu xmm2, [KS + (I*16) + (0*4)]
+ movdqu xmm3, [KS + (I*16) + (4*4)]
+ movdqa xmm12, xmm3
+ palignr xmm3, xmm2, 8
+ pshufd xmm1, xmm2, 0x61
+ pshufd xmm11, xmm3, 0x61
+%endif
+
+ ;; - set up DATA
+ movdqa xmm0, xmm8
+ pand xmm0, xmm10
+ pshufd xmm3, xmm0, 0xdc
+ movdqa xmm0, xmm3
+
+ psrldq xmm8, 8
+ pshufd xmm13, xmm8, 0xdc
+ movdqa xmm14, xmm13
+
+ ;; - clmul
+ ;; - xor the results from 4 32-bit words together
+ pclmulqdq xmm0, xmm1, 0x00
+ pclmulqdq xmm3, xmm1, 0x11
+ pclmulqdq xmm14, xmm11, 0x00
+ pclmulqdq xmm13, xmm11, 0x11
+
+ pxor xmm3, xmm0
+ pxor xmm13, xmm14
+ pxor xmm9, xmm3
+ pxor xmm9, xmm13
+
+%assign I (I + 1)
+%endrep
+
+ ;; - update T
+ movq rax, xmm9
+ shr rax, 32
+ xor eax, T
+
+ FUNC_RESTORE
+
+ ret
+
+
+;----------------------------------------------------------------------------------------
+;----------------------------------------------------------------------------------------
+
+%ifdef LINUX
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
diff --git a/src/spdk/intel-ipsec-mb/sse/zuc_sse_top.c b/src/spdk/intel-ipsec-mb/sse/zuc_sse_top.c
new file mode 100755
index 000000000..5a4eb98c5
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/sse/zuc_sse_top.c
@@ -0,0 +1,554 @@
+/*******************************************************************************
+ Copyright (c) 2009-2019, Intel Corporation
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*******************************************************************************/
+
+/*-----------------------------------------------------------------------
+* zuc_sse.c
+*-----------------------------------------------------------------------
+* An implementation of ZUC, the core algorithm for the
+* 3GPP Confidentiality and Integrity algorithms.
+*
+*-----------------------------------------------------------------------*/
+
+#include <string.h>
+
+#include "include/zuc_internal.h"
+#include "include/wireless_common.h"
+#include "include/save_xmms.h"
+#include "include/clear_regs_mem.h"
+#include "intel-ipsec-mb.h"
+
+#define SAVE_XMMS save_xmms
+#define RESTORE_XMMS restore_xmms
+#define CLEAR_SCRATCH_SIMD_REGS clear_scratch_xmms_sse
+
+static inline
+void _zuc_eea3_1_buffer_sse(const void *pKey,
+ const void *pIv,
+ const void *pBufferIn,
+ void *pBufferOut,
+ const uint32_t length)
+{
+ DECLARE_ALIGNED(ZucState_t zucState, 64);
+ DECLARE_ALIGNED(uint8_t keyStream[64], 64);
+ /* buffer to store 64 bytes of keystream */
+ DECLARE_ALIGNED(uint8_t tempSrc[64], 64);
+ DECLARE_ALIGNED(uint8_t tempDst[64], 64);
+
+ const uint64_t *pIn64 = NULL;
+ const uint8_t *pIn8 = NULL;
+ uint8_t *pOut8 = NULL;
+ uint64_t *pOut64 = NULL, *pKeyStream64 = NULL;
+ uint64_t *pTemp64 = NULL, *pdstTemp64 = NULL;
+
+ uint32_t numKeyStreamsPerPkt = length/ ZUC_KEYSTR_LEN;
+ uint32_t numBytesLeftOver = length % ZUC_KEYSTR_LEN;
+
+ /* need to set the LFSR state to zero */
+ memset(&zucState, 0, sizeof(ZucState_t));
+
+ /* initialize the zuc state */
+ asm_ZucInitialization(pKey, pIv, &(zucState));
+
+ /* Loop Over all the Quad-Words in input buffer and XOR with the 64bits
+ * of generated keystream */
+ pOut64 = (uint64_t *) pBufferOut;
+ pIn64 = (const uint64_t *) pBufferIn;
+
+ while (numKeyStreamsPerPkt--) {
+ /* Generate the key stream 64 bytes at a time */
+ asm_ZucGenKeystream64B((uint32_t *) &keyStream[0], &zucState);
+
+ /* XOR The Keystream generated with the input buffer here */
+ pKeyStream64 = (uint64_t *) keyStream;
+ asm_XorKeyStream64B_sse(pIn64, pOut64, pKeyStream64);
+ pIn64 += 8;
+ pOut64 += 8;
+ }
+
+ /* Check for remaining 0 to 63 bytes */
+ pIn8 = (const uint8_t *) pBufferIn;
+ pOut8 = (uint8_t *) pBufferOut;
+ if(numBytesLeftOver) {
+ asm_ZucGenKeystream64B((uint32_t *) &keyStream[0], &zucState);
+
+ /* copy the remaining bytes into temporary buffer and XOR with
+ * the 64-bytes of keystream. Then copy on the valid bytes back
+ * to the output buffer */
+
+ memcpy(&tempSrc[0], &pIn8[length - numBytesLeftOver],
+ numBytesLeftOver);
+ pKeyStream64 = (uint64_t *) &keyStream[0];
+ pTemp64 = (uint64_t *) &tempSrc[0];
+ pdstTemp64 = (uint64_t *) &tempDst[0];
+
+ asm_XorKeyStream64B_sse(pTemp64, pdstTemp64, pKeyStream64);
+ memcpy(&pOut8[length - numBytesLeftOver], &tempDst[0],
+ numBytesLeftOver);
+
+ }
+#ifdef SAFE_DATA
+ /* Clear sensitive data in stack */
+ clear_mem(keyStream, sizeof(keyStream));
+ clear_mem(&zucState, sizeof(zucState));
+#endif
+}
+
+static inline
+void _zuc_eea3_4_buffer_sse(const void * const pKey[4],
+ const void * const pIv[4],
+ const void * const pBufferIn[4],
+ void *pBufferOut[4],
+ const uint32_t length[4])
+{
+
+ DECLARE_ALIGNED(ZucState4_t state, 64);
+ DECLARE_ALIGNED(ZucState_t singlePktState, 64);
+
+ unsigned int i = 0;
+ /* Calculate the minimum input packet size */
+ uint32_t bytes1 = (length[0] < length[1] ?
+ length[0] : length[1]);
+ uint32_t bytes2 = (length[2] < length[3] ?
+ length[2] : length[3]);
+ /* min number of bytes */
+ uint32_t bytes = (bytes1 < bytes2) ? bytes1 : bytes2;
+ uint32_t numKeyStreamsPerPkt = bytes/ZUC_KEYSTR_LEN;
+ uint32_t remainBytes[4] = {0};
+ DECLARE_ALIGNED(uint8_t keyStr1[64], 64);
+ DECLARE_ALIGNED(uint8_t keyStr2[64], 64);
+ DECLARE_ALIGNED(uint8_t keyStr3[64], 64);
+ DECLARE_ALIGNED(uint8_t keyStr4[64], 64);
+ DECLARE_ALIGNED(uint8_t tempSrc[64], 64);
+ DECLARE_ALIGNED(uint8_t tempDst[64], 64);
+ /* structure to store the 4 keys */
+ DECLARE_ALIGNED(ZucKey4_t keys, 64);
+ /* structure to store the 4 IV's */
+ DECLARE_ALIGNED(ZucIv4_t ivs, 64);
+ uint32_t numBytesLeftOver = 0;
+ const uint8_t *pTempBufInPtr = NULL;
+ uint8_t *pTempBufOutPtr = NULL;
+
+ const uint64_t *pIn64_0 = NULL;
+ const uint64_t *pIn64_1 = NULL;
+ const uint64_t *pIn64_2 = NULL;
+ const uint64_t *pIn64_3 = NULL;
+ uint64_t *pOut64_0 = NULL;
+ uint64_t *pOut64_1 = NULL;
+ uint64_t *pOut64_2 = NULL;
+ uint64_t *pOut64_3 = NULL;
+ uint64_t *pTempSrc64 = NULL;
+ uint64_t *pTempDst64 = NULL;
+ uint64_t *pKeyStream64 = NULL;
+
+ /* rounded down minimum length */
+ bytes = numKeyStreamsPerPkt * ZUC_KEYSTR_LEN;
+
+ /* Need to set the LFSR state to zero */
+ memset(&state, 0, sizeof(ZucState4_t));
+
+ /* Calculate the number of bytes left over for each packet */
+ for (i=0; i< 4; i++)
+ remainBytes[i] = length[i] - bytes;
+
+ /* Setup the Keys */
+ keys.pKey1 = pKey[0];
+ keys.pKey2 = pKey[1];
+ keys.pKey3 = pKey[2];
+ keys.pKey4 = pKey[3];
+
+ /* setup the IV's */
+ ivs.pIv1 = pIv[0];
+ ivs.pIv2 = pIv[1];
+ ivs.pIv3 = pIv[2];
+ ivs.pIv4 = pIv[3];
+
+ asm_ZucInitialization_4_sse( &keys, &ivs, &state);
+
+ pOut64_0 = (uint64_t *) pBufferOut[0];
+ pOut64_1 = (uint64_t *) pBufferOut[1];
+ pOut64_2 = (uint64_t *) pBufferOut[2];
+ pOut64_3 = (uint64_t *) pBufferOut[3];
+
+ pIn64_0 = (const uint64_t *) pBufferIn[0];
+ pIn64_1 = (const uint64_t *) pBufferIn[1];
+ pIn64_2 = (const uint64_t *) pBufferIn[2];
+ pIn64_3 = (const uint64_t *) pBufferIn[3];
+
+ /* Loop for 64 bytes at a time generating 4 key-streams per loop */
+ while (numKeyStreamsPerPkt) {
+ /* Generate 64 bytes at a time */
+ asm_ZucGenKeystream64B_4_sse(&state,
+ (uint32_t *) keyStr1,
+ (uint32_t *) keyStr2,
+ (uint32_t *) keyStr3,
+ (uint32_t *) keyStr4);
+
+ /* XOR the KeyStream with the input buffers and store in output
+ * buffer*/
+ pKeyStream64 = (uint64_t *) keyStr1;
+ asm_XorKeyStream64B_sse(pIn64_0, pOut64_0, pKeyStream64);
+ pIn64_0 += 8;
+ pOut64_0 += 8;
+
+ pKeyStream64 = (uint64_t *) keyStr2;
+ asm_XorKeyStream64B_sse(pIn64_1, pOut64_1, pKeyStream64);
+ pIn64_1 += 8;
+ pOut64_1 += 8;
+
+ pKeyStream64 = (uint64_t *) keyStr3;
+ asm_XorKeyStream64B_sse(pIn64_2, pOut64_2, pKeyStream64);
+ pIn64_2 += 8;
+ pOut64_2 += 8;
+
+ pKeyStream64 = (uint64_t *) keyStr4;
+ asm_XorKeyStream64B_sse(pIn64_3, pOut64_3, pKeyStream64);
+ pIn64_3 += 8;
+ pOut64_3 += 8;
+
+ /* Update keystream count */
+ numKeyStreamsPerPkt--;
+
+ }
+
+ /* process each packet separately for the remaining bytes */
+ for (i = 0; i < 4; i++) {
+ if (remainBytes[i]) {
+ /* need to copy the zuc state to single packet state */
+ singlePktState.lfsrState[0] = state.lfsrState[0][i];
+ singlePktState.lfsrState[1] = state.lfsrState[1][i];
+ singlePktState.lfsrState[2] = state.lfsrState[2][i];
+ singlePktState.lfsrState[3] = state.lfsrState[3][i];
+ singlePktState.lfsrState[4] = state.lfsrState[4][i];
+ singlePktState.lfsrState[5] = state.lfsrState[5][i];
+ singlePktState.lfsrState[6] = state.lfsrState[6][i];
+ singlePktState.lfsrState[7] = state.lfsrState[7][i];
+ singlePktState.lfsrState[8] = state.lfsrState[8][i];
+ singlePktState.lfsrState[9] = state.lfsrState[9][i];
+ singlePktState.lfsrState[10] = state.lfsrState[10][i];
+ singlePktState.lfsrState[11] = state.lfsrState[11][i];
+ singlePktState.lfsrState[12] = state.lfsrState[12][i];
+ singlePktState.lfsrState[13] = state.lfsrState[13][i];
+ singlePktState.lfsrState[14] = state.lfsrState[14][i];
+ singlePktState.lfsrState[15] = state.lfsrState[15][i];
+
+ singlePktState.fR1 = state.fR1[i];
+ singlePktState.fR2 = state.fR2[i];
+
+ singlePktState.bX0 = state.bX0[i];
+ singlePktState.bX1 = state.bX1[i];
+ singlePktState.bX2 = state.bX2[i];
+ singlePktState.bX3 = state.bX3[i];
+
+ numKeyStreamsPerPkt = remainBytes[i] / ZUC_KEYSTR_LEN;
+ numBytesLeftOver = remainBytes[i] % ZUC_KEYSTR_LEN;
+
+ pTempBufInPtr = pBufferIn[i];
+ pTempBufOutPtr = pBufferOut[i];
+
+ /* update the output and input pointers here to point
+ * to the i'th buffers */
+ pOut64_0 = (uint64_t *) &pTempBufOutPtr[length[i] -
+ remainBytes[i]];
+ pIn64_0 = (const uint64_t *) &pTempBufInPtr[length[i] -
+ remainBytes[i]];
+
+ while (numKeyStreamsPerPkt--) {
+ /* Generate the key stream 64 bytes at a time */
+ asm_ZucGenKeystream64B((uint32_t *) keyStr1,
+ &singlePktState);
+ pKeyStream64 = (uint64_t *) keyStr1;
+ asm_XorKeyStream64B_sse(pIn64_0, pOut64_0,
+ pKeyStream64);
+ pIn64_0 += 8;
+ pOut64_0 += 8;
+ }
+
+ /* Check for remaining 0 to 63 bytes */
+ if (numBytesLeftOver) {
+ asm_ZucGenKeystream64B((uint32_t *) &keyStr1,
+ &singlePktState);
+ uint32_t offset = length[i] - numBytesLeftOver;
+
+ /* Copy the remaining bytes into a temporary
+ * buffer and XOR with the 64 bytes of
+ * keystream. Then copy only the valid bytes back
+ * to the output buffer */
+ memcpy(&tempSrc[0], &pTempBufInPtr[offset],
+ numBytesLeftOver);
+ memset(&tempSrc[numBytesLeftOver], 0,
+ 64 - numBytesLeftOver);
+
+ pKeyStream64 = (uint64_t *) &keyStr1[0];
+ pTempSrc64 = (uint64_t *) &tempSrc[0];
+ pTempDst64 = (uint64_t *) &tempDst[0];
+ asm_XorKeyStream64B_sse(pTempSrc64, pTempDst64,
+ pKeyStream64);
+
+ memcpy(&pTempBufOutPtr[offset],
+ &tempDst[0], numBytesLeftOver);
+ }
+ }
+ }
+#ifdef SAFE_DATA
+ /* Clear sensitive data in stack */
+ clear_mem(keyStr1, sizeof(keyStr1));
+ clear_mem(keyStr2, sizeof(keyStr2));
+ clear_mem(keyStr3, sizeof(keyStr3));
+ clear_mem(keyStr4, sizeof(keyStr4));
+ clear_mem(&singlePktState, sizeof(singlePktState));
+ clear_mem(&state, sizeof(state));
+ clear_mem(&keys, sizeof(keys));
+ clear_mem(&ivs, sizeof(ivs));
+#endif
+}
+
+void zuc_eea3_1_buffer_sse(const void *pKey,
+ const void *pIv,
+ const void *pBufferIn,
+ void *pBufferOut,
+ const uint32_t length)
+{
+#ifndef LINUX
+ DECLARE_ALIGNED(uint128_t xmm_save[10], 16);
+
+ SAVE_XMMS(xmm_save);
+#endif
+#ifdef SAFE_PARAM
+ /* Check for NULL pointers */
+ if (pKey == NULL || pIv == NULL || pBufferIn == NULL ||
+ pBufferOut == NULL)
+ return;
+
+ /* Check input data is in range of supported length */
+ if (length < ZUC_MIN_LEN || length > ZUC_MAX_LEN)
+ return;
+#endif
+
+ _zuc_eea3_1_buffer_sse(pKey, pIv, pBufferIn, pBufferOut, length);
+
+#ifdef SAFE_DATA
+ /* Clear sensitive data in registers */
+ CLEAR_SCRATCH_GPS();
+ CLEAR_SCRATCH_SIMD_REGS();
+#endif
+#ifndef LINUX
+ RESTORE_XMMS(xmm_save);
+#endif
+}
+
+void zuc_eea3_4_buffer_sse(const void * const pKey[4],
+ const void * const pIv[4],
+ const void * const pBufferIn[4],
+ void *pBufferOut[4],
+ const uint32_t length[4])
+{
+#ifndef LINUX
+ DECLARE_ALIGNED(uint128_t xmm_save[10], 16);
+
+ SAVE_XMMS(xmm_save);
+#endif
+#ifdef SAFE_PARAM
+ unsigned int i;
+
+ /* Check for NULL pointers */
+ if (pKey == NULL || pIv == NULL || pBufferIn == NULL ||
+ pBufferOut == NULL || length == NULL)
+ return;
+
+ for (i = 0; i < 4; i++) {
+ if (pKey[i] == NULL || pIv[i] == NULL ||
+ pBufferIn[i] == NULL || pBufferOut[i] == NULL)
+ return;
+
+ /* Check input data is in range of supported length */
+ if (length[i] < ZUC_MIN_LEN || length[i] > ZUC_MAX_LEN)
+ return;
+ }
+#endif
+
+ _zuc_eea3_4_buffer_sse(pKey, pIv, pBufferIn, pBufferOut, length);
+
+#ifdef SAFE_DATA
+ /* Clear sensitive data in registers */
+ CLEAR_SCRATCH_GPS();
+ CLEAR_SCRATCH_SIMD_REGS();
+#endif
+#ifndef LINUX
+ RESTORE_XMMS(xmm_save);
+#endif
+}
+
+
+void zuc_eea3_n_buffer_sse(const void * const pKey[], const void * const pIv[],
+ const void * const pBufferIn[], void *pBufferOut[],
+ const uint32_t length[],
+ const uint32_t numBuffers)
+{
+#ifndef LINUX
+ DECLARE_ALIGNED(uint128_t xmm_save[10], 16);
+
+ SAVE_XMMS(xmm_save);
+#endif
+
+ unsigned int i;
+ unsigned int packetCount = numBuffers;
+
+#ifdef SAFE_PARAM
+ /* Check for NULL pointers */
+ if (pKey == NULL || pIv == NULL || pBufferIn == NULL ||
+ pBufferOut == NULL || length == NULL)
+ return;
+
+ for (i = 0; i < numBuffers; i++) {
+ if (pKey[i] == NULL || pIv[i] == NULL ||
+ pBufferIn[i] == NULL || pBufferOut[i] == NULL)
+ return;
+
+ /* Check input data is in range of supported length */
+ if (length[i] < ZUC_MIN_LEN || length[i] > ZUC_MAX_LEN)
+ return;
+ }
+#endif
+ i = 0;
+
+ while (packetCount >= 4) {
+ packetCount -= 4;
+ _zuc_eea3_4_buffer_sse(&pKey[i],
+ &pIv[i],
+ &pBufferIn[i],
+ &pBufferOut[i],
+ &length[i]);
+ i += 4;
+ }
+
+ while (packetCount--) {
+ _zuc_eea3_1_buffer_sse(pKey[i],
+ pIv[i],
+ pBufferIn[i],
+ pBufferOut[i],
+ length[i]);
+ i++;
+ }
+
+#ifdef SAFE_DATA
+ /* Clear sensitive data in registers */
+ CLEAR_SCRATCH_GPS();
+ CLEAR_SCRATCH_SIMD_REGS();
+#endif
+#ifndef LINUX
+ RESTORE_XMMS(xmm_save);
+#endif
+}
+
+static inline uint64_t rotate_left(uint64_t u, size_t r)
+{
+ return (((u) << (r)) | ((u) >> (64 - (r))));
+}
+
+static inline uint64_t load_uint64(const void *ptr)
+{
+ return *((const uint64_t *)ptr);
+}
+
+void zuc_eia3_1_buffer_sse(const void *pKey,
+ const void *pIv,
+ const void *pBufferIn,
+ const uint32_t lengthInBits,
+ uint32_t *pMacI)
+{
+#ifndef LINUX
+ DECLARE_ALIGNED(uint128_t xmm_save[10], 16);
+
+ SAVE_XMMS(xmm_save);
+#endif
+ DECLARE_ALIGNED(ZucState_t zucState, 64);
+ DECLARE_ALIGNED(uint32_t keyStream[16 * 2], 64);
+ const uint32_t keyStreamLengthInBits = ZUC_KEYSTR_LEN * 8;
+ /* generate a key-stream 2 words longer than the input message */
+ const uint32_t N = lengthInBits + (2 * ZUC_WORD);
+ uint32_t L = (N + 31) / ZUC_WORD;
+ uint32_t *pZuc = (uint32_t *) &keyStream[0];
+ uint32_t remainingBits = lengthInBits;
+ uint32_t T = 0;
+ const uint8_t *pIn8 = (const uint8_t *) pBufferIn;
+
+#ifdef SAFE_PARAM
+ /* Check for NULL pointers */
+ if (pKey == NULL || pIv == NULL || pBufferIn == NULL || pMacI == NULL)
+ return;
+
+ /* Check input data is in range of supported length */
+ if (lengthInBits < ZUC_MIN_LEN || lengthInBits > ZUC_MAX_LEN)
+ return;
+#endif
+
+ memset(&zucState, 0, sizeof(ZucState_t));
+
+ asm_ZucInitialization(pKey, pIv, &(zucState));
+ asm_ZucGenKeystream64B(pZuc, &zucState);
+
+ /* loop over the message bits */
+ while (remainingBits >= keyStreamLengthInBits) {
+ remainingBits -= keyStreamLengthInBits;
+ L -= (keyStreamLengthInBits / 32);
+
+ /* Generate the next key stream 8 bytes or 64 bytes */
+ if (!remainingBits)
+ asm_ZucGenKeystream8B(&keyStream[16], &zucState);
+ else
+ asm_ZucGenKeystream64B(&keyStream[16], &zucState);
+ T = asm_Eia3Round64BSSE(T, &keyStream[0], pIn8);
+ memcpy(&keyStream[0], &keyStream[16], 16 * sizeof(uint32_t));
+ pIn8 = &pIn8[ZUC_KEYSTR_LEN];
+ }
+
+ /*
+ * If the remaining data spans more than 14 ZUC words (32-bit words),
+ * the keystream needs up to another 2 ZUC words (8 bytes)
+ */
+ if (remainingBits > (14 * 32))
+ asm_ZucGenKeystream8B(&keyStream[16], &zucState);
+ T ^= asm_Eia3RemainderSSE(&keyStream[0], pIn8, remainingBits);
+ T ^= rotate_left(load_uint64(&keyStream[remainingBits / 32]),
+ remainingBits % 32);
+
+ /* save the final MAC-I result */
+ uint32_t keyBlock = keyStream[L - 1];
+ *pMacI = bswap4(T ^ keyBlock);
+
+#ifdef SAFE_DATA
+ /* Clear sensitive data (in registers and stack) */
+ clear_mem(keyStream, sizeof(keyStream));
+ clear_mem(&zucState, sizeof(zucState));
+ CLEAR_SCRATCH_GPS();
+ CLEAR_SCRATCH_SIMD_REGS();
+#endif
+#ifndef LINUX
+ RESTORE_XMMS(xmm_save);
+#endif
+}
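+
+/*
+ * Illustrative usage (an assumed example, not part of the upstream source):
+ * computing a 4-byte EIA3 MAC over a message of 'bit_len' bits with a
+ * 16-byte key and a 16-byte IV could look like:
+ *
+ *     uint32_t mac;
+ *     zuc_eia3_1_buffer_sse(key, iv, msg, bit_len, &mac);
+ */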
diff --git a/src/spdk/intel-ipsec-mb/version.c b/src/spdk/intel-ipsec-mb/version.c
new file mode 100644
index 000000000..0ba9670ab
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/version.c
@@ -0,0 +1,41 @@
+/*******************************************************************************
+ Copyright (c) 2018, Intel Corporation
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*******************************************************************************/
+#include "intel-ipsec-mb.h"
+
+/* Set library version */
+const char *imb_version_str = IMB_VERSION_STR;
+const unsigned imb_version = IMB_VERSION_NUM;
+
+const char *imb_get_version_str(void)
+{
+ return imb_version_str;
+}
+
+unsigned imb_get_version(void)
+{
+ return imb_version;
+}
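+
+/*
+ * Illustrative usage (an assumed example, not part of the upstream source):
+ * an application could report the library version at start-up, e.g.
+ *
+ *     printf("intel-ipsec-mb %s (0x%x)\n",
+ *            imb_get_version_str(), imb_get_version());
+ */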
diff --git a/src/spdk/intel-ipsec-mb/win_x64.mak b/src/spdk/intel-ipsec-mb/win_x64.mak
new file mode 100644
index 000000000..d4bcffd20
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/win_x64.mak
@@ -0,0 +1,485 @@
+#
+# Copyright (c) 2017-2019, Intel Corporation
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+# * Neither the name of Intel Corporation nor the names of its contributors
+# may be used to endorse or promote products derived from this software
+# without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+# Available build options:
+# DEBUG=y - this option will produce a library suitable for debugging
+# DEBUG=n - this option will produce a library without debug information (default)
+# SHARED=y - this option will produce a shared library (DLL) (default)
+# SHARED=n - this option will produce a static library (lib)
+# SAFE_DATA=y - this option will clear memory and registers containing
+# sensitive information (e.g. keys, IVs)
+# SAFE_PARAM=y - this option will add extra input parameter checks
+# SAFE_LOOKUP=y - this option will perform constant-time lookups depending on
+# sensitive data (default)
+# GCM_BIG_DATA=y
+# - Better performing VAES GCM on big buffers using more ghash keys
+# (~5% speed-up). This option results in a much bigger gcm_key structure (>2K).
+
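+# Example invocations (illustrative only, not part of the upstream makefile):
+#   nmake /f win_x64.mak                          - release DLL build (defaults)
+#   nmake /f win_x64.mak DEBUG=y SHARED=n         - static library with debug info
+#   nmake /f win_x64.mak SAFE_DATA=y SAFE_PARAM=y - extra data clearing and
+#                                                   parameter checks
+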
+!if !defined(SHARED)
+SHARED = y
+!endif
+
+# Available installation options:
+# PREFIX=<path> - path to install the library (c:\program files\ is default)
+
+!if !defined(PREFIX)
+PREFIX = c:\Program Files
+!endif
+INSTDIR = $(PREFIX)\intel-ipsec-mb
+
+LIBBASE = libIPSec_MB
+!if "$(SHARED)" == "y"
+LIBNAME = $(LIBBASE).dll
+!else
+LIBNAME = $(LIBBASE).lib
+!endif
+OBJ_DIR = obj
+
+!ifdef DEBUG
+OPT = /Od
+DCFLAGS = /DDEBUG /Z7
+DAFLAGS = -gcv8
+DLFLAGS = /DEBUG
+!else
+OPT = /O2 /Oi
+DCFLAGS =
+DAFLAGS =
+DLFLAGS = /RELEASE
+!endif
+
+!if "$(SAFE_DATA)" == "y"
+DCFLAGS = $(DCFLAGS) /DSAFE_DATA
+DAFLAGS = $(DAFLAGS) -DSAFE_DATA
+!endif
+
+!if "$(SAFE_PARAM)" == "y"
+DCFLAGS = $(DCFLAGS) /DSAFE_PARAM
+DAFLAGS = $(DAFLAGS) -DSAFE_PARAM
+!endif
+
+!if "$(SAFE_LOOKUP)" != "n"
+DCFLAGS = $(DCFLAGS) /DSAFE_LOOKUP
+DAFLAGS = $(DAFLAGS) -DSAFE_LOOKUP
+!endif
+
+!if "$(GCM_BIG_DATA)" == "y"
+GCM_AFLAGS = -DGCM_BIG_DATA
+GCM_CFLAGS = /DGCM_BIG_DATA
+!else
+GCM_AFLAGS =
+GCM_CFLAGS =
+!endif
+
+CC = cl
+CFLAGS_ALL = $(EXTRA_CFLAGS) $(GCM_CFLAGS) /I. /Iinclude /Ino-aesni \
+ /nologo /Y- /W3 /WX- /Gm- /fp:precise /EHsc
+
+CFLAGS = $(CFLAGS_ALL) $(OPT) $(DCFLAGS)
+CFLAGS_NO_SIMD = $(CFLAGS_ALL) /Od $(DCFLAGS)
+
+LIB_TOOL = lib
+LIBFLAGS = /nologo /machine:X64 /nodefaultlib
+
+LINK_TOOL = link
+LINKFLAGS = $(DLFLAGS) /nologo /machine:X64
+
+AS = nasm
+AFLAGS = $(DAFLAGS) $(GCM_AFLAGS) -fwin64 -Xvc -DWIN_ABI -Iinclude/ \
+ -I./ -Iavx/ -Iavx2/ -Iavx512/ -Isse/
+
+# warning messages
+
+SAFE_PARAM_MSG1=SAFE_PARAM option not set.
+SAFE_PARAM_MSG2=Input parameters will not be checked.
+SAFE_DATA_MSG1=SAFE_DATA option not set.
+SAFE_DATA_MSG2=Stack and registers containing sensitive information, \
+ such as keys or IVs will not be cleared \
+ at the end of function calls.
+SAFE_LOOKUP_MSG1=SAFE_LOOKUP option not set.
+SAFE_LOOKUP_MSG2=Lookups which depend on sensitive information \
+ are not guaranteed to be done in constant time.
+
+lib_objs1 = \
+ $(OBJ_DIR)\aes128_cbc_dec_by4_sse.obj \
+ $(OBJ_DIR)\aes128_cbc_dec_by4_sse_no_aesni.obj \
+ $(OBJ_DIR)\aes128_cbc_dec_by8_avx.obj \
+ $(OBJ_DIR)\aes_ecb_by4_sse.obj \
+ $(OBJ_DIR)\aes_ecb_by4_sse_no_aesni.obj \
+ $(OBJ_DIR)\aes_ecb_by4_avx.obj \
+ $(OBJ_DIR)\pon_sse.obj \
+ $(OBJ_DIR)\pon_sse_no_aesni.obj \
+ $(OBJ_DIR)\aes128_cntr_by4_sse.obj \
+ $(OBJ_DIR)\aes128_cntr_by4_sse_no_aesni.obj \
+ $(OBJ_DIR)\pon_avx.obj \
+ $(OBJ_DIR)\aes128_cntr_by8_avx.obj \
+ $(OBJ_DIR)\aes128_cntr_ccm_by4_sse.obj \
+ $(OBJ_DIR)\aes128_cntr_ccm_by4_sse_no_aesni.obj \
+ $(OBJ_DIR)\aes128_cntr_ccm_by8_avx.obj \
+ $(OBJ_DIR)\aes128_ecbenc_x3.obj \
+ $(OBJ_DIR)\aes192_cbc_dec_by4_sse.obj \
+ $(OBJ_DIR)\aes192_cbc_dec_by4_sse_no_aesni.obj \
+ $(OBJ_DIR)\aes192_cbc_dec_by8_avx.obj \
+ $(OBJ_DIR)\aes192_cntr_by4_sse.obj \
+ $(OBJ_DIR)\aes192_cntr_by4_sse_no_aesni.obj \
+ $(OBJ_DIR)\aes192_cntr_by8_avx.obj \
+ $(OBJ_DIR)\aes256_cbc_dec_by4_sse.obj \
+ $(OBJ_DIR)\aes256_cbc_dec_by4_sse_no_aesni.obj \
+ $(OBJ_DIR)\aes256_cbc_dec_by8_avx.obj \
+ $(OBJ_DIR)\aes256_cntr_by4_sse.obj \
+ $(OBJ_DIR)\aes256_cntr_by4_sse_no_aesni.obj \
+ $(OBJ_DIR)\aes256_cntr_by8_avx.obj \
+ $(OBJ_DIR)\aes_cfb_128_sse.obj \
+ $(OBJ_DIR)\aes_cfb_128_sse_no_aesni.obj \
+ $(OBJ_DIR)\aes_cfb_128_avx.obj \
+ $(OBJ_DIR)\aes128_cbc_mac_x4.obj \
+ $(OBJ_DIR)\aes128_cbc_mac_x4_no_aesni.obj \
+ $(OBJ_DIR)\aes128_cbc_mac_x8.obj \
+ $(OBJ_DIR)\aes_cbc_enc_128_x4.obj \
+ $(OBJ_DIR)\aes_cbc_enc_128_x4_no_aesni.obj \
+ $(OBJ_DIR)\aes_cbc_enc_128_x8.obj \
+ $(OBJ_DIR)\aes_cbc_enc_192_x4.obj \
+ $(OBJ_DIR)\aes_cbc_enc_192_x4_no_aesni.obj \
+ $(OBJ_DIR)\aes_cbc_enc_192_x8.obj \
+ $(OBJ_DIR)\aes_cbc_enc_256_x4.obj \
+ $(OBJ_DIR)\aes_cbc_enc_256_x4_no_aesni.obj \
+ $(OBJ_DIR)\aes_cbc_enc_256_x8.obj \
+ $(OBJ_DIR)\aes_keyexp_128.obj \
+ $(OBJ_DIR)\aes_keyexp_192.obj \
+ $(OBJ_DIR)\aes_keyexp_256.obj \
+ $(OBJ_DIR)\aes_cmac_subkey_gen.obj \
+ $(OBJ_DIR)\aes_xcbc_mac_128_x4.obj \
+ $(OBJ_DIR)\aes_xcbc_mac_128_x4_no_aesni.obj \
+ $(OBJ_DIR)\aes_xcbc_mac_128_x8.obj \
+ $(OBJ_DIR)\md5_x4x2_avx.obj \
+ $(OBJ_DIR)\md5_x4x2_sse.obj \
+ $(OBJ_DIR)\md5_x8x2_avx2.obj \
+ $(OBJ_DIR)\save_xmms.obj \
+ $(OBJ_DIR)\clear_regs_mem_fns.obj \
+ $(OBJ_DIR)\sha1_mult_avx.obj \
+ $(OBJ_DIR)\sha1_mult_sse.obj \
+ $(OBJ_DIR)\sha1_ni_x2_sse.obj \
+ $(OBJ_DIR)\sha1_one_block_avx.obj \
+ $(OBJ_DIR)\sha1_one_block_sse.obj \
+ $(OBJ_DIR)\sha1_x8_avx2.obj \
+ $(OBJ_DIR)\sha1_x16_avx512.obj \
+ $(OBJ_DIR)\sha224_one_block_avx.obj \
+ $(OBJ_DIR)\sha224_one_block_sse.obj \
+ $(OBJ_DIR)\sha256_oct_avx2.obj \
+ $(OBJ_DIR)\sha256_one_block_avx.obj \
+ $(OBJ_DIR)\sha256_one_block_sse.obj \
+ $(OBJ_DIR)\sha256_ni_x2_sse.obj \
+ $(OBJ_DIR)\sha256_x16_avx512.obj \
+ $(OBJ_DIR)\sha384_one_block_avx.obj \
+ $(OBJ_DIR)\sha384_one_block_sse.obj \
+ $(OBJ_DIR)\sha512_one_block_avx.obj \
+ $(OBJ_DIR)\sha512_one_block_sse.obj \
+ $(OBJ_DIR)\sha512_x2_avx.obj \
+ $(OBJ_DIR)\sha512_x2_sse.obj \
+ $(OBJ_DIR)\sha512_x4_avx2.obj \
+ $(OBJ_DIR)\sha512_x8_avx512.obj \
+ $(OBJ_DIR)\sha_256_mult_avx.obj \
+ $(OBJ_DIR)\sha_256_mult_sse.obj \
+ $(OBJ_DIR)\kasumi_avx.obj \
+ $(OBJ_DIR)\kasumi_iv.obj \
+ $(OBJ_DIR)\kasumi_sse.obj \
+ $(OBJ_DIR)\zuc_common.obj \
+ $(OBJ_DIR)\zuc_sse_top.obj \
+ $(OBJ_DIR)\zuc_avx_top.obj \
+ $(OBJ_DIR)\zuc_sse.obj \
+ $(OBJ_DIR)\zuc_avx.obj \
+ $(OBJ_DIR)\zuc_iv.obj \
+ $(OBJ_DIR)\snow3g_sse.obj \
+ $(OBJ_DIR)\snow3g_sse_no_aesni.obj \
+ $(OBJ_DIR)\snow3g_avx.obj \
+ $(OBJ_DIR)\snow3g_avx2.obj \
+ $(OBJ_DIR)\snow3g_tables.obj \
+ $(OBJ_DIR)\snow3g_iv.obj \
+ $(OBJ_DIR)\aes_xcbc_expand_key.obj \
+ $(OBJ_DIR)\md5_one_block.obj \
+ $(OBJ_DIR)\sha_one_block.obj \
+ $(OBJ_DIR)\des_key.obj \
+ $(OBJ_DIR)\des_basic.obj \
+ $(OBJ_DIR)\des_x16_avx512.obj \
+ $(OBJ_DIR)\cntr_vaes_avx512.obj \
+ $(OBJ_DIR)\aes_cbc_dec_vaes_avx512.obj \
+ $(OBJ_DIR)\aes_cbc_enc_vaes_avx512.obj \
+ $(OBJ_DIR)\mb_mgr_aes_submit_avx512.obj \
+ $(OBJ_DIR)\mb_mgr_aes_flush_avx512.obj \
+ $(OBJ_DIR)\mb_mgr_aes192_submit_avx512.obj \
+ $(OBJ_DIR)\mb_mgr_aes192_flush_avx512.obj \
+ $(OBJ_DIR)\mb_mgr_aes256_submit_avx512.obj \
+ $(OBJ_DIR)\mb_mgr_aes256_flush_avx512.obj \
+ $(OBJ_DIR)\const.obj \
+ $(OBJ_DIR)\wireless_common.obj \
+ $(OBJ_DIR)\constant_lookup.obj
+
+lib_objs2 = \
+ $(OBJ_DIR)\mb_mgr_aes192_flush_avx.obj \
+ $(OBJ_DIR)\mb_mgr_aes192_flush_sse.obj \
+ $(OBJ_DIR)\mb_mgr_aes192_flush_sse_no_aesni.obj \
+ $(OBJ_DIR)\mb_mgr_aes192_submit_avx.obj \
+ $(OBJ_DIR)\mb_mgr_aes192_submit_sse.obj \
+ $(OBJ_DIR)\mb_mgr_aes192_submit_sse_no_aesni.obj \
+ $(OBJ_DIR)\mb_mgr_aes256_flush_avx.obj \
+ $(OBJ_DIR)\mb_mgr_aes256_flush_sse.obj \
+ $(OBJ_DIR)\mb_mgr_aes256_flush_sse_no_aesni.obj \
+ $(OBJ_DIR)\mb_mgr_aes256_submit_avx.obj \
+ $(OBJ_DIR)\mb_mgr_aes256_submit_sse.obj \
+ $(OBJ_DIR)\mb_mgr_aes256_submit_sse_no_aesni.obj \
+ $(OBJ_DIR)\mb_mgr_aes_flush_avx.obj \
+ $(OBJ_DIR)\mb_mgr_aes_flush_sse.obj \
+ $(OBJ_DIR)\mb_mgr_aes_flush_sse_no_aesni.obj \
+ $(OBJ_DIR)\mb_mgr_aes_submit_avx.obj \
+ $(OBJ_DIR)\mb_mgr_aes_submit_sse.obj \
+ $(OBJ_DIR)\mb_mgr_aes_submit_sse_no_aesni.obj \
+ $(OBJ_DIR)\mb_mgr_aes_cmac_submit_flush_sse.obj \
+ $(OBJ_DIR)\mb_mgr_aes_cmac_submit_flush_sse_no_aesni.obj \
+ $(OBJ_DIR)\mb_mgr_aes_cmac_submit_flush_avx.obj \
+ $(OBJ_DIR)\mb_mgr_aes_ccm_auth_submit_flush_sse.obj \
+ $(OBJ_DIR)\mb_mgr_aes_ccm_auth_submit_flush_sse_no_aesni.obj \
+ $(OBJ_DIR)\mb_mgr_aes_ccm_auth_submit_flush_avx.obj \
+ $(OBJ_DIR)\mb_mgr_aes_xcbc_flush_avx.obj \
+ $(OBJ_DIR)\mb_mgr_aes_xcbc_flush_sse.obj \
+ $(OBJ_DIR)\mb_mgr_aes_xcbc_flush_sse_no_aesni.obj \
+ $(OBJ_DIR)\mb_mgr_aes_xcbc_submit_avx.obj \
+ $(OBJ_DIR)\mb_mgr_aes_xcbc_submit_sse.obj \
+ $(OBJ_DIR)\mb_mgr_aes_xcbc_submit_sse_no_aesni.obj \
+ $(OBJ_DIR)\mb_mgr_hmac_flush_avx.obj \
+ $(OBJ_DIR)\mb_mgr_hmac_flush_avx2.obj \
+ $(OBJ_DIR)\mb_mgr_hmac_flush_sse.obj \
+ $(OBJ_DIR)\mb_mgr_hmac_flush_ni_sse.obj \
+ $(OBJ_DIR)\mb_mgr_hmac_flush_avx512.obj \
+ $(OBJ_DIR)\mb_mgr_hmac_md5_flush_avx.obj \
+ $(OBJ_DIR)\mb_mgr_hmac_md5_flush_avx2.obj \
+ $(OBJ_DIR)\mb_mgr_hmac_md5_flush_sse.obj \
+ $(OBJ_DIR)\mb_mgr_hmac_md5_submit_avx.obj \
+ $(OBJ_DIR)\mb_mgr_hmac_md5_submit_avx2.obj \
+ $(OBJ_DIR)\mb_mgr_hmac_md5_submit_sse.obj \
+ $(OBJ_DIR)\mb_mgr_hmac_sha_224_flush_avx.obj \
+ $(OBJ_DIR)\mb_mgr_hmac_sha_224_flush_avx2.obj \
+ $(OBJ_DIR)\mb_mgr_hmac_sha_224_flush_avx512.obj \
+ $(OBJ_DIR)\mb_mgr_hmac_sha_224_flush_sse.obj \
+ $(OBJ_DIR)\mb_mgr_hmac_sha_224_flush_ni_sse.obj \
+ $(OBJ_DIR)\mb_mgr_hmac_sha_224_submit_avx.obj \
+ $(OBJ_DIR)\mb_mgr_hmac_sha_224_submit_avx2.obj \
+ $(OBJ_DIR)\mb_mgr_hmac_sha_224_submit_avx512.obj \
+ $(OBJ_DIR)\mb_mgr_hmac_sha_224_submit_sse.obj \
+ $(OBJ_DIR)\mb_mgr_hmac_sha_224_submit_ni_sse.obj \
+ $(OBJ_DIR)\mb_mgr_hmac_sha_256_flush_avx.obj \
+ $(OBJ_DIR)\mb_mgr_hmac_sha_256_flush_avx2.obj \
+ $(OBJ_DIR)\mb_mgr_hmac_sha_256_flush_avx512.obj \
+ $(OBJ_DIR)\mb_mgr_hmac_sha_256_flush_sse.obj \
+ $(OBJ_DIR)\mb_mgr_hmac_sha_256_flush_ni_sse.obj \
+ $(OBJ_DIR)\mb_mgr_hmac_sha_256_submit_avx.obj \
+ $(OBJ_DIR)\mb_mgr_hmac_sha_256_submit_avx2.obj \
+ $(OBJ_DIR)\mb_mgr_hmac_sha_256_submit_avx512.obj \
+ $(OBJ_DIR)\mb_mgr_hmac_sha_256_submit_sse.obj \
+ $(OBJ_DIR)\mb_mgr_hmac_sha_256_submit_ni_sse.obj \
+ $(OBJ_DIR)\mb_mgr_hmac_sha_384_flush_avx.obj \
+ $(OBJ_DIR)\mb_mgr_hmac_sha_384_flush_avx2.obj \
+ $(OBJ_DIR)\mb_mgr_hmac_sha_384_flush_avx512.obj \
+ $(OBJ_DIR)\mb_mgr_hmac_sha_384_flush_sse.obj \
+ $(OBJ_DIR)\mb_mgr_hmac_sha_384_submit_avx.obj \
+ $(OBJ_DIR)\mb_mgr_hmac_sha_384_submit_avx2.obj \
+ $(OBJ_DIR)\mb_mgr_hmac_sha_384_submit_avx512.obj \
+ $(OBJ_DIR)\mb_mgr_hmac_sha_384_submit_sse.obj \
+ $(OBJ_DIR)\mb_mgr_hmac_sha_512_flush_avx.obj \
+ $(OBJ_DIR)\mb_mgr_hmac_sha_512_flush_avx2.obj \
+ $(OBJ_DIR)\mb_mgr_hmac_sha_512_flush_avx512.obj \
+ $(OBJ_DIR)\mb_mgr_hmac_sha_512_flush_sse.obj \
+ $(OBJ_DIR)\mb_mgr_hmac_sha_512_submit_avx.obj \
+ $(OBJ_DIR)\mb_mgr_hmac_sha_512_submit_avx2.obj \
+ $(OBJ_DIR)\mb_mgr_hmac_sha_512_submit_avx512.obj \
+ $(OBJ_DIR)\mb_mgr_hmac_sha_512_submit_sse.obj \
+ $(OBJ_DIR)\mb_mgr_hmac_submit_avx.obj \
+ $(OBJ_DIR)\mb_mgr_hmac_submit_avx2.obj \
+ $(OBJ_DIR)\mb_mgr_hmac_submit_sse.obj \
+ $(OBJ_DIR)\mb_mgr_hmac_submit_ni_sse.obj \
+ $(OBJ_DIR)\mb_mgr_hmac_submit_avx512.obj \
+ $(OBJ_DIR)\mb_mgr_avx.obj \
+ $(OBJ_DIR)\mb_mgr_avx2.obj \
+ $(OBJ_DIR)\mb_mgr_avx512.obj \
+ $(OBJ_DIR)\mb_mgr_des_avx512.obj \
+ $(OBJ_DIR)\mb_mgr_sse.obj \
+ $(OBJ_DIR)\mb_mgr_sse_no_aesni.obj \
+ $(OBJ_DIR)\alloc.obj \
+ $(OBJ_DIR)\version.obj \
+ $(OBJ_DIR)\cpu_feature.obj \
+ $(OBJ_DIR)\aesni_emu.obj
+
+gcm_objs = \
+ $(OBJ_DIR)\gcm.obj \
+ $(OBJ_DIR)\gcm128_sse.obj \
+ $(OBJ_DIR)\gcm128_avx_gen2.obj \
+ $(OBJ_DIR)\gcm128_avx_gen4.obj \
+ $(OBJ_DIR)\gcm128_avx512.obj \
+ $(OBJ_DIR)\gcm128_vaes_avx512.obj \
+ $(OBJ_DIR)\gcm192_sse.obj \
+ $(OBJ_DIR)\gcm192_avx_gen2.obj \
+ $(OBJ_DIR)\gcm192_avx_gen4.obj \
+ $(OBJ_DIR)\gcm192_avx512.obj \
+ $(OBJ_DIR)\gcm192_vaes_avx512.obj \
+ $(OBJ_DIR)\gcm256_sse.obj \
+ $(OBJ_DIR)\gcm256_avx_gen2.obj \
+ $(OBJ_DIR)\gcm256_avx_gen4.obj \
+ $(OBJ_DIR)\gcm256_avx512.obj \
+ $(OBJ_DIR)\gcm256_vaes_avx512.obj \
+ $(OBJ_DIR)\gcm128_sse_no_aesni.obj \
+ $(OBJ_DIR)\gcm192_sse_no_aesni.obj \
+ $(OBJ_DIR)\gcm256_sse_no_aesni.obj
+
+!ifdef NO_GCM
+all_objs = $(lib_objs1) $(lib_objs2)
+CFLAGS = $(CFLAGS) -DNO_GCM
+!else
+all_objs = $(lib_objs1) $(lib_objs2) $(gcm_objs)
+!endif
+
+all: $(LIBNAME)
+
+$(LIBNAME): $(all_objs)
+!if "$(SHARED)" == "y"
+ $(LINK_TOOL) $(LINKFLAGS) /DLL /DEF:libIPSec_MB.def /OUT:$@ $(all_objs)
+!else
+ $(LIB_TOOL) $(LIBFLAGS) /out:$@ $(all_objs)
+!endif
+!if "$(SAFE_PARAM)" != "y"
+ @echo NOTE: $(SAFE_PARAM_MSG1) $(SAFE_PARAM_MSG2)
+!endif
+!if "$(SAFE_DATA)" != "y"
+ @echo NOTE: $(SAFE_DATA_MSG1) $(SAFE_DATA_MSG2)
+!endif
+
+!if "$(SAFE_LOOKUP)" == "n"
+ @echo NOTE: $(SAFE_LOOKUP_MSG1) $(SAFE_LOOKUP_MSG2)
+!endif
+
+$(all_objs): $(OBJ_DIR)
+
+{.\}.c{$(OBJ_DIR)}.obj:
+ $(CC) /Fo$@ /c $(CFLAGS) $<
+
+{.\}.asm{$(OBJ_DIR)}.obj:
+ $(AS) -o $@ $(AFLAGS) $<
+
+{sse\}.c{$(OBJ_DIR)}.obj:
+ $(CC) /Fo$@ /c $(CFLAGS) $<
+
+{sse\}.asm{$(OBJ_DIR)}.obj:
+ $(AS) -o $@ $(AFLAGS) $<
+
+{avx\}.c{$(OBJ_DIR)}.obj:
+ $(CC) /arch:AVX /Fo$@ /c $(CFLAGS) $<
+
+{avx\}.asm{$(OBJ_DIR)}.obj:
+ $(AS) -o $@ $(AFLAGS) $<
+
+{avx2\}.c{$(OBJ_DIR)}.obj:
+ $(CC) /arch:AVX /Fo$@ /c $(CFLAGS) $<
+
+{avx2\}.asm{$(OBJ_DIR)}.obj:
+ $(AS) -o $@ $(AFLAGS) $<
+
+{avx512\}.c{$(OBJ_DIR)}.obj:
+ $(CC) /arch:AVX /Fo$@ /c $(CFLAGS) $<
+
+{avx512\}.asm{$(OBJ_DIR)}.obj:
+ $(AS) -o $@ $(AFLAGS) $<
+
+{no-aesni\}.c{$(OBJ_DIR)}.obj:
+ $(CC) /Fo$@ /c $(CFLAGS_NO_SIMD) $<
+
+{no-aesni\}.asm{$(OBJ_DIR)}.obj:
+ $(AS) -o $@ $(AFLAGS) $<
+
+{include\}.asm{$(OBJ_DIR)}.obj:
+ $(AS) -o $@ $(AFLAGS) $<
+
+$(OBJ_DIR):
+ mkdir $(OBJ_DIR)
+
+help:
+ @echo "Available build options:"
+ @echo "DEBUG=n (default)"
+ @echo " - this option will produce library not fit for debugging"
+ @echo "SHARED=y (default)"
+ @echo " - this option will produce shared library"
+ @echo "DEBUG=y - this option will produce library fit for debugging"
+ @echo "SHARED=n - this option will produce static library"
+ @echo "SAFE_DATA=n (default)"
+ @echo " - Sensitive data not cleared from registers and memory"
+ @echo " at operation end"
+ @echo "SAFE_DATA=y"
+ @echo " - Sensitive data cleared from registers and memory"
+ @echo " at operation end"
+ @echo "SAFE_PARAM=n (default)"
+ @echo " - API input parameters not checked"
+ @echo "SAFE_PARAM=y"
+ @echo " - API input parameters checked"
+ @echo "SAFE_LOOKUP=n"
+ @echo " - Lookups depending on sensitive data might not be constant time"
+ @echo "SAFE_LOOKUP=y (default)"
+ @echo " - Lookups depending on sensitive data are constant time"
+ @echo "GCM_BIG_DATA=n (default)"
+ @echo " - Smaller AVX512VAES GCM key structure with"
+ @echo " good performance level for buffers sizes below 2K."
+ @echo " - 8 ghash keys used on SSE, AVX, AVX2 and AVX512."
+ @echo " - 48 ghash keys used on AVX512VAES and AVX512VPCLMULQDQ."
+ @echo "GCM_BIG_DATA=y"
+ @echo " - Better performing AVX512VAES GCM on big buffers that"
+ @echo " uses more ghash keys, 128 instead of 48."
+ @echo " - This option results in a much bigger gcm_key structure, more than 2K."
+ @echo " - Performance improvement takes effect only on platforms with"
+ @echo " AVX512VAES and AVX512VPCLMULQDQ."
+
+clean:
+ -del /q $(lib_objs1)
+ -del /q $(lib_objs2)
+ -del /q $(gcm_objs)
+ -del /q $(LIBNAME).*
+
+install:
+ -md "$(INSTDIR)"
+ -copy /Y /V /A $(LIBBASE).def "$(INSTDIR)"
+ -copy /Y /V /B $(LIBBASE).exp "$(INSTDIR)"
+ -copy /Y /V /B $(LIBBASE).lib "$(INSTDIR)"
+ -copy /Y /V /A intel-ipsec-mb.h "$(INSTDIR)"
+!if "$(SHARED)" == "y"
+ -copy /Y /V /B $(LIBBASE).dll "$(INSTDIR)"
+ -copy /Y /V /B $(LIBBASE).dll "%windir%\system32"
+!endif
+
+uninstall:
+!if "$(SHARED)" == "y"
+ -del /Q "%windir%\system32\$(LIBBASE).dll"
+ -del /Q "$(INSTDIR)\$(LIBBASE).dll"
+!endif
+ -del /Q "$(INSTDIR)\$(LIBBASE).def"
+ -del /Q "$(INSTDIR)\$(LIBBASE).exp"
+ -del /Q "$(INSTDIR)\$(LIBBASE).lib"
+ -del /Q "$(INSTDIR)\intel-ipsec-mb.h"
+ -rd "$(INSTDIR)"
diff --git a/src/spdk/intel-ipsec-mb/zuc_iv.c b/src/spdk/intel-ipsec-mb/zuc_iv.c
new file mode 100644
index 000000000..72dd28852
--- /dev/null
+++ b/src/spdk/intel-ipsec-mb/zuc_iv.c
@@ -0,0 +1,103 @@
+/*******************************************************************************
+ Copyright (c) 2019, Intel Corporation
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*******************************************************************************/
+
+#include <string.h>
+
+#include "intel-ipsec-mb.h"
+#include "include/wireless_common.h"
+
+int
+zuc_eea3_iv_gen(const uint32_t count, const uint8_t bearer,
+ const uint8_t dir, void *iv_ptr)
+{
+ uint8_t *iv = (uint8_t *) iv_ptr;
+ uint32_t *iv32 = (uint32_t *) iv_ptr;
+
+ if (iv == NULL)
+ return -1;
+
+ /* Bearer must contain 5 bits only */
+ if (bearer >= (1<<5))
+ return -1;
+
+ /* Direction must contain 1 bit only */
+ if (dir > 1)
+ return -1;
+
+ /* IV[0-3] = COUNT */
+ iv32[0] = bswap4(count);
+
+ /* IV[4] = BEARER || DIRECTION || 0s */
+ iv[4] = (bearer << 3) + (dir << 2);
+
+ /* IV[5-7] = 0s */
+ memset(&iv[5], 0, 3);
+
+ /* IV[8-15] = IV[0-7] */
+ memcpy(&iv[8], &iv[0], 8);
+
+ return 0;
+}
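+
+/*
+ * Worked example (illustrative, not part of the upstream source): on a
+ * little-endian x86 target, zuc_eea3_iv_gen(0x12345678, 0x3, 1, iv) yields
+ * iv[0..3] = 12 34 56 78 (COUNT in big-endian byte order),
+ * iv[4] = 0x1C ((0x3 << 3) | (1 << 2)), iv[5..7] = 00 and
+ * iv[8..15] = a copy of iv[0..7].
+ */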
+
+int
+zuc_eia3_iv_gen(const uint32_t count, const uint8_t bearer,
+ const uint8_t dir, void *iv_ptr)
+{
+ uint8_t *iv = (uint8_t *) iv_ptr;
+ uint32_t *iv32 = (uint32_t *) iv_ptr;
+
+ if (iv == NULL)
+ return -1;
+
+ /* Bearer must contain 5 bits only */
+ if (bearer >= (1<<5))
+ return -1;
+
+ /* Direction must contain 1 bit only */
+ if (dir > 1)
+ return -1;
+
+ /* IV[0-3] = COUNT */
+ iv32[0] = bswap4(count);
+
+ /* IV[4] = BEARER || 0s */
+ iv[4] = bearer << 3;
+
+ /* IV[5-7] = 0s */
+ memset(&iv[5], 0, 3);
+
+ /* IV[8-15] = IV[0-7] */
+ memcpy(&iv[8], &iv[0], 8);
+
+ /* IV[8] = IV[0] XOR (DIR << 7) */
+ iv[8] ^= (dir << 7);
+
+ /* IV[14] = IV[6] XOR (DIR << 7) */
+ iv[14] ^= (dir << 7);
+
+ return 0;
+}
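+
+/*
+ * Worked example (illustrative, not part of the upstream source): with the
+ * same inputs (COUNT = 0x12345678, BEARER = 0x3, DIRECTION = 1),
+ * zuc_eia3_iv_gen() sets iv[4] = 0x18 (no direction bit in IV[4]),
+ * copies iv[0..7] to iv[8..15], then XORs (1 << 7) into iv[8] and iv[14],
+ * giving iv[8] = 0x92 and iv[14] = 0x80.
+ */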