path: root/src/spdk/dpdk/drivers/crypto
author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 18:24:20 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 18:24:20 +0000
commit    483eb2f56657e8e7f419ab1a4fab8dce9ade8609 (patch)
tree      e5d88d25d870d5dedacb6bbdbe2a966086a0a5cf /src/spdk/dpdk/drivers/crypto
parent    Initial commit. (diff)
Adding upstream version 14.2.21.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/spdk/dpdk/drivers/crypto')
-rw-r--r--  src/spdk/dpdk/drivers/crypto/Makefile  25
-rw-r--r--  src/spdk/dpdk/drivers/crypto/aesni_gcm/Makefile  32
-rw-r--r--  src/spdk/dpdk/drivers/crypto/aesni_gcm/aesni_gcm_ops.h  116
-rw-r--r--  src/spdk/dpdk/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c  585
-rw-r--r--  src/spdk/dpdk/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c  333
-rw-r--r--  src/spdk/dpdk/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h  110
-rw-r--r--  src/spdk/dpdk/drivers/crypto/aesni_gcm/rte_pmd_aesni_gcm_version.map  3
-rw-r--r--  src/spdk/dpdk/drivers/crypto/aesni_mb/Makefile  29
-rw-r--r--  src/spdk/dpdk/drivers/crypto/aesni_mb/aesni_mb_ops.h  221
-rw-r--r--  src/spdk/dpdk/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c  1046
-rw-r--r--  src/spdk/dpdk/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c  621
-rw-r--r--  src/spdk/dpdk/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h  254
-rw-r--r--  src/spdk/dpdk/drivers/crypto/aesni_mb/rte_pmd_aesni_mb_version.map  3
-rw-r--r--  src/spdk/dpdk/drivers/crypto/armv8/Makefile  40
-rw-r--r--  src/spdk/dpdk/drivers/crypto/armv8/rte_armv8_pmd.c  851
-rw-r--r--  src/spdk/dpdk/drivers/crypto/armv8/rte_armv8_pmd_ops.c  348
-rw-r--r--  src/spdk/dpdk/drivers/crypto/armv8/rte_armv8_pmd_private.h  197
-rw-r--r--  src/spdk/dpdk/drivers/crypto/armv8/rte_pmd_armv8_version.map  3
-rw-r--r--  src/spdk/dpdk/drivers/crypto/ccp/Makefile  35
-rw-r--r--  src/spdk/dpdk/drivers/crypto/ccp/ccp_crypto.c  2951
-rw-r--r--  src/spdk/dpdk/drivers/crypto/ccp/ccp_crypto.h  388
-rw-r--r--  src/spdk/dpdk/drivers/crypto/ccp/ccp_dev.c  810
-rw-r--r--  src/spdk/dpdk/drivers/crypto/ccp/ccp_dev.h  495
-rw-r--r--  src/spdk/dpdk/drivers/crypto/ccp/ccp_pci.c  236
-rw-r--r--  src/spdk/dpdk/drivers/crypto/ccp/ccp_pci.h  27
-rw-r--r--  src/spdk/dpdk/drivers/crypto/ccp/ccp_pmd_ops.c  833
-rw-r--r--  src/spdk/dpdk/drivers/crypto/ccp/ccp_pmd_private.h  107
-rw-r--r--  src/spdk/dpdk/drivers/crypto/ccp/meson.build  21
-rw-r--r--  src/spdk/dpdk/drivers/crypto/ccp/rte_ccp_pmd.c  397
-rw-r--r--  src/spdk/dpdk/drivers/crypto/ccp/rte_pmd_ccp_version.map  4
-rw-r--r--  src/spdk/dpdk/drivers/crypto/dpaa2_sec/Makefile  55
-rw-r--r--  src/spdk/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c  2931
-rw-r--r--  src/spdk/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_logs.h  42
-rw-r--r--  src/spdk/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h  449
-rw-r--r--  src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/compat.h  126
-rw-r--r--  src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/desc.h  2568
-rw-r--r--  src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/desc/algo.h  646
-rw-r--r--  src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/desc/common.h  98
-rw-r--r--  src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/desc/ipsec.h  1521
-rw-r--r--  src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta.h  921
-rw-r--r--  src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/fifo_load_store_cmd.h  313
-rw-r--r--  src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/header_cmd.h  218
-rw-r--r--  src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/jump_cmd.h  174
-rw-r--r--  src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/key_cmd.h  189
-rw-r--r--  src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/load_cmd.h  302
-rw-r--r--  src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/math_cmd.h  369
-rw-r--r--  src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/move_cmd.h  412
-rw-r--r--  src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/nfifo_cmd.h  163
-rw-r--r--  src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/operation_cmd.h  570
-rw-r--r--  src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/protocol_cmd.h  699
-rw-r--r--  src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/sec_run_time_asm.h  790
-rw-r--r--  src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/seq_in_out_ptr_cmd.h  175
-rw-r--r--  src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/signature_cmd.h  42
-rw-r--r--  src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/store_cmd.h  152
-rw-r--r--  src/spdk/dpdk/drivers/crypto/dpaa2_sec/mc/dpseci.c  643
-rw-r--r--  src/spdk/dpdk/drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h  399
-rw-r--r--  src/spdk/dpdk/drivers/crypto/dpaa2_sec/mc/fsl_dpseci_cmd.h  190
-rw-r--r--  src/spdk/dpdk/drivers/crypto/dpaa2_sec/meson.build  14
-rw-r--r--  src/spdk/dpdk/drivers/crypto/dpaa2_sec/rte_pmd_dpaa2_sec_version.map  4
-rw-r--r--  src/spdk/dpdk/drivers/crypto/dpaa_sec/Makefile  42
-rw-r--r--  src/spdk/dpdk/drivers/crypto/dpaa_sec/dpaa_sec.c  2419
-rw-r--r--  src/spdk/dpdk/drivers/crypto/dpaa_sec/dpaa_sec.h  452
-rw-r--r--  src/spdk/dpdk/drivers/crypto/dpaa_sec/dpaa_sec_log.h  43
-rw-r--r--  src/spdk/dpdk/drivers/crypto/dpaa_sec/meson.build  13
-rw-r--r--  src/spdk/dpdk/drivers/crypto/dpaa_sec/rte_pmd_dpaa_sec_version.map  4
-rw-r--r--  src/spdk/dpdk/drivers/crypto/kasumi/Makefile  38
-rw-r--r--  src/spdk/dpdk/drivers/crypto/kasumi/rte_kasumi_pmd.c  629
-rw-r--r--  src/spdk/dpdk/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c  319
-rw-r--r--  src/spdk/dpdk/drivers/crypto/kasumi/rte_kasumi_pmd_private.h  75
-rw-r--r--  src/spdk/dpdk/drivers/crypto/kasumi/rte_pmd_kasumi_version.map  3
-rw-r--r--  src/spdk/dpdk/drivers/crypto/meson.build  9
-rw-r--r--  src/spdk/dpdk/drivers/crypto/mvsam/Makefile  42
-rw-r--r--  src/spdk/dpdk/drivers/crypto/mvsam/meson.build  21
-rw-r--r--  src/spdk/dpdk/drivers/crypto/mvsam/rte_mrvl_compat.h  23
-rw-r--r--  src/spdk/dpdk/drivers/crypto/mvsam/rte_mrvl_pmd.c  937
-rw-r--r--  src/spdk/dpdk/drivers/crypto/mvsam/rte_mrvl_pmd_ops.c  722
-rw-r--r--  src/spdk/dpdk/drivers/crypto/mvsam/rte_mrvl_pmd_private.h  95
-rw-r--r--  src/spdk/dpdk/drivers/crypto/mvsam/rte_pmd_mvsam_version.map  3
-rw-r--r--  src/spdk/dpdk/drivers/crypto/null/Makefile  30
-rw-r--r--  src/spdk/dpdk/drivers/crypto/null/meson.build  6
-rw-r--r--  src/spdk/dpdk/drivers/crypto/null/null_crypto_pmd.c  253
-rw-r--r--  src/spdk/dpdk/drivers/crypto/null/null_crypto_pmd_ops.c  331
-rw-r--r--  src/spdk/dpdk/drivers/crypto/null/null_crypto_pmd_private.h  52
-rw-r--r--  src/spdk/dpdk/drivers/crypto/null/rte_pmd_null_crypto_version.map  3
-rw-r--r--  src/spdk/dpdk/drivers/crypto/openssl/Makefile  29
-rw-r--r--  src/spdk/dpdk/drivers/crypto/openssl/compat.h  108
-rw-r--r--  src/spdk/dpdk/drivers/crypto/openssl/meson.build  11
-rw-r--r--  src/spdk/dpdk/drivers/crypto/openssl/rte_openssl_pmd.c  2194
-rw-r--r--  src/spdk/dpdk/drivers/crypto/openssl/rte_openssl_pmd_ops.c  1264
-rw-r--r--  src/spdk/dpdk/drivers/crypto/openssl/rte_openssl_pmd_private.h  185
-rw-r--r--  src/spdk/dpdk/drivers/crypto/openssl/rte_pmd_openssl_version.map  3
-rw-r--r--  src/spdk/dpdk/drivers/crypto/qat/README  7
-rw-r--r--  src/spdk/dpdk/drivers/crypto/qat/meson.build  18
-rw-r--r--  src/spdk/dpdk/drivers/crypto/qat/qat_sym.c  569
-rw-r--r--  src/spdk/dpdk/drivers/crypto/qat/qat_sym.h  174
-rw-r--r--  src/spdk/dpdk/drivers/crypto/qat/qat_sym_capabilities.h  557
-rw-r--r--  src/spdk/dpdk/drivers/crypto/qat/qat_sym_pmd.c  331
-rw-r--r--  src/spdk/dpdk/drivers/crypto/qat/qat_sym_pmd.h  41
-rw-r--r--  src/spdk/dpdk/drivers/crypto/qat/qat_sym_session.c  1725
-rw-r--r--  src/spdk/dpdk/drivers/crypto/qat/qat_sym_session.h  145
-rw-r--r--  src/spdk/dpdk/drivers/crypto/scheduler/Makefile  37
-rw-r--r--  src/spdk/dpdk/drivers/crypto/scheduler/rte_cryptodev_scheduler.c  584
-rw-r--r--  src/spdk/dpdk/drivers/crypto/scheduler/rte_cryptodev_scheduler.h  284
-rw-r--r--  src/spdk/dpdk/drivers/crypto/scheduler/rte_cryptodev_scheduler_operations.h  56
-rw-r--r--  src/spdk/dpdk/drivers/crypto/scheduler/rte_pmd_crypto_scheduler_version.map  21
-rw-r--r--  src/spdk/dpdk/drivers/crypto/scheduler/scheduler_failover.c  220
-rw-r--r--  src/spdk/dpdk/drivers/crypto/scheduler/scheduler_multicore.c  413
-rw-r--r--  src/spdk/dpdk/drivers/crypto/scheduler/scheduler_pkt_size_distr.c  420
-rw-r--r--  src/spdk/dpdk/drivers/crypto/scheduler/scheduler_pmd.c  572
-rw-r--r--  src/spdk/dpdk/drivers/crypto/scheduler/scheduler_pmd_ops.c  545
-rw-r--r--  src/spdk/dpdk/drivers/crypto/scheduler/scheduler_pmd_private.h  116
-rw-r--r--  src/spdk/dpdk/drivers/crypto/scheduler/scheduler_roundrobin.c  212
-rw-r--r--  src/spdk/dpdk/drivers/crypto/snow3g/Makefile  38
-rw-r--r--  src/spdk/dpdk/drivers/crypto/snow3g/rte_pmd_snow3g_version.map  3
-rw-r--r--  src/spdk/dpdk/drivers/crypto/snow3g/rte_snow3g_pmd.c  625
-rw-r--r--  src/spdk/dpdk/drivers/crypto/snow3g/rte_snow3g_pmd_ops.c  321
-rw-r--r--  src/spdk/dpdk/drivers/crypto/snow3g/rte_snow3g_pmd_private.h  77
-rw-r--r--  src/spdk/dpdk/drivers/crypto/virtio/Makefile  35
-rw-r--r--  src/spdk/dpdk/drivers/crypto/virtio/meson.build  8
-rw-r--r--  src/spdk/dpdk/drivers/crypto/virtio/rte_pmd_virtio_crypto_version.map  3
-rw-r--r--  src/spdk/dpdk/drivers/crypto/virtio/virtio_crypto_algs.h  28
-rw-r--r--  src/spdk/dpdk/drivers/crypto/virtio/virtio_crypto_capabilities.h  51
-rw-r--r--  src/spdk/dpdk/drivers/crypto/virtio/virtio_cryptodev.c  1505
-rw-r--r--  src/spdk/dpdk/drivers/crypto/virtio/virtio_cryptodev.h  64
-rw-r--r--  src/spdk/dpdk/drivers/crypto/virtio/virtio_logs.h  91
-rw-r--r--  src/spdk/dpdk/drivers/crypto/virtio/virtio_pci.c  462
-rw-r--r--  src/spdk/dpdk/drivers/crypto/virtio/virtio_pci.h  253
-rw-r--r--  src/spdk/dpdk/drivers/crypto/virtio/virtio_ring.h  137
-rw-r--r--  src/spdk/dpdk/drivers/crypto/virtio/virtio_rxtx.c  527
-rw-r--r--  src/spdk/dpdk/drivers/crypto/virtio/virtqueue.c  43
-rw-r--r--  src/spdk/dpdk/drivers/crypto/virtio/virtqueue.h  171
-rw-r--r--  src/spdk/dpdk/drivers/crypto/zuc/Makefile  38
-rw-r--r--  src/spdk/dpdk/drivers/crypto/zuc/rte_pmd_zuc_version.map  3
-rw-r--r--  src/spdk/dpdk/drivers/crypto/zuc/rte_zuc_pmd.c  548
-rw-r--r--  src/spdk/dpdk/drivers/crypto/zuc/rte_zuc_pmd_ops.c  322
-rw-r--r--  src/spdk/dpdk/drivers/crypto/zuc/rte_zuc_pmd_private.h  77
136 files changed, 49131 insertions, 0 deletions
diff --git a/src/spdk/dpdk/drivers/crypto/Makefile b/src/spdk/dpdk/drivers/crypto/Makefile
new file mode 100644
index 00000000..c480cbd3
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/Makefile
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2010-2017 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += aesni_gcm
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += aesni_mb
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO) += armv8
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_OPENSSL) += openssl
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += scheduler
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += snow3g
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_KASUMI) += kasumi
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_ZUC) += zuc
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_MVSAM_CRYPTO) += mvsam
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += null
+ifeq ($(CONFIG_RTE_EAL_VFIO)$(CONFIG_RTE_LIBRTE_FSLMC_BUS),yy)
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC) += dpaa2_sec
+endif
+ifeq ($(CONFIG_RTE_LIBRTE_DPAA_BUS),y)
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA_SEC) += dpaa_sec
+endif
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_VIRTIO_CRYPTO) += virtio
+
+include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/src/spdk/dpdk/drivers/crypto/aesni_gcm/Makefile b/src/spdk/dpdk/drivers/crypto/aesni_gcm/Makefile
new file mode 100644
index 00000000..0a5c1a87
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/aesni_gcm/Makefile
@@ -0,0 +1,32 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2016-2017 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_pmd_aesni_gcm.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_pmd_aesni_gcm_version.map
+
+# external library dependencies
+LDLIBS += -lIPSec_MB
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_cryptodev
+LDLIBS += -lrte_bus_vdev
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += aesni_gcm_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += aesni_gcm_pmd_ops.c
+
+# export include files
+SYMLINK-y-include +=
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/crypto/aesni_gcm/aesni_gcm_ops.h b/src/spdk/dpdk/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
new file mode 100644
index 00000000..45061669
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Intel Corporation
+ */
+
+#ifndef _AESNI_GCM_OPS_H_
+#define _AESNI_GCM_OPS_H_
+
+#ifndef LINUX
+#define LINUX
+#endif
+
+#include <intel-ipsec-mb.h>
+
+/** Supported vector modes */
+enum aesni_gcm_vector_mode {
+ RTE_AESNI_GCM_NOT_SUPPORTED = 0,
+ RTE_AESNI_GCM_SSE,
+ RTE_AESNI_GCM_AVX,
+ RTE_AESNI_GCM_AVX2,
+ RTE_AESNI_GCM_VECTOR_NUM
+};
+
+enum aesni_gcm_key {
+ AESNI_GCM_KEY_128,
+ AESNI_GCM_KEY_192,
+ AESNI_GCM_KEY_256,
+ AESNI_GCM_KEY_NUM
+};
+
+
+typedef void (*aesni_gcm_t)(const struct gcm_key_data *gcm_key_data,
+ struct gcm_context_data *gcm_ctx_data, uint8_t *out,
+ const uint8_t *in, uint64_t plaintext_len, const uint8_t *iv,
+ const uint8_t *aad, uint64_t aad_len,
+ uint8_t *auth_tag, uint64_t auth_tag_len);
+
+typedef void (*aesni_gcm_precomp_t)(const void *key, struct gcm_key_data *gcm_data);
+
+typedef void (*aesni_gcm_init_t)(const struct gcm_key_data *gcm_key_data,
+ struct gcm_context_data *gcm_ctx_data,
+ const uint8_t *iv,
+ uint8_t const *aad,
+ uint64_t aad_len);
+
+typedef void (*aesni_gcm_update_t)(const struct gcm_key_data *gcm_key_data,
+ struct gcm_context_data *gcm_ctx_data,
+ uint8_t *out,
+ const uint8_t *in,
+ uint64_t plaintext_len);
+
+typedef void (*aesni_gcm_finalize_t)(const struct gcm_key_data *gcm_key_data,
+ struct gcm_context_data *gcm_ctx_data,
+ uint8_t *auth_tag,
+ uint64_t auth_tag_len);
+
+/** GCM library function pointer table */
+struct aesni_gcm_ops {
+ aesni_gcm_t enc; /**< GCM encode function pointer */
+ aesni_gcm_t dec; /**< GCM decode function pointer */
+ aesni_gcm_precomp_t precomp; /**< GCM pre-compute */
+ aesni_gcm_init_t init;
+ aesni_gcm_update_t update_enc;
+ aesni_gcm_update_t update_dec;
+ aesni_gcm_finalize_t finalize;
+};
+
+#define AES_GCM_FN(keylen, arch) \
+aes_gcm_enc_##keylen##_##arch,\
+aes_gcm_dec_##keylen##_##arch,\
+aes_gcm_pre_##keylen##_##arch,\
+aes_gcm_init_##keylen##_##arch,\
+aes_gcm_enc_##keylen##_update_##arch,\
+aes_gcm_dec_##keylen##_update_##arch,\
+aes_gcm_enc_##keylen##_finalize_##arch,
+
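+/* GCM function table, indexed first by vector mode (SSE/AVX/AVX2) and then
+ * by key size; entries for the "not supported" mode are left as NULL. */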
+static const struct aesni_gcm_ops gcm_ops[RTE_AESNI_GCM_VECTOR_NUM][AESNI_GCM_KEY_NUM] = {
+ [RTE_AESNI_GCM_NOT_SUPPORTED] = {
+ [AESNI_GCM_KEY_128] = {NULL},
+ [AESNI_GCM_KEY_192] = {NULL},
+ [AESNI_GCM_KEY_256] = {NULL}
+ },
+ [RTE_AESNI_GCM_SSE] = {
+ [AESNI_GCM_KEY_128] = {
+ AES_GCM_FN(128, sse)
+ },
+ [AESNI_GCM_KEY_192] = {
+ AES_GCM_FN(192, sse)
+ },
+ [AESNI_GCM_KEY_256] = {
+ AES_GCM_FN(256, sse)
+ }
+ },
+ [RTE_AESNI_GCM_AVX] = {
+ [AESNI_GCM_KEY_128] = {
+ AES_GCM_FN(128, avx_gen2)
+ },
+ [AESNI_GCM_KEY_192] = {
+ AES_GCM_FN(192, avx_gen2)
+ },
+ [AESNI_GCM_KEY_256] = {
+ AES_GCM_FN(256, avx_gen2)
+ }
+ },
+ [RTE_AESNI_GCM_AVX2] = {
+ [AESNI_GCM_KEY_128] = {
+ AES_GCM_FN(128, avx_gen4)
+ },
+ [AESNI_GCM_KEY_192] = {
+ AES_GCM_FN(192, avx_gen4)
+ },
+ [AESNI_GCM_KEY_256] = {
+ AES_GCM_FN(256, avx_gen4)
+ }
+ }
+};
+#endif /* _AESNI_GCM_OPS_H_ */
diff --git a/src/spdk/dpdk/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c b/src/spdk/dpdk/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
new file mode 100644
index 00000000..752e0cd6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
@@ -0,0 +1,585 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Intel Corporation
+ */
+
+#include <rte_common.h>
+#include <rte_hexdump.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_bus_vdev.h>
+#include <rte_malloc.h>
+#include <rte_cpuflags.h>
+#include <rte_byteorder.h>
+
+#include "aesni_gcm_pmd_private.h"
+
+static uint8_t cryptodev_driver_id;
+
+/** Parse crypto xform chain and set private session parameters */
+int
+aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops,
+ struct aesni_gcm_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ const struct rte_crypto_sym_xform *auth_xform;
+ const struct rte_crypto_sym_xform *aead_xform;
+ uint16_t digest_length;
+ uint8_t key_length;
+ uint8_t *key;
+
+ /* AES-GMAC */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ auth_xform = xform;
+ if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_AES_GMAC) {
+ AESNI_GCM_LOG(ERR, "Only AES GMAC is supported as an "
+ "authentication only algorithm");
+ return -ENOTSUP;
+ }
+ /* Set IV parameters */
+ sess->iv.offset = auth_xform->auth.iv.offset;
+ sess->iv.length = auth_xform->auth.iv.length;
+
+ /* Select Crypto operation */
+ if (auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE)
+ sess->op = AESNI_GMAC_OP_GENERATE;
+ else
+ sess->op = AESNI_GMAC_OP_VERIFY;
+
+ key_length = auth_xform->auth.key.length;
+ key = auth_xform->auth.key.data;
+ digest_length = auth_xform->auth.digest_length;
+
+ /* AES-GCM */
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+ aead_xform = xform;
+
+ if (aead_xform->aead.algo != RTE_CRYPTO_AEAD_AES_GCM) {
+ AESNI_GCM_LOG(ERR, "The only combined operation "
+ "supported is AES GCM");
+ return -ENOTSUP;
+ }
+
+ /* Set IV parameters */
+ sess->iv.offset = aead_xform->aead.iv.offset;
+ sess->iv.length = aead_xform->aead.iv.length;
+
+ /* Select Crypto operation */
+ if (aead_xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
+ sess->op = AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION;
+ else
+ sess->op = AESNI_GCM_OP_AUTHENTICATED_DECRYPTION;
+
+ key_length = aead_xform->aead.key.length;
+ key = aead_xform->aead.key.data;
+
+ sess->aad_length = aead_xform->aead.aad_length;
+ digest_length = aead_xform->aead.digest_length;
+ } else {
+ AESNI_GCM_LOG(ERR, "Wrong xform type, has to be AEAD or authentication");
+ return -ENOTSUP;
+ }
+
+
+ /* IV check */
+ if (sess->iv.length != 16 && sess->iv.length != 12 &&
+ sess->iv.length != 0) {
+ AESNI_GCM_LOG(ERR, "Wrong IV length");
+ return -EINVAL;
+ }
+
+ /* Check key length and calculate GCM pre-compute. */
+ switch (key_length) {
+ case 16:
+ sess->key = AESNI_GCM_KEY_128;
+ break;
+ case 24:
+ sess->key = AESNI_GCM_KEY_192;
+ break;
+ case 32:
+ sess->key = AESNI_GCM_KEY_256;
+ break;
+ default:
+ AESNI_GCM_LOG(ERR, "Invalid key length");
+ return -EINVAL;
+ }
+
+ gcm_ops[sess->key].precomp(key, &sess->gdata_key);
+
+ /* Digest check */
+ if (digest_length != 16 &&
+ digest_length != 12 &&
+ digest_length != 8) {
+ AESNI_GCM_LOG(ERR, "Invalid digest length");
+ return -EINVAL;
+ }
+ sess->digest_length = digest_length;
+
+ return 0;
+}
+
+/** Get gcm session */
+static struct aesni_gcm_session *
+aesni_gcm_get_session(struct aesni_gcm_qp *qp, struct rte_crypto_op *op)
+{
+ struct aesni_gcm_session *sess = NULL;
+ struct rte_crypto_sym_op *sym_op = op->sym;
+
+ if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+ if (likely(sym_op->session != NULL))
+ sess = (struct aesni_gcm_session *)
+ get_sym_session_private_data(
+ sym_op->session,
+ cryptodev_driver_id);
+ } else {
+ void *_sess;
+ void *_sess_private_data = NULL;
+
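+		/* Session-less operation: take a temporary session and its
+		 * private data from the queue pair mempool and initialise
+		 * them from the operation's xform chain. */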
+ if (rte_mempool_get(qp->sess_mp, (void **)&_sess))
+ return NULL;
+
+ if (rte_mempool_get(qp->sess_mp, (void **)&_sess_private_data))
+ return NULL;
+
+ sess = (struct aesni_gcm_session *)_sess_private_data;
+
+ if (unlikely(aesni_gcm_set_session_parameters(qp->ops,
+ sess, sym_op->xform) != 0)) {
+ rte_mempool_put(qp->sess_mp, _sess);
+ rte_mempool_put(qp->sess_mp, _sess_private_data);
+ sess = NULL;
+ }
+ sym_op->session = (struct rte_cryptodev_sym_session *)_sess;
+ set_sym_session_private_data(sym_op->session,
+ cryptodev_driver_id, _sess_private_data);
+ }
+
+ if (unlikely(sess == NULL))
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+
+ return sess;
+}
+
+/**
+ * Process a crypto operation, calling
+ * the GCM API from the multi buffer library.
+ *
+ * @param qp queue pair
+ * @param op symmetric crypto operation
+ * @param session GCM session
+ *
+ * @return
+ *  - 0 on success
+ */
+static int
+process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_op *op,
+ struct aesni_gcm_session *session)
+{
+ uint8_t *src, *dst;
+ uint8_t *iv_ptr;
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ struct rte_mbuf *m_src = sym_op->m_src;
+ uint32_t offset, data_offset, data_length;
+ uint32_t part_len, total_len, data_len;
+
+ if (session->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION ||
+ session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {
+ offset = sym_op->aead.data.offset;
+ data_offset = offset;
+ data_length = sym_op->aead.data.length;
+ } else {
+ offset = sym_op->auth.data.offset;
+ data_offset = offset;
+ data_length = sym_op->auth.data.length;
+ }
+
+ RTE_ASSERT(m_src != NULL);
+
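+	/* Walk the source mbuf chain to the segment that contains the
+	 * start of the data area for this operation. */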
+ while (offset >= m_src->data_len && data_length != 0) {
+ offset -= m_src->data_len;
+ m_src = m_src->next;
+
+ RTE_ASSERT(m_src != NULL);
+ }
+
+ data_len = m_src->data_len - offset;
+ part_len = (data_len < data_length) ? data_len :
+ data_length;
+
+	/* A destination buffer is required when the source buffer is segmented */
+ RTE_ASSERT((part_len == data_length) ||
+ ((part_len != data_length) &&
+ (sym_op->m_dst != NULL)));
+ /* Segmented destination buffer is not supported */
+ RTE_ASSERT((sym_op->m_dst == NULL) ||
+ ((sym_op->m_dst != NULL) &&
+ rte_pktmbuf_is_contiguous(sym_op->m_dst)));
+
+
+ dst = sym_op->m_dst ?
+ rte_pktmbuf_mtod_offset(sym_op->m_dst, uint8_t *,
+ data_offset) :
+ rte_pktmbuf_mtod_offset(sym_op->m_src, uint8_t *,
+ data_offset);
+
+ src = rte_pktmbuf_mtod_offset(m_src, uint8_t *, offset);
+
+ iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ session->iv.offset);
+	/*
+	 * GCM with a 12-byte IV uses a 16-byte pre-counter block: set its
+	 * big-endian LSB to 1. The driver expects all 16 bytes to be allocated.
+	 */
+ if (session->iv.length == 12) {
+ uint32_t *iv_padd = (uint32_t *)&(iv_ptr[12]);
+ *iv_padd = rte_bswap32(1);
+ }
+
+ if (session->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION) {
+
+ qp->ops[session->key].init(&session->gdata_key,
+ &qp->gdata_ctx,
+ iv_ptr,
+ sym_op->aead.aad.data,
+ (uint64_t)session->aad_length);
+
+ qp->ops[session->key].update_enc(&session->gdata_key,
+ &qp->gdata_ctx, dst, src,
+ (uint64_t)part_len);
+ total_len = data_length - part_len;
+
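+		/* Feed the remaining source segments to the incremental
+		 * encrypt function until the whole data area is processed. */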
+ while (total_len) {
+ dst += part_len;
+ m_src = m_src->next;
+
+ RTE_ASSERT(m_src != NULL);
+
+ src = rte_pktmbuf_mtod(m_src, uint8_t *);
+ part_len = (m_src->data_len < total_len) ?
+ m_src->data_len : total_len;
+
+ qp->ops[session->key].update_enc(&session->gdata_key,
+ &qp->gdata_ctx, dst, src,
+ (uint64_t)part_len);
+ total_len -= part_len;
+ }
+
+ qp->ops[session->key].finalize(&session->gdata_key,
+ &qp->gdata_ctx,
+ sym_op->aead.digest.data,
+ (uint64_t)session->digest_length);
+ } else if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {
+ uint8_t *auth_tag = qp->temp_digest;
+
+ qp->ops[session->key].init(&session->gdata_key,
+ &qp->gdata_ctx,
+ iv_ptr,
+ sym_op->aead.aad.data,
+ (uint64_t)session->aad_length);
+
+ qp->ops[session->key].update_dec(&session->gdata_key,
+ &qp->gdata_ctx, dst, src,
+ (uint64_t)part_len);
+ total_len = data_length - part_len;
+
+ while (total_len) {
+ dst += part_len;
+ m_src = m_src->next;
+
+ RTE_ASSERT(m_src != NULL);
+
+ src = rte_pktmbuf_mtod(m_src, uint8_t *);
+ part_len = (m_src->data_len < total_len) ?
+ m_src->data_len : total_len;
+
+ qp->ops[session->key].update_dec(&session->gdata_key,
+ &qp->gdata_ctx,
+ dst, src,
+ (uint64_t)part_len);
+ total_len -= part_len;
+ }
+
+ qp->ops[session->key].finalize(&session->gdata_key,
+ &qp->gdata_ctx,
+ auth_tag,
+ (uint64_t)session->digest_length);
+ } else if (session->op == AESNI_GMAC_OP_GENERATE) {
+ qp->ops[session->key].init(&session->gdata_key,
+ &qp->gdata_ctx,
+ iv_ptr,
+ src,
+ (uint64_t)data_length);
+ qp->ops[session->key].finalize(&session->gdata_key,
+ &qp->gdata_ctx,
+ sym_op->auth.digest.data,
+ (uint64_t)session->digest_length);
+ } else { /* AESNI_GMAC_OP_VERIFY */
+ uint8_t *auth_tag = qp->temp_digest;
+
+ qp->ops[session->key].init(&session->gdata_key,
+ &qp->gdata_ctx,
+ iv_ptr,
+ src,
+ (uint64_t)data_length);
+
+ qp->ops[session->key].finalize(&session->gdata_key,
+ &qp->gdata_ctx,
+ auth_tag,
+ (uint64_t)session->digest_length);
+ }
+
+ return 0;
+}
+
+/**
+ * Post-process a completed crypto operation: set the operation status and,
+ * for decryption and GMAC verify operations, check the computed digest
+ * against the digest supplied by the application.
+ *
+ * @param qp		queue pair
+ * @param op		crypto operation
+ * @param session	GCM session
+ */
+static void
+post_process_gcm_crypto_op(struct aesni_gcm_qp *qp,
+ struct rte_crypto_op *op,
+ struct aesni_gcm_session *session)
+{
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+ /* Verify digest if required */
+ if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION ||
+ session->op == AESNI_GMAC_OP_VERIFY) {
+ uint8_t *digest;
+
+ uint8_t *tag = qp->temp_digest;
+
+ if (session->op == AESNI_GMAC_OP_VERIFY)
+ digest = op->sym->auth.digest.data;
+ else
+ digest = op->sym->aead.digest.data;
+
+#ifdef RTE_LIBRTE_PMD_AESNI_GCM_DEBUG
+ rte_hexdump(stdout, "auth tag (orig):",
+ digest, session->digest_length);
+ rte_hexdump(stdout, "auth tag (calc):",
+ tag, session->digest_length);
+#endif
+
+ if (memcmp(tag, digest, session->digest_length) != 0)
+ op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ }
+}
+
+/**
+ * Handle a completed GCM request: post-process the operation and, for a
+ * session-less operation, return the temporary session to the mempool.
+ *
+ * @param qp		Queue Pair to process
+ * @param op		Crypto operation
+ * @param sess		GCM session
+ */
+static void
+handle_completed_gcm_crypto_op(struct aesni_gcm_qp *qp,
+ struct rte_crypto_op *op,
+ struct aesni_gcm_session *sess)
+{
+ post_process_gcm_crypto_op(qp, op, sess);
+
+ /* Free session if a session-less crypto op */
+ if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+ memset(sess, 0, sizeof(struct aesni_gcm_session));
+ memset(op->sym->session, 0,
+ rte_cryptodev_sym_get_header_session_size());
+ rte_mempool_put(qp->sess_mp, sess);
+ rte_mempool_put(qp->sess_mp, op->sym->session);
+ op->sym->session = NULL;
+ }
+}
+
+static uint16_t
+aesni_gcm_pmd_dequeue_burst(void *queue_pair,
+ struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ struct aesni_gcm_session *sess;
+ struct aesni_gcm_qp *qp = queue_pair;
+
+ int retval = 0;
+ unsigned int i, nb_dequeued;
+
+ nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
+ (void **)ops, nb_ops, NULL);
+
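+	/* This PMD does the crypto work at dequeue time: every operation
+	 * pulled off the ring is processed here before being returned. */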
+ for (i = 0; i < nb_dequeued; i++) {
+
+ sess = aesni_gcm_get_session(qp, ops[i]);
+ if (unlikely(sess == NULL)) {
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ qp->qp_stats.dequeue_err_count++;
+ break;
+ }
+
+ retval = process_gcm_crypto_op(qp, ops[i], sess);
+ if (retval < 0) {
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ qp->qp_stats.dequeue_err_count++;
+ break;
+ }
+
+ handle_completed_gcm_crypto_op(qp, ops[i], sess);
+ }
+
+ qp->qp_stats.dequeued_count += i;
+
+ return i;
+}
+
+static uint16_t
+aesni_gcm_pmd_enqueue_burst(void *queue_pair,
+ struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ struct aesni_gcm_qp *qp = queue_pair;
+
+ unsigned int nb_enqueued;
+
+ nb_enqueued = rte_ring_enqueue_burst(qp->processed_pkts,
+ (void **)ops, nb_ops, NULL);
+ qp->qp_stats.enqueued_count += nb_enqueued;
+
+ return nb_enqueued;
+}
+
+static int aesni_gcm_remove(struct rte_vdev_device *vdev);
+
+static int
+aesni_gcm_create(const char *name,
+ struct rte_vdev_device *vdev,
+ struct rte_cryptodev_pmd_init_params *init_params)
+{
+ struct rte_cryptodev *dev;
+ struct aesni_gcm_private *internals;
+ enum aesni_gcm_vector_mode vector_mode;
+
+ /* Check CPU for support for AES instruction set */
+ if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES)) {
+ AESNI_GCM_LOG(ERR, "AES instructions not supported by CPU");
+ return -EFAULT;
+ }
+ dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
+ if (dev == NULL) {
+ AESNI_GCM_LOG(ERR, "driver %s: create failed",
+ init_params->name);
+ return -ENODEV;
+ }
+
+ /* Check CPU for supported vector instruction set */
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
+ vector_mode = RTE_AESNI_GCM_AVX2;
+ else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
+ vector_mode = RTE_AESNI_GCM_AVX;
+ else
+ vector_mode = RTE_AESNI_GCM_SSE;
+
+ dev->driver_id = cryptodev_driver_id;
+ dev->dev_ops = rte_aesni_gcm_pmd_ops;
+
+ /* register rx/tx burst functions for data path */
+ dev->dequeue_burst = aesni_gcm_pmd_dequeue_burst;
+ dev->enqueue_burst = aesni_gcm_pmd_enqueue_burst;
+
+ dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+ RTE_CRYPTODEV_FF_CPU_AESNI |
+ RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
+ RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
+
+ switch (vector_mode) {
+ case RTE_AESNI_GCM_SSE:
+ dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
+ break;
+ case RTE_AESNI_GCM_AVX:
+ dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
+ break;
+ case RTE_AESNI_GCM_AVX2:
+ dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
+ break;
+ default:
+ break;
+ }
+
+ internals = dev->data->dev_private;
+
+ internals->vector_mode = vector_mode;
+
+ internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
+
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
+ AESNI_GCM_LOG(INFO, "IPSec Multi-buffer library version used: %s\n",
+ imb_get_version_str());
+#else
+ AESNI_GCM_LOG(INFO, "IPSec Multi-buffer library version used: 0.49.0\n");
+#endif
+
+ return 0;
+}
+
+static int
+aesni_gcm_probe(struct rte_vdev_device *vdev)
+{
+ struct rte_cryptodev_pmd_init_params init_params = {
+ "",
+ sizeof(struct aesni_gcm_private),
+ rte_socket_id(),
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
+ };
+ const char *name;
+ const char *input_args;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+ input_args = rte_vdev_device_args(vdev);
+ rte_cryptodev_pmd_parse_input_args(&init_params, input_args);
+
+ return aesni_gcm_create(name, vdev, &init_params);
+}
+
+static int
+aesni_gcm_remove(struct rte_vdev_device *vdev)
+{
+ struct rte_cryptodev *cryptodev;
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+
+ cryptodev = rte_cryptodev_pmd_get_named_dev(name);
+ if (cryptodev == NULL)
+ return -ENODEV;
+
+ return rte_cryptodev_pmd_destroy(cryptodev);
+}
+
+static struct rte_vdev_driver aesni_gcm_pmd_drv = {
+ .probe = aesni_gcm_probe,
+ .remove = aesni_gcm_remove
+};
+
+static struct cryptodev_driver aesni_gcm_crypto_drv;
+
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_AESNI_GCM_PMD, aesni_gcm_pmd_drv);
+RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_AESNI_GCM_PMD, cryptodev_aesni_gcm_pmd);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_GCM_PMD,
+ "max_nb_queue_pairs=<int> "
+ "socket_id=<int>");
+RTE_PMD_REGISTER_CRYPTO_DRIVER(aesni_gcm_crypto_drv, aesni_gcm_pmd_drv.driver,
+ cryptodev_driver_id);
+
+
+RTE_INIT(aesni_gcm_init_log)
+{
+ aesni_gcm_logtype_driver = rte_log_register("pmd.crypto.aesni_gcm");
+}
diff --git a/src/spdk/dpdk/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c b/src/spdk/dpdk/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
new file mode 100644
index 00000000..b6b4dd02
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
@@ -0,0 +1,333 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Intel Corporation
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "aesni_gcm_pmd_private.h"
+
+static const struct rte_cryptodev_capabilities aesni_gcm_pmd_capabilities[] = {
+ { /* AES GMAC (AUTH) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_AES_GMAC,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .digest_size = {
+ .min = 8,
+ .max = 16,
+ .increment = 4
+ },
+ .iv_size = {
+ .min = 12,
+ .max = 12,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* AES GCM */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+ {.aead = {
+ .algo = RTE_CRYPTO_AEAD_AES_GCM,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .digest_size = {
+ .min = 8,
+ .max = 16,
+ .increment = 4
+ },
+ .aad_size = {
+ .min = 0,
+ .max = 65535,
+ .increment = 1
+ },
+ .iv_size = {
+ .min = 12,
+ .max = 12,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+/** Configure device */
+static int
+aesni_gcm_pmd_config(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused struct rte_cryptodev_config *config)
+{
+ return 0;
+}
+
+/** Start device */
+static int
+aesni_gcm_pmd_start(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+/** Stop device */
+static void
+aesni_gcm_pmd_stop(__rte_unused struct rte_cryptodev *dev)
+{
+}
+
+/** Close device */
+static int
+aesni_gcm_pmd_close(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+
+/** Get device statistics */
+static void
+aesni_gcm_pmd_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct aesni_gcm_qp *qp = dev->data->queue_pairs[qp_id];
+
+ stats->enqueued_count += qp->qp_stats.enqueued_count;
+ stats->dequeued_count += qp->qp_stats.dequeued_count;
+
+ stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
+ stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
+ }
+}
+
+/** Reset device statistics */
+static void
+aesni_gcm_pmd_stats_reset(struct rte_cryptodev *dev)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct aesni_gcm_qp *qp = dev->data->queue_pairs[qp_id];
+
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ }
+}
+
+
+/** Get device info */
+static void
+aesni_gcm_pmd_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *dev_info)
+{
+ struct aesni_gcm_private *internals = dev->data->dev_private;
+
+ if (dev_info != NULL) {
+ dev_info->driver_id = dev->driver_id;
+ dev_info->feature_flags = dev->feature_flags;
+ dev_info->capabilities = aesni_gcm_pmd_capabilities;
+
+ dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
+		/* No limit on the number of sessions */
+ dev_info->sym.max_nb_sessions = 0;
+ }
+}
+
+/** Release queue pair */
+static int
+aesni_gcm_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+ if (dev->data->queue_pairs[qp_id] != NULL) {
+ rte_free(dev->data->queue_pairs[qp_id]);
+ dev->data->queue_pairs[qp_id] = NULL;
+ }
+ return 0;
+}
+
+/** Set a unique name for the queue pair based on its dev_id and qp_id */
+static int
+aesni_gcm_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
+ struct aesni_gcm_qp *qp)
+{
+ unsigned n = snprintf(qp->name, sizeof(qp->name),
+ "aesni_gcm_pmd_%u_qp_%u",
+ dev->data->dev_id, qp->id);
+
+ if (n >= sizeof(qp->name))
+ return -1;
+
+ return 0;
+}
+
+/** Create a ring to place processed packets on */
+static struct rte_ring *
+aesni_gcm_pmd_qp_create_processed_pkts_ring(struct aesni_gcm_qp *qp,
+ unsigned ring_size, int socket_id)
+{
+ struct rte_ring *r;
+
+ r = rte_ring_lookup(qp->name);
+ if (r) {
+ if (rte_ring_get_size(r) >= ring_size) {
+ AESNI_GCM_LOG(INFO, "Reusing existing ring %s for processed"
+ " packets", qp->name);
+ return r;
+ }
+ AESNI_GCM_LOG(ERR, "Unable to reuse existing ring %s for processed"
+ " packets", qp->name);
+ return NULL;
+ }
+
+ return rte_ring_create(qp->name, ring_size, socket_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+}
+
+/** Setup a queue pair */
+static int
+aesni_gcm_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id, struct rte_mempool *session_pool)
+{
+ struct aesni_gcm_qp *qp = NULL;
+ struct aesni_gcm_private *internals = dev->data->dev_private;
+
+ /* Free memory prior to re-allocation if needed. */
+ if (dev->data->queue_pairs[qp_id] != NULL)
+ aesni_gcm_pmd_qp_release(dev, qp_id);
+
+ /* Allocate the queue pair data structure. */
+ qp = rte_zmalloc_socket("AES-NI PMD Queue Pair", sizeof(*qp),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (qp == NULL)
+ return (-ENOMEM);
+
+ qp->id = qp_id;
+ dev->data->queue_pairs[qp_id] = qp;
+
+ if (aesni_gcm_pmd_qp_set_unique_name(dev, qp))
+ goto qp_setup_cleanup;
+
+ qp->ops = (const struct aesni_gcm_ops *)gcm_ops[internals->vector_mode];
+
+ qp->processed_pkts = aesni_gcm_pmd_qp_create_processed_pkts_ring(qp,
+ qp_conf->nb_descriptors, socket_id);
+ if (qp->processed_pkts == NULL)
+ goto qp_setup_cleanup;
+
+ qp->sess_mp = session_pool;
+
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+
+ return 0;
+
+qp_setup_cleanup:
+ if (qp)
+ rte_free(qp);
+
+ return -1;
+}
+
+/** Return the number of allocated queue pairs */
+static uint32_t
+aesni_gcm_pmd_qp_count(struct rte_cryptodev *dev)
+{
+ return dev->data->nb_queue_pairs;
+}
+
+/** Returns the size of the aesni gcm session structure */
+static unsigned
+aesni_gcm_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+ return sizeof(struct aesni_gcm_session);
+}
+
+/** Configure an aesni gcm session from a crypto xform chain */
+static int
+aesni_gcm_pmd_sym_session_configure(struct rte_cryptodev *dev __rte_unused,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mempool)
+{
+ void *sess_private_data;
+ int ret;
+ struct aesni_gcm_private *internals = dev->data->dev_private;
+
+ if (unlikely(sess == NULL)) {
+ AESNI_GCM_LOG(ERR, "invalid session struct");
+ return -EINVAL;
+ }
+
+ if (rte_mempool_get(mempool, &sess_private_data)) {
+ AESNI_GCM_LOG(ERR,
+ "Couldn't get object from session mempool");
+ return -ENOMEM;
+ }
+ ret = aesni_gcm_set_session_parameters(gcm_ops[internals->vector_mode],
+ sess_private_data, xform);
+ if (ret != 0) {
+		AESNI_GCM_LOG(ERR, "failed to configure session parameters");
+
+ /* Return session to mempool */
+ rte_mempool_put(mempool, sess_private_data);
+ return ret;
+ }
+
+ set_sym_session_private_data(sess, dev->driver_id,
+ sess_private_data);
+
+ return 0;
+}
+
+/** Clear the memory of session so it doesn't leave key material behind */
+static void
+aesni_gcm_pmd_sym_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
+{
+ uint8_t index = dev->driver_id;
+ void *sess_priv = get_sym_session_private_data(sess, index);
+
+ /* Zero out the whole structure */
+ if (sess_priv) {
+ memset(sess_priv, 0, sizeof(struct aesni_gcm_session));
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+ set_sym_session_private_data(sess, index, NULL);
+ rte_mempool_put(sess_mp, sess_priv);
+ }
+}
+
+struct rte_cryptodev_ops aesni_gcm_pmd_ops = {
+ .dev_configure = aesni_gcm_pmd_config,
+ .dev_start = aesni_gcm_pmd_start,
+ .dev_stop = aesni_gcm_pmd_stop,
+ .dev_close = aesni_gcm_pmd_close,
+
+ .stats_get = aesni_gcm_pmd_stats_get,
+ .stats_reset = aesni_gcm_pmd_stats_reset,
+
+ .dev_infos_get = aesni_gcm_pmd_info_get,
+
+ .queue_pair_setup = aesni_gcm_pmd_qp_setup,
+ .queue_pair_release = aesni_gcm_pmd_qp_release,
+ .queue_pair_count = aesni_gcm_pmd_qp_count,
+
+ .sym_session_get_size = aesni_gcm_pmd_sym_session_get_size,
+ .sym_session_configure = aesni_gcm_pmd_sym_session_configure,
+ .sym_session_clear = aesni_gcm_pmd_sym_session_clear
+};
+
+struct rte_cryptodev_ops *rte_aesni_gcm_pmd_ops = &aesni_gcm_pmd_ops;
diff --git a/src/spdk/dpdk/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h b/src/spdk/dpdk/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
new file mode 100644
index 00000000..c13a12a5
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Intel Corporation
+ */
+
+#ifndef _RTE_AESNI_GCM_PMD_PRIVATE_H_
+#define _RTE_AESNI_GCM_PMD_PRIVATE_H_
+
+#include "aesni_gcm_ops.h"
+
+/*
+ * The IMB_VERSION_NUM macro was introduced in Multi-buffer version 0.50,
+ * so if the macro is not defined, the version in use is 0.49.
+ */
+#if !defined(IMB_VERSION_NUM)
+#define IMB_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))
+#define IMB_VERSION_NUM IMB_VERSION(0, 49, 0)
+#endif
+
+#define CRYPTODEV_NAME_AESNI_GCM_PMD crypto_aesni_gcm
+/**< AES-NI GCM PMD device name */
+
+/** AES-NI GCM PMD LOGTYPE DRIVER */
+int aesni_gcm_logtype_driver;
+#define AESNI_GCM_LOG(level, fmt, ...) \
+ rte_log(RTE_LOG_ ## level, aesni_gcm_logtype_driver, \
+ "%s() line %u: "fmt "\n", __func__, __LINE__, \
+ ## __VA_ARGS__)
+
+/* Maximum length for digest */
+#define DIGEST_LENGTH_MAX 16
+
+/** private data structure for each virtual AESNI GCM device */
+struct aesni_gcm_private {
+ enum aesni_gcm_vector_mode vector_mode;
+ /**< Vector mode */
+ unsigned max_nb_queue_pairs;
+ /**< Max number of queue pairs supported by device */
+};
+
+struct aesni_gcm_qp {
+ const struct aesni_gcm_ops *ops;
+ /**< Architecture dependent function pointer table of the gcm APIs */
+ struct rte_ring *processed_pkts;
+	/**< Ring for placing processed packets */
+ struct gcm_context_data gdata_ctx; /* (16 * 5) + 8 = 88 B */
+ /**< GCM parameters */
+ struct rte_cryptodev_stats qp_stats; /* 8 * 4 = 32 B */
+ /**< Queue pair statistics */
+ struct rte_mempool *sess_mp;
+ /**< Session Mempool */
+ uint16_t id;
+ /**< Queue Pair Identifier */
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ /**< Unique Queue Pair Name */
+ uint8_t temp_digest[DIGEST_LENGTH_MAX];
+ /**< Buffer used to store the digest generated
+ * by the driver when verifying a digest provided
+ * by the user (using authentication verify operation)
+ */
+} __rte_cache_aligned;
+
+
+enum aesni_gcm_operation {
+ AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION,
+ AESNI_GCM_OP_AUTHENTICATED_DECRYPTION,
+ AESNI_GMAC_OP_GENERATE,
+ AESNI_GMAC_OP_VERIFY
+};
+
+/** AESNI GCM private session structure */
+struct aesni_gcm_session {
+ struct {
+ uint16_t length;
+ uint16_t offset;
+ } iv;
+ /**< IV parameters */
+ uint16_t aad_length;
+ /**< AAD length */
+ uint16_t digest_length;
+ /**< Digest length */
+ enum aesni_gcm_operation op;
+ /**< GCM operation type */
+ enum aesni_gcm_key key;
+ /**< GCM key type */
+ struct gcm_key_data gdata_key;
+ /**< GCM parameters */
+};
+
+
+/**
+ * Setup GCM session parameters
+ * @param	ops	gcm ops function table (selected by vector mode)
+ * @param	sess	aesni gcm session structure
+ * @param xform crypto transform chain
+ *
+ * @return
+ * - On success returns 0
+ * - On failure returns error code < 0
+ */
+extern int
+aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *ops,
+ struct aesni_gcm_session *sess,
+ const struct rte_crypto_sym_xform *xform);
+
+
+/**
+ * Device specific operations function pointer structure */
+extern struct rte_cryptodev_ops *rte_aesni_gcm_pmd_ops;
+
+
+#endif /* _RTE_AESNI_GCM_PMD_PRIVATE_H_ */
diff --git a/src/spdk/dpdk/drivers/crypto/aesni_gcm/rte_pmd_aesni_gcm_version.map b/src/spdk/dpdk/drivers/crypto/aesni_gcm/rte_pmd_aesni_gcm_version.map
new file mode 100644
index 00000000..dc4d417b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/aesni_gcm/rte_pmd_aesni_gcm_version.map
@@ -0,0 +1,3 @@
+DPDK_16.04 {
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/crypto/aesni_mb/Makefile b/src/spdk/dpdk/drivers/crypto/aesni_mb/Makefile
new file mode 100644
index 00000000..806a95eb
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/aesni_mb/Makefile
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2015 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_pmd_aesni_mb.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_pmd_aesni_mb_version.map
+
+# external library dependencies
+LDLIBS += -lIPSec_MB
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_cryptodev
+LDLIBS += -lrte_bus_vdev
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += rte_aesni_mb_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += rte_aesni_mb_pmd_ops.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/crypto/aesni_mb/aesni_mb_ops.h b/src/spdk/dpdk/drivers/crypto/aesni_mb/aesni_mb_ops.h
new file mode 100644
index 00000000..5a1cba6c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/aesni_mb/aesni_mb_ops.h
@@ -0,0 +1,221 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015 Intel Corporation
+ */
+
+#ifndef _AESNI_MB_OPS_H_
+#define _AESNI_MB_OPS_H_
+
+#ifndef LINUX
+#define LINUX
+#endif
+
+#include <intel-ipsec-mb.h>
+
+enum aesni_mb_vector_mode {
+ RTE_AESNI_MB_NOT_SUPPORTED = 0,
+ RTE_AESNI_MB_SSE,
+ RTE_AESNI_MB_AVX,
+ RTE_AESNI_MB_AVX2,
+ RTE_AESNI_MB_AVX512
+};
+
+typedef void (*md5_one_block_t)(const void *data, void *digest);
+
+typedef void (*sha1_one_block_t)(const void *data, void *digest);
+typedef void (*sha224_one_block_t)(const void *data, void *digest);
+typedef void (*sha256_one_block_t)(const void *data, void *digest);
+typedef void (*sha384_one_block_t)(const void *data, void *digest);
+typedef void (*sha512_one_block_t)(const void *data, void *digest);
+
+typedef void (*aes_keyexp_128_t)
+ (const void *key, void *enc_exp_keys, void *dec_exp_keys);
+typedef void (*aes_keyexp_192_t)
+ (const void *key, void *enc_exp_keys, void *dec_exp_keys);
+typedef void (*aes_keyexp_256_t)
+ (const void *key, void *enc_exp_keys, void *dec_exp_keys);
+typedef void (*aes_xcbc_expand_key_t)
+ (const void *key, void *exp_k1, void *k2, void *k3);
+typedef void (*aes_cmac_sub_key_gen_t)
+ (const void *exp_key, void *k2, void *k3);
+typedef void (*aes_cmac_keyexp_t)
+ (const void *key, void *keyexp);
+
+/** Multi-buffer library function pointer table */
+struct aesni_mb_op_fns {
+ struct {
+ init_mb_mgr_t init_mgr;
+ /**< Initialise scheduler */
+ get_next_job_t get_next;
+ /**< Get next free job structure */
+ submit_job_t submit;
+ /**< Submit job to scheduler */
+ get_completed_job_t get_completed_job;
+ /**< Get completed job */
+ flush_job_t flush_job;
+ /**< flush jobs from manager */
+ } job;
+ /**< multi buffer manager functions */
+
+ struct {
+ struct {
+ md5_one_block_t md5;
+ /**< MD5 one block hash */
+ sha1_one_block_t sha1;
+ /**< SHA1 one block hash */
+ sha224_one_block_t sha224;
+ /**< SHA224 one block hash */
+ sha256_one_block_t sha256;
+ /**< SHA256 one block hash */
+ sha384_one_block_t sha384;
+ /**< SHA384 one block hash */
+ sha512_one_block_t sha512;
+ /**< SHA512 one block hash */
+ } one_block;
+ /**< one block hash functions */
+
+ struct {
+ aes_keyexp_128_t aes128;
+ /**< AES128 key expansions */
+ aes_keyexp_192_t aes192;
+ /**< AES192 key expansions */
+ aes_keyexp_256_t aes256;
+ /**< AES256 key expansions */
+ aes_xcbc_expand_key_t aes_xcbc;
+			/**< AES XCBC key expansions */
+ aes_cmac_sub_key_gen_t aes_cmac_subkey;
+ /**< AES CMAC subkey expansions */
+ aes_cmac_keyexp_t aes_cmac_expkey;
+ /**< AES CMAC key expansions */
+ } keyexp;
+ /**< Key expansion functions */
+ } aux;
+ /**< Auxiliary functions */
+};
+
+
+static const struct aesni_mb_op_fns job_ops[] = {
+ [RTE_AESNI_MB_NOT_SUPPORTED] = {
+ .job = {
+ NULL
+ },
+ .aux = {
+ .one_block = {
+ NULL
+ },
+ .keyexp = {
+ NULL
+ }
+ }
+ },
+ [RTE_AESNI_MB_SSE] = {
+ .job = {
+ init_mb_mgr_sse,
+ get_next_job_sse,
+ submit_job_sse,
+ get_completed_job_sse,
+ flush_job_sse
+ },
+ .aux = {
+ .one_block = {
+ md5_one_block_sse,
+ sha1_one_block_sse,
+ sha224_one_block_sse,
+ sha256_one_block_sse,
+ sha384_one_block_sse,
+ sha512_one_block_sse
+ },
+ .keyexp = {
+ aes_keyexp_128_sse,
+ aes_keyexp_192_sse,
+ aes_keyexp_256_sse,
+ aes_xcbc_expand_key_sse,
+ aes_cmac_subkey_gen_sse,
+ aes_keyexp_128_enc_sse
+ }
+ }
+ },
+ [RTE_AESNI_MB_AVX] = {
+ .job = {
+ init_mb_mgr_avx,
+ get_next_job_avx,
+ submit_job_avx,
+ get_completed_job_avx,
+ flush_job_avx
+ },
+ .aux = {
+ .one_block = {
+ md5_one_block_avx,
+ sha1_one_block_avx,
+ sha224_one_block_avx,
+ sha256_one_block_avx,
+ sha384_one_block_avx,
+ sha512_one_block_avx
+ },
+ .keyexp = {
+ aes_keyexp_128_avx,
+ aes_keyexp_192_avx,
+ aes_keyexp_256_avx,
+ aes_xcbc_expand_key_avx,
+ aes_cmac_subkey_gen_avx,
+ aes_keyexp_128_enc_avx
+ }
+ }
+ },
+ [RTE_AESNI_MB_AVX2] = {
+ .job = {
+ init_mb_mgr_avx2,
+ get_next_job_avx2,
+ submit_job_avx2,
+ get_completed_job_avx2,
+ flush_job_avx2
+ },
+ .aux = {
+ .one_block = {
+ md5_one_block_avx2,
+ sha1_one_block_avx2,
+ sha224_one_block_avx2,
+ sha256_one_block_avx2,
+ sha384_one_block_avx2,
+ sha512_one_block_avx2
+ },
+ .keyexp = {
+ aes_keyexp_128_avx2,
+ aes_keyexp_192_avx2,
+ aes_keyexp_256_avx2,
+ aes_xcbc_expand_key_avx2,
+ aes_cmac_subkey_gen_avx2,
+ aes_keyexp_128_enc_avx2
+ }
+ }
+ },
+ [RTE_AESNI_MB_AVX512] = {
+ .job = {
+ init_mb_mgr_avx512,
+ get_next_job_avx512,
+ submit_job_avx512,
+ get_completed_job_avx512,
+ flush_job_avx512
+ },
+ .aux = {
+ .one_block = {
+ md5_one_block_avx512,
+ sha1_one_block_avx512,
+ sha224_one_block_avx512,
+ sha256_one_block_avx512,
+ sha384_one_block_avx512,
+ sha512_one_block_avx512
+ },
+ .keyexp = {
+ aes_keyexp_128_avx512,
+ aes_keyexp_192_avx512,
+ aes_keyexp_256_avx512,
+ aes_xcbc_expand_key_avx512,
+ aes_cmac_subkey_gen_avx512,
+ aes_keyexp_128_enc_avx512
+ }
+ }
+ }
+};
+
+
+#endif /* _AESNI_MB_OPS_H_ */
diff --git a/src/spdk/dpdk/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/src/spdk/dpdk/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
new file mode 100644
index 00000000..03ee88be
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
@@ -0,0 +1,1046 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2017 Intel Corporation
+ */
+
+#include <intel-ipsec-mb.h>
+
+#include <rte_common.h>
+#include <rte_hexdump.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_bus_vdev.h>
+#include <rte_malloc.h>
+#include <rte_cpuflags.h>
+
+#include "rte_aesni_mb_pmd_private.h"
+
+static uint8_t cryptodev_driver_id;
+
+typedef void (*hash_one_block_t)(const void *data, void *digest);
+typedef void (*aes_keyexp_t)(const void *key, void *enc_exp_keys, void *dec_exp_keys);
+
+/**
+ * Calculate the authentication pre-computes
+ *
+ * @param one_block_hash Function pointer to calculate digest on ipad/opad
+ * @param ipad Inner pad output byte array
+ * @param opad Outer pad output byte array
+ * @param hkey Authentication key
+ * @param hkey_len Authentication key length
+ * @param blocksize Block size of selected hash algo
+ */
+static void
+calculate_auth_precomputes(hash_one_block_t one_block_hash,
+ uint8_t *ipad, uint8_t *opad,
+ uint8_t *hkey, uint16_t hkey_len,
+ uint16_t blocksize)
+{
+ unsigned i, length;
+
+ uint8_t ipad_buf[blocksize] __rte_aligned(16);
+ uint8_t opad_buf[blocksize] __rte_aligned(16);
+
+ /* Setup inner and outer pads */
+ memset(ipad_buf, HMAC_IPAD_VALUE, blocksize);
+ memset(opad_buf, HMAC_OPAD_VALUE, blocksize);
+
+ /* XOR hash key with inner and outer pads */
+ length = hkey_len > blocksize ? blocksize : hkey_len;
+
+ for (i = 0; i < length; i++) {
+ ipad_buf[i] ^= hkey[i];
+ opad_buf[i] ^= hkey[i];
+ }
+
+ /* Compute partial hashes */
+ (*one_block_hash)(ipad_buf, ipad);
+ (*one_block_hash)(opad_buf, opad);
+
+ /* Clean up stack */
+ memset(ipad_buf, 0, blocksize);
+ memset(opad_buf, 0, blocksize);
+}
+
+/** Get xform chain order */
+static enum aesni_mb_operation
+aesni_mb_get_chain_order(const struct rte_crypto_sym_xform *xform)
+{
+ if (xform == NULL)
+ return AESNI_MB_OP_NOT_SUPPORTED;
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ if (xform->next == NULL)
+ return AESNI_MB_OP_CIPHER_ONLY;
+ if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ return AESNI_MB_OP_CIPHER_HASH;
+ }
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ if (xform->next == NULL)
+ return AESNI_MB_OP_HASH_ONLY;
+ if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+ return AESNI_MB_OP_HASH_CIPHER;
+ }
+
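+	/* For AES-CCM the chain order follows the cipher direction:
+	 * encrypt is cipher-then-hash, decrypt is hash-then-cipher. */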
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+ if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_CCM) {
+ if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
+ return AESNI_MB_OP_AEAD_CIPHER_HASH;
+ else
+ return AESNI_MB_OP_AEAD_HASH_CIPHER;
+ }
+ }
+
+ return AESNI_MB_OP_NOT_SUPPORTED;
+}
+
+/** Set session authentication parameters */
+static int
+aesni_mb_set_session_auth_parameters(const struct aesni_mb_op_fns *mb_ops,
+ struct aesni_mb_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ hash_one_block_t hash_oneblock_fn;
+
+ if (xform == NULL) {
+ sess->auth.algo = NULL_HASH;
+ return 0;
+ }
+
+ if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
+ AESNI_MB_LOG(ERR, "Crypto xform struct not of type auth");
+ return -1;
+ }
+
+ /* Select auth generate/verify */
+ sess->auth.operation = xform->auth.op;
+
+ /* Set Authentication Parameters */
+ if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_XCBC_MAC) {
+ sess->auth.algo = AES_XCBC;
+ (*mb_ops->aux.keyexp.aes_xcbc)(xform->auth.key.data,
+ sess->auth.xcbc.k1_expanded,
+ sess->auth.xcbc.k2, sess->auth.xcbc.k3);
+ return 0;
+ }
+
+ if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_CMAC) {
+ sess->auth.algo = AES_CMAC;
+ (*mb_ops->aux.keyexp.aes_cmac_expkey)(xform->auth.key.data,
+ sess->auth.cmac.expkey);
+
+ (*mb_ops->aux.keyexp.aes_cmac_subkey)(sess->auth.cmac.expkey,
+ sess->auth.cmac.skey1, sess->auth.cmac.skey2);
+ return 0;
+ }
+
+
+ switch (xform->auth.algo) {
+ case RTE_CRYPTO_AUTH_MD5_HMAC:
+ sess->auth.algo = MD5;
+ hash_oneblock_fn = mb_ops->aux.one_block.md5;
+ break;
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ sess->auth.algo = SHA1;
+ hash_oneblock_fn = mb_ops->aux.one_block.sha1;
+ break;
+ case RTE_CRYPTO_AUTH_SHA224_HMAC:
+ sess->auth.algo = SHA_224;
+ hash_oneblock_fn = mb_ops->aux.one_block.sha224;
+ break;
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ sess->auth.algo = SHA_256;
+ hash_oneblock_fn = mb_ops->aux.one_block.sha256;
+ break;
+ case RTE_CRYPTO_AUTH_SHA384_HMAC:
+ sess->auth.algo = SHA_384;
+ hash_oneblock_fn = mb_ops->aux.one_block.sha384;
+ break;
+ case RTE_CRYPTO_AUTH_SHA512_HMAC:
+ sess->auth.algo = SHA_512;
+ hash_oneblock_fn = mb_ops->aux.one_block.sha512;
+ break;
+ default:
+ AESNI_MB_LOG(ERR, "Unsupported authentication algorithm selection");
+ return -ENOTSUP;
+ }
+
+ /* Calculate Authentication precomputes */
+ calculate_auth_precomputes(hash_oneblock_fn,
+ sess->auth.pads.inner, sess->auth.pads.outer,
+ xform->auth.key.data,
+ xform->auth.key.length,
+ get_auth_algo_blocksize(sess->auth.algo));
+
+ return 0;
+}
+
+/** Set session cipher parameters */
+static int
+aesni_mb_set_session_cipher_parameters(const struct aesni_mb_op_fns *mb_ops,
+ struct aesni_mb_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ uint8_t is_aes = 0;
+ uint8_t is_3DES = 0;
+ aes_keyexp_t aes_keyexp_fn;
+
+ if (xform == NULL) {
+ sess->cipher.mode = NULL_CIPHER;
+ return 0;
+ }
+
+ if (xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ AESNI_MB_LOG(ERR, "Crypto xform struct not of type cipher");
+ return -EINVAL;
+ }
+
+ /* Select cipher direction */
+ switch (xform->cipher.op) {
+ case RTE_CRYPTO_CIPHER_OP_ENCRYPT:
+ sess->cipher.direction = ENCRYPT;
+ break;
+ case RTE_CRYPTO_CIPHER_OP_DECRYPT:
+ sess->cipher.direction = DECRYPT;
+ break;
+ default:
+ AESNI_MB_LOG(ERR, "Invalid cipher operation parameter");
+ return -EINVAL;
+ }
+
+ /* Select cipher mode */
+ switch (xform->cipher.algo) {
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ sess->cipher.mode = CBC;
+ is_aes = 1;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ sess->cipher.mode = CNTR;
+ is_aes = 1;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
+ sess->cipher.mode = DOCSIS_SEC_BPI;
+ is_aes = 1;
+ break;
+ case RTE_CRYPTO_CIPHER_DES_CBC:
+ sess->cipher.mode = DES;
+ break;
+ case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
+ sess->cipher.mode = DOCSIS_DES;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ sess->cipher.mode = DES3;
+ is_3DES = 1;
+ break;
+ default:
+ AESNI_MB_LOG(ERR, "Unsupported cipher mode parameter");
+ return -ENOTSUP;
+ }
+
+ /* Set IV parameters */
+ sess->iv.offset = xform->cipher.iv.offset;
+ sess->iv.length = xform->cipher.iv.length;
+
+ /* Check key length and choose key expansion function for AES */
+ if (is_aes) {
+ switch (xform->cipher.key.length) {
+ case AES_128_BYTES:
+ sess->cipher.key_length_in_bytes = AES_128_BYTES;
+ aes_keyexp_fn = mb_ops->aux.keyexp.aes128;
+ break;
+ case AES_192_BYTES:
+ sess->cipher.key_length_in_bytes = AES_192_BYTES;
+ aes_keyexp_fn = mb_ops->aux.keyexp.aes192;
+ break;
+ case AES_256_BYTES:
+ sess->cipher.key_length_in_bytes = AES_256_BYTES;
+ aes_keyexp_fn = mb_ops->aux.keyexp.aes256;
+ break;
+ default:
+ AESNI_MB_LOG(ERR, "Invalid cipher key length");
+ return -EINVAL;
+ }
+
+ /* Expanded cipher keys */
+ (*aes_keyexp_fn)(xform->cipher.key.data,
+ sess->cipher.expanded_aes_keys.encode,
+ sess->cipher.expanded_aes_keys.decode);
+
+ } else if (is_3DES) {
+ uint64_t *keys[3] = {sess->cipher.exp_3des_keys.key[0],
+ sess->cipher.exp_3des_keys.key[1],
+ sess->cipher.exp_3des_keys.key[2]};
+
+ switch (xform->cipher.key.length) {
+ case 24:
+ des_key_schedule(keys[0], xform->cipher.key.data);
+ des_key_schedule(keys[1], xform->cipher.key.data+8);
+ des_key_schedule(keys[2], xform->cipher.key.data+16);
+
+ /* Initialize keys - 24 bytes: [K1-K2-K3] */
+ sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0];
+ sess->cipher.exp_3des_keys.ks_ptr[1] = keys[1];
+ sess->cipher.exp_3des_keys.ks_ptr[2] = keys[2];
+ break;
+ case 16:
+ des_key_schedule(keys[0], xform->cipher.key.data);
+ des_key_schedule(keys[1], xform->cipher.key.data+8);
+
+ /* Initialize keys - 16 bytes: [K1=K1,K2=K2,K3=K1] */
+ sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0];
+ sess->cipher.exp_3des_keys.ks_ptr[1] = keys[1];
+ sess->cipher.exp_3des_keys.ks_ptr[2] = keys[0];
+ break;
+ case 8:
+ des_key_schedule(keys[0], xform->cipher.key.data);
+
+ /* Initialize keys - 8 bytes: [K1 = K2 = K3] */
+ sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0];
+ sess->cipher.exp_3des_keys.ks_ptr[1] = keys[0];
+ sess->cipher.exp_3des_keys.ks_ptr[2] = keys[0];
+ break;
+ default:
+ AESNI_MB_LOG(ERR, "Invalid cipher key length");
+ return -EINVAL;
+ }
+
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
+ sess->cipher.key_length_in_bytes = 24;
+#else
+ sess->cipher.key_length_in_bytes = 8;
+#endif
+ } else {
+ if (xform->cipher.key.length != 8) {
+ AESNI_MB_LOG(ERR, "Invalid cipher key length");
+ return -EINVAL;
+ }
+ sess->cipher.key_length_in_bytes = 8;
+
+ des_key_schedule((uint64_t *)sess->cipher.expanded_aes_keys.encode,
+ xform->cipher.key.data);
+ des_key_schedule((uint64_t *)sess->cipher.expanded_aes_keys.decode,
+ xform->cipher.key.data);
+ }
+
+ return 0;
+}
+
+static int
+aesni_mb_set_session_aead_parameters(const struct aesni_mb_op_fns *mb_ops,
+ struct aesni_mb_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ aes_keyexp_t aes_keyexp_fn;
+
+ switch (xform->aead.op) {
+ case RTE_CRYPTO_AEAD_OP_ENCRYPT:
+ sess->cipher.direction = ENCRYPT;
+ sess->auth.operation = RTE_CRYPTO_AUTH_OP_GENERATE;
+ break;
+ case RTE_CRYPTO_AEAD_OP_DECRYPT:
+ sess->cipher.direction = DECRYPT;
+ sess->auth.operation = RTE_CRYPTO_AUTH_OP_VERIFY;
+ break;
+ default:
+ AESNI_MB_LOG(ERR, "Invalid aead operation parameter");
+ return -EINVAL;
+ }
+
+ switch (xform->aead.algo) {
+ case RTE_CRYPTO_AEAD_AES_CCM:
+ sess->cipher.mode = CCM;
+ sess->auth.algo = AES_CCM;
+ break;
+ default:
+ AESNI_MB_LOG(ERR, "Unsupported aead mode parameter");
+ return -ENOTSUP;
+ }
+
+ /* Set IV parameters */
+ sess->iv.offset = xform->aead.iv.offset;
+ sess->iv.length = xform->aead.iv.length;
+
+ /* Check key length and choose key expansion function for AES */
+
+ switch (xform->aead.key.length) {
+ case AES_128_BYTES:
+ sess->cipher.key_length_in_bytes = AES_128_BYTES;
+ aes_keyexp_fn = mb_ops->aux.keyexp.aes128;
+ break;
+ default:
+ AESNI_MB_LOG(ERR, "Invalid cipher key length");
+ return -EINVAL;
+ }
+
+ /* Expanded cipher keys */
+ (*aes_keyexp_fn)(xform->aead.key.data,
+ sess->cipher.expanded_aes_keys.encode,
+ sess->cipher.expanded_aes_keys.decode);
+
+ return 0;
+}
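+
+/*
+ * Illustrative sketch (not part of the upstream driver): an AES-CCM AEAD
+ * transform as consumed by the function above. The key, IV offset and AAD
+ * values are placeholders; the sizes follow this PMD's capability table
+ * (16-byte key, 7..13-byte IV, up to 46 bytes of AAD, 4..16-byte digest).
+ */
+static int __rte_unused
+example_aes_ccm_aead_xform(const struct aesni_mb_op_fns *mb_ops,
+		struct aesni_mb_session *sess, uint8_t *key)
+{
+	struct rte_crypto_sym_xform aead_xform = {
+		.type = RTE_CRYPTO_SYM_XFORM_AEAD,
+		.next = NULL,
+		.aead = {
+			.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
+			.algo = RTE_CRYPTO_AEAD_AES_CCM,
+			.key = { .data = key, .length = 16 },
+			.iv = { .offset = 0, .length = 13 },
+			.digest_length = 16,
+			.aad_length = 8,
+		},
+	};
+
+	return aesni_mb_set_session_aead_parameters(mb_ops, sess, &aead_xform);
+}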
+
+/** Parse crypto xform chain and set private session parameters */
+int
+aesni_mb_set_session_parameters(const struct aesni_mb_op_fns *mb_ops,
+ struct aesni_mb_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ const struct rte_crypto_sym_xform *auth_xform = NULL;
+ const struct rte_crypto_sym_xform *cipher_xform = NULL;
+ const struct rte_crypto_sym_xform *aead_xform = NULL;
+ int ret;
+
+ /* Select Crypto operation - hash then cipher / cipher then hash */
+ switch (aesni_mb_get_chain_order(xform)) {
+ case AESNI_MB_OP_HASH_CIPHER:
+ sess->chain_order = HASH_CIPHER;
+ auth_xform = xform;
+ cipher_xform = xform->next;
+ sess->auth.digest_len = xform->auth.digest_length;
+ break;
+ case AESNI_MB_OP_CIPHER_HASH:
+ sess->chain_order = CIPHER_HASH;
+ auth_xform = xform->next;
+ cipher_xform = xform;
+ sess->auth.digest_len = xform->auth.digest_length;
+ break;
+ case AESNI_MB_OP_HASH_ONLY:
+ sess->chain_order = HASH_CIPHER;
+ auth_xform = xform;
+ cipher_xform = NULL;
+ sess->auth.digest_len = xform->auth.digest_length;
+ break;
+ case AESNI_MB_OP_CIPHER_ONLY:
+		/*
+		 * The multi-buffer library operates in only two modes,
+		 * CIPHER_HASH and HASH_CIPHER. When doing cipher-only
+		 * operations, the chain order depends on the cipher
+		 * direction: the cipher is always the first operation for
+		 * encryption and the last one for decryption.
+		 */
+ if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+ sess->chain_order = CIPHER_HASH;
+ else
+ sess->chain_order = HASH_CIPHER;
+ auth_xform = NULL;
+ cipher_xform = xform;
+ break;
+ case AESNI_MB_OP_AEAD_CIPHER_HASH:
+ sess->chain_order = CIPHER_HASH;
+ sess->aead.aad_len = xform->aead.aad_length;
+ sess->auth.digest_len = xform->aead.digest_length;
+ aead_xform = xform;
+ break;
+ case AESNI_MB_OP_AEAD_HASH_CIPHER:
+ sess->chain_order = HASH_CIPHER;
+ sess->aead.aad_len = xform->aead.aad_length;
+ sess->auth.digest_len = xform->aead.digest_length;
+ aead_xform = xform;
+ break;
+ case AESNI_MB_OP_NOT_SUPPORTED:
+ default:
+ AESNI_MB_LOG(ERR, "Unsupported operation chain order parameter");
+ return -ENOTSUP;
+ }
+
+ /* Default IV length = 0 */
+ sess->iv.length = 0;
+
+ ret = aesni_mb_set_session_auth_parameters(mb_ops, sess, auth_xform);
+ if (ret != 0) {
+ AESNI_MB_LOG(ERR, "Invalid/unsupported authentication parameters");
+ return ret;
+ }
+
+ ret = aesni_mb_set_session_cipher_parameters(mb_ops, sess,
+ cipher_xform);
+ if (ret != 0) {
+ AESNI_MB_LOG(ERR, "Invalid/unsupported cipher parameters");
+ return ret;
+ }
+
+ if (aead_xform) {
+ ret = aesni_mb_set_session_aead_parameters(mb_ops, sess,
+ aead_xform);
+ if (ret != 0) {
+ AESNI_MB_LOG(ERR, "Invalid/unsupported aead parameters");
+ return ret;
+ }
+ }
+
+ return 0;
+}
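+
+/*
+ * Illustrative sketch (not part of the upstream driver): an AES-128-CBC +
+ * SHA1-HMAC cipher-then-auth chain as parsed by the function above. Keys,
+ * IV offset and digest length are placeholders; in a real application the
+ * IV offset is relative to the start of the rte_crypto_op.
+ */
+static int __rte_unused
+example_cipher_auth_chain(const struct aesni_mb_op_fns *mb_ops,
+		struct aesni_mb_session *sess,
+		uint8_t *cipher_key, uint8_t *auth_key)
+{
+	struct rte_crypto_sym_xform auth_xform = {
+		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
+		.next = NULL,
+		.auth = {
+			.op = RTE_CRYPTO_AUTH_OP_GENERATE,
+			.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+			.key = { .data = auth_key, .length = 20 },
+			.digest_length = 12,
+		},
+	};
+	struct rte_crypto_sym_xform cipher_xform = {
+		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+		.next = &auth_xform,
+		.cipher = {
+			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
+			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
+			.key = { .data = cipher_key, .length = 16 },
+			.iv = { .offset = 0, .length = 16 },
+		},
+	};
+
+	/* Cipher first, auth second -> CIPHER_HASH chain order is selected */
+	return aesni_mb_set_session_parameters(mb_ops, sess, &cipher_xform);
+}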
+
+/**
+ * Burst enqueue: place crypto operations on the ingress queue for processing.
+ *
+ * @param __qp Queue Pair to process
+ * @param ops Crypto operations for processing
+ * @param nb_ops Number of crypto operations for processing
+ *
+ * @return
+ * - Number of crypto operations enqueued
+ */
+static uint16_t
+aesni_mb_pmd_enqueue_burst(void *__qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct aesni_mb_qp *qp = __qp;
+
+ unsigned int nb_enqueued;
+
+ nb_enqueued = rte_ring_enqueue_burst(qp->ingress_queue,
+ (void **)ops, nb_ops, NULL);
+
+ qp->stats.enqueued_count += nb_enqueued;
+
+ return nb_enqueued;
+}
+
+/** Get multi buffer session */
+static inline struct aesni_mb_session *
+get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *op)
+{
+ struct aesni_mb_session *sess = NULL;
+
+ if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+ if (likely(op->sym->session != NULL))
+ sess = (struct aesni_mb_session *)
+ get_sym_session_private_data(
+ op->sym->session,
+ cryptodev_driver_id);
+ } else {
+ void *_sess = NULL;
+ void *_sess_private_data = NULL;
+
+ if (rte_mempool_get(qp->sess_mp, (void **)&_sess))
+ return NULL;
+
+ if (rte_mempool_get(qp->sess_mp, (void **)&_sess_private_data))
+ return NULL;
+
+ sess = (struct aesni_mb_session *)_sess_private_data;
+
+ if (unlikely(aesni_mb_set_session_parameters(qp->op_fns,
+ sess, op->sym->xform) != 0)) {
+ rte_mempool_put(qp->sess_mp, _sess);
+ rte_mempool_put(qp->sess_mp, _sess_private_data);
+ sess = NULL;
+ }
+ op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
+ set_sym_session_private_data(op->sym->session,
+ cryptodev_driver_id, _sess_private_data);
+ }
+
+ if (unlikely(sess == NULL))
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+
+ return sess;
+}
+
+/**
+ * Process a crypto operation and complete a JOB_AES_HMAC job structure for
+ * submission to the multi buffer library for processing.
+ *
+ * @param job		JOB_AES_HMAC structure to fill
+ * @param qp		queue pair
+ * @param op		crypto operation to process
+ * @param digest_idx	index of the next free temporary digest slot
+ *
+ * @return
+ * - 0 on success, when the job structure has been filled
+ * - -1 if the job structure could not be completed
+ */
+#define SPDK_CRYPTO_HACK
+static inline int
+set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
+ struct rte_crypto_op *op, uint8_t *digest_idx)
+{
+#ifdef SPDK_CRYPTO_HACK
+ struct rte_mbuf *m_src = op->sym->m_src;
+ struct rte_mbuf *m_dst = op->sym->m_dst;
+#else
+ struct rte_mbuf *m_src = op->sym->m_src, *m_dst;
+#endif
+
+ struct aesni_mb_session *session;
+ uint16_t m_offset = 0;
+
+ session = get_session(qp, op);
+ if (session == NULL) {
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+ return -1;
+ }
+
+ /* Set crypto operation */
+ job->chain_order = session->chain_order;
+
+ /* Set cipher parameters */
+ job->cipher_direction = session->cipher.direction;
+ job->cipher_mode = session->cipher.mode;
+
+ job->aes_key_len_in_bytes = session->cipher.key_length_in_bytes;
+
+ if (job->cipher_mode == DES3) {
+ job->aes_enc_key_expanded =
+ session->cipher.exp_3des_keys.ks_ptr;
+ job->aes_dec_key_expanded =
+ session->cipher.exp_3des_keys.ks_ptr;
+ } else {
+ job->aes_enc_key_expanded =
+ session->cipher.expanded_aes_keys.encode;
+ job->aes_dec_key_expanded =
+ session->cipher.expanded_aes_keys.decode;
+ }
+
+ /* Set authentication parameters */
+ job->hash_alg = session->auth.algo;
+ if (job->hash_alg == AES_XCBC) {
+ job->u.XCBC._k1_expanded = session->auth.xcbc.k1_expanded;
+ job->u.XCBC._k2 = session->auth.xcbc.k2;
+ job->u.XCBC._k3 = session->auth.xcbc.k3;
+ } else if (job->hash_alg == AES_CCM) {
+ job->u.CCM.aad = op->sym->aead.aad.data + 18;
+ job->u.CCM.aad_len_in_bytes = session->aead.aad_len;
+ } else if (job->hash_alg == AES_CMAC) {
+ job->u.CMAC._key_expanded = session->auth.cmac.expkey;
+ job->u.CMAC._skey1 = session->auth.cmac.skey1;
+ job->u.CMAC._skey2 = session->auth.cmac.skey2;
+
+ } else {
+ job->u.HMAC._hashed_auth_key_xor_ipad = session->auth.pads.inner;
+ job->u.HMAC._hashed_auth_key_xor_opad = session->auth.pads.outer;
+ }
+
+#ifdef SPDK_CRYPTO_HACK
+ if (!op->sym->m_dst) {
+ m_dst = m_src;
+ }
+ m_offset = op->sym->cipher.data.offset;
+#else
+ /* Mutable crypto operation parameters */
+ if (op->sym->m_dst) {
+ m_src = m_dst = op->sym->m_dst;
+
+ /* append space for output data to mbuf */
+ char *odata = rte_pktmbuf_append(m_dst,
+ rte_pktmbuf_data_len(op->sym->m_src));
+ if (odata == NULL) {
+ AESNI_MB_LOG(ERR, "failed to allocate space in destination "
+ "mbuf for source data");
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return -1;
+ }
+
+ memcpy(odata, rte_pktmbuf_mtod(op->sym->m_src, void*),
+ rte_pktmbuf_data_len(op->sym->m_src));
+ } else {
+ m_dst = m_src;
+ if (job->hash_alg == AES_CCM)
+ m_offset = op->sym->aead.data.offset;
+ else
+ m_offset = op->sym->cipher.data.offset;
+ }
+#endif
+
+ /* Set digest output location */
+ if (job->hash_alg != NULL_HASH &&
+ session->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
+ job->auth_tag_output = qp->temp_digests[*digest_idx];
+ *digest_idx = (*digest_idx + 1) % MAX_JOBS;
+ } else {
+ if (job->hash_alg == AES_CCM)
+ job->auth_tag_output = op->sym->aead.digest.data;
+ else
+ job->auth_tag_output = op->sym->auth.digest.data;
+ }
+
+ /*
+	 * The multi-buffer library currently only supports returning a
+	 * truncated digest length, as specified in the relevant IPsec RFCs
+ */
+ if (job->hash_alg != AES_CCM && job->hash_alg != AES_CMAC)
+ job->auth_tag_output_len_in_bytes =
+ get_truncated_digest_byte_length(job->hash_alg);
+ else
+ job->auth_tag_output_len_in_bytes = session->auth.digest_len;
+
+	/* Set IV parameters */
+ job->iv_len_in_bytes = session->iv.length;
+
+ /* Data Parameter */
+ job->src = rte_pktmbuf_mtod(m_src, uint8_t *);
+
+#ifdef SPDK_CRYPTO_HACK
+ if (!op->sym->m_dst) {
+ job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *, m_offset);
+ } else {
+ job->dst = rte_pktmbuf_mtod(m_dst, uint8_t *);
+ }
+#else
+ job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *, m_offset);
+#endif
+
+ if (job->hash_alg == AES_CCM) {
+ job->cipher_start_src_offset_in_bytes =
+ op->sym->aead.data.offset;
+ job->msg_len_to_cipher_in_bytes = op->sym->aead.data.length;
+ job->hash_start_src_offset_in_bytes = op->sym->aead.data.offset;
+ job->msg_len_to_hash_in_bytes = op->sym->aead.data.length;
+
+ job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+ session->iv.offset + 1);
+ } else {
+ job->cipher_start_src_offset_in_bytes =
+ op->sym->cipher.data.offset;
+ job->msg_len_to_cipher_in_bytes = op->sym->cipher.data.length;
+
+ job->hash_start_src_offset_in_bytes = op->sym->auth.data.offset;
+ job->msg_len_to_hash_in_bytes = op->sym->auth.data.length;
+
+ job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+ session->iv.offset);
+ }
+
+ /* Set user data to be crypto operation data struct */
+ job->user_data = op;
+
+ return 0;
+}
+
+static inline void
+verify_digest(struct aesni_mb_qp *qp __rte_unused, JOB_AES_HMAC *job,
+ struct rte_crypto_op *op) {
+ /* Verify digest if required */
+ if (job->hash_alg == AES_CCM) {
+ if (memcmp(job->auth_tag_output, op->sym->aead.digest.data,
+ job->auth_tag_output_len_in_bytes) != 0)
+ op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ } else {
+ if (memcmp(job->auth_tag_output, op->sym->auth.digest.data,
+ job->auth_tag_output_len_in_bytes) != 0)
+ op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ }
+}
+
+/**
+ * Process a completed job and return the crypto operation which the job processed
+ *
+ * @param qp Queue Pair to process
+ * @param job JOB_AES_HMAC job to process
+ *
+ * @return
+ * - Returns processed crypto operation.
+ * - Returns NULL on invalid job
+ */
+static inline struct rte_crypto_op *
+post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
+{
+ struct rte_crypto_op *op = (struct rte_crypto_op *)job->user_data;
+ struct aesni_mb_session *sess = get_sym_session_private_data(
+ op->sym->session,
+ cryptodev_driver_id);
+
+ if (likely(op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)) {
+ switch (job->status) {
+ case STS_COMPLETED:
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+ if (job->hash_alg != NULL_HASH) {
+ if (sess->auth.operation ==
+ RTE_CRYPTO_AUTH_OP_VERIFY)
+ verify_digest(qp, job, op);
+ }
+ break;
+ default:
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ }
+ }
+
+ /* Free session if a session-less crypto op */
+ if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+ memset(sess, 0, sizeof(struct aesni_mb_session));
+ memset(op->sym->session, 0,
+ rte_cryptodev_sym_get_header_session_size());
+ rte_mempool_put(qp->sess_mp, sess);
+ rte_mempool_put(qp->sess_mp, op->sym->session);
+ op->sym->session = NULL;
+ }
+
+ return op;
+}
+
+/**
+ * Process a completed JOB_AES_HMAC job and keep processing jobs until
+ * get_completed_job returns NULL
+ *
+ * @param qp Queue Pair to process
+ * @param job JOB_AES_HMAC job
+ *
+ * @return
+ * - Number of processed jobs
+ */
+static unsigned
+handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job,
+ struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ struct rte_crypto_op *op = NULL;
+ unsigned processed_jobs = 0;
+
+ while (job != NULL) {
+ op = post_process_mb_job(qp, job);
+
+ if (op) {
+ ops[processed_jobs++] = op;
+ qp->stats.dequeued_count++;
+ } else {
+ qp->stats.dequeue_err_count++;
+ break;
+ }
+ if (processed_jobs == nb_ops)
+ break;
+
+ job = (*qp->op_fns->job.get_completed_job)(&qp->mb_mgr);
+ }
+
+ return processed_jobs;
+}
+
+static inline uint16_t
+flush_mb_mgr(struct aesni_mb_qp *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ int processed_ops = 0;
+
+ /* Flush the remaining jobs */
+ JOB_AES_HMAC *job = (*qp->op_fns->job.flush_job)(&qp->mb_mgr);
+
+ if (job)
+ processed_ops += handle_completed_jobs(qp, job,
+ &ops[processed_ops], nb_ops - processed_ops);
+
+ return processed_ops;
+}
+
+static inline JOB_AES_HMAC *
+set_job_null_op(JOB_AES_HMAC *job, struct rte_crypto_op *op)
+{
+ job->chain_order = HASH_CIPHER;
+ job->cipher_mode = NULL_CIPHER;
+ job->hash_alg = NULL_HASH;
+ job->cipher_direction = DECRYPT;
+
+ /* Set user data to be crypto operation data struct */
+ job->user_data = op;
+
+ return job;
+}
+
+static uint16_t
+aesni_mb_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct aesni_mb_qp *qp = queue_pair;
+
+ struct rte_crypto_op *op;
+ JOB_AES_HMAC *job;
+
+ int retval, processed_jobs = 0;
+
+ if (unlikely(nb_ops == 0))
+ return 0;
+
+ uint8_t digest_idx = qp->digest_idx;
+ do {
+ /* Get next operation to process from ingress queue */
+ retval = rte_ring_dequeue(qp->ingress_queue, (void **)&op);
+ if (retval < 0)
+ break;
+
+ /* Get next free mb job struct from mb manager */
+ job = (*qp->op_fns->job.get_next)(&qp->mb_mgr);
+ if (unlikely(job == NULL)) {
+ /* if no free mb job structs we need to flush mb_mgr */
+ processed_jobs += flush_mb_mgr(qp,
+ &ops[processed_jobs],
+ (nb_ops - processed_jobs) - 1);
+
+ job = (*qp->op_fns->job.get_next)(&qp->mb_mgr);
+ }
+
+ retval = set_mb_job_params(job, qp, op, &digest_idx);
+ if (unlikely(retval != 0)) {
+ qp->stats.dequeue_err_count++;
+ set_job_null_op(job, op);
+ }
+
+ /* Submit job to multi-buffer for processing */
+ job = (*qp->op_fns->job.submit)(&qp->mb_mgr);
+
+ /*
+ * If submit returns a processed job then handle it,
+ * before submitting subsequent jobs
+ */
+ if (job)
+ processed_jobs += handle_completed_jobs(qp, job,
+ &ops[processed_jobs],
+ nb_ops - processed_jobs);
+
+ } while (processed_jobs < nb_ops);
+
+ qp->digest_idx = digest_idx;
+
+ if (processed_jobs < 1)
+ processed_jobs += flush_mb_mgr(qp,
+ &ops[processed_jobs],
+ nb_ops - processed_jobs);
+
+ return processed_jobs;
+}
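+
+/*
+ * Illustrative sketch (not part of the upstream driver): a caller drives the
+ * two burst functions above through the public cryptodev API, polling the
+ * dequeue side until everything it enqueued has been flushed out of the
+ * multi-buffer manager. dev_id and qp_id are assumed to identify an already
+ * configured and started aesni_mb device and queue pair.
+ */
+static void __rte_unused
+example_process_ops(uint8_t dev_id, uint16_t qp_id,
+		struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+	uint16_t enqueued, dequeued = 0;
+
+	enqueued = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, nb_ops);
+
+	/* Jobs complete asynchronously; keep polling until all come back */
+	while (dequeued < enqueued)
+		dequeued += rte_cryptodev_dequeue_burst(dev_id, qp_id,
+				&ops[dequeued], enqueued - dequeued);
+}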
+
+static int cryptodev_aesni_mb_remove(struct rte_vdev_device *vdev);
+
+static int
+cryptodev_aesni_mb_create(const char *name,
+ struct rte_vdev_device *vdev,
+ struct rte_cryptodev_pmd_init_params *init_params)
+{
+ struct rte_cryptodev *dev;
+ struct aesni_mb_private *internals;
+ enum aesni_mb_vector_mode vector_mode;
+
+ /* Check CPU for support for AES instruction set */
+ if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES)) {
+ AESNI_MB_LOG(ERR, "AES instructions not supported by CPU");
+ return -EFAULT;
+ }
+
+ dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
+ if (dev == NULL) {
+ AESNI_MB_LOG(ERR, "failed to create cryptodev vdev");
+ return -ENODEV;
+ }
+
+ /* Check CPU for supported vector instruction set */
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
+ vector_mode = RTE_AESNI_MB_AVX512;
+ else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
+ vector_mode = RTE_AESNI_MB_AVX2;
+ else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
+ vector_mode = RTE_AESNI_MB_AVX;
+ else
+ vector_mode = RTE_AESNI_MB_SSE;
+
+ dev->driver_id = cryptodev_driver_id;
+ dev->dev_ops = rte_aesni_mb_pmd_ops;
+
+	/* register enqueue/dequeue burst functions for the data path */
+ dev->dequeue_burst = aesni_mb_pmd_dequeue_burst;
+ dev->enqueue_burst = aesni_mb_pmd_enqueue_burst;
+
+ dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+ RTE_CRYPTODEV_FF_CPU_AESNI;
+
+ switch (vector_mode) {
+ case RTE_AESNI_MB_SSE:
+ dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
+ break;
+ case RTE_AESNI_MB_AVX:
+ dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
+ break;
+ case RTE_AESNI_MB_AVX2:
+ dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
+ break;
+ case RTE_AESNI_MB_AVX512:
+ dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX512;
+ break;
+ default:
+ break;
+ }
+
+ /* Set vector instructions mode supported */
+ internals = dev->data->dev_private;
+
+ internals->vector_mode = vector_mode;
+ internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
+
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
+ AESNI_MB_LOG(INFO, "IPSec Multi-buffer library version used: %s\n",
+ imb_get_version_str());
+#else
+ AESNI_MB_LOG(INFO, "IPSec Multi-buffer library version used: 0.49.0\n");
+#endif
+
+ return 0;
+}
+
+static int
+cryptodev_aesni_mb_probe(struct rte_vdev_device *vdev)
+{
+ struct rte_cryptodev_pmd_init_params init_params = {
+ "",
+ sizeof(struct aesni_mb_private),
+ rte_socket_id(),
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
+ };
+ const char *name, *args;
+ int retval;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+
+ args = rte_vdev_device_args(vdev);
+
+ retval = rte_cryptodev_pmd_parse_input_args(&init_params, args);
+ if (retval) {
+ AESNI_MB_LOG(ERR, "Failed to parse initialisation arguments[%s]",
+ args);
+ return -EINVAL;
+ }
+
+ return cryptodev_aesni_mb_create(name, vdev, &init_params);
+}
+
+static int
+cryptodev_aesni_mb_remove(struct rte_vdev_device *vdev)
+{
+ struct rte_cryptodev *cryptodev;
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+
+ cryptodev = rte_cryptodev_pmd_get_named_dev(name);
+ if (cryptodev == NULL)
+ return -ENODEV;
+
+ return rte_cryptodev_pmd_destroy(cryptodev);
+}
+
+static struct rte_vdev_driver cryptodev_aesni_mb_pmd_drv = {
+ .probe = cryptodev_aesni_mb_probe,
+ .remove = cryptodev_aesni_mb_remove
+};
+
+static struct cryptodev_driver aesni_mb_crypto_drv;
+
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_AESNI_MB_PMD, cryptodev_aesni_mb_pmd_drv);
+RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_AESNI_MB_PMD, cryptodev_aesni_mb_pmd);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_MB_PMD,
+ "max_nb_queue_pairs=<int> "
+ "socket_id=<int>");
+RTE_PMD_REGISTER_CRYPTO_DRIVER(aesni_mb_crypto_drv,
+ cryptodev_aesni_mb_pmd_drv.driver,
+ cryptodev_driver_id);
+
+RTE_INIT(aesni_mb_init_log)
+{
+ aesni_mb_logtype_driver = rte_log_register("pmd.crypto.aesni_mb");
+}
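+
+/*
+ * Illustrative sketch (not part of the upstream driver): the vdev registered
+ * above can be instantiated programmatically with the same devargs that the
+ * "--vdev" EAL option accepts. The parameter values are placeholders.
+ */
+static int __rte_unused
+example_create_aesni_mb_vdev(void)
+{
+	/* Equivalent to: --vdev 'crypto_aesni_mb,max_nb_queue_pairs=2,socket_id=0' */
+	return rte_vdev_init("crypto_aesni_mb",
+			"max_nb_queue_pairs=2,socket_id=0");
+}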
diff --git a/src/spdk/dpdk/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c b/src/spdk/dpdk/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
new file mode 100644
index 00000000..e5e49547
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
@@ -0,0 +1,621 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2017 Intel Corporation
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "rte_aesni_mb_pmd_private.h"
+
+
+static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
+ { /* MD5 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_MD5_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 12,
+ .max = 12,
+ .increment = 0
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA1 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 12,
+ .max = 12,
+ .increment = 0
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA224 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA224_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 14,
+ .max = 14,
+ .increment = 0
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA256 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA384 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
+ .block_size = 128,
+ .key_size = {
+ .min = 1,
+ .max = 128,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 24,
+ .max = 24,
+ .increment = 0
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA512 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
+ .block_size = 128,
+ .key_size = {
+ .min = 1,
+ .max = 128,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 32,
+ .max = 32,
+ .increment = 0
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* AES XCBC HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 12,
+ .max = 12,
+ .increment = 0
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* AES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* AES CTR */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_CTR,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 12,
+ .max = 16,
+ .increment = 4
+ }
+ }, }
+ }, }
+ },
+ { /* AES DOCSIS BPI */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_DOCSISBPI,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* DES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_DES_CBC,
+ .block_size = 8,
+ .key_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* 3DES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_3DES_CBC,
+ .block_size = 8,
+ .key_size = {
+ .min = 8,
+ .max = 24,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* DES DOCSIS BPI */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_DES_DOCSISBPI,
+ .block_size = 8,
+ .key_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* AES CCM */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+ {.aead = {
+ .algo = RTE_CRYPTO_AEAD_AES_CCM,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 4,
+ .max = 16,
+ .increment = 2
+ },
+ .aad_size = {
+ .min = 0,
+ .max = 46,
+ .increment = 1
+ },
+ .iv_size = {
+ .min = 7,
+ .max = 13,
+ .increment = 1
+ },
+ }, }
+ }, }
+ },
+ { /* AES CMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_AES_CMAC,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 12,
+ .max = 16,
+ .increment = 4
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+
+/** Configure device */
+static int
+aesni_mb_pmd_config(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused struct rte_cryptodev_config *config)
+{
+ return 0;
+}
+
+/** Start device */
+static int
+aesni_mb_pmd_start(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+/** Stop device */
+static void
+aesni_mb_pmd_stop(__rte_unused struct rte_cryptodev *dev)
+{
+}
+
+/** Close device */
+static int
+aesni_mb_pmd_close(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+
+/** Get device statistics */
+static void
+aesni_mb_pmd_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];
+
+ stats->enqueued_count += qp->stats.enqueued_count;
+ stats->dequeued_count += qp->stats.dequeued_count;
+
+ stats->enqueue_err_count += qp->stats.enqueue_err_count;
+ stats->dequeue_err_count += qp->stats.dequeue_err_count;
+ }
+}
+
+/** Reset device statistics */
+static void
+aesni_mb_pmd_stats_reset(struct rte_cryptodev *dev)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];
+
+ memset(&qp->stats, 0, sizeof(qp->stats));
+ }
+}
+
+
+/** Get device info */
+static void
+aesni_mb_pmd_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *dev_info)
+{
+ struct aesni_mb_private *internals = dev->data->dev_private;
+
+ if (dev_info != NULL) {
+ dev_info->driver_id = dev->driver_id;
+ dev_info->feature_flags = dev->feature_flags;
+ dev_info->capabilities = aesni_mb_pmd_capabilities;
+ dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
+		/* No limit on the number of sessions */
+ dev_info->sym.max_nb_sessions = 0;
+ }
+}
+
+/** Release queue pair */
+static int
+aesni_mb_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+ struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];
+ struct rte_ring *r = NULL;
+
+ if (qp != NULL) {
+ r = rte_ring_lookup(qp->name);
+ if (r)
+ rte_ring_free(r);
+ rte_free(qp);
+ dev->data->queue_pairs[qp_id] = NULL;
+ }
+ return 0;
+}
+
+/** Set a unique name for the queue pair based on its dev_id and qp_id */
+static int
+aesni_mb_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
+ struct aesni_mb_qp *qp)
+{
+ unsigned n = snprintf(qp->name, sizeof(qp->name),
+ "aesni_mb_pmd_%u_qp_%u",
+ dev->data->dev_id, qp->id);
+
+ if (n >= sizeof(qp->name))
+ return -1;
+
+ return 0;
+}
+
+/** Create a ring to hold the operations submitted to this queue pair */
+static struct rte_ring *
+aesni_mb_pmd_qp_create_processed_ops_ring(struct aesni_mb_qp *qp,
+ const char *str, unsigned int ring_size, int socket_id)
+{
+ struct rte_ring *r;
+ char ring_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+
+ unsigned int n = snprintf(ring_name, sizeof(ring_name),
+ "%s_%s",
+ qp->name, str);
+
+ if (n >= sizeof(ring_name))
+ return NULL;
+
+ r = rte_ring_lookup(ring_name);
+ if (r) {
+ if (rte_ring_get_size(r) >= ring_size) {
+ AESNI_MB_LOG(INFO, "Reusing existing ring %s for processed ops",
+ ring_name);
+ return r;
+ }
+
+ AESNI_MB_LOG(ERR, "Unable to reuse existing ring %s for processed ops",
+ ring_name);
+ return NULL;
+ }
+
+ return rte_ring_create(ring_name, ring_size, socket_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+}
+
+/** Setup a queue pair */
+static int
+aesni_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id, struct rte_mempool *session_pool)
+{
+ struct aesni_mb_qp *qp = NULL;
+ struct aesni_mb_private *internals = dev->data->dev_private;
+
+ /* Free memory prior to re-allocation if needed. */
+ if (dev->data->queue_pairs[qp_id] != NULL)
+ aesni_mb_pmd_qp_release(dev, qp_id);
+
+ /* Allocate the queue pair data structure. */
+ qp = rte_zmalloc_socket("AES-NI PMD Queue Pair", sizeof(*qp),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (qp == NULL)
+ return -ENOMEM;
+
+ qp->id = qp_id;
+ dev->data->queue_pairs[qp_id] = qp;
+
+ if (aesni_mb_pmd_qp_set_unique_name(dev, qp))
+ goto qp_setup_cleanup;
+
+
+ qp->op_fns = &job_ops[internals->vector_mode];
+
+ qp->ingress_queue = aesni_mb_pmd_qp_create_processed_ops_ring(qp,
+ "ingress", qp_conf->nb_descriptors, socket_id);
+ if (qp->ingress_queue == NULL)
+ goto qp_setup_cleanup;
+
+ qp->sess_mp = session_pool;
+
+ memset(&qp->stats, 0, sizeof(qp->stats));
+
+ char mp_name[RTE_MEMPOOL_NAMESIZE];
+
+ snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
+ "digest_mp_%u_%u", dev->data->dev_id, qp_id);
+
+ /* Initialise multi-buffer manager */
+ (*qp->op_fns->job.init_mgr)(&qp->mb_mgr);
+ return 0;
+
+qp_setup_cleanup:
+ if (qp)
+ rte_free(qp);
+
+ return -1;
+}
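+
+/*
+ * Illustrative sketch (not part of the upstream driver): the queue pair setup
+ * above is normally reached through the public cryptodev API. The descriptor
+ * count is a placeholder, and the queue_pair_setup signature with a session
+ * mempool argument matches this DPDK snapshot only; other releases differ.
+ */
+static int __rte_unused
+example_configure_aesni_mb_dev(uint8_t dev_id, int socket_id,
+		struct rte_mempool *session_pool)
+{
+	struct rte_cryptodev_config conf = {
+		.socket_id = socket_id,
+		.nb_queue_pairs = 1,
+	};
+	struct rte_cryptodev_qp_conf qp_conf = { .nb_descriptors = 2048 };
+	int ret;
+
+	ret = rte_cryptodev_configure(dev_id, &conf);
+	if (ret < 0)
+		return ret;
+
+	ret = rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
+			socket_id, session_pool);
+	if (ret < 0)
+		return ret;
+
+	return rte_cryptodev_start(dev_id);
+}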
+
+/** Return the number of allocated queue pairs */
+static uint32_t
+aesni_mb_pmd_qp_count(struct rte_cryptodev *dev)
+{
+ return dev->data->nb_queue_pairs;
+}
+
+/** Returns the size of the aesni multi-buffer session structure */
+static unsigned
+aesni_mb_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+ return sizeof(struct aesni_mb_session);
+}
+
+/** Configure an aesni multi-buffer session from a crypto xform chain */
+static int
+aesni_mb_pmd_sym_session_configure(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mempool)
+{
+ void *sess_private_data;
+ struct aesni_mb_private *internals = dev->data->dev_private;
+ int ret;
+
+ if (unlikely(sess == NULL)) {
+ AESNI_MB_LOG(ERR, "invalid session struct");
+ return -EINVAL;
+ }
+
+ if (rte_mempool_get(mempool, &sess_private_data)) {
+ AESNI_MB_LOG(ERR,
+ "Couldn't get object from session mempool");
+ return -ENOMEM;
+ }
+
+ ret = aesni_mb_set_session_parameters(&job_ops[internals->vector_mode],
+ sess_private_data, xform);
+ if (ret != 0) {
+ AESNI_MB_LOG(ERR, "failed configure session parameters");
+
+ /* Return session to mempool */
+ rte_mempool_put(mempool, sess_private_data);
+ return ret;
+ }
+
+ set_sym_session_private_data(sess, dev->driver_id,
+ sess_private_data);
+
+ return 0;
+}
+
+/** Clear the memory of the session so it doesn't leave key material behind */
+static void
+aesni_mb_pmd_sym_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
+{
+ uint8_t index = dev->driver_id;
+ void *sess_priv = get_sym_session_private_data(sess, index);
+
+ /* Zero out the whole structure */
+ if (sess_priv) {
+ memset(sess_priv, 0, sizeof(struct aesni_mb_session));
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+ set_sym_session_private_data(sess, index, NULL);
+ rte_mempool_put(sess_mp, sess_priv);
+ }
+}
+
+struct rte_cryptodev_ops aesni_mb_pmd_ops = {
+ .dev_configure = aesni_mb_pmd_config,
+ .dev_start = aesni_mb_pmd_start,
+ .dev_stop = aesni_mb_pmd_stop,
+ .dev_close = aesni_mb_pmd_close,
+
+ .stats_get = aesni_mb_pmd_stats_get,
+ .stats_reset = aesni_mb_pmd_stats_reset,
+
+ .dev_infos_get = aesni_mb_pmd_info_get,
+
+ .queue_pair_setup = aesni_mb_pmd_qp_setup,
+ .queue_pair_release = aesni_mb_pmd_qp_release,
+ .queue_pair_count = aesni_mb_pmd_qp_count,
+
+ .sym_session_get_size = aesni_mb_pmd_sym_session_get_size,
+ .sym_session_configure = aesni_mb_pmd_sym_session_configure,
+ .sym_session_clear = aesni_mb_pmd_sym_session_clear
+};
+
+struct rte_cryptodev_ops *rte_aesni_mb_pmd_ops = &aesni_mb_pmd_ops;
diff --git a/src/spdk/dpdk/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h b/src/spdk/dpdk/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
new file mode 100644
index 00000000..1d7ea852
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
@@ -0,0 +1,254 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2016 Intel Corporation
+ */
+
+#ifndef _RTE_AESNI_MB_PMD_PRIVATE_H_
+#define _RTE_AESNI_MB_PMD_PRIVATE_H_
+
+#include "aesni_mb_ops.h"
+
+/*
+ * The IMB_VERSION_NUM macro was introduced in Multi-buffer version 0.50,
+ * so if the macro is not defined, the library version is 0.49.
+ */
+#if !defined(IMB_VERSION_NUM)
+#define IMB_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))
+#define IMB_VERSION_NUM IMB_VERSION(0, 49, 0)
+#endif
+
+#define CRYPTODEV_NAME_AESNI_MB_PMD crypto_aesni_mb
+/**< AES-NI Multi buffer PMD device name */
+
+/** AESNI_MB PMD LOGTYPE DRIVER */
+int aesni_mb_logtype_driver;
+
+#define AESNI_MB_LOG(level, fmt, ...) \
+ rte_log(RTE_LOG_ ## level, aesni_mb_logtype_driver, \
+ "%s() line %u: " fmt "\n", __func__, __LINE__, \
+ ## __VA_ARGS__)
+
+
+#define HMAC_IPAD_VALUE (0x36)
+#define HMAC_OPAD_VALUE (0x5C)
+
+/* Maximum length for digest (SHA-512 truncated needs 32 bytes) */
+#define DIGEST_LENGTH_MAX 32
+static const unsigned auth_blocksize[] = {
+ [MD5] = 64,
+ [SHA1] = 64,
+ [SHA_224] = 64,
+ [SHA_256] = 64,
+ [SHA_384] = 128,
+ [SHA_512] = 128,
+ [AES_XCBC] = 16,
+ [AES_CCM] = 16,
+};
+
+/**
+ * Get the blocksize in bytes for a specified authentication algorithm
+ *
+ * @Note: this function will not return a valid value for an invalid
+ * authentication algorithm
+ */
+static inline unsigned
+get_auth_algo_blocksize(JOB_HASH_ALG algo)
+{
+ return auth_blocksize[algo];
+}
+
+static const unsigned auth_truncated_digest_byte_lengths[] = {
+ [MD5] = 12,
+ [SHA1] = 12,
+ [SHA_224] = 14,
+ [SHA_256] = 16,
+ [SHA_384] = 24,
+ [SHA_512] = 32,
+ [AES_XCBC] = 12,
+ [AES_CMAC] = 16,
+ [AES_CCM] = 8,
+ [NULL_HASH] = 0
+};
+
+/**
+ * Get the IPsec specified truncated length in bytes of the HMAC digest for a
+ * specified authentication algorithm
+ *
+ * @Note: this function will not return a valid value for an invalid
+ * authentication algorithm
+ */
+static inline unsigned
+get_truncated_digest_byte_length(JOB_HASH_ALG algo)
+{
+ return auth_truncated_digest_byte_lengths[algo];
+}
+
+static const unsigned auth_digest_byte_lengths[] = {
+ [MD5] = 16,
+ [SHA1] = 20,
+ [SHA_224] = 28,
+ [SHA_256] = 32,
+ [SHA_384] = 48,
+ [SHA_512] = 64,
+ [AES_XCBC] = 16,
+ [AES_CMAC] = 16,
+ [NULL_HASH] = 0
+};
+
+/**
+ * Get the output digest size in bytes for a specified authentication algorithm
+ *
+ * @Note: this function will not return a valid value for an invalid
+ * authentication algorithm
+ */
+static inline unsigned
+get_digest_byte_length(JOB_HASH_ALG algo)
+{
+ return auth_digest_byte_lengths[algo];
+}
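+
+/*
+ * Illustrative sketch (not part of the upstream header): how the three lookup
+ * helpers above relate for SHA-256 HMAC, which uses a 64-byte block, produces
+ * a 32-byte digest and is truncated to 16 bytes for IPsec.
+ */
+static inline void __rte_unused
+example_sha256_lengths(unsigned *block, unsigned *full, unsigned *truncated)
+{
+	*block = get_auth_algo_blocksize(SHA_256);		/* 64 */
+	*full = get_digest_byte_length(SHA_256);		/* 32 */
+	*truncated = get_truncated_digest_byte_length(SHA_256);	/* 16 */
+}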
+
+enum aesni_mb_operation {
+ AESNI_MB_OP_HASH_CIPHER,
+ AESNI_MB_OP_CIPHER_HASH,
+ AESNI_MB_OP_HASH_ONLY,
+ AESNI_MB_OP_CIPHER_ONLY,
+ AESNI_MB_OP_AEAD_HASH_CIPHER,
+ AESNI_MB_OP_AEAD_CIPHER_HASH,
+ AESNI_MB_OP_NOT_SUPPORTED
+};
+
+/** private data structure for each virtual AESNI device */
+struct aesni_mb_private {
+ enum aesni_mb_vector_mode vector_mode;
+ /**< CPU vector instruction set mode */
+ unsigned max_nb_queue_pairs;
+ /**< Max number of queue pairs supported by device */
+};
+
+/** AESNI Multi buffer queue pair */
+struct aesni_mb_qp {
+ uint16_t id;
+ /**< Queue Pair Identifier */
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ /**< Unique Queue Pair Name */
+ const struct aesni_mb_op_fns *op_fns;
+ /**< Vector mode dependent pointer table of the multi-buffer APIs */
+ MB_MGR mb_mgr;
+ /**< Multi-buffer instance */
+ struct rte_ring *ingress_queue;
+ /**< Ring for placing operations ready for processing */
+ struct rte_mempool *sess_mp;
+ /**< Session Mempool */
+ struct rte_cryptodev_stats stats;
+ /**< Queue pair statistics */
+ uint8_t digest_idx;
+ /**< Index of the next slot to be used in temp_digests,
+ * to store the digest for a given operation
+ */
+ uint8_t temp_digests[MAX_JOBS][DIGEST_LENGTH_MAX];
+ /**< Buffers used to store the digest generated
+ * by the driver when verifying a digest provided
+ * by the user (using authentication verify operation)
+ */
+} __rte_cache_aligned;
+
+/** AES-NI multi-buffer private session structure */
+struct aesni_mb_session {
+ JOB_CHAIN_ORDER chain_order;
+ struct {
+ uint16_t length;
+ uint16_t offset;
+ } iv;
+ /**< IV parameters */
+
+ /** Cipher Parameters */
+ struct {
+ /** Cipher direction - encrypt / decrypt */
+ JOB_CIPHER_DIRECTION direction;
+ /** Cipher mode - CBC / Counter */
+ JOB_CIPHER_MODE mode;
+
+ uint64_t key_length_in_bytes;
+
+ union {
+ struct {
+ uint32_t encode[60] __rte_aligned(16);
+ /**< encode key */
+ uint32_t decode[60] __rte_aligned(16);
+ /**< decode key */
+ } expanded_aes_keys;
+ struct {
+ const void *ks_ptr[3];
+ uint64_t key[3][16];
+ } exp_3des_keys;
+ };
+		/**< Expanded AES keys - allocating space to
+		 * contain the maximum expanded key size, which
+		 * is 240 bytes for 256-bit AES, calculated as:
+		 * ((block size in bytes, i.e. 16) *
+		 * ((number of rounds) + 1))
+		 */
+ } cipher;
+
+ /** Authentication Parameters */
+ struct {
+ JOB_HASH_ALG algo; /**< Authentication Algorithm */
+ enum rte_crypto_auth_operation operation;
+ /**< auth operation generate or verify */
+ union {
+ struct {
+ uint8_t inner[128] __rte_aligned(16);
+ /**< inner pad */
+ uint8_t outer[128] __rte_aligned(16);
+ /**< outer pad */
+ } pads;
+ /**< HMAC Authentication pads -
+ * allocating space for the maximum pad
+ * size supported which is 128 bytes for
+ * SHA512
+ */
+
+ struct {
+ uint32_t k1_expanded[44] __rte_aligned(16);
+ /**< k1 (expanded key). */
+ uint8_t k2[16] __rte_aligned(16);
+ /**< k2. */
+ uint8_t k3[16] __rte_aligned(16);
+ /**< k3. */
+ } xcbc;
+
+ struct {
+ uint32_t expkey[60] __rte_aligned(16);
+					/**< expanded key. */
+			uint32_t skey1[4] __rte_aligned(16);
+					/**< sub-key 1. */
+			uint32_t skey2[4] __rte_aligned(16);
+					/**< sub-key 2. */
+		} cmac;
+		/**< Expanded CMAC authentication keys */
+ };
+ /** digest size */
+ uint16_t digest_len;
+
+ } auth;
+ struct {
+ /** AAD data length */
+ uint16_t aad_len;
+ } aead;
+} __rte_cache_aligned;
+
+
+/**
+ * Parse a crypto xform chain and set the private session parameters
+ */
+extern int
+aesni_mb_set_session_parameters(const struct aesni_mb_op_fns *mb_ops,
+ struct aesni_mb_session *sess,
+ const struct rte_crypto_sym_xform *xform);
+
+
+/** device specific operations function pointer structure */
+extern struct rte_cryptodev_ops *rte_aesni_mb_pmd_ops;
+
+
+
+#endif /* _RTE_AESNI_MB_PMD_PRIVATE_H_ */
diff --git a/src/spdk/dpdk/drivers/crypto/aesni_mb/rte_pmd_aesni_mb_version.map b/src/spdk/dpdk/drivers/crypto/aesni_mb/rte_pmd_aesni_mb_version.map
new file mode 100644
index 00000000..ad607bbe
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/aesni_mb/rte_pmd_aesni_mb_version.map
@@ -0,0 +1,3 @@
+DPDK_2.2 {
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/crypto/armv8/Makefile b/src/spdk/dpdk/drivers/crypto/armv8/Makefile
new file mode 100644
index 00000000..e862af72
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/armv8/Makefile
@@ -0,0 +1,40 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Cavium, Inc
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+ifneq ($(MAKECMDGOALS),clean)
+ifneq ($(MAKECMDGOALS),config)
+ifeq ($(ARMV8_CRYPTO_LIB_PATH),)
+$(error "Please define ARMV8_CRYPTO_LIB_PATH environment variable")
+endif
+endif
+endif
+
+# library name
+LIB = librte_pmd_armv8.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_pmd_armv8_version.map
+
+# external library dependencies
+CFLAGS += -I$(ARMV8_CRYPTO_LIB_PATH)
+CFLAGS += -I$(ARMV8_CRYPTO_LIB_PATH)/asm/include
+LDLIBS += -L$(ARMV8_CRYPTO_LIB_PATH) -larmv8_crypto
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_cryptodev
+LDLIBS += -lrte_bus_vdev
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO) += rte_armv8_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO) += rte_armv8_pmd_ops.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/crypto/armv8/rte_armv8_pmd.c b/src/spdk/dpdk/drivers/crypto/armv8/rte_armv8_pmd.c
new file mode 100644
index 00000000..9d15fee5
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/armv8/rte_armv8_pmd.c
@@ -0,0 +1,851 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#include <stdbool.h>
+
+#include <rte_common.h>
+#include <rte_hexdump.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_bus_vdev.h>
+#include <rte_malloc.h>
+#include <rte_cpuflags.h>
+
+#include "armv8_crypto_defs.h"
+
+#include "rte_armv8_pmd_private.h"
+
+static uint8_t cryptodev_driver_id;
+
+static int cryptodev_armv8_crypto_uninit(struct rte_vdev_device *vdev);
+
+/**
+ * Pointers to the supported combined mode crypto functions are stored
+ * in the static tables. Each combined (chained) cryptographic operation
+ * can be described by a set of numbers:
+ * - order: order of operations (cipher, auth) or (auth, cipher)
+ * - direction: encryption or decryption
+ * - calg: cipher algorithm such as AES_CBC, AES_CTR, etc.
+ * - aalg: authentication algorithm such as SHA1, SHA256, etc.
+ * - keyl: cipher key length, for example 128, 192, 256 bits
+ *
+ * In order to quickly acquire each function pointer based on those numbers,
+ * a hierarchy of arrays is maintained. The final level, a 3D array, is
+ * indexed by the combined mode function parameters only (cipher algorithm,
+ * authentication algorithm and key length).
+ *
+ * This gives 3 memory accesses to obtain a function pointer, instead of
+ * traversing the array manually and comparing function parameters on each
+ * iteration.
+ *
+ * +--+CRYPTO_FUNC
+ * +--+ENC|
+ * +--+CA|
+ * | +--+DEC
+ * ORDER|
+ * | +--+ENC
+ * +--+AC|
+ * +--+DEC
+ *
+ */
+
+/**
+ * 3D array type for ARM Combined Mode crypto functions pointers.
+ * CRYPTO_CIPHER_MAX: max cipher ID number
+ * CRYPTO_AUTH_MAX: max auth ID number
+ * CRYPTO_CIPHER_KEYLEN_MAX: max key length ID number
+ */
+typedef const crypto_func_t
+crypto_func_tbl_t[CRYPTO_CIPHER_MAX][CRYPTO_AUTH_MAX][CRYPTO_CIPHER_KEYLEN_MAX];
+
+/* Evaluate to key length definition */
+#define KEYL(keyl) (ARMV8_CRYPTO_CIPHER_KEYLEN_ ## keyl)
+
+/* Local aliases for supported ciphers */
+#define CIPH_AES_CBC RTE_CRYPTO_CIPHER_AES_CBC
+/* Local aliases for supported hashes */
+#define AUTH_SHA1_HMAC RTE_CRYPTO_AUTH_SHA1_HMAC
+#define AUTH_SHA256_HMAC RTE_CRYPTO_AUTH_SHA256_HMAC
+
+/**
+ * Arrays containing pointers to particular cryptographic,
+ * combined mode functions.
+ * crypto_op_ca_encrypt: cipher (encrypt), authenticate
+ * crypto_op_ca_decrypt: cipher (decrypt), authenticate
+ * crypto_op_ac_encrypt: authenticate, cipher (encrypt)
+ * crypto_op_ac_decrypt: authenticate, cipher (decrypt)
+ */
+static const crypto_func_tbl_t
+crypto_op_ca_encrypt = {
+ /* [cipher alg][auth alg][key length] = crypto_function, */
+ [CIPH_AES_CBC][AUTH_SHA1_HMAC][KEYL(128)] = aes128cbc_sha1_hmac,
+ [CIPH_AES_CBC][AUTH_SHA256_HMAC][KEYL(128)] = aes128cbc_sha256_hmac,
+};
+
+static const crypto_func_tbl_t
+crypto_op_ca_decrypt = {
+ NULL
+};
+
+static const crypto_func_tbl_t
+crypto_op_ac_encrypt = {
+ NULL
+};
+
+static const crypto_func_tbl_t
+crypto_op_ac_decrypt = {
+ /* [cipher alg][auth alg][key length] = crypto_function, */
+ [CIPH_AES_CBC][AUTH_SHA1_HMAC][KEYL(128)] = sha1_hmac_aes128cbc_dec,
+ [CIPH_AES_CBC][AUTH_SHA256_HMAC][KEYL(128)] = sha256_hmac_aes128cbc_dec,
+};
+
+/**
+ * Arrays containing pointers to particular cryptographic function sets,
+ * covering given cipher operation directions (encrypt, decrypt)
+ * for each order of cipher and authentication pairs.
+ */
+static const crypto_func_tbl_t *
+crypto_cipher_auth[] = {
+ &crypto_op_ca_encrypt,
+ &crypto_op_ca_decrypt,
+ NULL
+};
+
+static const crypto_func_tbl_t *
+crypto_auth_cipher[] = {
+ &crypto_op_ac_encrypt,
+ &crypto_op_ac_decrypt,
+ NULL
+};
+
+/**
+ * Top level array containing pointers to particular cryptographic
+ * function sets, covering given order of chained operations.
+ * crypto_cipher_auth: cipher first, authenticate after
+ * crypto_auth_cipher: authenticate first, cipher after
+ */
+static const crypto_func_tbl_t **
+crypto_chain_order[] = {
+ crypto_cipher_auth,
+ crypto_auth_cipher,
+ NULL
+};
+
+/**
+ * Extract particular combined mode crypto function from the 3D array.
+ */
+#define CRYPTO_GET_ALGO(order, cop, calg, aalg, keyl) \
+({ \
+ crypto_func_tbl_t *func_tbl = \
+ (crypto_chain_order[(order)])[(cop)]; \
+ \
+ ((*func_tbl)[(calg)][(aalg)][KEYL(keyl)]); \
+})
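+
+/*
+ * Illustrative sketch (not part of the upstream driver): resolving the
+ * combined AES-128-CBC encrypt + SHA1-HMAC routine through the lookup
+ * hierarchy above, mirroring how the session setup code calls the macro
+ * later in this file. A NULL result means the combination is unsupported.
+ */
+static inline crypto_func_t __rte_unused
+example_lookup_aes128cbc_sha1_enc(void)
+{
+	return CRYPTO_GET_ALGO(ARMV8_CRYPTO_CHAIN_CIPHER_AUTH,
+			RTE_CRYPTO_CIPHER_OP_ENCRYPT,
+			CIPH_AES_CBC, AUTH_SHA1_HMAC, 128);
+}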
+
+/*----------------------------------------------------------------------------*/
+
+/**
+ * 2D array type for ARM key schedule functions pointers.
+ * CRYPTO_CIPHER_MAX: max cipher ID number
+ * CRYPTO_CIPHER_KEYLEN_MAX: max key length ID number
+ */
+typedef const crypto_key_sched_t
+crypto_key_sched_tbl_t[CRYPTO_CIPHER_MAX][CRYPTO_CIPHER_KEYLEN_MAX];
+
+static const crypto_key_sched_tbl_t
+crypto_key_sched_encrypt = {
+ /* [cipher alg][key length] = key_expand_func, */
+ [CIPH_AES_CBC][KEYL(128)] = aes128_key_sched_enc,
+};
+
+static const crypto_key_sched_tbl_t
+crypto_key_sched_decrypt = {
+ /* [cipher alg][key length] = key_expand_func, */
+ [CIPH_AES_CBC][KEYL(128)] = aes128_key_sched_dec,
+};
+
+/**
+ * Top level array containing pointers to particular key generation
+ * function sets, covering given operation direction.
+ * crypto_key_sched_encrypt: keys for encryption
+ * crypto_key_sched_decrypt: keys for decryption
+ */
+static const crypto_key_sched_tbl_t *
+crypto_key_sched_dir[] = {
+ &crypto_key_sched_encrypt,
+ &crypto_key_sched_decrypt,
+ NULL
+};
+
+/**
+ * Extract particular key scheduling function from the 2D array.
+ */
+#define CRYPTO_GET_KEY_SCHED(cop, calg, keyl) \
+({ \
+ crypto_key_sched_tbl_t *ks_tbl = crypto_key_sched_dir[(cop)]; \
+ \
+ ((*ks_tbl)[(calg)][KEYL(keyl)]); \
+})
+
+/*----------------------------------------------------------------------------*/
+
+/*
+ *------------------------------------------------------------------------------
+ * Session Prepare
+ *------------------------------------------------------------------------------
+ */
+
+/** Get xform chain order */
+static enum armv8_crypto_chain_order
+armv8_crypto_get_chain_order(const struct rte_crypto_sym_xform *xform)
+{
+
+ /*
+ * This driver currently covers only chained operations.
+	 * Ignore cipher-only or authentication-only operations,
+	 * as well as chains longer than 2 xform structures.
+ */
+ if (xform->next == NULL || xform->next->next != NULL)
+ return ARMV8_CRYPTO_CHAIN_NOT_SUPPORTED;
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+ return ARMV8_CRYPTO_CHAIN_AUTH_CIPHER;
+ }
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ return ARMV8_CRYPTO_CHAIN_CIPHER_AUTH;
+ }
+
+ return ARMV8_CRYPTO_CHAIN_NOT_SUPPORTED;
+}
+
+static inline void
+auth_hmac_pad_prepare(struct armv8_crypto_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ size_t i;
+
+ /* Generate i_key_pad and o_key_pad */
+ memset(sess->auth.hmac.i_key_pad, 0, sizeof(sess->auth.hmac.i_key_pad));
+ rte_memcpy(sess->auth.hmac.i_key_pad, sess->auth.hmac.key,
+ xform->auth.key.length);
+ memset(sess->auth.hmac.o_key_pad, 0, sizeof(sess->auth.hmac.o_key_pad));
+ rte_memcpy(sess->auth.hmac.o_key_pad, sess->auth.hmac.key,
+ xform->auth.key.length);
+ /*
+ * XOR key with IPAD/OPAD values to obtain i_key_pad
+ * and o_key_pad.
+	 * A byte-by-byte operation may seem less efficient here,
+	 * but in fact it is the opposite: the resulting assembly
+	 * code is likely to operate on NEON registers (load the
+	 * auth key into Qx, load IPAD/OPAD into multiple elements
+	 * of Qy, and EOR 128 bits at once).
+ */
+ for (i = 0; i < SHA_BLOCK_MAX; i++) {
+ sess->auth.hmac.i_key_pad[i] ^= HMAC_IPAD_VALUE;
+ sess->auth.hmac.o_key_pad[i] ^= HMAC_OPAD_VALUE;
+ }
+}
+
+static inline int
+auth_set_prerequisites(struct armv8_crypto_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ uint8_t partial[64] = { 0 };
+ int error;
+
+ switch (xform->auth.algo) {
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ /*
+ * Generate authentication key, i_key_pad and o_key_pad.
+ */
+ /* Zero memory under key */
+ memset(sess->auth.hmac.key, 0, SHA1_BLOCK_SIZE);
+
+ /*
+ * Now copy the given authentication key to the session
+ * key.
+ */
+ rte_memcpy(sess->auth.hmac.key, xform->auth.key.data,
+ xform->auth.key.length);
+
+ /* Prepare HMAC padding: key|pattern */
+ auth_hmac_pad_prepare(sess, xform);
+ /*
+ * Calculate partial hash values for i_key_pad and o_key_pad.
+ * Will be used as initialization state for final HMAC.
+ */
+ error = sha1_block_partial(NULL, sess->auth.hmac.i_key_pad,
+ partial, SHA1_BLOCK_SIZE);
+ if (error != 0)
+ return -1;
+ memcpy(sess->auth.hmac.i_key_pad, partial, SHA1_BLOCK_SIZE);
+
+ error = sha1_block_partial(NULL, sess->auth.hmac.o_key_pad,
+ partial, SHA1_BLOCK_SIZE);
+ if (error != 0)
+ return -1;
+ memcpy(sess->auth.hmac.o_key_pad, partial, SHA1_BLOCK_SIZE);
+
+ break;
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ /*
+ * Generate authentication key, i_key_pad and o_key_pad.
+ */
+ /* Zero memory under key */
+ memset(sess->auth.hmac.key, 0, SHA256_BLOCK_SIZE);
+
+ /*
+ * Now copy the given authentication key to the session
+ * key.
+ */
+ rte_memcpy(sess->auth.hmac.key, xform->auth.key.data,
+ xform->auth.key.length);
+
+ /* Prepare HMAC padding: key|pattern */
+ auth_hmac_pad_prepare(sess, xform);
+ /*
+ * Calculate partial hash values for i_key_pad and o_key_pad.
+ * Will be used as initialization state for final HMAC.
+ */
+ error = sha256_block_partial(NULL, sess->auth.hmac.i_key_pad,
+ partial, SHA256_BLOCK_SIZE);
+ if (error != 0)
+ return -1;
+ memcpy(sess->auth.hmac.i_key_pad, partial, SHA256_BLOCK_SIZE);
+
+ error = sha256_block_partial(NULL, sess->auth.hmac.o_key_pad,
+ partial, SHA256_BLOCK_SIZE);
+ if (error != 0)
+ return -1;
+ memcpy(sess->auth.hmac.o_key_pad, partial, SHA256_BLOCK_SIZE);
+
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static inline int
+cipher_set_prerequisites(struct armv8_crypto_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ crypto_key_sched_t cipher_key_sched;
+
+ cipher_key_sched = sess->cipher.key_sched;
+ if (likely(cipher_key_sched != NULL)) {
+ /* Set up cipher session key */
+ cipher_key_sched(sess->cipher.key.data, xform->cipher.key.data);
+ }
+
+ return 0;
+}
+
+static int
+armv8_crypto_set_session_chained_parameters(struct armv8_crypto_session *sess,
+ const struct rte_crypto_sym_xform *cipher_xform,
+ const struct rte_crypto_sym_xform *auth_xform)
+{
+ enum armv8_crypto_chain_order order;
+ enum armv8_crypto_cipher_operation cop;
+ enum rte_crypto_cipher_algorithm calg;
+ enum rte_crypto_auth_algorithm aalg;
+
+ /* Validate and prepare scratch order of combined operations */
+ switch (sess->chain_order) {
+ case ARMV8_CRYPTO_CHAIN_CIPHER_AUTH:
+ case ARMV8_CRYPTO_CHAIN_AUTH_CIPHER:
+ order = sess->chain_order;
+ break;
+ default:
+ return -ENOTSUP;
+ }
+ /* Select cipher direction */
+ sess->cipher.direction = cipher_xform->cipher.op;
+ /* Select cipher key */
+ sess->cipher.key.length = cipher_xform->cipher.key.length;
+ /* Set cipher direction */
+ cop = sess->cipher.direction;
+ /* Set cipher algorithm */
+ calg = cipher_xform->cipher.algo;
+
+ /* Select cipher algo */
+ switch (calg) {
+ /* Cover supported cipher algorithms */
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ sess->cipher.algo = calg;
+ /* IV len is always 16 bytes (block size) for AES CBC */
+ sess->cipher.iv.length = 16;
+ break;
+ default:
+ return -ENOTSUP;
+ }
+ /* Select auth generate/verify */
+ sess->auth.operation = auth_xform->auth.op;
+
+ /* Select auth algo */
+ switch (auth_xform->auth.algo) {
+ /* Cover supported hash algorithms */
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ case RTE_CRYPTO_AUTH_SHA256_HMAC: /* Fall through */
+ aalg = auth_xform->auth.algo;
+ sess->auth.mode = ARMV8_CRYPTO_AUTH_AS_HMAC;
+ break;
+ default:
+ return -ENOTSUP;
+ }
+
+ /* Set the digest length */
+ sess->auth.digest_length = auth_xform->auth.digest_length;
+
+ /* Verify supported key lengths and extract proper algorithm */
+ switch (cipher_xform->cipher.key.length << 3) {
+ case 128:
+ sess->crypto_func =
+ CRYPTO_GET_ALGO(order, cop, calg, aalg, 128);
+ sess->cipher.key_sched =
+ CRYPTO_GET_KEY_SCHED(cop, calg, 128);
+ break;
+ case 192:
+ case 256:
+ /* These key lengths are not supported yet */
+ default: /* Fall through */
+ sess->crypto_func = NULL;
+ sess->cipher.key_sched = NULL;
+ return -ENOTSUP;
+ }
+
+ if (unlikely(sess->crypto_func == NULL)) {
+ /*
+		 * If we got here, there must be a bug in the algorithm
+		 * selection above. Nevertheless, keep this check to catch
+		 * the bug immediately and avoid a NULL pointer dereference
+		 * during op processing.
+ */
+ ARMV8_CRYPTO_LOG_ERR(
+ "No appropriate crypto function for given parameters");
+ return -EINVAL;
+ }
+
+ /* Set up cipher session prerequisites */
+ if (cipher_set_prerequisites(sess, cipher_xform) != 0)
+ return -EINVAL;
+
+ /* Set up authentication session prerequisites */
+ if (auth_set_prerequisites(sess, auth_xform) != 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+/** Parse crypto xform chain and set private session parameters */
+int
+armv8_crypto_set_session_parameters(struct armv8_crypto_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ const struct rte_crypto_sym_xform *cipher_xform = NULL;
+ const struct rte_crypto_sym_xform *auth_xform = NULL;
+ bool is_chained_op;
+ int ret;
+
+ /* Filter out spurious/broken requests */
+ if (xform == NULL)
+ return -EINVAL;
+
+ sess->chain_order = armv8_crypto_get_chain_order(xform);
+ switch (sess->chain_order) {
+ case ARMV8_CRYPTO_CHAIN_CIPHER_AUTH:
+ cipher_xform = xform;
+ auth_xform = xform->next;
+ is_chained_op = true;
+ break;
+ case ARMV8_CRYPTO_CHAIN_AUTH_CIPHER:
+ auth_xform = xform;
+ cipher_xform = xform->next;
+ is_chained_op = true;
+ break;
+ default:
+ is_chained_op = false;
+ return -ENOTSUP;
+ }
+
+ /* Set IV offset */
+ sess->cipher.iv.offset = cipher_xform->cipher.iv.offset;
+
+ if (is_chained_op) {
+ ret = armv8_crypto_set_session_chained_parameters(sess,
+ cipher_xform, auth_xform);
+ if (unlikely(ret != 0)) {
+ ARMV8_CRYPTO_LOG_ERR(
+ "Invalid/unsupported chained (cipher/auth) parameters");
+ return ret;
+ }
+ } else {
+ ARMV8_CRYPTO_LOG_ERR("Invalid/unsupported operation");
+ return -ENOTSUP;
+ }
+
+ return 0;
+}
+
+/** Provide session for operation */
+static inline struct armv8_crypto_session *
+get_session(struct armv8_crypto_qp *qp, struct rte_crypto_op *op)
+{
+ struct armv8_crypto_session *sess = NULL;
+
+ if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+ /* get existing session */
+ if (likely(op->sym->session != NULL)) {
+ sess = (struct armv8_crypto_session *)
+ get_sym_session_private_data(
+ op->sym->session,
+ cryptodev_driver_id);
+ }
+ } else {
+ /* provide internal session */
+ void *_sess = NULL;
+ void *_sess_private_data = NULL;
+
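+		/*
+		 * Session-less op: take two objects from the session mempool,
+		 * one for the generic session header and one for this
+		 * driver's private session data; both are returned to the
+		 * pool in process_op() once the op completes.
+		 */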
+ if (rte_mempool_get(qp->sess_mp, (void **)&_sess))
+ return NULL;
+
+		if (rte_mempool_get(qp->sess_mp,
+				(void **)&_sess_private_data)) {
+			rte_mempool_put(qp->sess_mp, _sess);
+			return NULL;
+		}
+
+ sess = (struct armv8_crypto_session *)_sess_private_data;
+
+ if (unlikely(armv8_crypto_set_session_parameters(sess,
+ op->sym->xform) != 0)) {
+ rte_mempool_put(qp->sess_mp, _sess);
+ rte_mempool_put(qp->sess_mp, _sess_private_data);
+ sess = NULL;
+ }
+ op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
+ set_sym_session_private_data(op->sym->session,
+ cryptodev_driver_id, _sess_private_data);
+ }
+
+ if (unlikely(sess == NULL))
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+
+ return sess;
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Process Operations
+ *------------------------------------------------------------------------------
+ */
+
+/*----------------------------------------------------------------------------*/
+
+/** Process chained (cipher and auth) operation */
+static inline void
+process_armv8_chained_op(struct armv8_crypto_qp *qp, struct rte_crypto_op *op,
+ struct armv8_crypto_session *sess,
+ struct rte_mbuf *mbuf_src, struct rte_mbuf *mbuf_dst)
+{
+ crypto_func_t crypto_func;
+ crypto_arg_t arg;
+ struct rte_mbuf *m_asrc, *m_adst;
+ uint8_t *csrc, *cdst;
+ uint8_t *adst, *asrc;
+ uint64_t clen, alen;
+ int error;
+
+ clen = op->sym->cipher.data.length;
+ alen = op->sym->auth.data.length;
+
+ csrc = rte_pktmbuf_mtod_offset(mbuf_src, uint8_t *,
+ op->sym->cipher.data.offset);
+ cdst = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
+ op->sym->cipher.data.offset);
+
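+	/*
+	 * Select the mbuf holding the data to authenticate: for
+	 * cipher-then-auth the digest is computed over the cipher output in
+	 * the destination mbuf, while for auth-then-cipher it is computed
+	 * over the original data in the source mbuf.
+	 */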
+ switch (sess->chain_order) {
+ case ARMV8_CRYPTO_CHAIN_CIPHER_AUTH:
+ m_asrc = m_adst = mbuf_dst;
+ break;
+ case ARMV8_CRYPTO_CHAIN_AUTH_CIPHER:
+ m_asrc = mbuf_src;
+ m_adst = mbuf_dst;
+ break;
+ default:
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ return;
+ }
+ asrc = rte_pktmbuf_mtod_offset(m_asrc, uint8_t *,
+ op->sym->auth.data.offset);
+
+ switch (sess->auth.mode) {
+ case ARMV8_CRYPTO_AUTH_AS_AUTH:
+ /* Nothing to do here, just verify correct option */
+ break;
+ case ARMV8_CRYPTO_AUTH_AS_HMAC:
+ arg.digest.hmac.key = sess->auth.hmac.key;
+ arg.digest.hmac.i_key_pad = sess->auth.hmac.i_key_pad;
+ arg.digest.hmac.o_key_pad = sess->auth.hmac.o_key_pad;
+ break;
+ default:
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ return;
+ }
+
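+	/*
+	 * Select the digest destination: when generating, write to the
+	 * user-supplied digest buffer (or just past the authenticated region
+	 * when none is given); when verifying, write to the queue pair's
+	 * scratch buffer and compare it with the user's digest below.
+	 */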
+ if (sess->auth.operation == RTE_CRYPTO_AUTH_OP_GENERATE) {
+ adst = op->sym->auth.digest.data;
+ if (adst == NULL) {
+ adst = rte_pktmbuf_mtod_offset(m_adst,
+ uint8_t *,
+ op->sym->auth.data.offset +
+ op->sym->auth.data.length);
+ }
+ } else {
+ adst = qp->temp_digest;
+ }
+
+ arg.cipher.iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+ sess->cipher.iv.offset);
+ arg.cipher.key = sess->cipher.key.data;
+ /* Acquire combined mode function */
+ crypto_func = sess->crypto_func;
+ ARMV8_CRYPTO_ASSERT(crypto_func != NULL);
+ error = crypto_func(csrc, cdst, clen, asrc, adst, alen, &arg);
+ if (error != 0) {
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ return;
+ }
+
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ if (sess->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
+ if (memcmp(adst, op->sym->auth.digest.data,
+ sess->auth.digest_length) != 0) {
+ op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ }
+ }
+}
+
+/** Process crypto operation for mbuf */
+static inline int
+process_op(struct armv8_crypto_qp *qp, struct rte_crypto_op *op,
+ struct armv8_crypto_session *sess)
+{
+ struct rte_mbuf *msrc, *mdst;
+
+ msrc = op->sym->m_src;
+ mdst = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src;
+
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+
+ switch (sess->chain_order) {
+ case ARMV8_CRYPTO_CHAIN_CIPHER_AUTH:
+ case ARMV8_CRYPTO_CHAIN_AUTH_CIPHER: /* Fall through */
+ process_armv8_chained_op(qp, op, sess, msrc, mdst);
+ break;
+ default:
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ break;
+ }
+
+ /* Free session if a session-less crypto op */
+ if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+ memset(sess, 0, sizeof(struct armv8_crypto_session));
+ memset(op->sym->session, 0,
+ rte_cryptodev_sym_get_header_session_size());
+ rte_mempool_put(qp->sess_mp, sess);
+ rte_mempool_put(qp->sess_mp, op->sym->session);
+ op->sym->session = NULL;
+ }
+
+ if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+ if (unlikely(op->status == RTE_CRYPTO_OP_STATUS_ERROR))
+ return -1;
+
+ return 0;
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * PMD Framework
+ *------------------------------------------------------------------------------
+ */
+
+/** Enqueue burst */
+static uint16_t
+armv8_crypto_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct armv8_crypto_session *sess;
+ struct armv8_crypto_qp *qp = queue_pair;
+ int i, retval;
+
+ for (i = 0; i < nb_ops; i++) {
+ sess = get_session(qp, ops[i]);
+ if (unlikely(sess == NULL))
+ goto enqueue_err;
+
+ retval = process_op(qp, ops[i], sess);
+ if (unlikely(retval < 0))
+ goto enqueue_err;
+ }
+
+ retval = rte_ring_enqueue_burst(qp->processed_ops, (void *)ops, i,
+ NULL);
+ qp->stats.enqueued_count += retval;
+
+ return retval;
+
+enqueue_err:
+ retval = rte_ring_enqueue_burst(qp->processed_ops, (void *)ops, i,
+ NULL);
+ if (ops[i] != NULL)
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+
+ qp->stats.enqueue_err_count++;
+ return retval;
+}
+
+/** Dequeue burst */
+static uint16_t
+armv8_crypto_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct armv8_crypto_qp *qp = queue_pair;
+
+ unsigned int nb_dequeued = 0;
+
+ nb_dequeued = rte_ring_dequeue_burst(qp->processed_ops,
+ (void **)ops, nb_ops, NULL);
+ qp->stats.dequeued_count += nb_dequeued;
+
+ return nb_dequeued;
+}
+
+/** Create ARMv8 crypto device */
+static int
+cryptodev_armv8_crypto_create(const char *name,
+ struct rte_vdev_device *vdev,
+ struct rte_cryptodev_pmd_init_params *init_params)
+{
+ struct rte_cryptodev *dev;
+ struct armv8_crypto_private *internals;
+
+	/* Check that the CPU supports the AES instruction set */
+ if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES)) {
+ ARMV8_CRYPTO_LOG_ERR(
+ "AES instructions not supported by CPU");
+ return -EFAULT;
+ }
+
+	/* Check that the CPU supports the SHA1/SHA2 instruction sets */
+ if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_SHA1) ||
+ !rte_cpu_get_flag_enabled(RTE_CPUFLAG_SHA2)) {
+ ARMV8_CRYPTO_LOG_ERR(
+ "SHA1/SHA2 instructions not supported by CPU");
+ return -EFAULT;
+ }
+
+	/* Check that the CPU supports the Advanced SIMD instruction set */
+ if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON)) {
+ ARMV8_CRYPTO_LOG_ERR(
+ "Advanced SIMD instructions not supported by CPU");
+ return -EFAULT;
+ }
+
+ dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
+ if (dev == NULL) {
+ ARMV8_CRYPTO_LOG_ERR("failed to create cryptodev vdev");
+ goto init_error;
+ }
+
+ dev->driver_id = cryptodev_driver_id;
+ dev->dev_ops = rte_armv8_crypto_pmd_ops;
+
+	/* Register enqueue/dequeue burst functions for the data path */
+ dev->dequeue_burst = armv8_crypto_pmd_dequeue_burst;
+ dev->enqueue_burst = armv8_crypto_pmd_enqueue_burst;
+
+ dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+ RTE_CRYPTODEV_FF_CPU_NEON |
+ RTE_CRYPTODEV_FF_CPU_ARM_CE;
+
+	/* Store the configuration in the device's private data */
+ internals = dev->data->dev_private;
+
+ internals->max_nb_qpairs = init_params->max_nb_queue_pairs;
+
+ return 0;
+
+init_error:
+ ARMV8_CRYPTO_LOG_ERR(
+ "driver %s: cryptodev_armv8_crypto_create failed",
+ init_params->name);
+
+ cryptodev_armv8_crypto_uninit(vdev);
+ return -EFAULT;
+}
+
+/** Initialise ARMv8 crypto device */
+static int
+cryptodev_armv8_crypto_init(struct rte_vdev_device *vdev)
+{
+ struct rte_cryptodev_pmd_init_params init_params = {
+ "",
+ sizeof(struct armv8_crypto_private),
+ rte_socket_id(),
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
+ };
+ const char *name;
+ const char *input_args;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+ input_args = rte_vdev_device_args(vdev);
+ rte_cryptodev_pmd_parse_input_args(&init_params, input_args);
+
+ return cryptodev_armv8_crypto_create(name, vdev, &init_params);
+}
+
+/** Uninitialise ARMv8 crypto device */
+static int
+cryptodev_armv8_crypto_uninit(struct rte_vdev_device *vdev)
+{
+ struct rte_cryptodev *cryptodev;
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+
+ RTE_LOG(INFO, PMD,
+ "Closing ARMv8 crypto device %s on numa socket %u\n",
+ name, rte_socket_id());
+
+ cryptodev = rte_cryptodev_pmd_get_named_dev(name);
+ if (cryptodev == NULL)
+ return -ENODEV;
+
+ return rte_cryptodev_pmd_destroy(cryptodev);
+}
+
+static struct rte_vdev_driver armv8_crypto_pmd_drv = {
+ .probe = cryptodev_armv8_crypto_init,
+ .remove = cryptodev_armv8_crypto_uninit
+};
+
+static struct cryptodev_driver armv8_crypto_drv;
+
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_ARMV8_PMD, armv8_crypto_pmd_drv);
+RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_ARMV8_PMD, cryptodev_armv8_pmd);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_ARMV8_PMD,
+ "max_nb_queue_pairs=<int> "
+ "socket_id=<int>");
+RTE_PMD_REGISTER_CRYPTO_DRIVER(armv8_crypto_drv, armv8_crypto_pmd_drv.driver,
+ cryptodev_driver_id);
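
Since armv8_crypto_pmd_enqueue_burst() above completes each operation synchronously and only parks it on the queue pair's processed-ops ring, while the dequeue path merely drains that ring, a typical application poll loop against this PMD is an enqueue immediately followed by a dequeue. A minimal sketch (not part of this patch), assuming dev_id, qp_id and a populated ops[] array already exist and omitting error handling:

	#include <rte_cryptodev.h>
	#include <rte_crypto.h>

	static void
	poll_once(uint8_t dev_id, uint16_t qp_id,
		  struct rte_crypto_op **ops, uint16_t nb_ops)
	{
		struct rte_crypto_op *done[32];
		uint16_t nb_enq, nb_deq, i;

		/* Ops are fully processed inside the enqueue call */
		nb_enq = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, nb_ops);

		/* Dequeue simply drains the already-completed ops */
		nb_deq = rte_cryptodev_dequeue_burst(dev_id, qp_id, done, 32);

		for (i = 0; i < nb_deq; i++) {
			if (done[i]->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
				/* handle per-op failure */
			}
		}
		(void)nb_enq;
	}
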
diff --git a/src/spdk/dpdk/drivers/crypto/armv8/rte_armv8_pmd_ops.c b/src/spdk/dpdk/drivers/crypto/armv8/rte_armv8_pmd_ops.c
new file mode 100644
index 00000000..ae03117e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/armv8/rte_armv8_pmd_ops.c
@@ -0,0 +1,348 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "armv8_crypto_defs.h"
+
+#include "rte_armv8_pmd_private.h"
+
+static const struct rte_cryptodev_capabilities
+ armv8_crypto_pmd_capabilities[] = {
+ { /* SHA1 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 1,
+ .max = 20,
+ .increment = 1
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA256 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 1,
+ .max = 32,
+ .increment = 1
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* AES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+
+/** Configure device */
+static int
+armv8_crypto_pmd_config(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused struct rte_cryptodev_config *config)
+{
+ return 0;
+}
+
+/** Start device */
+static int
+armv8_crypto_pmd_start(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+/** Stop device */
+static void
+armv8_crypto_pmd_stop(__rte_unused struct rte_cryptodev *dev)
+{
+}
+
+/** Close device */
+static int
+armv8_crypto_pmd_close(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+
+/** Get device statistics */
+static void
+armv8_crypto_pmd_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct armv8_crypto_qp *qp = dev->data->queue_pairs[qp_id];
+
+ stats->enqueued_count += qp->stats.enqueued_count;
+ stats->dequeued_count += qp->stats.dequeued_count;
+
+ stats->enqueue_err_count += qp->stats.enqueue_err_count;
+ stats->dequeue_err_count += qp->stats.dequeue_err_count;
+ }
+}
+
+/** Reset device statistics */
+static void
+armv8_crypto_pmd_stats_reset(struct rte_cryptodev *dev)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct armv8_crypto_qp *qp = dev->data->queue_pairs[qp_id];
+
+ memset(&qp->stats, 0, sizeof(qp->stats));
+ }
+}
+
+
+/** Get device info */
+static void
+armv8_crypto_pmd_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *dev_info)
+{
+ struct armv8_crypto_private *internals = dev->data->dev_private;
+
+ if (dev_info != NULL) {
+ dev_info->driver_id = dev->driver_id;
+ dev_info->feature_flags = dev->feature_flags;
+ dev_info->capabilities = armv8_crypto_pmd_capabilities;
+ dev_info->max_nb_queue_pairs = internals->max_nb_qpairs;
+ /* No limit of number of sessions */
+ dev_info->sym.max_nb_sessions = 0;
+ }
+}
+
+/** Release queue pair */
+static int
+armv8_crypto_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+
+ if (dev->data->queue_pairs[qp_id] != NULL) {
+ rte_free(dev->data->queue_pairs[qp_id]);
+ dev->data->queue_pairs[qp_id] = NULL;
+ }
+
+ return 0;
+}
+
+/** Set a unique name for the queue pair based on its dev_id and qp_id */
+static int
+armv8_crypto_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
+ struct armv8_crypto_qp *qp)
+{
+ unsigned int n;
+
+ n = snprintf(qp->name, sizeof(qp->name), "armv8_crypto_pmd_%u_qp_%u",
+ dev->data->dev_id, qp->id);
+
+ if (n >= sizeof(qp->name))
+ return -1;
+
+ return 0;
+}
+
+
+/** Create a ring to place processed operations on */
+static struct rte_ring *
+armv8_crypto_pmd_qp_create_processed_ops_ring(struct armv8_crypto_qp *qp,
+ unsigned int ring_size, int socket_id)
+{
+ struct rte_ring *r;
+
+ r = rte_ring_lookup(qp->name);
+ if (r) {
+ if (rte_ring_get_size(r) >= ring_size) {
+ ARMV8_CRYPTO_LOG_INFO(
+ "Reusing existing ring %s for processed ops",
+ qp->name);
+ return r;
+ }
+
+ ARMV8_CRYPTO_LOG_ERR(
+ "Unable to reuse existing ring %s for processed ops",
+ qp->name);
+ return NULL;
+ }
+
+ return rte_ring_create(qp->name, ring_size, socket_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+}
+
+
+/** Setup a queue pair */
+static int
+armv8_crypto_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id, struct rte_mempool *session_pool)
+{
+ struct armv8_crypto_qp *qp = NULL;
+
+ /* Free memory prior to re-allocation if needed. */
+ if (dev->data->queue_pairs[qp_id] != NULL)
+ armv8_crypto_pmd_qp_release(dev, qp_id);
+
+ /* Allocate the queue pair data structure. */
+ qp = rte_zmalloc_socket("ARMv8 PMD Queue Pair", sizeof(*qp),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (qp == NULL)
+ return -ENOMEM;
+
+ qp->id = qp_id;
+ dev->data->queue_pairs[qp_id] = qp;
+
+ if (armv8_crypto_pmd_qp_set_unique_name(dev, qp) != 0)
+ goto qp_setup_cleanup;
+
+ qp->processed_ops = armv8_crypto_pmd_qp_create_processed_ops_ring(qp,
+ qp_conf->nb_descriptors, socket_id);
+ if (qp->processed_ops == NULL)
+ goto qp_setup_cleanup;
+
+ qp->sess_mp = session_pool;
+
+ memset(&qp->stats, 0, sizeof(qp->stats));
+
+ return 0;
+
+qp_setup_cleanup:
+	dev->data->queue_pairs[qp_id] = NULL;
+	rte_free(qp);
+
+ return -1;
+}
+
+/** Return the number of allocated queue pairs */
+static uint32_t
+armv8_crypto_pmd_qp_count(struct rte_cryptodev *dev)
+{
+ return dev->data->nb_queue_pairs;
+}
+
+/** Returns the size of the session structure */
+static unsigned
+armv8_crypto_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+ return sizeof(struct armv8_crypto_session);
+}
+
+/** Configure the session from a crypto xform chain */
+static int
+armv8_crypto_pmd_sym_session_configure(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mempool)
+{
+ void *sess_private_data;
+ int ret;
+
+ if (unlikely(sess == NULL)) {
+ ARMV8_CRYPTO_LOG_ERR("invalid session struct");
+ return -EINVAL;
+ }
+
+ if (rte_mempool_get(mempool, &sess_private_data)) {
+ CDEV_LOG_ERR(
+ "Couldn't get object from session mempool");
+ return -ENOMEM;
+ }
+
+ ret = armv8_crypto_set_session_parameters(sess_private_data, xform);
+ if (ret != 0) {
+ ARMV8_CRYPTO_LOG_ERR("failed configure session parameters");
+
+ /* Return session to mempool */
+ rte_mempool_put(mempool, sess_private_data);
+ return ret;
+ }
+
+ set_sym_session_private_data(sess, dev->driver_id,
+ sess_private_data);
+
+ return 0;
+}
+
+/** Clear the memory of session so it doesn't leave key material behind */
+static void
+armv8_crypto_pmd_sym_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
+{
+ uint8_t index = dev->driver_id;
+ void *sess_priv = get_sym_session_private_data(sess, index);
+
+ /* Zero out the whole structure */
+ if (sess_priv) {
+ memset(sess_priv, 0, sizeof(struct armv8_crypto_session));
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+ set_sym_session_private_data(sess, index, NULL);
+ rte_mempool_put(sess_mp, sess_priv);
+ }
+}
+
+struct rte_cryptodev_ops armv8_crypto_pmd_ops = {
+ .dev_configure = armv8_crypto_pmd_config,
+ .dev_start = armv8_crypto_pmd_start,
+ .dev_stop = armv8_crypto_pmd_stop,
+ .dev_close = armv8_crypto_pmd_close,
+
+ .stats_get = armv8_crypto_pmd_stats_get,
+ .stats_reset = armv8_crypto_pmd_stats_reset,
+
+ .dev_infos_get = armv8_crypto_pmd_info_get,
+
+ .queue_pair_setup = armv8_crypto_pmd_qp_setup,
+ .queue_pair_release = armv8_crypto_pmd_qp_release,
+ .queue_pair_count = armv8_crypto_pmd_qp_count,
+
+ .sym_session_get_size = armv8_crypto_pmd_sym_session_get_size,
+ .sym_session_configure = armv8_crypto_pmd_sym_session_configure,
+ .sym_session_clear = armv8_crypto_pmd_sym_session_clear
+};
+
+struct rte_cryptodev_ops *rte_armv8_crypto_pmd_ops = &armv8_crypto_pmd_ops;
diff --git a/src/spdk/dpdk/drivers/crypto/armv8/rte_armv8_pmd_private.h b/src/spdk/dpdk/drivers/crypto/armv8/rte_armv8_pmd_private.h
new file mode 100644
index 00000000..7feb021d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/armv8/rte_armv8_pmd_private.h
@@ -0,0 +1,197 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#ifndef _RTE_ARMV8_PMD_PRIVATE_H_
+#define _RTE_ARMV8_PMD_PRIVATE_H_
+
+#define CRYPTODEV_NAME_ARMV8_PMD crypto_armv8
+/**< ARMv8 Crypto PMD device name */
+
+#define ARMV8_CRYPTO_LOG_ERR(fmt, args...) \
+ RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+			RTE_STR(CRYPTODEV_NAME_ARMV8_PMD), \
+ __func__, __LINE__, ## args)
+
+#ifdef RTE_LIBRTE_ARMV8_CRYPTO_DEBUG
+#define ARMV8_CRYPTO_LOG_INFO(fmt, args...) \
+ RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+			RTE_STR(CRYPTODEV_NAME_ARMV8_PMD), \
+ __func__, __LINE__, ## args)
+
+#define ARMV8_CRYPTO_LOG_DBG(fmt, args...) \
+ RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+			RTE_STR(CRYPTODEV_NAME_ARMV8_PMD), \
+ __func__, __LINE__, ## args)
+
+#define ARMV8_CRYPTO_ASSERT(con)				\
+do {								\
+	if (!(con)) {						\
+		rte_panic("%s(): condition %s failed, line %d",	\
+		    __func__, #con, __LINE__);			\
+	}							\
+} while (0)
+
+#else
+#define ARMV8_CRYPTO_LOG_INFO(fmt, args...)
+#define ARMV8_CRYPTO_LOG_DBG(fmt, args...)
+#define ARMV8_CRYPTO_ASSERT(con)
+#endif
+
+#define NBBY 8 /* Number of bits in a byte */
+#define BYTE_LENGTH(x) ((x) / NBBY) /* Number of bytes in x (round down) */
+
+/* Maximum length for digest (SHA-256 needs 32 bytes) */
+#define DIGEST_LENGTH_MAX 32
+
+/** ARMv8 operation order mode enumerator */
+enum armv8_crypto_chain_order {
+ ARMV8_CRYPTO_CHAIN_CIPHER_AUTH,
+ ARMV8_CRYPTO_CHAIN_AUTH_CIPHER,
+ ARMV8_CRYPTO_CHAIN_NOT_SUPPORTED,
+ ARMV8_CRYPTO_CHAIN_LIST_END = ARMV8_CRYPTO_CHAIN_NOT_SUPPORTED
+};
+
+/** ARMv8 cipher operation enumerator */
+enum armv8_crypto_cipher_operation {
+ ARMV8_CRYPTO_CIPHER_OP_ENCRYPT = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
+ ARMV8_CRYPTO_CIPHER_OP_DECRYPT = RTE_CRYPTO_CIPHER_OP_DECRYPT,
+ ARMV8_CRYPTO_CIPHER_OP_NOT_SUPPORTED,
+ ARMV8_CRYPTO_CIPHER_OP_LIST_END = ARMV8_CRYPTO_CIPHER_OP_NOT_SUPPORTED
+};
+
+enum armv8_crypto_cipher_keylen {
+ ARMV8_CRYPTO_CIPHER_KEYLEN_128,
+ ARMV8_CRYPTO_CIPHER_KEYLEN_192,
+ ARMV8_CRYPTO_CIPHER_KEYLEN_256,
+ ARMV8_CRYPTO_CIPHER_KEYLEN_NOT_SUPPORTED,
+ ARMV8_CRYPTO_CIPHER_KEYLEN_LIST_END =
+ ARMV8_CRYPTO_CIPHER_KEYLEN_NOT_SUPPORTED
+};
+
+/** ARMv8 auth mode enumerator */
+enum armv8_crypto_auth_mode {
+ ARMV8_CRYPTO_AUTH_AS_AUTH,
+ ARMV8_CRYPTO_AUTH_AS_HMAC,
+ ARMV8_CRYPTO_AUTH_AS_CIPHER,
+ ARMV8_CRYPTO_AUTH_NOT_SUPPORTED,
+ ARMV8_CRYPTO_AUTH_LIST_END = ARMV8_CRYPTO_AUTH_NOT_SUPPORTED
+};
+
+#define CRYPTO_ORDER_MAX ARMV8_CRYPTO_CHAIN_LIST_END
+#define CRYPTO_CIPHER_OP_MAX ARMV8_CRYPTO_CIPHER_OP_LIST_END
+#define CRYPTO_CIPHER_KEYLEN_MAX ARMV8_CRYPTO_CIPHER_KEYLEN_LIST_END
+#define CRYPTO_CIPHER_MAX RTE_CRYPTO_CIPHER_LIST_END
+#define CRYPTO_AUTH_MAX RTE_CRYPTO_AUTH_LIST_END
+
+#define HMAC_IPAD_VALUE (0x36)
+#define HMAC_OPAD_VALUE (0x5C)
+
+#define SHA256_AUTH_KEY_LENGTH (BYTE_LENGTH(256))
+#define SHA256_BLOCK_SIZE (BYTE_LENGTH(512))
+
+#define SHA1_AUTH_KEY_LENGTH (BYTE_LENGTH(160))
+#define SHA1_BLOCK_SIZE (BYTE_LENGTH(512))
+
+#define SHA_AUTH_KEY_MAX SHA256_AUTH_KEY_LENGTH
+#define SHA_BLOCK_MAX SHA256_BLOCK_SIZE
+
+typedef int (*crypto_func_t)(uint8_t *, uint8_t *, uint64_t,
+ uint8_t *, uint8_t *, uint64_t,
+ crypto_arg_t *);
+
+typedef void (*crypto_key_sched_t)(uint8_t *, const uint8_t *);
+
+/** private data structure for each ARMv8 crypto device */
+struct armv8_crypto_private {
+ unsigned int max_nb_qpairs;
+ /**< Max number of queue pairs */
+};
+
+/** ARMv8 crypto queue pair */
+struct armv8_crypto_qp {
+ uint16_t id;
+ /**< Queue Pair Identifier */
+ struct rte_ring *processed_ops;
+	/**< Ring for placing processed operations */
+ struct rte_mempool *sess_mp;
+ /**< Session Mempool */
+ struct rte_cryptodev_stats stats;
+ /**< Queue pair statistics */
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ /**< Unique Queue Pair Name */
+ uint8_t temp_digest[DIGEST_LENGTH_MAX];
+ /**< Buffer used to store the digest generated
+ * by the driver when verifying a digest provided
+ * by the user (using authentication verify operation)
+ */
+} __rte_cache_aligned;
+
+/** ARMv8 crypto private session structure */
+struct armv8_crypto_session {
+ enum armv8_crypto_chain_order chain_order;
+ /**< chain order mode */
+ crypto_func_t crypto_func;
+ /**< cryptographic function to use for this session */
+
+ /** Cipher Parameters */
+ struct {
+ enum rte_crypto_cipher_operation direction;
+ /**< cipher operation direction */
+ enum rte_crypto_cipher_algorithm algo;
+ /**< cipher algorithm */
+ struct {
+ uint16_t length;
+ uint16_t offset;
+ } iv;
+ /**< IV parameters */
+
+ struct {
+ uint8_t data[256];
+ /**< key data */
+ size_t length;
+ /**< key length in bytes */
+ } key;
+
+ crypto_key_sched_t key_sched;
+ /**< Key schedule function */
+ } cipher;
+
+ /** Authentication Parameters */
+ struct {
+ enum rte_crypto_auth_operation operation;
+ /**< auth operation generate or verify */
+ enum armv8_crypto_auth_mode mode;
+ /**< auth operation mode */
+
+ union {
+ struct {
+ /* Add data if needed */
+ } auth;
+
+ struct {
+ uint8_t i_key_pad[SHA_BLOCK_MAX]
+ __rte_cache_aligned;
+ /**< inner pad (max supported block length) */
+ uint8_t o_key_pad[SHA_BLOCK_MAX]
+ __rte_cache_aligned;
+ /**< outer pad (max supported block length) */
+ uint8_t key[SHA_BLOCK_MAX];
+ /**< HMAC key (max supported block length)*/
+ } hmac;
+ };
+ uint16_t digest_length;
+ /* Digest length */
+ } auth;
+
+} __rte_cache_aligned;
+
+/** Set and validate ARMv8 crypto session parameters */
+extern int armv8_crypto_set_session_parameters(
+ struct armv8_crypto_session *sess,
+ const struct rte_crypto_sym_xform *xform);
+
+/** device specific operations function pointer structure */
+extern struct rte_cryptodev_ops *rte_armv8_crypto_pmd_ops;
+
+#endif /* _RTE_ARMV8_PMD_PRIVATE_H_ */
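
The i_key_pad and o_key_pad fields of struct armv8_crypto_session above hold the standard HMAC inner and outer pads (RFC 2104), derived from the key and the HMAC_IPAD_VALUE/HMAC_OPAD_VALUE constants defined in this header. A minimal standalone sketch of that derivation, assuming the key has already been reduced to at most one block (illustrative only, not driver code):

	#include <stdint.h>
	#include <string.h>

	#define HMAC_IPAD_VALUE 0x36
	#define HMAC_OPAD_VALUE 0x5C

	/* Zero-pad the key to the block size, then XOR each byte with the
	 * ipad/opad constant to form the inner and outer pads. */
	static void
	hmac_derive_pads(const uint8_t *key, size_t key_len, size_t block_size,
			 uint8_t *i_key_pad, uint8_t *o_key_pad)
	{
		size_t i;

		memset(i_key_pad, 0, block_size);
		memcpy(i_key_pad, key, key_len);
		memcpy(o_key_pad, i_key_pad, block_size);
		for (i = 0; i < block_size; i++) {
			i_key_pad[i] ^= HMAC_IPAD_VALUE;
			o_key_pad[i] ^= HMAC_OPAD_VALUE;
		}
	}
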
diff --git a/src/spdk/dpdk/drivers/crypto/armv8/rte_pmd_armv8_version.map b/src/spdk/dpdk/drivers/crypto/armv8/rte_pmd_armv8_version.map
new file mode 100644
index 00000000..1f84b68a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/armv8/rte_pmd_armv8_version.map
@@ -0,0 +1,3 @@
+DPDK_17.02 {
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/crypto/ccp/Makefile b/src/spdk/dpdk/drivers/crypto/ccp/Makefile
new file mode 100644
index 00000000..f51d170f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/ccp/Makefile
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_pmd_ccp.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += -I$(SRCDIR)
+CFLAGS += $(WERROR_FLAGS)
+
+# library version
+LIBABIVER := 1
+
+# external library include paths
+LDLIBS += -lcrypto
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_cryptodev
+LDLIBS += -lrte_pci -lrte_bus_pci
+LDLIBS += -lrte_bus_vdev
+LDLIBS += -lrte_kvargs
+
+# versioning export map
+EXPORT_MAP := rte_pmd_ccp_version.map
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += rte_ccp_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_crypto.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_dev.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_pci.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_pmd_ops.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/crypto/ccp/ccp_crypto.c b/src/spdk/dpdk/drivers/crypto/ccp/ccp_crypto.c
new file mode 100644
index 00000000..19ae9153
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/ccp/ccp_crypto.c
@@ -0,0 +1,2951 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#include <dirent.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/queue.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <openssl/sha.h>
+#include <openssl/cmac.h> /*sub key apis*/
+#include <openssl/evp.h> /*sub key apis*/
+
+#include <rte_hexdump.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_spinlock.h>
+#include <rte_string_fns.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "ccp_dev.h"
+#include "ccp_crypto.h"
+#include "ccp_pci.h"
+#include "ccp_pmd_private.h"
+
+#include <openssl/conf.h>
+#include <openssl/err.h>
+#include <openssl/hmac.h>
+
+/* SHA initial context values */
+static uint32_t ccp_sha1_init[SHA_COMMON_DIGEST_SIZE / sizeof(uint32_t)] = {
+ SHA1_H4, SHA1_H3,
+ SHA1_H2, SHA1_H1,
+ SHA1_H0, 0x0U,
+ 0x0U, 0x0U,
+};
+
+uint32_t ccp_sha224_init[SHA256_DIGEST_SIZE / sizeof(uint32_t)] = {
+ SHA224_H7, SHA224_H6,
+ SHA224_H5, SHA224_H4,
+ SHA224_H3, SHA224_H2,
+ SHA224_H1, SHA224_H0,
+};
+
+uint32_t ccp_sha256_init[SHA256_DIGEST_SIZE / sizeof(uint32_t)] = {
+ SHA256_H7, SHA256_H6,
+ SHA256_H5, SHA256_H4,
+ SHA256_H3, SHA256_H2,
+ SHA256_H1, SHA256_H0,
+};
+
+uint64_t ccp_sha384_init[SHA512_DIGEST_SIZE / sizeof(uint64_t)] = {
+ SHA384_H7, SHA384_H6,
+ SHA384_H5, SHA384_H4,
+ SHA384_H3, SHA384_H2,
+ SHA384_H1, SHA384_H0,
+};
+
+uint64_t ccp_sha512_init[SHA512_DIGEST_SIZE / sizeof(uint64_t)] = {
+ SHA512_H7, SHA512_H6,
+ SHA512_H5, SHA512_H4,
+ SHA512_H3, SHA512_H2,
+ SHA512_H1, SHA512_H0,
+};
+
+#if defined(_MSC_VER)
+#define SHA3_CONST(x) x
+#else
+#define SHA3_CONST(x) x##L
+#endif
+
+/** 'Words' here refers to uint64_t */
+#define SHA3_KECCAK_SPONGE_WORDS \
+ (((1600) / 8) / sizeof(uint64_t))
+typedef struct sha3_context_ {
+ uint64_t saved;
+ /**
+ * The portion of the input message that we
+ * didn't consume yet
+ */
+ union {
+ uint64_t s[SHA3_KECCAK_SPONGE_WORDS];
+ /* Keccak's state */
+ uint8_t sb[SHA3_KECCAK_SPONGE_WORDS * 8];
+		/**< total 200-byte context size */
+ };
+ unsigned int byteIndex;
+ /**
+ * 0..7--the next byte after the set one
+ * (starts from 0; 0--none are buffered)
+ */
+ unsigned int wordIndex;
+ /**
+ * 0..24--the next word to integrate input
+ * (starts from 0)
+ */
+ unsigned int capacityWords;
+ /**
+ * the double size of the hash output in
+ * words (e.g. 16 for Keccak 512)
+ */
+} sha3_context;
+
+#ifndef SHA3_ROTL64
+#define SHA3_ROTL64(x, y) \
+ (((x) << (y)) | ((x) >> ((sizeof(uint64_t)*8) - (y))))
+#endif
+
+static const uint64_t keccakf_rndc[24] = {
+ SHA3_CONST(0x0000000000000001UL), SHA3_CONST(0x0000000000008082UL),
+ SHA3_CONST(0x800000000000808aUL), SHA3_CONST(0x8000000080008000UL),
+ SHA3_CONST(0x000000000000808bUL), SHA3_CONST(0x0000000080000001UL),
+ SHA3_CONST(0x8000000080008081UL), SHA3_CONST(0x8000000000008009UL),
+ SHA3_CONST(0x000000000000008aUL), SHA3_CONST(0x0000000000000088UL),
+ SHA3_CONST(0x0000000080008009UL), SHA3_CONST(0x000000008000000aUL),
+ SHA3_CONST(0x000000008000808bUL), SHA3_CONST(0x800000000000008bUL),
+ SHA3_CONST(0x8000000000008089UL), SHA3_CONST(0x8000000000008003UL),
+ SHA3_CONST(0x8000000000008002UL), SHA3_CONST(0x8000000000000080UL),
+ SHA3_CONST(0x000000000000800aUL), SHA3_CONST(0x800000008000000aUL),
+ SHA3_CONST(0x8000000080008081UL), SHA3_CONST(0x8000000000008080UL),
+ SHA3_CONST(0x0000000080000001UL), SHA3_CONST(0x8000000080008008UL)
+};
+
+static const unsigned int keccakf_rotc[24] = {
+ 1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 2, 14, 27, 41, 56, 8, 25, 43, 62,
+ 18, 39, 61, 20, 44
+};
+
+static const unsigned int keccakf_piln[24] = {
+ 10, 7, 11, 17, 18, 3, 5, 16, 8, 21, 24, 4, 15, 23, 19, 13, 12, 2, 20,
+ 14, 22, 9, 6, 1
+};
+
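+/*
+ * Map an xform chain onto the CCP command order: auth only, cipher only,
+ * cipher followed by hash, hash followed by cipher, or a combined AEAD
+ * operation.
+ */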
+static enum ccp_cmd_order
+ccp_get_cmd_id(const struct rte_crypto_sym_xform *xform)
+{
+ enum ccp_cmd_order res = CCP_CMD_NOT_SUPPORTED;
+
+ if (xform == NULL)
+ return res;
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ if (xform->next == NULL)
+ return CCP_CMD_AUTH;
+ else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+ return CCP_CMD_HASH_CIPHER;
+ }
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ if (xform->next == NULL)
+ return CCP_CMD_CIPHER;
+ else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ return CCP_CMD_CIPHER_HASH;
+ }
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD)
+ return CCP_CMD_COMBINED;
+ return res;
+}
+
+/* partial hash using openssl */
+static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
+{
+ SHA_CTX ctx;
+
+ if (!SHA1_Init(&ctx))
+ return -EFAULT;
+ SHA1_Transform(&ctx, data_in);
+ rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH);
+ return 0;
+}
+
+static int partial_hash_sha224(uint8_t *data_in, uint8_t *data_out)
+{
+ SHA256_CTX ctx;
+
+ if (!SHA224_Init(&ctx))
+ return -EFAULT;
+ SHA256_Transform(&ctx, data_in);
+ rte_memcpy(data_out, &ctx,
+ SHA256_DIGEST_LENGTH);
+ return 0;
+}
+
+static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
+{
+ SHA256_CTX ctx;
+
+ if (!SHA256_Init(&ctx))
+ return -EFAULT;
+ SHA256_Transform(&ctx, data_in);
+ rte_memcpy(data_out, &ctx,
+ SHA256_DIGEST_LENGTH);
+ return 0;
+}
+
+static int partial_hash_sha384(uint8_t *data_in, uint8_t *data_out)
+{
+ SHA512_CTX ctx;
+
+ if (!SHA384_Init(&ctx))
+ return -EFAULT;
+ SHA512_Transform(&ctx, data_in);
+ rte_memcpy(data_out, &ctx,
+ SHA512_DIGEST_LENGTH);
+ return 0;
+}
+
+static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
+{
+ SHA512_CTX ctx;
+
+ if (!SHA512_Init(&ctx))
+ return -EFAULT;
+ SHA512_Transform(&ctx, data_in);
+ rte_memcpy(data_out, &ctx,
+ SHA512_DIGEST_LENGTH);
+ return 0;
+}
+
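+/*
+ * One full Keccak-f[1600] permutation: 24 rounds of the theta, rho/pi, chi
+ * and iota steps over the 5x5 array of 64-bit lanes in 's'.
+ */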
+static void
+keccakf(uint64_t s[25])
+{
+ int i, j, round;
+ uint64_t t, bc[5];
+#define KECCAK_ROUNDS 24
+
+ for (round = 0; round < KECCAK_ROUNDS; round++) {
+
+ /* Theta */
+ for (i = 0; i < 5; i++)
+ bc[i] = s[i] ^ s[i + 5] ^ s[i + 10] ^ s[i + 15] ^
+ s[i + 20];
+
+ for (i = 0; i < 5; i++) {
+ t = bc[(i + 4) % 5] ^ SHA3_ROTL64(bc[(i + 1) % 5], 1);
+ for (j = 0; j < 25; j += 5)
+ s[j + i] ^= t;
+ }
+
+ /* Rho Pi */
+ t = s[1];
+ for (i = 0; i < 24; i++) {
+ j = keccakf_piln[i];
+ bc[0] = s[j];
+ s[j] = SHA3_ROTL64(t, keccakf_rotc[i]);
+ t = bc[0];
+ }
+
+ /* Chi */
+ for (j = 0; j < 25; j += 5) {
+ for (i = 0; i < 5; i++)
+ bc[i] = s[j + i];
+ for (i = 0; i < 5; i++)
+ s[j + i] ^= (~bc[(i + 1) % 5]) &
+ bc[(i + 2) % 5];
+ }
+
+ /* Iota */
+ s[0] ^= keccakf_rndc[round];
+ }
+}
+
+static void
+sha3_Init224(void *priv)
+{
+ sha3_context *ctx = (sha3_context *) priv;
+
+ memset(ctx, 0, sizeof(*ctx));
+ ctx->capacityWords = 2 * 224 / (8 * sizeof(uint64_t));
+}
+
+static void
+sha3_Init256(void *priv)
+{
+ sha3_context *ctx = (sha3_context *) priv;
+
+ memset(ctx, 0, sizeof(*ctx));
+ ctx->capacityWords = 2 * 256 / (8 * sizeof(uint64_t));
+}
+
+static void
+sha3_Init384(void *priv)
+{
+ sha3_context *ctx = (sha3_context *) priv;
+
+ memset(ctx, 0, sizeof(*ctx));
+ ctx->capacityWords = 2 * 384 / (8 * sizeof(uint64_t));
+}
+
+static void
+sha3_Init512(void *priv)
+{
+ sha3_context *ctx = (sha3_context *) priv;
+
+ memset(ctx, 0, sizeof(*ctx));
+ ctx->capacityWords = 2 * 512 / (8 * sizeof(uint64_t));
+}
+
+
+/* Absorb 'len' bytes of input into the Keccak sponge: first complete any
+ * partially filled 64-bit word, then XOR whole words into the state
+ * (running the permutation whenever the rate portion is full), and buffer
+ * any remaining tail bytes in ctx->saved.
+ */
+static void
+sha3_Update(void *priv, void const *bufIn, size_t len)
+{
+ sha3_context *ctx = (sha3_context *) priv;
+ unsigned int old_tail = (8 - ctx->byteIndex) & 7;
+ size_t words;
+ unsigned int tail;
+ size_t i;
+ const uint8_t *buf = bufIn;
+
+ if (len < old_tail) {
+ while (len--)
+ ctx->saved |= (uint64_t) (*(buf++)) <<
+ ((ctx->byteIndex++) * 8);
+ return;
+ }
+
+ if (old_tail) {
+ len -= old_tail;
+ while (old_tail--)
+ ctx->saved |= (uint64_t) (*(buf++)) <<
+ ((ctx->byteIndex++) * 8);
+
+ ctx->s[ctx->wordIndex] ^= ctx->saved;
+ ctx->byteIndex = 0;
+ ctx->saved = 0;
+ if (++ctx->wordIndex ==
+ (SHA3_KECCAK_SPONGE_WORDS - ctx->capacityWords)) {
+ keccakf(ctx->s);
+ ctx->wordIndex = 0;
+ }
+ }
+
+ words = len / sizeof(uint64_t);
+ tail = len - words * sizeof(uint64_t);
+
+ for (i = 0; i < words; i++, buf += sizeof(uint64_t)) {
+ const uint64_t t = (uint64_t) (buf[0]) |
+ ((uint64_t) (buf[1]) << 8 * 1) |
+ ((uint64_t) (buf[2]) << 8 * 2) |
+ ((uint64_t) (buf[3]) << 8 * 3) |
+ ((uint64_t) (buf[4]) << 8 * 4) |
+ ((uint64_t) (buf[5]) << 8 * 5) |
+ ((uint64_t) (buf[6]) << 8 * 6) |
+ ((uint64_t) (buf[7]) << 8 * 7);
+ ctx->s[ctx->wordIndex] ^= t;
+ if (++ctx->wordIndex ==
+ (SHA3_KECCAK_SPONGE_WORDS - ctx->capacityWords)) {
+ keccakf(ctx->s);
+ ctx->wordIndex = 0;
+ }
+ }
+
+ while (tail--)
+ ctx->saved |= (uint64_t) (*(buf++)) << ((ctx->byteIndex++) * 8);
+}
+
+int partial_hash_sha3_224(uint8_t *data_in, uint8_t *data_out)
+{
+ sha3_context *ctx;
+ int i;
+
+ ctx = rte_zmalloc("sha3-ctx", sizeof(sha3_context), 0);
+ if (!ctx) {
+ CCP_LOG_ERR("sha3-ctx creation failed");
+ return -ENOMEM;
+ }
+ sha3_Init224(ctx);
+ sha3_Update(ctx, data_in, SHA3_224_BLOCK_SIZE);
+ for (i = 0; i < CCP_SHA3_CTX_SIZE; i++, data_out++)
+ *data_out = ctx->sb[CCP_SHA3_CTX_SIZE - i - 1];
+ rte_free(ctx);
+
+ return 0;
+}
+
+int partial_hash_sha3_256(uint8_t *data_in, uint8_t *data_out)
+{
+ sha3_context *ctx;
+ int i;
+
+ ctx = rte_zmalloc("sha3-ctx", sizeof(sha3_context), 0);
+ if (!ctx) {
+ CCP_LOG_ERR("sha3-ctx creation failed");
+ return -ENOMEM;
+ }
+ sha3_Init256(ctx);
+ sha3_Update(ctx, data_in, SHA3_256_BLOCK_SIZE);
+ for (i = 0; i < CCP_SHA3_CTX_SIZE; i++, data_out++)
+ *data_out = ctx->sb[CCP_SHA3_CTX_SIZE - i - 1];
+ rte_free(ctx);
+
+ return 0;
+}
+
+int partial_hash_sha3_384(uint8_t *data_in, uint8_t *data_out)
+{
+ sha3_context *ctx;
+ int i;
+
+ ctx = rte_zmalloc("sha3-ctx", sizeof(sha3_context), 0);
+ if (!ctx) {
+ CCP_LOG_ERR("sha3-ctx creation failed");
+ return -ENOMEM;
+ }
+ sha3_Init384(ctx);
+ sha3_Update(ctx, data_in, SHA3_384_BLOCK_SIZE);
+ for (i = 0; i < CCP_SHA3_CTX_SIZE; i++, data_out++)
+ *data_out = ctx->sb[CCP_SHA3_CTX_SIZE - i - 1];
+ rte_free(ctx);
+
+ return 0;
+}
+
+int partial_hash_sha3_512(uint8_t *data_in, uint8_t *data_out)
+{
+ sha3_context *ctx;
+ int i;
+
+ ctx = rte_zmalloc("sha3-ctx", sizeof(sha3_context), 0);
+ if (!ctx) {
+ CCP_LOG_ERR("sha3-ctx creation failed");
+ return -ENOMEM;
+ }
+ sha3_Init512(ctx);
+ sha3_Update(ctx, data_in, SHA3_512_BLOCK_SIZE);
+ for (i = 0; i < CCP_SHA3_CTX_SIZE; i++, data_out++)
+ *data_out = ctx->sb[CCP_SHA3_CTX_SIZE - i - 1];
+ rte_free(ctx);
+
+ return 0;
+}
+
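+/*
+ * Precompute the HMAC partial hashes: run a single hash transform over
+ * (key XOR ipad) and over (key XOR opad) and store the two intermediate
+ * states back-to-back (inner first, outer at ctx_len) in
+ * sess->auth.pre_compute, word-reversed; the SHA3 cases store the
+ * byte-reversed sponge state instead.
+ */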
+static int generate_partial_hash(struct ccp_session *sess)
+{
+
+ uint8_t ipad[sess->auth.block_size];
+ uint8_t opad[sess->auth.block_size];
+ uint8_t *ipad_t, *opad_t;
+ uint32_t *hash_value_be32, hash_temp32[8];
+ uint64_t *hash_value_be64, hash_temp64[8];
+ int i, count;
+ uint8_t *hash_value_sha3;
+
+ opad_t = ipad_t = (uint8_t *)sess->auth.key;
+
+ hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute);
+ hash_value_be64 = (uint64_t *)((uint8_t *)sess->auth.pre_compute);
+
+	/* the key buffer is zero-padded to the algorithm block size,
+	 * so XOR across the full block
+	 */
+ for (i = 0; i < sess->auth.block_size; i++) {
+ ipad[i] = (ipad_t[i] ^ HMAC_IPAD_VALUE);
+ opad[i] = (opad_t[i] ^ HMAC_OPAD_VALUE);
+ }
+
+ switch (sess->auth.algo) {
+ case CCP_AUTH_ALGO_SHA1_HMAC:
+ count = SHA1_DIGEST_SIZE >> 2;
+
+ if (partial_hash_sha1(ipad, (uint8_t *)hash_temp32))
+ return -1;
+ for (i = 0; i < count; i++, hash_value_be32++)
+ *hash_value_be32 = hash_temp32[count - 1 - i];
+
+ hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute
+ + sess->auth.ctx_len);
+ if (partial_hash_sha1(opad, (uint8_t *)hash_temp32))
+ return -1;
+ for (i = 0; i < count; i++, hash_value_be32++)
+ *hash_value_be32 = hash_temp32[count - 1 - i];
+ return 0;
+ case CCP_AUTH_ALGO_SHA224_HMAC:
+ count = SHA256_DIGEST_SIZE >> 2;
+
+ if (partial_hash_sha224(ipad, (uint8_t *)hash_temp32))
+ return -1;
+ for (i = 0; i < count; i++, hash_value_be32++)
+ *hash_value_be32 = hash_temp32[count - 1 - i];
+
+ hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute
+ + sess->auth.ctx_len);
+ if (partial_hash_sha224(opad, (uint8_t *)hash_temp32))
+ return -1;
+ for (i = 0; i < count; i++, hash_value_be32++)
+ *hash_value_be32 = hash_temp32[count - 1 - i];
+ return 0;
+ case CCP_AUTH_ALGO_SHA3_224_HMAC:
+ hash_value_sha3 = sess->auth.pre_compute;
+ if (partial_hash_sha3_224(ipad, hash_value_sha3))
+ return -1;
+
+ hash_value_sha3 = (uint8_t *)(sess->auth.pre_compute
+ + sess->auth.ctx_len);
+ if (partial_hash_sha3_224(opad, hash_value_sha3))
+ return -1;
+ return 0;
+ case CCP_AUTH_ALGO_SHA256_HMAC:
+ count = SHA256_DIGEST_SIZE >> 2;
+
+ if (partial_hash_sha256(ipad, (uint8_t *)hash_temp32))
+ return -1;
+ for (i = 0; i < count; i++, hash_value_be32++)
+ *hash_value_be32 = hash_temp32[count - 1 - i];
+
+ hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute
+ + sess->auth.ctx_len);
+ if (partial_hash_sha256(opad, (uint8_t *)hash_temp32))
+ return -1;
+ for (i = 0; i < count; i++, hash_value_be32++)
+ *hash_value_be32 = hash_temp32[count - 1 - i];
+ return 0;
+ case CCP_AUTH_ALGO_SHA3_256_HMAC:
+ hash_value_sha3 = sess->auth.pre_compute;
+ if (partial_hash_sha3_256(ipad, hash_value_sha3))
+ return -1;
+
+ hash_value_sha3 = (uint8_t *)(sess->auth.pre_compute
+ + sess->auth.ctx_len);
+ if (partial_hash_sha3_256(opad, hash_value_sha3))
+ return -1;
+ return 0;
+ case CCP_AUTH_ALGO_SHA384_HMAC:
+ count = SHA512_DIGEST_SIZE >> 3;
+
+ if (partial_hash_sha384(ipad, (uint8_t *)hash_temp64))
+ return -1;
+ for (i = 0; i < count; i++, hash_value_be64++)
+ *hash_value_be64 = hash_temp64[count - 1 - i];
+
+ hash_value_be64 = (uint64_t *)((uint8_t *)sess->auth.pre_compute
+ + sess->auth.ctx_len);
+ if (partial_hash_sha384(opad, (uint8_t *)hash_temp64))
+ return -1;
+ for (i = 0; i < count; i++, hash_value_be64++)
+ *hash_value_be64 = hash_temp64[count - 1 - i];
+ return 0;
+ case CCP_AUTH_ALGO_SHA3_384_HMAC:
+ hash_value_sha3 = sess->auth.pre_compute;
+ if (partial_hash_sha3_384(ipad, hash_value_sha3))
+ return -1;
+
+ hash_value_sha3 = (uint8_t *)(sess->auth.pre_compute
+ + sess->auth.ctx_len);
+ if (partial_hash_sha3_384(opad, hash_value_sha3))
+ return -1;
+ return 0;
+ case CCP_AUTH_ALGO_SHA512_HMAC:
+ count = SHA512_DIGEST_SIZE >> 3;
+
+ if (partial_hash_sha512(ipad, (uint8_t *)hash_temp64))
+ return -1;
+ for (i = 0; i < count; i++, hash_value_be64++)
+ *hash_value_be64 = hash_temp64[count - 1 - i];
+
+ hash_value_be64 = (uint64_t *)((uint8_t *)sess->auth.pre_compute
+ + sess->auth.ctx_len);
+ if (partial_hash_sha512(opad, (uint8_t *)hash_temp64))
+ return -1;
+ for (i = 0; i < count; i++, hash_value_be64++)
+ *hash_value_be64 = hash_temp64[count - 1 - i];
+ return 0;
+ case CCP_AUTH_ALGO_SHA3_512_HMAC:
+ hash_value_sha3 = sess->auth.pre_compute;
+ if (partial_hash_sha3_512(ipad, hash_value_sha3))
+ return -1;
+
+ hash_value_sha3 = (uint8_t *)(sess->auth.pre_compute
+ + sess->auth.ctx_len);
+ if (partial_hash_sha3_512(opad, hash_value_sha3))
+ return -1;
+ return 0;
+ default:
+ CCP_LOG_ERR("Invalid auth algo");
+ return -1;
+ }
+}
+
+/* derive one CMAC subkey: left-shift the input block by one bit */
+static void prepare_key(unsigned char *k, unsigned char *l, int bl)
+{
+ int i;
+ /* Shift block to left, including carry */
+ for (i = 0; i < bl; i++) {
+ k[i] = l[i] << 1;
+ if (i < bl - 1 && l[i + 1] & 0x80)
+ k[i] |= 1;
+ }
+	/* If the MSB of the input was set, XOR in the constant Rb */
+ if (l[0] & 0x80)
+ k[bl - 1] ^= bl == 16 ? 0x87 : 0x1b;
+}
+
+/* subkeys K1 and K2 generation for CMAC */
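+/*
+ * Per NIST SP 800-38B: L = AES-ECB(K, 0^128); K1 = double(L) and
+ * K2 = double(K1), where double() is a one-bit left shift followed by an
+ * XOR with Rb (0x87) if the shifted-out bit was set. Both subkeys are
+ * stored byte-reversed in sess->auth.pre_compute.
+ */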
+static int
+generate_cmac_subkeys(struct ccp_session *sess)
+{
+ const EVP_CIPHER *algo;
+ EVP_CIPHER_CTX *ctx;
+ unsigned char *ccp_ctx;
+ size_t i;
+ int dstlen, totlen;
+ unsigned char zero_iv[AES_BLOCK_SIZE] = {0};
+ unsigned char dst[2 * AES_BLOCK_SIZE] = {0};
+ unsigned char k1[AES_BLOCK_SIZE] = {0};
+ unsigned char k2[AES_BLOCK_SIZE] = {0};
+
+ if (sess->auth.ut.aes_type == CCP_AES_TYPE_128)
+ algo = EVP_aes_128_cbc();
+ else if (sess->auth.ut.aes_type == CCP_AES_TYPE_192)
+ algo = EVP_aes_192_cbc();
+ else if (sess->auth.ut.aes_type == CCP_AES_TYPE_256)
+ algo = EVP_aes_256_cbc();
+ else {
+ CCP_LOG_ERR("Invalid CMAC type length");
+ return -1;
+ }
+
+ ctx = EVP_CIPHER_CTX_new();
+ if (!ctx) {
+ CCP_LOG_ERR("ctx creation failed");
+ return -1;
+ }
+ if (EVP_EncryptInit(ctx, algo, (unsigned char *)sess->auth.key,
+ (unsigned char *)zero_iv) <= 0)
+ goto key_generate_err;
+ if (EVP_CIPHER_CTX_set_padding(ctx, 0) <= 0)
+ goto key_generate_err;
+ if (EVP_EncryptUpdate(ctx, dst, &dstlen, zero_iv,
+ AES_BLOCK_SIZE) <= 0)
+ goto key_generate_err;
+ if (EVP_EncryptFinal_ex(ctx, dst + dstlen, &totlen) <= 0)
+ goto key_generate_err;
+
+ memset(sess->auth.pre_compute, 0, CCP_SB_BYTES * 2);
+
+ ccp_ctx = (unsigned char *)(sess->auth.pre_compute + CCP_SB_BYTES - 1);
+ prepare_key(k1, dst, AES_BLOCK_SIZE);
+ for (i = 0; i < AES_BLOCK_SIZE; i++, ccp_ctx--)
+ *ccp_ctx = k1[i];
+
+ ccp_ctx = (unsigned char *)(sess->auth.pre_compute +
+ (2 * CCP_SB_BYTES) - 1);
+ prepare_key(k2, k1, AES_BLOCK_SIZE);
+ for (i = 0; i < AES_BLOCK_SIZE; i++, ccp_ctx--)
+ *ccp_ctx = k2[i];
+
+ EVP_CIPHER_CTX_free(ctx);
+
+ return 0;
+
+key_generate_err:
+ CCP_LOG_ERR("CMAC Init failed");
+ return -1;
+}
+
+/* Configure cipher session parameters */
+static int
+ccp_configure_session_cipher(struct ccp_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ const struct rte_crypto_cipher_xform *cipher_xform = NULL;
+ size_t i, j, x;
+
+ cipher_xform = &xform->cipher;
+
+ /* set cipher direction */
+ if (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+ sess->cipher.dir = CCP_CIPHER_DIR_ENCRYPT;
+ else
+ sess->cipher.dir = CCP_CIPHER_DIR_DECRYPT;
+
+ /* set cipher key */
+ sess->cipher.key_length = cipher_xform->key.length;
+ rte_memcpy(sess->cipher.key, cipher_xform->key.data,
+ cipher_xform->key.length);
+
+ /* set iv parameters */
+ sess->iv.offset = cipher_xform->iv.offset;
+ sess->iv.length = cipher_xform->iv.length;
+
+ switch (cipher_xform->algo) {
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ sess->cipher.algo = CCP_CIPHER_ALGO_AES_CTR;
+ sess->cipher.um.aes_mode = CCP_AES_MODE_CTR;
+ sess->cipher.engine = CCP_ENGINE_AES;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_ECB:
+ sess->cipher.algo = CCP_CIPHER_ALGO_AES_CBC;
+ sess->cipher.um.aes_mode = CCP_AES_MODE_ECB;
+ sess->cipher.engine = CCP_ENGINE_AES;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ sess->cipher.algo = CCP_CIPHER_ALGO_AES_CBC;
+ sess->cipher.um.aes_mode = CCP_AES_MODE_CBC;
+ sess->cipher.engine = CCP_ENGINE_AES;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ sess->cipher.algo = CCP_CIPHER_ALGO_3DES_CBC;
+ sess->cipher.um.des_mode = CCP_DES_MODE_CBC;
+ sess->cipher.engine = CCP_ENGINE_3DES;
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported cipher algo");
+ return -1;
+ }
+
+
+ switch (sess->cipher.engine) {
+ case CCP_ENGINE_AES:
+ if (sess->cipher.key_length == 16)
+ sess->cipher.ut.aes_type = CCP_AES_TYPE_128;
+ else if (sess->cipher.key_length == 24)
+ sess->cipher.ut.aes_type = CCP_AES_TYPE_192;
+ else if (sess->cipher.key_length == 32)
+ sess->cipher.ut.aes_type = CCP_AES_TYPE_256;
+ else {
+ CCP_LOG_ERR("Invalid cipher key length");
+ return -1;
+ }
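+		/* store the AES key byte-reversed, as the CCP engine expects */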
+ for (i = 0; i < sess->cipher.key_length ; i++)
+ sess->cipher.key_ccp[sess->cipher.key_length - i - 1] =
+ sess->cipher.key[i];
+ break;
+ case CCP_ENGINE_3DES:
+ if (sess->cipher.key_length == 16)
+ sess->cipher.ut.des_type = CCP_DES_TYPE_128;
+ else if (sess->cipher.key_length == 24)
+ sess->cipher.ut.des_type = CCP_DES_TYPE_192;
+ else {
+ CCP_LOG_ERR("Invalid cipher key length");
+ return -1;
+ }
+ for (j = 0, x = 0; j < sess->cipher.key_length/8; j++, x += 8)
+ for (i = 0; i < 8; i++)
+ sess->cipher.key_ccp[(8 + x) - i - 1] =
+ sess->cipher.key[i + x];
+ break;
+ default:
+ CCP_LOG_ERR("Invalid CCP Engine");
+ return -ENOTSUP;
+ }
+ sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce);
+ sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp);
+ return 0;
+}
+
+static int
+ccp_configure_session_auth(struct ccp_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ const struct rte_crypto_auth_xform *auth_xform = NULL;
+ size_t i;
+
+ auth_xform = &xform->auth;
+
+ sess->auth.digest_length = auth_xform->digest_length;
+ if (auth_xform->op == RTE_CRYPTO_AUTH_OP_GENERATE)
+ sess->auth.op = CCP_AUTH_OP_GENERATE;
+ else
+ sess->auth.op = CCP_AUTH_OP_VERIFY;
+ switch (auth_xform->algo) {
+ case RTE_CRYPTO_AUTH_MD5_HMAC:
+ if (sess->auth_opt) {
+ sess->auth.algo = CCP_AUTH_ALGO_MD5_HMAC;
+ sess->auth.offset = ((CCP_SB_BYTES << 1) -
+ MD5_DIGEST_SIZE);
+ sess->auth.key_length = auth_xform->key.length;
+ sess->auth.block_size = MD5_BLOCK_SIZE;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ } else
+ return -1; /* HMAC MD5 not supported on CCP */
+ break;
+ case RTE_CRYPTO_AUTH_SHA1:
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA1;
+ sess->auth.ut.sha_type = CCP_SHA_TYPE_1;
+ sess->auth.ctx = (void *)ccp_sha1_init;
+ sess->auth.ctx_len = CCP_SB_BYTES;
+ sess->auth.offset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
+ break;
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ if (sess->auth_opt) {
+ if (auth_xform->key.length > SHA1_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA1_HMAC;
+ sess->auth.offset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
+ sess->auth.block_size = SHA1_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ } else {
+ if (auth_xform->key.length > SHA1_BLOCK_SIZE)
+ return -1;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA1_HMAC;
+ sess->auth.ut.sha_type = CCP_SHA_TYPE_1;
+ sess->auth.ctx_len = CCP_SB_BYTES;
+ sess->auth.offset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
+ sess->auth.block_size = SHA1_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ memset(sess->auth.pre_compute, 0,
+ sess->auth.ctx_len << 1);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ if (generate_partial_hash(sess))
+ return -1;
+ }
+ break;
+ case RTE_CRYPTO_AUTH_SHA224:
+ sess->auth.algo = CCP_AUTH_ALGO_SHA224;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA_TYPE_224;
+ sess->auth.ctx = (void *)ccp_sha224_init;
+ sess->auth.ctx_len = CCP_SB_BYTES;
+ sess->auth.offset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
+ break;
+ case RTE_CRYPTO_AUTH_SHA224_HMAC:
+ if (sess->auth_opt) {
+ if (auth_xform->key.length > SHA224_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA224_HMAC;
+ sess->auth.offset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
+ sess->auth.block_size = SHA224_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ } else {
+ if (auth_xform->key.length > SHA224_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA224_HMAC;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA_TYPE_224;
+ sess->auth.ctx_len = CCP_SB_BYTES;
+ sess->auth.offset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
+ sess->auth.block_size = SHA224_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ memset(sess->auth.pre_compute, 0,
+ sess->auth.ctx_len << 1);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ if (generate_partial_hash(sess))
+ return -1;
+ }
+ break;
+ case RTE_CRYPTO_AUTH_SHA3_224:
+ sess->auth.algo = CCP_AUTH_ALGO_SHA3_224;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA3_TYPE_224;
+ sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+ sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA224_DIGEST_SIZE;
+ break;
+ case RTE_CRYPTO_AUTH_SHA3_224_HMAC:
+ if (auth_xform->key.length > SHA3_224_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA3_224_HMAC;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA3_TYPE_224;
+ sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+ sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA224_DIGEST_SIZE;
+ sess->auth.block_size = SHA3_224_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ memset(sess->auth.pre_compute, 0, 2 * sess->auth.ctx_len);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ if (generate_partial_hash(sess))
+ return -1;
+ break;
+ case RTE_CRYPTO_AUTH_SHA256:
+ sess->auth.algo = CCP_AUTH_ALGO_SHA256;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA_TYPE_256;
+ sess->auth.ctx = (void *)ccp_sha256_init;
+ sess->auth.ctx_len = CCP_SB_BYTES;
+ sess->auth.offset = CCP_SB_BYTES - SHA256_DIGEST_SIZE;
+ break;
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ if (sess->auth_opt) {
+ if (auth_xform->key.length > SHA256_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA256_HMAC;
+ sess->auth.offset = CCP_SB_BYTES - SHA256_DIGEST_SIZE;
+ sess->auth.block_size = SHA256_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ } else {
+ if (auth_xform->key.length > SHA256_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA256_HMAC;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA_TYPE_256;
+ sess->auth.ctx_len = CCP_SB_BYTES;
+ sess->auth.offset = CCP_SB_BYTES - SHA256_DIGEST_SIZE;
+ sess->auth.block_size = SHA256_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ memset(sess->auth.pre_compute, 0,
+ sess->auth.ctx_len << 1);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ if (generate_partial_hash(sess))
+ return -1;
+ }
+ break;
+ case RTE_CRYPTO_AUTH_SHA3_256:
+ sess->auth.algo = CCP_AUTH_ALGO_SHA3_256;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA3_TYPE_256;
+ sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+ sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA256_DIGEST_SIZE;
+ break;
+ case RTE_CRYPTO_AUTH_SHA3_256_HMAC:
+ if (auth_xform->key.length > SHA3_256_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA3_256_HMAC;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA3_TYPE_256;
+ sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+ sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA256_DIGEST_SIZE;
+ sess->auth.block_size = SHA3_256_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ memset(sess->auth.pre_compute, 0, 2 * sess->auth.ctx_len);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ if (generate_partial_hash(sess))
+ return -1;
+ break;
+ case RTE_CRYPTO_AUTH_SHA384:
+ sess->auth.algo = CCP_AUTH_ALGO_SHA384;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA_TYPE_384;
+ sess->auth.ctx = (void *)ccp_sha384_init;
+ sess->auth.ctx_len = CCP_SB_BYTES << 1;
+ sess->auth.offset = (CCP_SB_BYTES << 1) - SHA384_DIGEST_SIZE;
+ break;
+ case RTE_CRYPTO_AUTH_SHA384_HMAC:
+ if (sess->auth_opt) {
+ if (auth_xform->key.length > SHA384_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA384_HMAC;
+ sess->auth.offset = ((CCP_SB_BYTES << 1) -
+ SHA384_DIGEST_SIZE);
+ sess->auth.block_size = SHA384_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ } else {
+ if (auth_xform->key.length > SHA384_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA384_HMAC;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA_TYPE_384;
+ sess->auth.ctx_len = CCP_SB_BYTES << 1;
+ sess->auth.offset = ((CCP_SB_BYTES << 1) -
+ SHA384_DIGEST_SIZE);
+ sess->auth.block_size = SHA384_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ memset(sess->auth.pre_compute, 0,
+ sess->auth.ctx_len << 1);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ if (generate_partial_hash(sess))
+ return -1;
+ }
+ break;
+ case RTE_CRYPTO_AUTH_SHA3_384:
+ sess->auth.algo = CCP_AUTH_ALGO_SHA3_384;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA3_TYPE_384;
+ sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+ sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA384_DIGEST_SIZE;
+ break;
+ case RTE_CRYPTO_AUTH_SHA3_384_HMAC:
+ if (auth_xform->key.length > SHA3_384_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA3_384_HMAC;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA3_TYPE_384;
+ sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+ sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA384_DIGEST_SIZE;
+ sess->auth.block_size = SHA3_384_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ memset(sess->auth.pre_compute, 0, 2 * sess->auth.ctx_len);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ if (generate_partial_hash(sess))
+ return -1;
+ break;
+ case RTE_CRYPTO_AUTH_SHA512:
+ sess->auth.algo = CCP_AUTH_ALGO_SHA512;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA_TYPE_512;
+ sess->auth.ctx = (void *)ccp_sha512_init;
+ sess->auth.ctx_len = CCP_SB_BYTES << 1;
+ sess->auth.offset = (CCP_SB_BYTES << 1) - SHA512_DIGEST_SIZE;
+ break;
+ case RTE_CRYPTO_AUTH_SHA512_HMAC:
+ if (sess->auth_opt) {
+ if (auth_xform->key.length > SHA512_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA512_HMAC;
+ sess->auth.offset = ((CCP_SB_BYTES << 1) -
+ SHA512_DIGEST_SIZE);
+ sess->auth.block_size = SHA512_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ } else {
+ if (auth_xform->key.length > SHA512_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA512_HMAC;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA_TYPE_512;
+ sess->auth.ctx_len = CCP_SB_BYTES << 1;
+ sess->auth.offset = ((CCP_SB_BYTES << 1) -
+ SHA512_DIGEST_SIZE);
+ sess->auth.block_size = SHA512_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ memset(sess->auth.pre_compute, 0,
+ sess->auth.ctx_len << 1);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ if (generate_partial_hash(sess))
+ return -1;
+ }
+ break;
+ case RTE_CRYPTO_AUTH_SHA3_512:
+ sess->auth.algo = CCP_AUTH_ALGO_SHA3_512;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA3_TYPE_512;
+ sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+ sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA512_DIGEST_SIZE;
+ break;
+ case RTE_CRYPTO_AUTH_SHA3_512_HMAC:
+ if (auth_xform->key.length > SHA3_512_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA3_512_HMAC;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA3_TYPE_512;
+ sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+ sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA512_DIGEST_SIZE;
+ sess->auth.block_size = SHA3_512_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ memset(sess->auth.pre_compute, 0, 2 * sess->auth.ctx_len);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ if (generate_partial_hash(sess))
+ return -1;
+ break;
+ case RTE_CRYPTO_AUTH_AES_CMAC:
+ sess->auth.algo = CCP_AUTH_ALGO_AES_CMAC;
+ sess->auth.engine = CCP_ENGINE_AES;
+ sess->auth.um.aes_mode = CCP_AES_MODE_CMAC;
+ sess->auth.key_length = auth_xform->key.length;
+ /* padding and hash result */
+ sess->auth.ctx_len = CCP_SB_BYTES << 1;
+ sess->auth.offset = AES_BLOCK_SIZE;
+ sess->auth.block_size = AES_BLOCK_SIZE;
+ if (sess->auth.key_length == 16)
+ sess->auth.ut.aes_type = CCP_AES_TYPE_128;
+ else if (sess->auth.key_length == 24)
+ sess->auth.ut.aes_type = CCP_AES_TYPE_192;
+ else if (sess->auth.key_length == 32)
+ sess->auth.ut.aes_type = CCP_AES_TYPE_256;
+ else {
+ CCP_LOG_ERR("Invalid CMAC key length");
+ return -1;
+ }
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ sess->auth.key_length);
+ for (i = 0; i < sess->auth.key_length; i++)
+ sess->auth.key_ccp[sess->auth.key_length - i - 1] =
+ sess->auth.key[i];
+ if (generate_cmac_subkeys(sess))
+ return -1;
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported hash algo");
+ return -ENOTSUP;
+ }
+ return 0;
+}
+
+static int
+ccp_configure_session_aead(struct ccp_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ const struct rte_crypto_aead_xform *aead_xform = NULL;
+ size_t i;
+
+ aead_xform = &xform->aead;
+
+ sess->cipher.key_length = aead_xform->key.length;
+ rte_memcpy(sess->cipher.key, aead_xform->key.data,
+ aead_xform->key.length);
+
+ if (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
+ sess->cipher.dir = CCP_CIPHER_DIR_ENCRYPT;
+ sess->auth.op = CCP_AUTH_OP_GENERATE;
+ } else {
+ sess->cipher.dir = CCP_CIPHER_DIR_DECRYPT;
+ sess->auth.op = CCP_AUTH_OP_VERIFY;
+ }
+ sess->aead_algo = aead_xform->algo;
+ sess->auth.aad_length = aead_xform->aad_length;
+ sess->auth.digest_length = aead_xform->digest_length;
+
+ /* set iv parameters */
+ sess->iv.offset = aead_xform->iv.offset;
+ sess->iv.length = aead_xform->iv.length;
+
+ switch (aead_xform->algo) {
+ case RTE_CRYPTO_AEAD_AES_GCM:
+ sess->cipher.algo = CCP_CIPHER_ALGO_AES_GCM;
+ sess->cipher.um.aes_mode = CCP_AES_MODE_GCTR;
+ sess->cipher.engine = CCP_ENGINE_AES;
+ if (sess->cipher.key_length == 16)
+ sess->cipher.ut.aes_type = CCP_AES_TYPE_128;
+ else if (sess->cipher.key_length == 24)
+ sess->cipher.ut.aes_type = CCP_AES_TYPE_192;
+ else if (sess->cipher.key_length == 32)
+ sess->cipher.ut.aes_type = CCP_AES_TYPE_256;
+ else {
+ CCP_LOG_ERR("Invalid aead key length");
+ return -1;
+ }
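+		/*
+		 * Note: key_ccp holds a byte-reversed copy of the key; this
+		 * is the buffer whose physical address is later programmed
+		 * as the descriptor KEY pointer (key_phys below).
+		 */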
+ for (i = 0; i < sess->cipher.key_length; i++)
+ sess->cipher.key_ccp[sess->cipher.key_length - i - 1] =
+ sess->cipher.key[i];
+ sess->auth.algo = CCP_AUTH_ALGO_AES_GCM;
+ sess->auth.engine = CCP_ENGINE_AES;
+ sess->auth.um.aes_mode = CCP_AES_MODE_GHASH;
+ sess->auth.ctx_len = CCP_SB_BYTES;
+ sess->auth.offset = 0;
+ sess->auth.block_size = AES_BLOCK_SIZE;
+ sess->cmd_id = CCP_CMD_COMBINED;
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported aead algo");
+ return -ENOTSUP;
+ }
+ sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce);
+ sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp);
+ return 0;
+}
+
+int
+ccp_set_session_parameters(struct ccp_session *sess,
+ const struct rte_crypto_sym_xform *xform,
+ struct ccp_private *internals)
+{
+ const struct rte_crypto_sym_xform *cipher_xform = NULL;
+ const struct rte_crypto_sym_xform *auth_xform = NULL;
+ const struct rte_crypto_sym_xform *aead_xform = NULL;
+ int ret = 0;
+
+ sess->auth_opt = internals->auth_opt;
+ sess->cmd_id = ccp_get_cmd_id(xform);
+
+ switch (sess->cmd_id) {
+ case CCP_CMD_CIPHER:
+ cipher_xform = xform;
+ break;
+ case CCP_CMD_AUTH:
+ auth_xform = xform;
+ break;
+ case CCP_CMD_CIPHER_HASH:
+ cipher_xform = xform;
+ auth_xform = xform->next;
+ break;
+ case CCP_CMD_HASH_CIPHER:
+ auth_xform = xform;
+ cipher_xform = xform->next;
+ break;
+ case CCP_CMD_COMBINED:
+ aead_xform = xform;
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported cmd_id");
+ return -1;
+ }
+
+ /* Default IV length = 0 */
+ sess->iv.length = 0;
+ if (cipher_xform) {
+ ret = ccp_configure_session_cipher(sess, cipher_xform);
+ if (ret != 0) {
+ CCP_LOG_ERR("Invalid/unsupported cipher parameters");
+ return ret;
+ }
+ }
+ if (auth_xform) {
+ ret = ccp_configure_session_auth(sess, auth_xform);
+ if (ret != 0) {
+ CCP_LOG_ERR("Invalid/unsupported auth parameters");
+ return ret;
+ }
+ }
+ if (aead_xform) {
+ ret = ccp_configure_session_aead(sess, aead_xform);
+ if (ret != 0) {
+ CCP_LOG_ERR("Invalid/unsupported aead parameters");
+ return ret;
+ }
+ }
+ return ret;
+}
+
+/* calculate CCP descriptors requirement */
+static inline int
+ccp_cipher_slot(struct ccp_session *session)
+{
+ int count = 0;
+
+ switch (session->cipher.algo) {
+ case CCP_CIPHER_ALGO_AES_CBC:
+ count = 2;
+ /**< op + passthrough for iv */
+ break;
+ case CCP_CIPHER_ALGO_AES_ECB:
+ count = 1;
+		/**< only op */
+ break;
+ case CCP_CIPHER_ALGO_AES_CTR:
+ count = 2;
+ /**< op + passthrough for iv */
+ break;
+ case CCP_CIPHER_ALGO_3DES_CBC:
+ count = 2;
+ /**< op + passthrough for iv */
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported cipher algo %d",
+ session->cipher.algo);
+ }
+ return count;
+}
+
+static inline int
+ccp_auth_slot(struct ccp_session *session)
+{
+ int count = 0;
+
+ switch (session->auth.algo) {
+ case CCP_AUTH_ALGO_SHA1:
+ case CCP_AUTH_ALGO_SHA224:
+ case CCP_AUTH_ALGO_SHA256:
+ case CCP_AUTH_ALGO_SHA384:
+ case CCP_AUTH_ALGO_SHA512:
+ count = 3;
+		/**< op + LSB passthrough copy to/from */
+ break;
+ case CCP_AUTH_ALGO_MD5_HMAC:
+ break;
+ case CCP_AUTH_ALGO_SHA1_HMAC:
+ case CCP_AUTH_ALGO_SHA224_HMAC:
+ case CCP_AUTH_ALGO_SHA256_HMAC:
+ if (session->auth_opt == 0)
+ count = 6;
+ break;
+ case CCP_AUTH_ALGO_SHA384_HMAC:
+ case CCP_AUTH_ALGO_SHA512_HMAC:
+		/**
+		 * 1. Load PHash1 = H(k ^ ipad) to the LSB
+		 * 2. Generate IHash = H(message) using PHash1 as the
+		 *    initial hash value
+		 * 3. Retrieve IHash (2 slots for SHA-384/512)
+		 * 4. Load PHash2 = H(k ^ opad) to the LSB
+		 * 5. Generate FHash = H(IHash) using PHash2 as the
+		 *    initial hash value
+		 * 6. Retrieve the HMAC output from the LSB to host memory
+		 */
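+		/*
+		 * Reference (standard HMAC construction, RFC 2104):
+		 *   HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m))
+		 * PHash1/PHash2 are the states after hashing the single
+		 * ipad/opad block, precomputed by generate_partial_hash().
+		 * SHA-384/512 need two LSB slots because their 64-byte
+		 * state exceeds one 32-byte CCP_SB_BYTES slot.
+		 */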
+ if (session->auth_opt == 0)
+ count = 7;
+ break;
+ case CCP_AUTH_ALGO_SHA3_224:
+ case CCP_AUTH_ALGO_SHA3_256:
+ case CCP_AUTH_ALGO_SHA3_384:
+ case CCP_AUTH_ALGO_SHA3_512:
+ count = 1;
+		/**< only op; ctx and dst are in host memory */
+ break;
+ case CCP_AUTH_ALGO_SHA3_224_HMAC:
+ case CCP_AUTH_ALGO_SHA3_256_HMAC:
+ count = 3;
+ break;
+ case CCP_AUTH_ALGO_SHA3_384_HMAC:
+ case CCP_AUTH_ALGO_SHA3_512_HMAC:
+ count = 4;
+		/**
+		 * 1. Op to perform IHash
+		 * 2. Retrieve the intermediate result from the LSB to
+		 *    host memory (two passthrough copies for 384/512)
+		 * 3. Perform the final hash
+		 */
+ break;
+ case CCP_AUTH_ALGO_AES_CMAC:
+ count = 4;
+		/**
+		 * 1. Passthrough to load the LSB slot
+		 *    (k1/k2 in bits 255:128 with the IV in bits 127:0)
+		 * 2. AES-CMAC op, with an extra descriptor in the
+		 *    padding (non block-aligned) case
+		 * 3. Passthrough to retrieve the result
+		 */
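+		/*
+		 * Reference (NIST SP 800-38B): the last block is processed
+		 * as M_n ^ K1 when the data is block aligned, otherwise as
+		 * (M_n || 0x80 || 0...0) ^ K2; this is why the non-aligned
+		 * path needs the extra descriptor and the CMAC_PAD_VALUE
+		 * (0x80) padding byte.
+		 */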
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported auth algo %d",
+ session->auth.algo);
+ }
+
+ return count;
+}
+
+static int
+ccp_aead_slot(struct ccp_session *session)
+{
+ int count = 0;
+
+ switch (session->aead_algo) {
+ case RTE_CRYPTO_AEAD_AES_GCM:
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported aead algo %d",
+ session->aead_algo);
+ }
+ switch (session->auth.algo) {
+ case CCP_AUTH_ALGO_AES_GCM:
+ count = 5;
+		/**
+		 * 1. Passthrough to load the IV
+		 * 2. GHASH over the AAD
+		 * 3. GCTR over the plaintext/ciphertext
+		 * 4. Passthrough to reload the IV
+		 * 5. GHASH-final to produce the tag
+		 */
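+		/*
+		 * Reference (NIST SP 800-38D):
+		 *   S = GHASH_H(A || C || [len(A)]64 || [len(C)]64)
+		 *   T = GCTR_K(J0, S)
+		 * which maps to steps 2 (AAD), 3 (GCTR) and 5 (final
+		 * GHASH over the length block) above.
+		 */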
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported combined auth ALGO %d",
+ session->auth.algo);
+ }
+ return count;
+}
+
+int
+ccp_compute_slot_count(struct ccp_session *session)
+{
+ int count = 0;
+
+ switch (session->cmd_id) {
+ case CCP_CMD_CIPHER:
+ count = ccp_cipher_slot(session);
+ break;
+ case CCP_CMD_AUTH:
+ count = ccp_auth_slot(session);
+ break;
+ case CCP_CMD_CIPHER_HASH:
+ case CCP_CMD_HASH_CIPHER:
+ count = ccp_cipher_slot(session);
+ count += ccp_auth_slot(session);
+ break;
+ case CCP_CMD_COMBINED:
+ count = ccp_aead_slot(session);
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported cmd_id");
+ }
+
+ return count;
+}
+
+static uint8_t
+algo_select(int sessalgo,
+ const EVP_MD **algo)
+{
+ int res = 0;
+
+ switch (sessalgo) {
+ case CCP_AUTH_ALGO_MD5_HMAC:
+ *algo = EVP_md5();
+ break;
+ case CCP_AUTH_ALGO_SHA1_HMAC:
+ *algo = EVP_sha1();
+ break;
+ case CCP_AUTH_ALGO_SHA224_HMAC:
+ *algo = EVP_sha224();
+ break;
+ case CCP_AUTH_ALGO_SHA256_HMAC:
+ *algo = EVP_sha256();
+ break;
+ case CCP_AUTH_ALGO_SHA384_HMAC:
+ *algo = EVP_sha384();
+ break;
+ case CCP_AUTH_ALGO_SHA512_HMAC:
+ *algo = EVP_sha512();
+ break;
+ default:
+ res = -EINVAL;
+ break;
+ }
+ return res;
+}
+
+static int
+process_cpu_auth_hmac(uint8_t *src, uint8_t *dst,
+ __rte_unused uint8_t *iv,
+ EVP_PKEY *pkey,
+ int srclen,
+ EVP_MD_CTX *ctx,
+ const EVP_MD *algo,
+ uint16_t d_len)
+{
+ size_t dstlen;
+ unsigned char temp_dst[64];
+
+ if (EVP_DigestSignInit(ctx, NULL, algo, NULL, pkey) <= 0)
+ goto process_auth_err;
+
+ if (EVP_DigestSignUpdate(ctx, (char *)src, srclen) <= 0)
+ goto process_auth_err;
+
+ if (EVP_DigestSignFinal(ctx, temp_dst, &dstlen) <= 0)
+ goto process_auth_err;
+
+ memcpy(dst, temp_dst, d_len);
+ return 0;
+process_auth_err:
+ CCP_LOG_ERR("Process cpu auth failed");
+ return -EINVAL;
+}
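+/*
+ * Note: with a key created via EVP_PKEY_new_mac_key(EVP_PKEY_HMAC, ...),
+ * the EVP_DigestSignInit/Update/Final sequence above computes a plain
+ * HMAC over src; only the first d_len bytes of the 64-byte temp buffer
+ * (large enough for SHA-512) are copied out as the digest.
+ */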
+
+static int cpu_crypto_auth(struct ccp_qp *qp,
+ struct rte_crypto_op *op,
+ struct ccp_session *sess,
+ EVP_MD_CTX *ctx)
+{
+ uint8_t *src, *dst;
+ int srclen, status;
+ struct rte_mbuf *mbuf_src, *mbuf_dst;
+ const EVP_MD *algo = NULL;
+ EVP_PKEY *pkey;
+
+ algo_select(sess->auth.algo, &algo);
+ pkey = EVP_PKEY_new_mac_key(EVP_PKEY_HMAC, NULL, sess->auth.key,
+ sess->auth.key_length);
+ mbuf_src = op->sym->m_src;
+ mbuf_dst = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src;
+ srclen = op->sym->auth.data.length;
+ src = rte_pktmbuf_mtod_offset(mbuf_src, uint8_t *,
+ op->sym->auth.data.offset);
+
+ if (sess->auth.op == CCP_AUTH_OP_VERIFY) {
+ dst = qp->temp_digest;
+ } else {
+ dst = op->sym->auth.digest.data;
+ if (dst == NULL) {
+ dst = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
+ op->sym->auth.data.offset +
+ sess->auth.digest_length);
+ }
+ }
+ status = process_cpu_auth_hmac(src, dst, NULL,
+ pkey, srclen,
+ ctx,
+ algo,
+ sess->auth.digest_length);
+ if (status) {
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return status;
+ }
+
+ if (sess->auth.op == CCP_AUTH_OP_VERIFY) {
+ if (memcmp(dst, op->sym->auth.digest.data,
+ sess->auth.digest_length) != 0) {
+ op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ } else {
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ }
+ } else {
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ }
+ EVP_PKEY_free(pkey);
+ return 0;
+}
+
+static void
+ccp_perform_passthru(struct ccp_passthru *pst,
+ struct ccp_queue *cmd_q)
+{
+ struct ccp_desc *desc;
+ union ccp_function function;
+
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_PASSTHRU;
+
+ CCP_CMD_SOC(desc) = 0;
+ CCP_CMD_IOC(desc) = 0;
+ CCP_CMD_INIT(desc) = 0;
+ CCP_CMD_EOM(desc) = 0;
+ CCP_CMD_PROT(desc) = 0;
+
+ function.raw = 0;
+ CCP_PT_BYTESWAP(&function) = pst->byte_swap;
+ CCP_PT_BITWISE(&function) = pst->bit_mod;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+
+ CCP_CMD_LEN(desc) = pst->len;
+
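+	/*
+	 * dir == 1: copy from system memory into an SB (LSB) slot;
+	 * dir == 0: copy an SB slot back out to system memory.
+	 */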
+ if (pst->dir) {
+ CCP_CMD_SRC_LO(desc) = (uint32_t)(pst->src_addr);
+ CCP_CMD_SRC_HI(desc) = high32_value(pst->src_addr);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_DST_LO(desc) = (uint32_t)(pst->dest_addr);
+ CCP_CMD_DST_HI(desc) = 0;
+ CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SB;
+
+ if (pst->bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
+ CCP_CMD_LSB_ID(desc) = cmd_q->sb_key;
+ } else {
+
+ CCP_CMD_SRC_LO(desc) = (uint32_t)(pst->src_addr);
+ CCP_CMD_SRC_HI(desc) = 0;
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SB;
+
+ CCP_CMD_DST_LO(desc) = (uint32_t)(pst->dest_addr);
+ CCP_CMD_DST_HI(desc) = high32_value(pst->dest_addr);
+ CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+ }
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+}
+
+static int
+ccp_perform_hmac(struct rte_crypto_op *op,
+ struct ccp_queue *cmd_q)
+{
+
+ struct ccp_session *session;
+ union ccp_function function;
+ struct ccp_desc *desc;
+ uint32_t tail;
+ phys_addr_t src_addr, dest_addr, dest_addr_t;
+ struct ccp_passthru pst;
+ uint64_t auth_msg_bits;
+ void *append_ptr;
+ uint8_t *addr;
+
+ session = (struct ccp_session *)get_sym_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+ addr = session->auth.pre_compute;
+
+ src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+ op->sym->auth.data.offset);
+ append_ptr = (void *)rte_pktmbuf_append(op->sym->m_src,
+ session->auth.ctx_len);
+ dest_addr = (phys_addr_t)rte_mem_virt2phy(append_ptr);
+ dest_addr_t = dest_addr;
+
+	/** Load PHash1 to the LSB */
+ pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)addr);
+ pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+ pst.len = session->auth.ctx_len;
+ pst.dir = 1;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+ ccp_perform_passthru(&pst, cmd_q);
+
+	/** SHA engine command descriptor for the intermediate hash */
+
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+ memset(desc, 0, Q_DESC_SIZE);
+
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
+
+ CCP_CMD_SOC(desc) = 0;
+ CCP_CMD_IOC(desc) = 0;
+ CCP_CMD_INIT(desc) = 1;
+ CCP_CMD_EOM(desc) = 1;
+ CCP_CMD_PROT(desc) = 0;
+
+ function.raw = 0;
+ CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+
+ CCP_CMD_LEN(desc) = op->sym->auth.data.length;
+ auth_msg_bits = (op->sym->auth.data.length +
+ session->auth.block_size) * 8;
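+	/*
+	 * The reported bit length must cover the full HMAC inner message,
+	 * i.e. the already-hashed (k ^ ipad) block plus the data, hence
+	 * the extra block_size bytes.
+	 */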
+
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+ CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_LSB_ID(desc) = cmd_q->sb_sha;
+ CCP_CMD_SHA_LO(desc) = ((uint32_t)auth_msg_bits);
+ CCP_CMD_SHA_HI(desc) = high32_value(auth_msg_bits);
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+ rte_wmb();
+
+ tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+
+ /* Intermediate Hash value retrieve */
+ if ((session->auth.ut.sha_type == CCP_SHA_TYPE_384) ||
+ (session->auth.ut.sha_type == CCP_SHA_TYPE_512)) {
+
+ pst.src_addr =
+ (phys_addr_t)((cmd_q->sb_sha + 1) * CCP_SB_BYTES);
+ pst.dest_addr = dest_addr_t;
+ pst.len = CCP_SB_BYTES;
+ pst.dir = 0;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+ pst.dest_addr = dest_addr_t + CCP_SB_BYTES;
+ pst.len = CCP_SB_BYTES;
+ pst.dir = 0;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ } else {
+ pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+ pst.dest_addr = dest_addr_t;
+ pst.len = session->auth.ctx_len;
+ pst.dir = 0;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ }
+
+	/** Load PHash2 to the LSB */
+ addr += session->auth.ctx_len;
+ pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)addr);
+ pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+ pst.len = session->auth.ctx_len;
+ pst.dir = 1;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+ ccp_perform_passthru(&pst, cmd_q);
+
+	/** SHA engine command descriptor for the final hash */
+ dest_addr_t += session->auth.offset;
+
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+ memset(desc, 0, Q_DESC_SIZE);
+
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
+
+ CCP_CMD_SOC(desc) = 0;
+ CCP_CMD_IOC(desc) = 0;
+ CCP_CMD_INIT(desc) = 1;
+ CCP_CMD_EOM(desc) = 1;
+ CCP_CMD_PROT(desc) = 0;
+
+ function.raw = 0;
+ CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+
+ CCP_CMD_LEN(desc) = (session->auth.ctx_len -
+ session->auth.offset);
+ auth_msg_bits = (session->auth.block_size +
+ session->auth.ctx_len -
+ session->auth.offset) * 8;
+
+ CCP_CMD_SRC_LO(desc) = (uint32_t)(dest_addr_t);
+ CCP_CMD_SRC_HI(desc) = high32_value(dest_addr_t);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_LSB_ID(desc) = cmd_q->sb_sha;
+ CCP_CMD_SHA_LO(desc) = ((uint32_t)auth_msg_bits);
+ CCP_CMD_SHA_HI(desc) = high32_value(auth_msg_bits);
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+ rte_wmb();
+
+ tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+
+ /* Retrieve hmac output */
+ pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+ pst.dest_addr = dest_addr;
+ pst.len = session->auth.ctx_len;
+ pst.dir = 0;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ if ((session->auth.ut.sha_type == CCP_SHA_TYPE_384) ||
+ (session->auth.ut.sha_type == CCP_SHA_TYPE_512))
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+ else
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return 0;
+
+}
+
+static int
+ccp_perform_sha(struct rte_crypto_op *op,
+ struct ccp_queue *cmd_q)
+{
+ struct ccp_session *session;
+ union ccp_function function;
+ struct ccp_desc *desc;
+ uint32_t tail;
+ phys_addr_t src_addr, dest_addr;
+ struct ccp_passthru pst;
+ void *append_ptr;
+ uint64_t auth_msg_bits;
+
+ session = (struct ccp_session *)get_sym_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+
+ src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+ op->sym->auth.data.offset);
+
+ append_ptr = (void *)rte_pktmbuf_append(op->sym->m_src,
+ session->auth.ctx_len);
+ dest_addr = (phys_addr_t)rte_mem_virt2phy(append_ptr);
+
+	/** Passthrough of the SHA context */
+
+ pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)
+ session->auth.ctx);
+ pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+ pst.len = session->auth.ctx_len;
+ pst.dir = 1;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+ ccp_perform_passthru(&pst, cmd_q);
+
+	/** Prepare the SHA command descriptor */
+
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+ memset(desc, 0, Q_DESC_SIZE);
+
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
+
+ CCP_CMD_SOC(desc) = 0;
+ CCP_CMD_IOC(desc) = 0;
+ CCP_CMD_INIT(desc) = 1;
+ CCP_CMD_EOM(desc) = 1;
+ CCP_CMD_PROT(desc) = 0;
+
+ function.raw = 0;
+ CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+
+ CCP_CMD_LEN(desc) = op->sym->auth.data.length;
+ auth_msg_bits = op->sym->auth.data.length * 8;
+
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+ CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_LSB_ID(desc) = cmd_q->sb_sha;
+ CCP_CMD_SHA_LO(desc) = ((uint32_t)auth_msg_bits);
+ CCP_CMD_SHA_HI(desc) = high32_value(auth_msg_bits);
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+ rte_wmb();
+
+ tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+
+ /* Hash value retrieve */
+ pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+ pst.dest_addr = dest_addr;
+ pst.len = session->auth.ctx_len;
+ pst.dir = 0;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ if ((session->auth.ut.sha_type == CCP_SHA_TYPE_384) ||
+ (session->auth.ut.sha_type == CCP_SHA_TYPE_512))
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+ else
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return 0;
+
+}
+
+static int
+ccp_perform_sha3_hmac(struct rte_crypto_op *op,
+ struct ccp_queue *cmd_q)
+{
+ struct ccp_session *session;
+ struct ccp_passthru pst;
+ union ccp_function function;
+ struct ccp_desc *desc;
+ uint8_t *append_ptr;
+ uint32_t tail;
+ phys_addr_t src_addr, dest_addr, ctx_paddr, dest_addr_t;
+
+ session = (struct ccp_session *)get_sym_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+
+ src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+ op->sym->auth.data.offset);
+ append_ptr = (uint8_t *)rte_pktmbuf_append(op->sym->m_src,
+ session->auth.ctx_len);
+ if (!append_ptr) {
+		CCP_LOG_ERR("CCP MBUF append failed");
+ return -1;
+ }
+ dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
+ dest_addr_t = dest_addr + (session->auth.ctx_len / 2);
+ ctx_paddr = (phys_addr_t)rte_mem_virt2phy((void
+ *)session->auth.pre_compute);
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+ memset(desc, 0, Q_DESC_SIZE);
+
+	/* desc1 for the SHA3 IHash operation */
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
+ CCP_CMD_INIT(desc) = 1;
+ CCP_CMD_EOM(desc) = 1;
+
+ function.raw = 0;
+ CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+ CCP_CMD_LEN(desc) = op->sym->auth.data.length;
+
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+ CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_DST_LO(desc) = (cmd_q->sb_sha * CCP_SB_BYTES);
+ CCP_CMD_DST_HI(desc) = 0;
+ CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SB;
+
+ CCP_CMD_KEY_LO(desc) = ((uint32_t)ctx_paddr);
+ CCP_CMD_KEY_HI(desc) = high32_value(ctx_paddr);
+ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+ rte_wmb();
+ tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+
+ /* Intermediate Hash value retrieve */
+ if ((session->auth.ut.sha_type == CCP_SHA3_TYPE_384) ||
+ (session->auth.ut.sha_type == CCP_SHA3_TYPE_512)) {
+
+ pst.src_addr =
+ (phys_addr_t)((cmd_q->sb_sha + 1) * CCP_SB_BYTES);
+ pst.dest_addr = dest_addr_t;
+ pst.len = CCP_SB_BYTES;
+ pst.dir = 0;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+ pst.dest_addr = dest_addr_t + CCP_SB_BYTES;
+ pst.len = CCP_SB_BYTES;
+ pst.dir = 0;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ } else {
+ pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+ pst.dest_addr = dest_addr_t;
+ pst.len = CCP_SB_BYTES;
+ pst.dir = 0;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+ ccp_perform_passthru(&pst, cmd_q);
+ }
+
+	/** SHA engine command descriptor for the final hash */
+ ctx_paddr += CCP_SHA3_CTX_SIZE;
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+ memset(desc, 0, Q_DESC_SIZE);
+
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
+ CCP_CMD_INIT(desc) = 1;
+ CCP_CMD_EOM(desc) = 1;
+
+ function.raw = 0;
+ CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+
+ if (session->auth.ut.sha_type == CCP_SHA3_TYPE_224) {
+ dest_addr_t += (CCP_SB_BYTES - SHA224_DIGEST_SIZE);
+ CCP_CMD_LEN(desc) = SHA224_DIGEST_SIZE;
+ } else if (session->auth.ut.sha_type == CCP_SHA3_TYPE_256) {
+ CCP_CMD_LEN(desc) = SHA256_DIGEST_SIZE;
+ } else if (session->auth.ut.sha_type == CCP_SHA3_TYPE_384) {
+ dest_addr_t += (2 * CCP_SB_BYTES - SHA384_DIGEST_SIZE);
+ CCP_CMD_LEN(desc) = SHA384_DIGEST_SIZE;
+ } else {
+ CCP_CMD_LEN(desc) = SHA512_DIGEST_SIZE;
+ }
+
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)dest_addr_t);
+ CCP_CMD_SRC_HI(desc) = high32_value(dest_addr_t);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_DST_LO(desc) = (uint32_t)dest_addr;
+ CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
+ CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_KEY_LO(desc) = ((uint32_t)ctx_paddr);
+ CCP_CMD_KEY_HI(desc) = high32_value(ctx_paddr);
+ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+ rte_wmb();
+ tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return 0;
+}
+
+static int
+ccp_perform_sha3(struct rte_crypto_op *op,
+ struct ccp_queue *cmd_q)
+{
+ struct ccp_session *session;
+ union ccp_function function;
+ struct ccp_desc *desc;
+ uint8_t *ctx_addr, *append_ptr;
+ uint32_t tail;
+ phys_addr_t src_addr, dest_addr, ctx_paddr;
+
+ session = (struct ccp_session *)get_sym_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+
+ src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+ op->sym->auth.data.offset);
+ append_ptr = (uint8_t *)rte_pktmbuf_append(op->sym->m_src,
+ session->auth.ctx_len);
+ if (!append_ptr) {
+		CCP_LOG_ERR("CCP MBUF append failed");
+ return -1;
+ }
+ dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
+ ctx_addr = session->auth.sha3_ctx;
+ ctx_paddr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
+
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+ memset(desc, 0, Q_DESC_SIZE);
+
+ /* prepare desc for SHA3 operation */
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
+ CCP_CMD_INIT(desc) = 1;
+ CCP_CMD_EOM(desc) = 1;
+
+ function.raw = 0;
+ CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+
+ CCP_CMD_LEN(desc) = op->sym->auth.data.length;
+
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+ CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
+ CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
+ CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_KEY_LO(desc) = ((uint32_t)ctx_paddr);
+ CCP_CMD_KEY_HI(desc) = high32_value(ctx_paddr);
+ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+ rte_wmb();
+
+ tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return 0;
+}
+
+static int
+ccp_perform_aes_cmac(struct rte_crypto_op *op,
+ struct ccp_queue *cmd_q)
+{
+ struct ccp_session *session;
+ union ccp_function function;
+ struct ccp_passthru pst;
+ struct ccp_desc *desc;
+ uint32_t tail;
+ uint8_t *src_tb, *append_ptr, *ctx_addr;
+ phys_addr_t src_addr, dest_addr, key_addr;
+ int length, non_align_len;
+
+ session = (struct ccp_session *)get_sym_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+ key_addr = rte_mem_virt2phy(session->auth.key_ccp);
+
+ src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+ op->sym->auth.data.offset);
+ append_ptr = (uint8_t *)rte_pktmbuf_append(op->sym->m_src,
+ session->auth.ctx_len);
+ dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
+
+ function.raw = 0;
+ CCP_AES_ENCRYPT(&function) = CCP_CIPHER_DIR_ENCRYPT;
+ CCP_AES_MODE(&function) = session->auth.um.aes_mode;
+ CCP_AES_TYPE(&function) = session->auth.ut.aes_type;
+
+ if (op->sym->auth.data.length % session->auth.block_size == 0) {
+
+ ctx_addr = session->auth.pre_compute;
+ memset(ctx_addr, 0, AES_BLOCK_SIZE);
+ pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
+ pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+ pst.len = CCP_SB_BYTES;
+ pst.dir = 1;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+ memset(desc, 0, Q_DESC_SIZE);
+
+ /* prepare desc for aes-cmac command */
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+ CCP_CMD_EOM(desc) = 1;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+
+ CCP_CMD_LEN(desc) = op->sym->auth.data.length;
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+ CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+ CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+ CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+ rte_wmb();
+
+ tail =
+ (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+ } else {
+ ctx_addr = session->auth.pre_compute + CCP_SB_BYTES;
+ memset(ctx_addr, 0, AES_BLOCK_SIZE);
+ pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
+ pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+ pst.len = CCP_SB_BYTES;
+ pst.dir = 1;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ length = (op->sym->auth.data.length / AES_BLOCK_SIZE);
+ length *= AES_BLOCK_SIZE;
+ non_align_len = op->sym->auth.data.length - length;
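+		/*
+		 * Split the data into the block-aligned part (length) and
+		 * the trailing partial block (non_align_len); the partial
+		 * block is copied out below and padded with 0x80 followed
+		 * by zeros, as required for the CMAC last-block padding.
+		 */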
+ /* prepare desc for aes-cmac command */
+		/* Command 1 */
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+ memset(desc, 0, Q_DESC_SIZE);
+
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+ CCP_CMD_INIT(desc) = 1;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+
+ CCP_CMD_LEN(desc) = length;
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+ CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+ CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+ CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+		/* Command 2 */
+ append_ptr = append_ptr + CCP_SB_BYTES;
+ memset(append_ptr, 0, AES_BLOCK_SIZE);
+ src_tb = rte_pktmbuf_mtod_offset(op->sym->m_src,
+ uint8_t *,
+ op->sym->auth.data.offset +
+ length);
+ rte_memcpy(append_ptr, src_tb, non_align_len);
+ append_ptr[non_align_len] = CMAC_PAD_VALUE;
+
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+ memset(desc, 0, Q_DESC_SIZE);
+
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+ CCP_CMD_EOM(desc) = 1;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+ CCP_CMD_LEN(desc) = AES_BLOCK_SIZE;
+
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)(dest_addr + CCP_SB_BYTES));
+ CCP_CMD_SRC_HI(desc) = high32_value(dest_addr + CCP_SB_BYTES);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+ CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+ CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+ rte_wmb();
+ tail =
+ (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+ }
+ /* Retrieve result */
+ pst.dest_addr = dest_addr;
+ pst.src_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+ pst.len = CCP_SB_BYTES;
+ pst.dir = 0;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return 0;
+}
+
+static int
+ccp_perform_aes(struct rte_crypto_op *op,
+ struct ccp_queue *cmd_q,
+ struct ccp_batch_info *b_info)
+{
+ struct ccp_session *session;
+ union ccp_function function;
+ uint8_t *lsb_buf;
+ struct ccp_passthru pst = {0};
+ struct ccp_desc *desc;
+ phys_addr_t src_addr, dest_addr, key_addr;
+ uint8_t *iv;
+
+ session = (struct ccp_session *)get_sym_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+ function.raw = 0;
+
+ iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset);
+ if (session->cipher.um.aes_mode != CCP_AES_MODE_ECB) {
+ if (session->cipher.um.aes_mode == CCP_AES_MODE_CTR) {
+ rte_memcpy(session->cipher.nonce + AES_BLOCK_SIZE,
+ iv, session->iv.length);
+ pst.src_addr = (phys_addr_t)session->cipher.nonce_phys;
+ CCP_AES_SIZE(&function) = 0x1F;
+ } else {
+ lsb_buf =
+ &(b_info->lsb_buf[b_info->lsb_buf_idx*CCP_SB_BYTES]);
+ rte_memcpy(lsb_buf +
+ (CCP_SB_BYTES - session->iv.length),
+ iv, session->iv.length);
+ pst.src_addr = b_info->lsb_buf_phys +
+ (b_info->lsb_buf_idx * CCP_SB_BYTES);
+ b_info->lsb_buf_idx++;
+ }
+
+ pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+ pst.len = CCP_SB_BYTES;
+ pst.dir = 1;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+ ccp_perform_passthru(&pst, cmd_q);
+ }
+
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+
+ src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+ op->sym->cipher.data.offset);
+ if (likely(op->sym->m_dst != NULL))
+ dest_addr = rte_pktmbuf_mtophys_offset(op->sym->m_dst,
+ op->sym->cipher.data.offset);
+ else
+ dest_addr = src_addr;
+ key_addr = session->cipher.key_phys;
+
+ /* prepare desc for aes command */
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+ CCP_CMD_INIT(desc) = 1;
+ CCP_CMD_EOM(desc) = 1;
+
+ CCP_AES_ENCRYPT(&function) = session->cipher.dir;
+ CCP_AES_MODE(&function) = session->cipher.um.aes_mode;
+ CCP_AES_TYPE(&function) = session->cipher.ut.aes_type;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+
+ CCP_CMD_LEN(desc) = op->sym->cipher.data.length;
+
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+ CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
+ CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
+ CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+ CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ if (session->cipher.um.aes_mode != CCP_AES_MODE_ECB)
+ CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return 0;
+}
+
+static int
+ccp_perform_3des(struct rte_crypto_op *op,
+ struct ccp_queue *cmd_q,
+ struct ccp_batch_info *b_info)
+{
+ struct ccp_session *session;
+ union ccp_function function;
+ unsigned char *lsb_buf;
+ struct ccp_passthru pst;
+ struct ccp_desc *desc;
+ uint32_t tail;
+ uint8_t *iv;
+ phys_addr_t src_addr, dest_addr, key_addr;
+
+ session = (struct ccp_session *)get_sym_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+
+ iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset);
+ switch (session->cipher.um.des_mode) {
+ case CCP_DES_MODE_CBC:
+ lsb_buf = &(b_info->lsb_buf[b_info->lsb_buf_idx*CCP_SB_BYTES]);
+ b_info->lsb_buf_idx++;
+
+ rte_memcpy(lsb_buf + (CCP_SB_BYTES - session->iv.length),
+ iv, session->iv.length);
+
+ pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *) lsb_buf);
+ pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+ pst.len = CCP_SB_BYTES;
+ pst.dir = 1;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+ ccp_perform_passthru(&pst, cmd_q);
+ break;
+ case CCP_DES_MODE_CFB:
+ case CCP_DES_MODE_ECB:
+ CCP_LOG_ERR("Unsupported DES cipher mode");
+ return -ENOTSUP;
+ }
+
+ src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+ op->sym->cipher.data.offset);
+ if (unlikely(op->sym->m_dst != NULL))
+ dest_addr =
+ rte_pktmbuf_mtophys_offset(op->sym->m_dst,
+ op->sym->cipher.data.offset);
+ else
+ dest_addr = src_addr;
+
+ key_addr = rte_mem_virt2phy(session->cipher.key_ccp);
+
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+
+ memset(desc, 0, Q_DESC_SIZE);
+
+ /* prepare desc for des command */
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_3DES;
+
+ CCP_CMD_SOC(desc) = 0;
+ CCP_CMD_IOC(desc) = 0;
+ CCP_CMD_INIT(desc) = 1;
+ CCP_CMD_EOM(desc) = 1;
+ CCP_CMD_PROT(desc) = 0;
+
+ function.raw = 0;
+ CCP_DES_ENCRYPT(&function) = session->cipher.dir;
+ CCP_DES_MODE(&function) = session->cipher.um.des_mode;
+ CCP_DES_TYPE(&function) = session->cipher.ut.des_type;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+
+ CCP_CMD_LEN(desc) = op->sym->cipher.data.length;
+
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+ CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
+ CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
+ CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+ CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ if (session->cipher.um.des_mode)
+ CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+ rte_wmb();
+
+ /* Write the new tail address back to the queue register */
+ tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+ /* Turn the queue back on using our cached control register */
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return 0;
+}
+
+static int
+ccp_perform_aes_gcm(struct rte_crypto_op *op, struct ccp_queue *cmd_q)
+{
+ struct ccp_session *session;
+ union ccp_function function;
+ uint8_t *iv;
+ struct ccp_passthru pst;
+ struct ccp_desc *desc;
+ uint32_t tail;
+ uint64_t *temp;
+ phys_addr_t src_addr, dest_addr, key_addr, aad_addr;
+ phys_addr_t digest_dest_addr;
+ int length, non_align_len;
+
+ session = (struct ccp_session *)get_sym_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+ iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset);
+ key_addr = session->cipher.key_phys;
+
+ src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+ op->sym->aead.data.offset);
+ if (unlikely(op->sym->m_dst != NULL))
+ dest_addr = rte_pktmbuf_mtophys_offset(op->sym->m_dst,
+ op->sym->aead.data.offset);
+ else
+ dest_addr = src_addr;
+ rte_pktmbuf_append(op->sym->m_src, session->auth.ctx_len);
+ digest_dest_addr = op->sym->aead.digest.phys_addr;
+ temp = (uint64_t *)(op->sym->aead.digest.data + AES_BLOCK_SIZE);
+ *temp++ = rte_bswap64(session->auth.aad_length << 3);
+ *temp = rte_bswap64(op->sym->aead.data.length << 3);
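+	/*
+	 * The 16 bytes right after the digest area now hold the final
+	 * GHASH block, [len(A)]64 || [len(C)]64 in bits (big endian),
+	 * consumed by the GHASH-Final descriptor (CMD5) below.
+	 */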
+
+ non_align_len = op->sym->aead.data.length % AES_BLOCK_SIZE;
+ length = CCP_ALIGN(op->sym->aead.data.length, AES_BLOCK_SIZE);
+
+ aad_addr = op->sym->aead.aad.phys_addr;
+
+ /* CMD1 IV Passthru */
+ rte_memcpy(session->cipher.nonce + AES_BLOCK_SIZE, iv,
+ session->iv.length);
+ pst.src_addr = session->cipher.nonce_phys;
+ pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+ pst.len = CCP_SB_BYTES;
+ pst.dir = 1;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ /* CMD2 GHASH-AAD */
+ function.raw = 0;
+ CCP_AES_ENCRYPT(&function) = CCP_AES_MODE_GHASH_AAD;
+ CCP_AES_MODE(&function) = CCP_AES_MODE_GHASH;
+ CCP_AES_TYPE(&function) = session->cipher.ut.aes_type;
+
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+ memset(desc, 0, Q_DESC_SIZE);
+
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+ CCP_CMD_INIT(desc) = 1;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+
+ CCP_CMD_LEN(desc) = session->auth.aad_length;
+
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)aad_addr);
+ CCP_CMD_SRC_HI(desc) = high32_value(aad_addr);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+ CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+ rte_wmb();
+
+ tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+
+ /* CMD3 : GCTR Plain text */
+ function.raw = 0;
+ CCP_AES_ENCRYPT(&function) = session->cipher.dir;
+ CCP_AES_MODE(&function) = CCP_AES_MODE_GCTR;
+ CCP_AES_TYPE(&function) = session->cipher.ut.aes_type;
+ if (non_align_len == 0)
+ CCP_AES_SIZE(&function) = (AES_BLOCK_SIZE << 3) - 1;
+ else
+ CCP_AES_SIZE(&function) = (non_align_len << 3) - 1;
+
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+ memset(desc, 0, Q_DESC_SIZE);
+
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+ CCP_CMD_EOM(desc) = 1;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+
+ CCP_CMD_LEN(desc) = length;
+
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+ CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
+ CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
+	CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+ CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+ rte_wmb();
+
+ tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+
+ /* CMD4 : PT to copy IV */
+ pst.src_addr = session->cipher.nonce_phys;
+ pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+ pst.len = AES_BLOCK_SIZE;
+ pst.dir = 1;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ /* CMD5 : GHASH-Final */
+ function.raw = 0;
+ CCP_AES_ENCRYPT(&function) = CCP_AES_MODE_GHASH_FINAL;
+ CCP_AES_MODE(&function) = CCP_AES_MODE_GHASH;
+ CCP_AES_TYPE(&function) = session->cipher.ut.aes_type;
+
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+ memset(desc, 0, Q_DESC_SIZE);
+
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+	/* Last block (AAD_len || PT_len) */
+ CCP_CMD_LEN(desc) = AES_BLOCK_SIZE;
+
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)digest_dest_addr + AES_BLOCK_SIZE);
+ CCP_CMD_SRC_HI(desc) = high32_value(digest_dest_addr + AES_BLOCK_SIZE);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_DST_LO(desc) = ((uint32_t)digest_dest_addr);
+ CCP_CMD_DST_HI(desc) = high32_value(digest_dest_addr);
+	CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+ CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+ rte_wmb();
+
+ tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return 0;
+}
+
+static inline int
+ccp_crypto_cipher(struct rte_crypto_op *op,
+ struct ccp_queue *cmd_q,
+ struct ccp_batch_info *b_info)
+{
+ int result = 0;
+ struct ccp_session *session;
+
+ session = (struct ccp_session *)get_sym_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+
+ switch (session->cipher.algo) {
+ case CCP_CIPHER_ALGO_AES_CBC:
+ result = ccp_perform_aes(op, cmd_q, b_info);
+ b_info->desccnt += 2;
+ break;
+ case CCP_CIPHER_ALGO_AES_CTR:
+ result = ccp_perform_aes(op, cmd_q, b_info);
+ b_info->desccnt += 2;
+ break;
+ case CCP_CIPHER_ALGO_AES_ECB:
+ result = ccp_perform_aes(op, cmd_q, b_info);
+ b_info->desccnt += 1;
+ break;
+ case CCP_CIPHER_ALGO_3DES_CBC:
+ result = ccp_perform_3des(op, cmd_q, b_info);
+ b_info->desccnt += 2;
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported cipher algo %d",
+ session->cipher.algo);
+ return -ENOTSUP;
+ }
+ return result;
+}
+
+static inline int
+ccp_crypto_auth(struct rte_crypto_op *op,
+ struct ccp_queue *cmd_q,
+ struct ccp_batch_info *b_info)
+{
+
+ int result = 0;
+ struct ccp_session *session;
+
+ session = (struct ccp_session *)get_sym_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+
+ switch (session->auth.algo) {
+ case CCP_AUTH_ALGO_SHA1:
+ case CCP_AUTH_ALGO_SHA224:
+ case CCP_AUTH_ALGO_SHA256:
+ case CCP_AUTH_ALGO_SHA384:
+ case CCP_AUTH_ALGO_SHA512:
+ result = ccp_perform_sha(op, cmd_q);
+ b_info->desccnt += 3;
+ break;
+ case CCP_AUTH_ALGO_MD5_HMAC:
+ if (session->auth_opt == 0)
+ result = -1;
+ break;
+ case CCP_AUTH_ALGO_SHA1_HMAC:
+ case CCP_AUTH_ALGO_SHA224_HMAC:
+ case CCP_AUTH_ALGO_SHA256_HMAC:
+ if (session->auth_opt == 0) {
+ result = ccp_perform_hmac(op, cmd_q);
+ b_info->desccnt += 6;
+ }
+ break;
+ case CCP_AUTH_ALGO_SHA384_HMAC:
+ case CCP_AUTH_ALGO_SHA512_HMAC:
+ if (session->auth_opt == 0) {
+ result = ccp_perform_hmac(op, cmd_q);
+ b_info->desccnt += 7;
+ }
+ break;
+ case CCP_AUTH_ALGO_SHA3_224:
+ case CCP_AUTH_ALGO_SHA3_256:
+ case CCP_AUTH_ALGO_SHA3_384:
+ case CCP_AUTH_ALGO_SHA3_512:
+ result = ccp_perform_sha3(op, cmd_q);
+ b_info->desccnt += 1;
+ break;
+ case CCP_AUTH_ALGO_SHA3_224_HMAC:
+ case CCP_AUTH_ALGO_SHA3_256_HMAC:
+ result = ccp_perform_sha3_hmac(op, cmd_q);
+ b_info->desccnt += 3;
+ break;
+ case CCP_AUTH_ALGO_SHA3_384_HMAC:
+ case CCP_AUTH_ALGO_SHA3_512_HMAC:
+ result = ccp_perform_sha3_hmac(op, cmd_q);
+ b_info->desccnt += 4;
+ break;
+ case CCP_AUTH_ALGO_AES_CMAC:
+ result = ccp_perform_aes_cmac(op, cmd_q);
+ b_info->desccnt += 4;
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported auth algo %d",
+ session->auth.algo);
+ return -ENOTSUP;
+ }
+
+ return result;
+}
+
+static inline int
+ccp_crypto_aead(struct rte_crypto_op *op,
+ struct ccp_queue *cmd_q,
+ struct ccp_batch_info *b_info)
+{
+ int result = 0;
+ struct ccp_session *session;
+
+ session = (struct ccp_session *)get_sym_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+
+ switch (session->auth.algo) {
+ case CCP_AUTH_ALGO_AES_GCM:
+ if (session->cipher.algo != CCP_CIPHER_ALGO_AES_GCM) {
+ CCP_LOG_ERR("Incorrect chain order");
+ return -1;
+ }
+ result = ccp_perform_aes_gcm(op, cmd_q);
+ b_info->desccnt += 5;
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported aead algo %d",
+ session->aead_algo);
+ return -ENOTSUP;
+ }
+ return result;
+}
+
+int
+process_ops_to_enqueue(struct ccp_qp *qp,
+ struct rte_crypto_op **op,
+ struct ccp_queue *cmd_q,
+ uint16_t nb_ops,
+ int slots_req)
+{
+ int i, result = 0;
+ struct ccp_batch_info *b_info;
+ struct ccp_session *session;
+ EVP_MD_CTX *auth_ctx = NULL;
+
+ if (rte_mempool_get(qp->batch_mp, (void **)&b_info)) {
+ CCP_LOG_ERR("batch info allocation failed");
+ return 0;
+ }
+
+ auth_ctx = EVP_MD_CTX_create();
+ if (unlikely(!auth_ctx)) {
+ CCP_LOG_ERR("Unable to create auth ctx");
+ return 0;
+ }
+ b_info->auth_ctr = 0;
+
+ /* populate batch info necessary for dequeue */
+ b_info->op_idx = 0;
+ b_info->lsb_buf_idx = 0;
+ b_info->desccnt = 0;
+ b_info->cmd_q = cmd_q;
+ b_info->lsb_buf_phys =
+ (phys_addr_t)rte_mem_virt2phy((void *)b_info->lsb_buf);
+ rte_atomic64_sub(&b_info->cmd_q->free_slots, slots_req);
+
+ b_info->head_offset = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx *
+ Q_DESC_SIZE);
+ for (i = 0; i < nb_ops; i++) {
+ session = (struct ccp_session *)get_sym_session_private_data(
+ op[i]->sym->session,
+ ccp_cryptodev_driver_id);
+ switch (session->cmd_id) {
+ case CCP_CMD_CIPHER:
+ result = ccp_crypto_cipher(op[i], cmd_q, b_info);
+ break;
+ case CCP_CMD_AUTH:
+ if (session->auth_opt) {
+ b_info->auth_ctr++;
+ result = cpu_crypto_auth(qp, op[i],
+ session, auth_ctx);
+ } else
+ result = ccp_crypto_auth(op[i], cmd_q, b_info);
+ break;
+ case CCP_CMD_CIPHER_HASH:
+ result = ccp_crypto_cipher(op[i], cmd_q, b_info);
+ if (result)
+ break;
+ result = ccp_crypto_auth(op[i], cmd_q, b_info);
+ break;
+ case CCP_CMD_HASH_CIPHER:
+ if (session->auth_opt) {
+ result = cpu_crypto_auth(qp, op[i],
+ session, auth_ctx);
+ if (op[i]->status !=
+ RTE_CRYPTO_OP_STATUS_SUCCESS)
+ continue;
+ } else
+ result = ccp_crypto_auth(op[i], cmd_q, b_info);
+
+ if (result)
+ break;
+ result = ccp_crypto_cipher(op[i], cmd_q, b_info);
+ break;
+ case CCP_CMD_COMBINED:
+ result = ccp_crypto_aead(op[i], cmd_q, b_info);
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported cmd_id");
+ result = -1;
+ }
+ if (unlikely(result < 0)) {
+ rte_atomic64_add(&b_info->cmd_q->free_slots,
+ (slots_req - b_info->desccnt));
+ break;
+ }
+ b_info->op[i] = op[i];
+ }
+
+ b_info->opcnt = i;
+ b_info->tail_offset = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx *
+ Q_DESC_SIZE);
+
+ rte_wmb();
+ /* Write the new tail address back to the queue register */
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE,
+ b_info->tail_offset);
+ /* Turn the queue back on using our cached control register */
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+
+ rte_ring_enqueue(qp->processed_pkts, (void *)b_info);
+
+ EVP_MD_CTX_destroy(auth_ctx);
+ return i;
+}
+
+static inline void ccp_auth_dq_prepare(struct rte_crypto_op *op)
+{
+ struct ccp_session *session;
+ uint8_t *digest_data, *addr;
+ struct rte_mbuf *m_last;
+ int offset, digest_offset;
+ uint8_t digest_le[64];
+
+ session = (struct ccp_session *)get_sym_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+
+ if (session->cmd_id == CCP_CMD_COMBINED) {
+ digest_data = op->sym->aead.digest.data;
+ digest_offset = op->sym->aead.data.offset +
+ op->sym->aead.data.length;
+ } else {
+ digest_data = op->sym->auth.digest.data;
+ digest_offset = op->sym->auth.data.offset +
+ op->sym->auth.data.length;
+ }
+ m_last = rte_pktmbuf_lastseg(op->sym->m_src);
+ addr = (uint8_t *)((char *)m_last->buf_addr + m_last->data_off +
+ m_last->data_len - session->auth.ctx_len);
+
+ rte_mb();
+ offset = session->auth.offset;
+
+ if (session->auth.engine == CCP_ENGINE_SHA)
+ if ((session->auth.ut.sha_type != CCP_SHA_TYPE_1) &&
+ (session->auth.ut.sha_type != CCP_SHA_TYPE_224) &&
+ (session->auth.ut.sha_type != CCP_SHA_TYPE_256)) {
+			/* All other algorithms require the byte swap
+			 * to be done by the host
+			 */
+ unsigned int i;
+
+ offset = session->auth.ctx_len -
+ session->auth.offset - 1;
+ for (i = 0; i < session->auth.digest_length; i++)
+ digest_le[i] = addr[offset - i];
+ offset = 0;
+ addr = digest_le;
+ }
+
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ if (session->auth.op == CCP_AUTH_OP_VERIFY) {
+ if (memcmp(addr + offset, digest_data,
+ session->auth.digest_length) != 0)
+ op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+
+ } else {
+ if (unlikely(digest_data == 0))
+ digest_data = rte_pktmbuf_mtod_offset(
+ op->sym->m_dst, uint8_t *,
+ digest_offset);
+ rte_memcpy(digest_data, addr + offset,
+ session->auth.digest_length);
+ }
+ /* Trim area used for digest from mbuf. */
+ rte_pktmbuf_trim(op->sym->m_src,
+ session->auth.ctx_len);
+}
+
+static int
+ccp_prepare_ops(struct ccp_qp *qp,
+ struct rte_crypto_op **op_d,
+ struct ccp_batch_info *b_info,
+ uint16_t nb_ops)
+{
+ int i, min_ops;
+ struct ccp_session *session;
+
+ EVP_MD_CTX *auth_ctx = NULL;
+
+ auth_ctx = EVP_MD_CTX_create();
+ if (unlikely(!auth_ctx)) {
+ CCP_LOG_ERR("Unable to create auth ctx");
+ return 0;
+ }
+ min_ops = RTE_MIN(nb_ops, b_info->opcnt);
+
+ for (i = 0; i < min_ops; i++) {
+ op_d[i] = b_info->op[b_info->op_idx++];
+ session = (struct ccp_session *)get_sym_session_private_data(
+ op_d[i]->sym->session,
+ ccp_cryptodev_driver_id);
+ switch (session->cmd_id) {
+ case CCP_CMD_CIPHER:
+ op_d[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ break;
+ case CCP_CMD_AUTH:
+ if (session->auth_opt == 0)
+ ccp_auth_dq_prepare(op_d[i]);
+ break;
+ case CCP_CMD_CIPHER_HASH:
+ if (session->auth_opt)
+ cpu_crypto_auth(qp, op_d[i],
+ session, auth_ctx);
+ else
+ ccp_auth_dq_prepare(op_d[i]);
+ break;
+ case CCP_CMD_HASH_CIPHER:
+ if (session->auth_opt)
+ op_d[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ else
+ ccp_auth_dq_prepare(op_d[i]);
+ break;
+ case CCP_CMD_COMBINED:
+ ccp_auth_dq_prepare(op_d[i]);
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported cmd_id");
+ }
+ }
+
+ EVP_MD_CTX_destroy(auth_ctx);
+ b_info->opcnt -= min_ops;
+ return min_ops;
+}
+
+int
+process_ops_to_dequeue(struct ccp_qp *qp,
+ struct rte_crypto_op **op,
+ uint16_t nb_ops)
+{
+ struct ccp_batch_info *b_info;
+ uint32_t cur_head_offset;
+
+ if (qp->b_info != NULL) {
+ b_info = qp->b_info;
+ if (unlikely(b_info->op_idx > 0))
+ goto success;
+ } else if (rte_ring_dequeue(qp->processed_pkts,
+ (void **)&b_info))
+ return 0;
+
+ if (b_info->auth_ctr == b_info->opcnt)
+ goto success;
+ cur_head_offset = CCP_READ_REG(b_info->cmd_q->reg_base,
+ CMD_Q_HEAD_LO_BASE);
+
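+	/*
+	 * The batch is still in flight while the hardware head pointer
+	 * sits inside the descriptor range submitted for it, i.e. between
+	 * head_offset and tail_offset (taking ring wrap-around into
+	 * account); in that case remember b_info and report nothing yet.
+	 */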
+ if (b_info->head_offset < b_info->tail_offset) {
+ if ((cur_head_offset >= b_info->head_offset) &&
+ (cur_head_offset < b_info->tail_offset)) {
+ qp->b_info = b_info;
+ return 0;
+ }
+ } else {
+ if ((cur_head_offset >= b_info->head_offset) ||
+ (cur_head_offset < b_info->tail_offset)) {
+ qp->b_info = b_info;
+ return 0;
+ }
+ }
+
+success:
+ nb_ops = ccp_prepare_ops(qp, op, b_info, nb_ops);
+ rte_atomic64_add(&b_info->cmd_q->free_slots, b_info->desccnt);
+ b_info->desccnt = 0;
+ if (b_info->opcnt > 0) {
+ qp->b_info = b_info;
+ } else {
+ rte_mempool_put(qp->batch_mp, (void *)b_info);
+ qp->b_info = NULL;
+ }
+
+ return nb_ops;
+}
diff --git a/src/spdk/dpdk/drivers/crypto/ccp/ccp_crypto.h b/src/spdk/dpdk/drivers/crypto/ccp/ccp_crypto.h
new file mode 100644
index 00000000..882b398a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/ccp/ccp_crypto.h
@@ -0,0 +1,388 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#ifndef _CCP_CRYPTO_H_
+#define _CCP_CRYPTO_H_
+
+#include <limits.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <rte_atomic.h>
+#include <rte_byteorder.h>
+#include <rte_io.h>
+#include <rte_pci.h>
+#include <rte_spinlock.h>
+#include <rte_crypto_sym.h>
+#include <rte_cryptodev.h>
+
+#include "ccp_dev.h"
+
+#define AES_BLOCK_SIZE 16
+#define CMAC_PAD_VALUE 0x80
+#define CTR_NONCE_SIZE 4
+#define CTR_IV_SIZE 8
+#define CCP_SHA3_CTX_SIZE 200
+
+/** Macro helpers for CCP command creation */
+#define CCP_AES_SIZE(p) ((p)->aes.size)
+#define CCP_AES_ENCRYPT(p) ((p)->aes.encrypt)
+#define CCP_AES_MODE(p) ((p)->aes.mode)
+#define CCP_AES_TYPE(p) ((p)->aes.type)
+#define CCP_DES_ENCRYPT(p) ((p)->des.encrypt)
+#define CCP_DES_MODE(p) ((p)->des.mode)
+#define CCP_DES_TYPE(p) ((p)->des.type)
+#define CCP_SHA_TYPE(p) ((p)->sha.type)
+#define CCP_PT_BYTESWAP(p) ((p)->pt.byteswap)
+#define CCP_PT_BITWISE(p) ((p)->pt.bitwise)
+
+/* HMAC */
+#define HMAC_IPAD_VALUE 0x36
+#define HMAC_OPAD_VALUE 0x5c
+
+/* MD5 */
+#define MD5_DIGEST_SIZE 16
+#define MD5_BLOCK_SIZE 64
+
+/* SHA */
+#define SHA_COMMON_DIGEST_SIZE 32
+#define SHA1_DIGEST_SIZE 20
+#define SHA1_BLOCK_SIZE 64
+
+#define SHA224_DIGEST_SIZE 28
+#define SHA224_BLOCK_SIZE 64
+#define SHA3_224_BLOCK_SIZE 144
+
+#define SHA256_DIGEST_SIZE 32
+#define SHA256_BLOCK_SIZE 64
+#define SHA3_256_BLOCK_SIZE 136
+
+#define SHA384_DIGEST_SIZE 48
+#define SHA384_BLOCK_SIZE 128
+#define SHA3_384_BLOCK_SIZE 104
+
+#define SHA512_DIGEST_SIZE 64
+#define SHA512_BLOCK_SIZE 128
+#define SHA3_512_BLOCK_SIZE 72
+
+/* Maximum length for digest */
+#define DIGEST_LENGTH_MAX 64
+
+/* SHA LSB initialization values */
+
+#define SHA1_H0 0x67452301UL
+#define SHA1_H1 0xefcdab89UL
+#define SHA1_H2 0x98badcfeUL
+#define SHA1_H3 0x10325476UL
+#define SHA1_H4 0xc3d2e1f0UL
+
+#define SHA224_H0 0xc1059ed8UL
+#define SHA224_H1 0x367cd507UL
+#define SHA224_H2 0x3070dd17UL
+#define SHA224_H3 0xf70e5939UL
+#define SHA224_H4 0xffc00b31UL
+#define SHA224_H5 0x68581511UL
+#define SHA224_H6 0x64f98fa7UL
+#define SHA224_H7 0xbefa4fa4UL
+
+#define SHA256_H0 0x6a09e667UL
+#define SHA256_H1 0xbb67ae85UL
+#define SHA256_H2 0x3c6ef372UL
+#define SHA256_H3 0xa54ff53aUL
+#define SHA256_H4 0x510e527fUL
+#define SHA256_H5 0x9b05688cUL
+#define SHA256_H6 0x1f83d9abUL
+#define SHA256_H7 0x5be0cd19UL
+
+#define SHA384_H0 0xcbbb9d5dc1059ed8ULL
+#define SHA384_H1 0x629a292a367cd507ULL
+#define SHA384_H2 0x9159015a3070dd17ULL
+#define SHA384_H3 0x152fecd8f70e5939ULL
+#define SHA384_H4 0x67332667ffc00b31ULL
+#define SHA384_H5 0x8eb44a8768581511ULL
+#define SHA384_H6 0xdb0c2e0d64f98fa7ULL
+#define SHA384_H7 0x47b5481dbefa4fa4ULL
+
+#define SHA512_H0 0x6a09e667f3bcc908ULL
+#define SHA512_H1 0xbb67ae8584caa73bULL
+#define SHA512_H2 0x3c6ef372fe94f82bULL
+#define SHA512_H3 0xa54ff53a5f1d36f1ULL
+#define SHA512_H4 0x510e527fade682d1ULL
+#define SHA512_H5 0x9b05688c2b3e6c1fULL
+#define SHA512_H6 0x1f83d9abfb41bd6bULL
+#define SHA512_H7 0x5be0cd19137e2179ULL
+
+/**
+ * CCP supported AES modes
+ */
+enum ccp_aes_mode {
+ CCP_AES_MODE_ECB = 0,
+ CCP_AES_MODE_CBC,
+ CCP_AES_MODE_OFB,
+ CCP_AES_MODE_CFB,
+ CCP_AES_MODE_CTR,
+ CCP_AES_MODE_CMAC,
+ CCP_AES_MODE_GHASH,
+ CCP_AES_MODE_GCTR,
+ CCP_AES_MODE__LAST,
+};
+
+/**
+ * CCP AES GHASH mode
+ */
+enum ccp_aes_ghash_mode {
+ CCP_AES_MODE_GHASH_AAD = 0,
+ CCP_AES_MODE_GHASH_FINAL
+};
+
+/**
+ * CCP supported AES types
+ */
+enum ccp_aes_type {
+ CCP_AES_TYPE_128 = 0,
+ CCP_AES_TYPE_192,
+ CCP_AES_TYPE_256,
+ CCP_AES_TYPE__LAST,
+};
+
+/***** 3DES engine *****/
+
+/**
+ * CCP supported DES/3DES modes
+ */
+enum ccp_des_mode {
+ CCP_DES_MODE_ECB = 0, /* Not supported */
+ CCP_DES_MODE_CBC,
+ CCP_DES_MODE_CFB,
+};
+
+/**
+ * CCP supported DES types
+ */
+enum ccp_des_type {
+ CCP_DES_TYPE_128 = 0, /* 112 + 16 parity */
+ CCP_DES_TYPE_192, /* 168 + 24 parity */
+ CCP_DES_TYPE__LAST,
+};
+
+/***** SHA engine *****/
+
+/**
+ * ccp_sha_type - type of SHA operation
+ *
+ * @CCP_SHA_TYPE_1: SHA-1 operation
+ * @CCP_SHA_TYPE_224: SHA-224 operation
+ * @CCP_SHA_TYPE_256: SHA-256 operation
+ */
+enum ccp_sha_type {
+ CCP_SHA_TYPE_1 = 1,
+ CCP_SHA_TYPE_224,
+ CCP_SHA_TYPE_256,
+ CCP_SHA_TYPE_384,
+ CCP_SHA_TYPE_512,
+ CCP_SHA_TYPE_RSVD1,
+ CCP_SHA_TYPE_RSVD2,
+ CCP_SHA3_TYPE_224,
+ CCP_SHA3_TYPE_256,
+ CCP_SHA3_TYPE_384,
+ CCP_SHA3_TYPE_512,
+ CCP_SHA_TYPE__LAST,
+};
+
+/**
+ * CCP supported cipher algorithms
+ */
+enum ccp_cipher_algo {
+ CCP_CIPHER_ALGO_AES_CBC = 0,
+ CCP_CIPHER_ALGO_AES_ECB,
+ CCP_CIPHER_ALGO_AES_CTR,
+ CCP_CIPHER_ALGO_AES_GCM,
+ CCP_CIPHER_ALGO_3DES_CBC,
+};
+
+/**
+ * CCP cipher operation type
+ */
+enum ccp_cipher_dir {
+ CCP_CIPHER_DIR_DECRYPT = 0,
+ CCP_CIPHER_DIR_ENCRYPT = 1,
+};
+
+/**
+ * CCP supported hash algorithms
+ */
+enum ccp_hash_algo {
+ CCP_AUTH_ALGO_SHA1 = 0,
+ CCP_AUTH_ALGO_SHA1_HMAC,
+ CCP_AUTH_ALGO_SHA224,
+ CCP_AUTH_ALGO_SHA224_HMAC,
+ CCP_AUTH_ALGO_SHA3_224,
+ CCP_AUTH_ALGO_SHA3_224_HMAC,
+ CCP_AUTH_ALGO_SHA256,
+ CCP_AUTH_ALGO_SHA256_HMAC,
+ CCP_AUTH_ALGO_SHA3_256,
+ CCP_AUTH_ALGO_SHA3_256_HMAC,
+ CCP_AUTH_ALGO_SHA384,
+ CCP_AUTH_ALGO_SHA384_HMAC,
+ CCP_AUTH_ALGO_SHA3_384,
+ CCP_AUTH_ALGO_SHA3_384_HMAC,
+ CCP_AUTH_ALGO_SHA512,
+ CCP_AUTH_ALGO_SHA512_HMAC,
+ CCP_AUTH_ALGO_SHA3_512,
+ CCP_AUTH_ALGO_SHA3_512_HMAC,
+ CCP_AUTH_ALGO_AES_CMAC,
+ CCP_AUTH_ALGO_AES_GCM,
+ CCP_AUTH_ALGO_MD5_HMAC,
+};
+
+/**
+ * CCP hash operation type
+ */
+enum ccp_hash_op {
+ CCP_AUTH_OP_GENERATE = 0,
+ CCP_AUTH_OP_VERIFY = 1,
+};
+
+/* CCP crypto private session structure */
+struct ccp_session {
+ bool auth_opt;
+ enum ccp_cmd_order cmd_id;
+ /**< chain order mode */
+ struct {
+ uint16_t length;
+ uint16_t offset;
+ } iv;
+ /**< IV parameters */
+ struct {
+ enum ccp_cipher_algo algo;
+ enum ccp_engine engine;
+ union {
+ enum ccp_aes_mode aes_mode;
+ enum ccp_des_mode des_mode;
+ } um;
+ union {
+ enum ccp_aes_type aes_type;
+ enum ccp_des_type des_type;
+ } ut;
+ enum ccp_cipher_dir dir;
+ uint64_t key_length;
+ /**< max cipher key size 256 bits */
+ uint8_t key[32];
+	/**< key in CCP format */
+ uint8_t key_ccp[32];
+ phys_addr_t key_phys;
+	/**< AES-CTR nonce(4), IV(8), counter */
+ uint8_t nonce[32];
+ phys_addr_t nonce_phys;
+ } cipher;
+ /**< Cipher Parameters */
+
+ struct {
+ enum ccp_hash_algo algo;
+ enum ccp_engine engine;
+ union {
+ enum ccp_aes_mode aes_mode;
+ } um;
+ union {
+ enum ccp_sha_type sha_type;
+ enum ccp_aes_type aes_type;
+ } ut;
+ enum ccp_hash_op op;
+ uint64_t key_length;
+		/**< max hash key size 144 bytes (struct capabilities) */
+		uint8_t key[144];
+		/**< max big-endian key size for AES is 32 */
+ uint8_t key_ccp[32];
+ phys_addr_t key_phys;
+ uint64_t digest_length;
+ void *ctx;
+ int ctx_len;
+ int offset;
+ int block_size;
+		/**< Buffer to store software generated precompute values */
+		/**< For HMAC, H(ipad ^ key) and H(opad ^ key) */
+		/**< For CMAC, K1 IV and K2 IV */
+		uint8_t pre_compute[2 * CCP_SHA3_CTX_SIZE];
+		/**< SHA3 initial ctx, all zeros */
+ uint8_t sha3_ctx[200];
+ int aad_length;
+ } auth;
+ /**< Authentication Parameters */
+ enum rte_crypto_aead_algorithm aead_algo;
+ /**< AEAD Algorithm */
+
+ uint32_t reserved;
+} __rte_cache_aligned;
+
+extern uint8_t ccp_cryptodev_driver_id;
+
+struct ccp_qp;
+struct ccp_private;
+
+/**
+ * Set and validate CCP crypto session parameters
+ *
+ * @param sess ccp private session
+ * @param xform crypto xform for this session
+ * @param internals ccp device private data
+ * @return 0 on success otherwise -1
+ */
+int ccp_set_session_parameters(struct ccp_session *sess,
+ const struct rte_crypto_sym_xform *xform,
+ struct ccp_private *internals);
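+
+/*
+ * Illustrative sketch only (assumed usage, variable names hypothetical):
+ * a PMD session-configure path validates the xform chain through this
+ * helper before any op referencing the session is enqueued.
+ *
+ *	struct ccp_session *sess = ...;          // per-session private data
+ *	const struct rte_crypto_sym_xform *xform = ...; // cipher/auth chain
+ *	struct ccp_private *internals = dev->data->dev_private;
+ *
+ *	if (ccp_set_session_parameters(sess, xform, internals) != 0)
+ *		return -EINVAL;  // unsupported algorithm or key/IV size
+ */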
+
+/**
+ * Find count of slots
+ *
+ * @param session CCP private session
+ * @return number of CCP descriptor slots required by the session
+ */
+int ccp_compute_slot_count(struct ccp_session *session);
+
+/**
+ * process crypto ops to be enqueued
+ *
+ * @param qp CCP crypto queue-pair
+ * @param op crypto ops table
+ * @param cmd_q CCP cmd queue
+ * @param nb_ops No. of ops to be submitted
+ * @param slots_req No. of CCP descriptor slots required for the ops
+ * @return 0 on success otherwise -1
+ */
+int process_ops_to_enqueue(struct ccp_qp *qp,
+ struct rte_crypto_op **op,
+ struct ccp_queue *cmd_q,
+ uint16_t nb_ops,
+ int slots_req);
+
+/**
+ * process crypto ops to be dequeued
+ *
+ * @param qp CCP crypto queue-pair
+ * @param op crypto ops table
+ * @param nb_ops requested no. of ops
+ * @return no. of ops dequeued; 0 when the current batch is not yet complete
+ */
+int process_ops_to_dequeue(struct ccp_qp *qp,
+ struct rte_crypto_op **op,
+ uint16_t nb_ops);
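+
+/*
+ * Illustrative sketch only (assumed usage, names hypothetical): the PMD
+ * burst callbacks pair the two helpers above.  Enqueue first reserves a
+ * hardware queue with enough free slots; dequeue later collects the ops
+ * of the completed batch.
+ *
+ *	slots = ccp_compute_slot_count(sess);    // per-op descriptor need
+ *	cmd_q = ccp_allot_queue(dev, slots);
+ *	if (cmd_q != NULL)
+ *		enq = process_ops_to_enqueue(qp, ops, cmd_q, nb_ops, slots);
+ *	...
+ *	deq = process_ops_to_dequeue(qp, ops, nb_ops);
+ */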
+
+
+/**
+ * APIs for SHA3 partial hash generation
+ *
+ * @param data_in buffer pointer on which the partial hash is applied
+ * @param data_out buffer where the partial hash result is written in
+ * CCP big-endian format
+ */
+int partial_hash_sha3_224(uint8_t *data_in,
+ uint8_t *data_out);
+
+int partial_hash_sha3_256(uint8_t *data_in,
+ uint8_t *data_out);
+
+int partial_hash_sha3_384(uint8_t *data_in,
+ uint8_t *data_out);
+
+int partial_hash_sha3_512(uint8_t *data_in,
+ uint8_t *data_out);
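+
+/*
+ * Illustrative note (assumed usage): these helpers are intended for
+ * HMAC-style precomputation, e.g. hashing the (key ^ ipad) and
+ * (key ^ opad) blocks once at session setup and storing the partial
+ * state in ccp_session.auth.pre_compute.  A hypothetical sketch:
+ *
+ *	uint8_t ipad[SHA3_256_BLOCK_SIZE];       // key XOR 0x36 padding
+ *	uint8_t phash[CCP_SHA3_CTX_SIZE];
+ *
+ *	if (partial_hash_sha3_256(ipad, phash) != 0)
+ *		return -1;
+ */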
+
+#endif /* _CCP_CRYPTO_H_ */
diff --git a/src/spdk/dpdk/drivers/crypto/ccp/ccp_dev.c b/src/spdk/dpdk/drivers/crypto/ccp/ccp_dev.c
new file mode 100644
index 00000000..80fe6a45
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/ccp/ccp_dev.c
@@ -0,0 +1,810 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#include <dirent.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/queue.h>
+#include <sys/types.h>
+#include <sys/file.h>
+#include <unistd.h>
+
+#include <rte_hexdump.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_spinlock.h>
+#include <rte_string_fns.h>
+
+#include "ccp_dev.h"
+#include "ccp_pci.h"
+#include "ccp_pmd_private.h"
+
+struct ccp_list ccp_list = TAILQ_HEAD_INITIALIZER(ccp_list);
+static int ccp_dev_id;
+
+int
+ccp_dev_start(struct rte_cryptodev *dev)
+{
+ struct ccp_private *priv = dev->data->dev_private;
+
+ priv->last_dev = TAILQ_FIRST(&ccp_list);
+ return 0;
+}
+
+struct ccp_queue *
+ccp_allot_queue(struct rte_cryptodev *cdev, int slot_req)
+{
+ int i, ret = 0;
+ struct ccp_device *dev;
+ struct ccp_private *priv = cdev->data->dev_private;
+
+ dev = TAILQ_NEXT(priv->last_dev, next);
+ if (unlikely(dev == NULL))
+ dev = TAILQ_FIRST(&ccp_list);
+ priv->last_dev = dev;
+ if (dev->qidx >= dev->cmd_q_count)
+ dev->qidx = 0;
+ ret = rte_atomic64_read(&dev->cmd_q[dev->qidx].free_slots);
+ if (ret >= slot_req)
+ return &dev->cmd_q[dev->qidx];
+ for (i = 0; i < dev->cmd_q_count; i++) {
+ dev->qidx++;
+ if (dev->qidx >= dev->cmd_q_count)
+ dev->qidx = 0;
+ ret = rte_atomic64_read(&dev->cmd_q[dev->qidx].free_slots);
+ if (ret >= slot_req)
+ return &dev->cmd_q[dev->qidx];
+ }
+ return NULL;
+}
+
+int
+ccp_read_hwrng(uint32_t *value)
+{
+ struct ccp_device *dev;
+
+ TAILQ_FOREACH(dev, &ccp_list, next) {
+ void *vaddr = (void *)(dev->pci.mem_resource[2].addr);
+
+ while (dev->hwrng_retries++ < CCP_MAX_TRNG_RETRIES) {
+ *value = CCP_READ_REG(vaddr, TRNG_OUT_REG);
+ if (*value) {
+ dev->hwrng_retries = 0;
+ return 0;
+ }
+ }
+ dev->hwrng_retries = 0;
+ }
+ return -1;
+}
+
+static const struct rte_memzone *
+ccp_queue_dma_zone_reserve(const char *queue_name,
+ uint32_t queue_size,
+ int socket_id)
+{
+ const struct rte_memzone *mz;
+
+ mz = rte_memzone_lookup(queue_name);
+ if (mz != 0) {
+ if (((size_t)queue_size <= mz->len) &&
+ ((socket_id == SOCKET_ID_ANY) ||
+ (socket_id == mz->socket_id))) {
+ CCP_LOG_INFO("re-use memzone already "
+ "allocated for %s", queue_name);
+ return mz;
+ }
+ CCP_LOG_ERR("Incompatible memzone already "
+ "allocated %s, size %u, socket %d. "
+ "Requested size %u, socket %u",
+ queue_name, (uint32_t)mz->len,
+ mz->socket_id, queue_size, socket_id);
+ return NULL;
+ }
+
+ CCP_LOG_INFO("Allocate memzone for %s, size %u on socket %u",
+ queue_name, queue_size, socket_id);
+
+ return rte_memzone_reserve_aligned(queue_name, queue_size,
+ socket_id, RTE_MEMZONE_IOVA_CONTIG, queue_size);
+}
+
+/* bitmap support apis */
+static inline void
+ccp_set_bit(unsigned long *bitmap, int n)
+{
+ __sync_fetch_and_or(&bitmap[WORD_OFFSET(n)], (1UL << BIT_OFFSET(n)));
+}
+
+static inline void
+ccp_clear_bit(unsigned long *bitmap, int n)
+{
+ __sync_fetch_and_and(&bitmap[WORD_OFFSET(n)], ~(1UL << BIT_OFFSET(n)));
+}
+
+static inline uint32_t
+ccp_get_bit(unsigned long *bitmap, int n)
+{
+	return ((bitmap[WORD_OFFSET(n)] & (1UL << BIT_OFFSET(n))) != 0);
+}
+
+
+static inline uint32_t
+ccp_ffz(unsigned long word)
+{
+ unsigned long first_zero;
+
+ first_zero = __builtin_ffsl(~word);
+ return first_zero ? (first_zero - 1) :
+ BITS_PER_WORD;
+}
+
+static inline uint32_t
+ccp_find_first_zero_bit(unsigned long *addr, uint32_t limit)
+{
+ uint32_t i;
+ uint32_t nwords = 0;
+
+ nwords = (limit - 1) / BITS_PER_WORD + 1;
+ for (i = 0; i < nwords; i++) {
+ if (addr[i] == 0UL)
+ return i * BITS_PER_WORD;
+ if (addr[i] < ~(0UL))
+ break;
+ }
+ return (i == nwords) ? limit : i * BITS_PER_WORD + ccp_ffz(addr[i]);
+}
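+
+/*
+ * Worked example with 64-bit unsigned long words: for bit n = 70,
+ * WORD_OFFSET(70) = 1 and BIT_OFFSET(70) = 6, so ccp_set_bit() ORs
+ * (1UL << 6) into bitmap[1].  ccp_find_first_zero_bit() walks the words
+ * and uses ccp_ffz() to locate the first clear bit of the first word
+ * that is not fully set.
+ */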
+
+static void
+ccp_bitmap_set(unsigned long *map, unsigned int start, int len)
+{
+ unsigned long *p = map + WORD_OFFSET(start);
+ const unsigned int size = start + len;
+ int bits_to_set = BITS_PER_WORD - (start % BITS_PER_WORD);
+ unsigned long mask_to_set = CCP_BITMAP_FIRST_WORD_MASK(start);
+
+ while (len - bits_to_set >= 0) {
+ *p |= mask_to_set;
+ len -= bits_to_set;
+ bits_to_set = BITS_PER_WORD;
+ mask_to_set = ~0UL;
+ p++;
+ }
+ if (len) {
+ mask_to_set &= CCP_BITMAP_LAST_WORD_MASK(size);
+ *p |= mask_to_set;
+ }
+}
+
+static void
+ccp_bitmap_clear(unsigned long *map, unsigned int start, int len)
+{
+ unsigned long *p = map + WORD_OFFSET(start);
+ const unsigned int size = start + len;
+ int bits_to_clear = BITS_PER_WORD - (start % BITS_PER_WORD);
+ unsigned long mask_to_clear = CCP_BITMAP_FIRST_WORD_MASK(start);
+
+ while (len - bits_to_clear >= 0) {
+ *p &= ~mask_to_clear;
+ len -= bits_to_clear;
+ bits_to_clear = BITS_PER_WORD;
+ mask_to_clear = ~0UL;
+ p++;
+ }
+ if (len) {
+ mask_to_clear &= CCP_BITMAP_LAST_WORD_MASK(size);
+ *p &= ~mask_to_clear;
+ }
+}
+
+
+static unsigned long
+_ccp_find_next_bit(const unsigned long *addr,
+ unsigned long nbits,
+ unsigned long start,
+ unsigned long invert)
+{
+ unsigned long tmp;
+
+ if (!nbits || start >= nbits)
+ return nbits;
+
+ tmp = addr[start / BITS_PER_WORD] ^ invert;
+
+ /* Handle 1st word. */
+ tmp &= CCP_BITMAP_FIRST_WORD_MASK(start);
+ start = ccp_round_down(start, BITS_PER_WORD);
+
+ while (!tmp) {
+ start += BITS_PER_WORD;
+ if (start >= nbits)
+ return nbits;
+
+ tmp = addr[start / BITS_PER_WORD] ^ invert;
+ }
+
+	return RTE_MIN(start + (__builtin_ffsl(tmp) - 1), nbits);
+}
+
+static unsigned long
+ccp_find_next_bit(const unsigned long *addr,
+ unsigned long size,
+ unsigned long offset)
+{
+ return _ccp_find_next_bit(addr, size, offset, 0UL);
+}
+
+static unsigned long
+ccp_find_next_zero_bit(const unsigned long *addr,
+ unsigned long size,
+ unsigned long offset)
+{
+ return _ccp_find_next_bit(addr, size, offset, ~0UL);
+}
+
+/**
+ * bitmap_find_next_zero_area - find a contiguous aligned zero area
+ * @map: The address to base the search on
+ * @size: The bitmap size in bits
+ * @start: The bitnumber to start searching at
+ * @nr: The number of zeroed bits we're looking for
+ */
+static unsigned long
+ccp_bitmap_find_next_zero_area(unsigned long *map,
+ unsigned long size,
+ unsigned long start,
+ unsigned int nr)
+{
+ unsigned long index, end, i;
+
+again:
+ index = ccp_find_next_zero_bit(map, size, start);
+
+ end = index + nr;
+ if (end > size)
+ return end;
+ i = ccp_find_next_bit(map, end, index);
+ if (i < end) {
+ start = i + 1;
+ goto again;
+ }
+ return index;
+}
+
+static uint32_t
+ccp_lsb_alloc(struct ccp_queue *cmd_q, unsigned int count)
+{
+ struct ccp_device *ccp;
+ int start;
+
+ /* First look at the map for the queue */
+ if (cmd_q->lsb >= 0) {
+ start = (uint32_t)ccp_bitmap_find_next_zero_area(cmd_q->lsbmap,
+ LSB_SIZE, 0,
+ count);
+ if (start < LSB_SIZE) {
+ ccp_bitmap_set(cmd_q->lsbmap, start, count);
+ return start + cmd_q->lsb * LSB_SIZE;
+ }
+ }
+
+ /* try to get an entry from the shared blocks */
+ ccp = cmd_q->dev;
+
+ rte_spinlock_lock(&ccp->lsb_lock);
+
+ start = (uint32_t)ccp_bitmap_find_next_zero_area(ccp->lsbmap,
+ MAX_LSB_CNT * LSB_SIZE,
+ 0, count);
+ if (start <= MAX_LSB_CNT * LSB_SIZE) {
+ ccp_bitmap_set(ccp->lsbmap, start, count);
+ rte_spinlock_unlock(&ccp->lsb_lock);
+ return start * LSB_ITEM_SIZE;
+ }
+ CCP_LOG_ERR("NO LSBs available");
+
+ rte_spinlock_unlock(&ccp->lsb_lock);
+
+ return 0;
+}
+
+static void __rte_unused
+ccp_lsb_free(struct ccp_queue *cmd_q,
+ unsigned int start,
+ unsigned int count)
+{
+ int lsbno = start / LSB_SIZE;
+
+ if (!start)
+ return;
+
+ if (cmd_q->lsb == lsbno) {
+ /* An entry from the private LSB */
+ ccp_bitmap_clear(cmd_q->lsbmap, start % LSB_SIZE, count);
+ } else {
+ /* From the shared LSBs */
+ struct ccp_device *ccp = cmd_q->dev;
+
+ rte_spinlock_lock(&ccp->lsb_lock);
+ ccp_bitmap_clear(ccp->lsbmap, start, count);
+ rte_spinlock_unlock(&ccp->lsb_lock);
+ }
+}
+
+static int
+ccp_find_lsb_regions(struct ccp_queue *cmd_q, uint64_t status)
+{
+ int q_mask = 1 << cmd_q->id;
+ int weight = 0;
+ int j;
+
+	/* Build a bit mask to know which LSBs this queue has access to.
+	 * Don't bother with segment 0 as it has special privileges.
+	 */
+ cmd_q->lsbmask = 0;
+ status >>= LSB_REGION_WIDTH;
+ for (j = 1; j < MAX_LSB_CNT; j++) {
+ if (status & q_mask)
+ ccp_set_bit(&cmd_q->lsbmask, j);
+
+ status >>= LSB_REGION_WIDTH;
+ }
+
+ for (j = 0; j < MAX_LSB_CNT; j++)
+ if (ccp_get_bit(&cmd_q->lsbmask, j))
+ weight++;
+
+ printf("Queue %d can access %d LSB regions of mask %lu\n",
+ (int)cmd_q->id, weight, cmd_q->lsbmask);
+
+ return weight ? 0 : -EINVAL;
+}
+
+static int
+ccp_find_and_assign_lsb_to_q(struct ccp_device *ccp,
+ int lsb_cnt, int n_lsbs,
+ unsigned long *lsb_pub)
+{
+ unsigned long qlsb = 0;
+ int bitno = 0;
+ int qlsb_wgt = 0;
+ int i, j;
+
+ /* For each queue:
+ * If the count of potential LSBs available to a queue matches the
+ * ordinal given to us in lsb_cnt:
+ * Copy the mask of possible LSBs for this queue into "qlsb";
+ * For each bit in qlsb, see if the corresponding bit in the
+ * aggregation mask is set; if so, we have a match.
+ * If we have a match, clear the bit in the aggregation to
+ * mark it as no longer available.
+ * If there is no match, clear the bit in qlsb and keep looking.
+ */
+ for (i = 0; i < ccp->cmd_q_count; i++) {
+ struct ccp_queue *cmd_q = &ccp->cmd_q[i];
+
+ qlsb_wgt = 0;
+ for (j = 0; j < MAX_LSB_CNT; j++)
+ if (ccp_get_bit(&cmd_q->lsbmask, j))
+ qlsb_wgt++;
+
+ if (qlsb_wgt == lsb_cnt) {
+ qlsb = cmd_q->lsbmask;
+
+ bitno = ffs(qlsb) - 1;
+ while (bitno < MAX_LSB_CNT) {
+ if (ccp_get_bit(lsb_pub, bitno)) {
+ /* We found an available LSB
+ * that this queue can access
+ */
+ cmd_q->lsb = bitno;
+ ccp_clear_bit(lsb_pub, bitno);
+ break;
+ }
+ ccp_clear_bit(&qlsb, bitno);
+ bitno = ffs(qlsb) - 1;
+ }
+ if (bitno >= MAX_LSB_CNT)
+ return -EINVAL;
+ n_lsbs--;
+ }
+ }
+ return n_lsbs;
+}
+
+/* For each queue, from the most- to least-constrained:
+ * find an LSB that can be assigned to the queue. If there are N queues that
+ * can only use M LSBs, where N > M, fail; otherwise, every queue will get a
+ * dedicated LSB. Remaining LSB regions become a shared resource.
+ * If we have fewer LSBs than queues, all LSB regions become shared
+ * resources.
+ */
+static int
+ccp_assign_lsbs(struct ccp_device *ccp)
+{
+ unsigned long lsb_pub = 0, qlsb = 0;
+ int n_lsbs = 0;
+ int bitno;
+ int i, lsb_cnt;
+ int rc = 0;
+
+ rte_spinlock_init(&ccp->lsb_lock);
+
+ /* Create an aggregate bitmap to get a total count of available LSBs */
+ for (i = 0; i < ccp->cmd_q_count; i++)
+ lsb_pub |= ccp->cmd_q[i].lsbmask;
+
+ for (i = 0; i < MAX_LSB_CNT; i++)
+ if (ccp_get_bit(&lsb_pub, i))
+ n_lsbs++;
+
+ if (n_lsbs >= ccp->cmd_q_count) {
+ /* We have enough LSBS to give every queue a private LSB.
+ * Brute force search to start with the queues that are more
+ * constrained in LSB choice. When an LSB is privately
+ * assigned, it is removed from the public mask.
+ * This is an ugly N squared algorithm with some optimization.
+ */
+ for (lsb_cnt = 1; n_lsbs && (lsb_cnt <= MAX_LSB_CNT);
+ lsb_cnt++) {
+ rc = ccp_find_and_assign_lsb_to_q(ccp, lsb_cnt, n_lsbs,
+ &lsb_pub);
+ if (rc < 0)
+ return -EINVAL;
+ n_lsbs = rc;
+ }
+ }
+
+ rc = 0;
+ /* What's left of the LSBs, according to the public mask, now become
+ * shared. Any zero bits in the lsb_pub mask represent an LSB region
+ * that can't be used as a shared resource, so mark the LSB slots for
+ * them as "in use".
+ */
+ qlsb = lsb_pub;
+ bitno = ccp_find_first_zero_bit(&qlsb, MAX_LSB_CNT);
+ while (bitno < MAX_LSB_CNT) {
+ ccp_bitmap_set(ccp->lsbmap, bitno * LSB_SIZE, LSB_SIZE);
+ ccp_set_bit(&qlsb, bitno);
+ bitno = ccp_find_first_zero_bit(&qlsb, MAX_LSB_CNT);
+ }
+
+ return rc;
+}
+
+static int
+ccp_add_device(struct ccp_device *dev, int type)
+{
+ int i;
+ uint32_t qmr, status_lo, status_hi, dma_addr_lo, dma_addr_hi;
+ uint64_t status;
+ struct ccp_queue *cmd_q;
+ const struct rte_memzone *q_mz;
+ void *vaddr;
+
+ if (dev == NULL)
+ return -1;
+
+ dev->id = ccp_dev_id++;
+ dev->qidx = 0;
+ vaddr = (void *)(dev->pci.mem_resource[2].addr);
+
+ if (type == CCP_VERSION_5B) {
+ CCP_WRITE_REG(vaddr, CMD_TRNG_CTL_OFFSET, 0x00012D57);
+ CCP_WRITE_REG(vaddr, CMD_CONFIG_0_OFFSET, 0x00000003);
+ for (i = 0; i < 12; i++) {
+ CCP_WRITE_REG(vaddr, CMD_AES_MASK_OFFSET,
+ CCP_READ_REG(vaddr, TRNG_OUT_REG));
+ }
+ CCP_WRITE_REG(vaddr, CMD_QUEUE_MASK_OFFSET, 0x0000001F);
+ CCP_WRITE_REG(vaddr, CMD_QUEUE_PRIO_OFFSET, 0x00005B6D);
+ CCP_WRITE_REG(vaddr, CMD_CMD_TIMEOUT_OFFSET, 0x00000000);
+
+ CCP_WRITE_REG(vaddr, LSB_PRIVATE_MASK_LO_OFFSET, 0x3FFFFFFF);
+ CCP_WRITE_REG(vaddr, LSB_PRIVATE_MASK_HI_OFFSET, 0x000003FF);
+
+ CCP_WRITE_REG(vaddr, CMD_CLK_GATE_CTL_OFFSET, 0x00108823);
+ }
+ CCP_WRITE_REG(vaddr, CMD_REQID_CONFIG_OFFSET, 0x00001249);
+
+ /* Copy the private LSB mask to the public registers */
+ status_lo = CCP_READ_REG(vaddr, LSB_PRIVATE_MASK_LO_OFFSET);
+ status_hi = CCP_READ_REG(vaddr, LSB_PRIVATE_MASK_HI_OFFSET);
+ CCP_WRITE_REG(vaddr, LSB_PUBLIC_MASK_LO_OFFSET, status_lo);
+ CCP_WRITE_REG(vaddr, LSB_PUBLIC_MASK_HI_OFFSET, status_hi);
+ status = ((uint64_t)status_hi<<30) | ((uint64_t)status_lo);
+
+ dev->cmd_q_count = 0;
+ /* Find available queues */
+ qmr = CCP_READ_REG(vaddr, Q_MASK_REG);
+ for (i = 0; i < MAX_HW_QUEUES; i++) {
+ if (!(qmr & (1 << i)))
+ continue;
+ cmd_q = &dev->cmd_q[dev->cmd_q_count++];
+ cmd_q->dev = dev;
+ cmd_q->id = i;
+ cmd_q->qidx = 0;
+ cmd_q->qsize = Q_SIZE(Q_DESC_SIZE);
+
+ cmd_q->reg_base = (uint8_t *)vaddr +
+ CMD_Q_STATUS_INCR * (i + 1);
+
+ /* CCP queue memory */
+ snprintf(cmd_q->memz_name, sizeof(cmd_q->memz_name),
+ "%s_%d_%s_%d_%s",
+ "ccp_dev",
+ (int)dev->id, "queue",
+ (int)cmd_q->id, "mem");
+ q_mz = ccp_queue_dma_zone_reserve(cmd_q->memz_name,
+ cmd_q->qsize, SOCKET_ID_ANY);
+ cmd_q->qbase_addr = (void *)q_mz->addr;
+ cmd_q->qbase_desc = (void *)q_mz->addr;
+ cmd_q->qbase_phys_addr = q_mz->phys_addr;
+
+ cmd_q->qcontrol = 0;
+ /* init control reg to zero */
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol);
+
+ /* Disable the interrupts */
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_INT_ENABLE_BASE, 0x00);
+ CCP_READ_REG(cmd_q->reg_base, CMD_Q_INT_STATUS_BASE);
+ CCP_READ_REG(cmd_q->reg_base, CMD_Q_STATUS_BASE);
+
+ /* Clear the interrupts */
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_INTERRUPT_STATUS_BASE,
+ ALL_INTERRUPTS);
+
+ /* Configure size of each virtual queue accessible to host */
+ cmd_q->qcontrol &= ~(CMD_Q_SIZE << CMD_Q_SHIFT);
+ cmd_q->qcontrol |= QUEUE_SIZE_VAL << CMD_Q_SHIFT;
+
+ dma_addr_lo = low32_value(cmd_q->qbase_phys_addr);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE,
+ (uint32_t)dma_addr_lo);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_HEAD_LO_BASE,
+ (uint32_t)dma_addr_lo);
+
+ dma_addr_hi = high32_value(cmd_q->qbase_phys_addr);
+ cmd_q->qcontrol |= (dma_addr_hi << 16);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol);
+
+ /* create LSB Mask map */
+ if (ccp_find_lsb_regions(cmd_q, status))
+ CCP_LOG_ERR("queue doesn't have lsb regions");
+ cmd_q->lsb = -1;
+
+ rte_atomic64_init(&cmd_q->free_slots);
+ rte_atomic64_set(&cmd_q->free_slots, (COMMANDS_PER_QUEUE - 1));
+		/* one slot is kept unused as a barrier between head and tail */
+ }
+
+ if (ccp_assign_lsbs(dev))
+ CCP_LOG_ERR("Unable to assign lsb region");
+
+ /* pre-allocate LSB slots */
+ for (i = 0; i < dev->cmd_q_count; i++) {
+ dev->cmd_q[i].sb_key =
+ ccp_lsb_alloc(&dev->cmd_q[i], 1);
+ dev->cmd_q[i].sb_iv =
+ ccp_lsb_alloc(&dev->cmd_q[i], 1);
+ dev->cmd_q[i].sb_sha =
+ ccp_lsb_alloc(&dev->cmd_q[i], 2);
+ dev->cmd_q[i].sb_hmac =
+ ccp_lsb_alloc(&dev->cmd_q[i], 2);
+ }
+
+ TAILQ_INSERT_TAIL(&ccp_list, dev, next);
+ return 0;
+}
+
+static void
+ccp_remove_device(struct ccp_device *dev)
+{
+ if (dev == NULL)
+ return;
+
+ TAILQ_REMOVE(&ccp_list, dev, next);
+}
+
+static int
+is_ccp_device(const char *dirname,
+ const struct rte_pci_id *ccp_id,
+ int *type)
+{
+ char filename[PATH_MAX];
+ const struct rte_pci_id *id;
+ uint16_t vendor, device_id;
+ int i;
+ unsigned long tmp;
+
+ /* get vendor id */
+ snprintf(filename, sizeof(filename), "%s/vendor", dirname);
+ if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+ return 0;
+ vendor = (uint16_t)tmp;
+
+ /* get device id */
+ snprintf(filename, sizeof(filename), "%s/device", dirname);
+ if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+ return 0;
+ device_id = (uint16_t)tmp;
+
+ for (id = ccp_id, i = 0; id->vendor_id != 0; id++, i++) {
+ if (vendor == id->vendor_id &&
+ device_id == id->device_id) {
+ *type = i;
+ return 1; /* Matched device */
+ }
+ }
+ return 0;
+}
+
+static int
+ccp_probe_device(const char *dirname, uint16_t domain,
+ uint8_t bus, uint8_t devid,
+ uint8_t function, int ccp_type)
+{
+ struct ccp_device *ccp_dev = NULL;
+ struct rte_pci_device *pci;
+ char filename[PATH_MAX];
+ unsigned long tmp;
+ int uio_fd = -1, i, uio_num;
+ char uio_devname[PATH_MAX];
+ void *map_addr;
+
+ ccp_dev = rte_zmalloc("ccp_device", sizeof(*ccp_dev),
+ RTE_CACHE_LINE_SIZE);
+ if (ccp_dev == NULL)
+ goto fail;
+ pci = &(ccp_dev->pci);
+
+ pci->addr.domain = domain;
+ pci->addr.bus = bus;
+ pci->addr.devid = devid;
+ pci->addr.function = function;
+
+ /* get vendor id */
+ snprintf(filename, sizeof(filename), "%s/vendor", dirname);
+ if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+ goto fail;
+ pci->id.vendor_id = (uint16_t)tmp;
+
+ /* get device id */
+ snprintf(filename, sizeof(filename), "%s/device", dirname);
+ if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+ goto fail;
+ pci->id.device_id = (uint16_t)tmp;
+
+ /* get subsystem_vendor id */
+ snprintf(filename, sizeof(filename), "%s/subsystem_vendor",
+ dirname);
+ if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+ goto fail;
+ pci->id.subsystem_vendor_id = (uint16_t)tmp;
+
+ /* get subsystem_device id */
+ snprintf(filename, sizeof(filename), "%s/subsystem_device",
+ dirname);
+ if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+ goto fail;
+ pci->id.subsystem_device_id = (uint16_t)tmp;
+
+ /* get class_id */
+ snprintf(filename, sizeof(filename), "%s/class",
+ dirname);
+ if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+ goto fail;
+	/* only the low 24 bits are valid: class, subclass, program interface */
+ pci->id.class_id = (uint32_t)tmp & RTE_CLASS_ANY_ID;
+
+ /* parse resources */
+ snprintf(filename, sizeof(filename), "%s/resource", dirname);
+ if (ccp_pci_parse_sysfs_resource(filename, pci) < 0)
+ goto fail;
+
+ uio_num = ccp_find_uio_devname(dirname);
+ if (uio_num < 0) {
+ /*
+ * It may take time for uio device to appear,
+ * wait here and try again
+ */
+ usleep(100000);
+ uio_num = ccp_find_uio_devname(dirname);
+ if (uio_num < 0)
+ goto fail;
+ }
+ snprintf(uio_devname, sizeof(uio_devname), "/dev/uio%u", uio_num);
+
+ uio_fd = open(uio_devname, O_RDWR | O_NONBLOCK);
+ if (uio_fd < 0)
+ goto fail;
+ if (flock(uio_fd, LOCK_EX | LOCK_NB))
+ goto fail;
+
+ /* Map the PCI memory resource of device */
+ for (i = 0; i < PCI_MAX_RESOURCE; i++) {
+
+ char devname[PATH_MAX];
+ int res_fd;
+
+ if (pci->mem_resource[i].phys_addr == 0)
+ continue;
+ snprintf(devname, sizeof(devname), "%s/resource%d", dirname, i);
+ res_fd = open(devname, O_RDWR);
+ if (res_fd < 0)
+ goto fail;
+ map_addr = mmap(NULL, pci->mem_resource[i].len,
+ PROT_READ | PROT_WRITE,
+ MAP_SHARED, res_fd, 0);
+ if (map_addr == MAP_FAILED)
+ goto fail;
+
+ pci->mem_resource[i].addr = map_addr;
+ }
+
+ /* device is valid, add in list */
+ if (ccp_add_device(ccp_dev, ccp_type)) {
+ ccp_remove_device(ccp_dev);
+ goto fail;
+ }
+
+ return 0;
+fail:
+ CCP_LOG_ERR("CCP Device probe failed");
+ if (uio_fd > 0)
+ close(uio_fd);
+ if (ccp_dev)
+ rte_free(ccp_dev);
+ return -1;
+}
+
+int
+ccp_probe_devices(const struct rte_pci_id *ccp_id)
+{
+ int dev_cnt = 0;
+ int ccp_type = 0;
+ struct dirent *d;
+ DIR *dir;
+ int ret = 0;
+ int module_idx = 0;
+ uint16_t domain;
+ uint8_t bus, devid, function;
+ char dirname[PATH_MAX];
+
+ module_idx = ccp_check_pci_uio_module();
+ if (module_idx < 0)
+ return -1;
+
+ TAILQ_INIT(&ccp_list);
+ dir = opendir(SYSFS_PCI_DEVICES);
+ if (dir == NULL)
+ return -1;
+ while ((d = readdir(dir)) != NULL) {
+ if (d->d_name[0] == '.')
+ continue;
+ if (ccp_parse_pci_addr_format(d->d_name, sizeof(d->d_name),
+ &domain, &bus, &devid, &function) != 0)
+ continue;
+ snprintf(dirname, sizeof(dirname), "%s/%s",
+ SYSFS_PCI_DEVICES, d->d_name);
+ if (is_ccp_device(dirname, ccp_id, &ccp_type)) {
+ printf("CCP : Detected CCP device with ID = 0x%x\n",
+ ccp_id[ccp_type].device_id);
+ ret = ccp_probe_device(dirname, domain, bus, devid,
+ function, ccp_type);
+ if (ret == 0)
+ dev_cnt++;
+ }
+ }
+ closedir(dir);
+ return dev_cnt;
+}
diff --git a/src/spdk/dpdk/drivers/crypto/ccp/ccp_dev.h b/src/spdk/dpdk/drivers/crypto/ccp/ccp_dev.h
new file mode 100644
index 00000000..de3e4bcc
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/ccp/ccp_dev.h
@@ -0,0 +1,495 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#ifndef _CCP_DEV_H_
+#define _CCP_DEV_H_
+
+#include <limits.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <rte_bus_pci.h>
+#include <rte_atomic.h>
+#include <rte_byteorder.h>
+#include <rte_io.h>
+#include <rte_pci.h>
+#include <rte_spinlock.h>
+#include <rte_crypto_sym.h>
+#include <rte_cryptodev.h>
+
+/**< CCP specific */
+#define MAX_HW_QUEUES 5
+#define CCP_MAX_TRNG_RETRIES 10
+#define CCP_ALIGN(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
+
+/**< CCP Register Mappings */
+#define Q_MASK_REG 0x000
+#define TRNG_OUT_REG 0x00c
+
+/* CCP Version 5 Specifics */
+#define CMD_QUEUE_MASK_OFFSET 0x00
+#define CMD_QUEUE_PRIO_OFFSET 0x04
+#define CMD_REQID_CONFIG_OFFSET 0x08
+#define CMD_CMD_TIMEOUT_OFFSET 0x10
+#define LSB_PUBLIC_MASK_LO_OFFSET 0x18
+#define LSB_PUBLIC_MASK_HI_OFFSET 0x1C
+#define LSB_PRIVATE_MASK_LO_OFFSET 0x20
+#define LSB_PRIVATE_MASK_HI_OFFSET 0x24
+
+#define CMD_Q_CONTROL_BASE 0x0000
+#define CMD_Q_TAIL_LO_BASE 0x0004
+#define CMD_Q_HEAD_LO_BASE 0x0008
+#define CMD_Q_INT_ENABLE_BASE 0x000C
+#define CMD_Q_INTERRUPT_STATUS_BASE 0x0010
+
+#define CMD_Q_STATUS_BASE 0x0100
+#define CMD_Q_INT_STATUS_BASE 0x0104
+
+#define CMD_CONFIG_0_OFFSET 0x6000
+#define CMD_TRNG_CTL_OFFSET 0x6008
+#define CMD_AES_MASK_OFFSET 0x6010
+#define CMD_CLK_GATE_CTL_OFFSET 0x603C
+
+/* Address offset between two virtual queue registers */
+#define CMD_Q_STATUS_INCR 0x1000
+
+/* Bit masks */
+#define CMD_Q_RUN 0x1
+#define CMD_Q_SIZE 0x1F
+#define CMD_Q_SHIFT 3
+#define COMMANDS_PER_QUEUE 2048
+
+#define QUEUE_SIZE_VAL ((ffs(COMMANDS_PER_QUEUE) - 2) & \
+ CMD_Q_SIZE)
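+/*
+ * Worked example: with COMMANDS_PER_QUEUE = 2048, ffs(2048) = 12, so
+ * QUEUE_SIZE_VAL = (12 - 2) & 0x1F = 10; the control register field is
+ * assumed to encode a queue of 2^(n + 1) descriptors, i.e. 2048 entries.
+ */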
+#define Q_DESC_SIZE sizeof(struct ccp_desc)
+#define Q_SIZE(n) (COMMANDS_PER_QUEUE*(n))
+
+#define INT_COMPLETION 0x1
+#define INT_ERROR 0x2
+#define INT_QUEUE_STOPPED 0x4
+#define ALL_INTERRUPTS (INT_COMPLETION| \
+ INT_ERROR| \
+ INT_QUEUE_STOPPED)
+
+#define LSB_REGION_WIDTH 5
+#define MAX_LSB_CNT 8
+
+#define LSB_SIZE 16
+#define LSB_ITEM_SIZE 32
+#define SLSB_MAP_SIZE (MAX_LSB_CNT * LSB_SIZE)
+#define LSB_ENTRY_NUMBER(LSB_ADDR) (LSB_ADDR / LSB_ITEM_SIZE)
+
+/* General CCP Defines */
+
+#define CCP_SB_BYTES 32
+/* Word 0 */
+#define CCP_CMD_DW0(p) ((p)->dw0)
+#define CCP_CMD_SOC(p) (CCP_CMD_DW0(p).soc)
+#define CCP_CMD_IOC(p) (CCP_CMD_DW0(p).ioc)
+#define CCP_CMD_INIT(p) (CCP_CMD_DW0(p).init)
+#define CCP_CMD_EOM(p) (CCP_CMD_DW0(p).eom)
+#define CCP_CMD_FUNCTION(p) (CCP_CMD_DW0(p).function)
+#define CCP_CMD_ENGINE(p) (CCP_CMD_DW0(p).engine)
+#define CCP_CMD_PROT(p) (CCP_CMD_DW0(p).prot)
+
+/* Word 1 */
+#define CCP_CMD_DW1(p) ((p)->length)
+#define CCP_CMD_LEN(p) (CCP_CMD_DW1(p))
+
+/* Word 2 */
+#define CCP_CMD_DW2(p) ((p)->src_lo)
+#define CCP_CMD_SRC_LO(p) (CCP_CMD_DW2(p))
+
+/* Word 3 */
+#define CCP_CMD_DW3(p) ((p)->dw3)
+#define CCP_CMD_SRC_MEM(p) ((p)->dw3.src_mem)
+#define CCP_CMD_SRC_HI(p) ((p)->dw3.src_hi)
+#define CCP_CMD_LSB_ID(p) ((p)->dw3.lsb_cxt_id)
+#define CCP_CMD_FIX_SRC(p) ((p)->dw3.fixed)
+
+/* Words 4/5 */
+#define CCP_CMD_DW4(p) ((p)->dw4)
+#define CCP_CMD_DST_LO(p) (CCP_CMD_DW4(p).dst_lo)
+#define CCP_CMD_DW5(p) ((p)->dw5.fields.dst_hi)
+#define CCP_CMD_DST_HI(p) (CCP_CMD_DW5(p))
+#define CCP_CMD_DST_MEM(p) ((p)->dw5.fields.dst_mem)
+#define CCP_CMD_FIX_DST(p) ((p)->dw5.fields.fixed)
+#define CCP_CMD_SHA_LO(p) ((p)->dw4.sha_len_lo)
+#define CCP_CMD_SHA_HI(p) ((p)->dw5.sha_len_hi)
+
+/* Word 6/7 */
+#define CCP_CMD_DW6(p) ((p)->key_lo)
+#define CCP_CMD_KEY_LO(p) (CCP_CMD_DW6(p))
+#define CCP_CMD_DW7(p) ((p)->dw7)
+#define CCP_CMD_KEY_HI(p) ((p)->dw7.key_hi)
+#define CCP_CMD_KEY_MEM(p) ((p)->dw7.key_mem)
+
+/* bitmap */
+enum {
+ BITS_PER_WORD = sizeof(unsigned long) * CHAR_BIT
+};
+
+#define WORD_OFFSET(b) ((b) / BITS_PER_WORD)
+#define BIT_OFFSET(b) ((b) % BITS_PER_WORD)
+
+#define CCP_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+#define CCP_BITMAP_SIZE(nr) \
+ CCP_DIV_ROUND_UP(nr, CHAR_BIT * sizeof(unsigned long))
+
+#define CCP_BITMAP_FIRST_WORD_MASK(start) \
+ (~0UL << ((start) & (BITS_PER_WORD - 1)))
+#define CCP_BITMAP_LAST_WORD_MASK(nbits) \
+ (~0UL >> (-(nbits) & (BITS_PER_WORD - 1)))
+
+#define __ccp_round_mask(x, y) ((typeof(x))((y)-1))
+#define ccp_round_down(x, y) ((x) & ~__ccp_round_mask(x, y))
+
+/** CCP registers Write/Read */
+
+static inline void ccp_pci_reg_write(void *base, int offset,
+ uint32_t value)
+{
+ volatile void *reg_addr = ((uint8_t *)base + offset);
+
+ rte_write32((rte_cpu_to_le_32(value)), reg_addr);
+}
+
+static inline uint32_t ccp_pci_reg_read(void *base, int offset)
+{
+ volatile void *reg_addr = ((uint8_t *)base + offset);
+
+ return rte_le_to_cpu_32(rte_read32(reg_addr));
+}
+
+#define CCP_READ_REG(hw_addr, reg_offset) \
+ ccp_pci_reg_read(hw_addr, reg_offset)
+
+#define CCP_WRITE_REG(hw_addr, reg_offset, value) \
+ ccp_pci_reg_write(hw_addr, reg_offset, value)
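+
+/*
+ * Example (taken from ccp_add_device() in ccp_dev.c): the registers of
+ * queue i live at reg_base = BAR2 + CMD_Q_STATUS_INCR * (i + 1), so a
+ * caller does e.g.
+ *
+ *	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE, cmd_q->qcontrol);
+ *	head = CCP_READ_REG(cmd_q->reg_base, CMD_Q_HEAD_LO_BASE);
+ */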
+
+TAILQ_HEAD(ccp_list, ccp_device);
+
+extern struct ccp_list ccp_list;
+
+/**
+ * CCP device version
+ */
+enum ccp_device_version {
+ CCP_VERSION_5A = 0,
+ CCP_VERSION_5B,
+};
+
+/**
+ * A structure describing a CCP command queue.
+ */
+struct ccp_queue {
+ struct ccp_device *dev;
+ char memz_name[RTE_MEMZONE_NAMESIZE];
+
+ rte_atomic64_t free_slots;
+ /**< available free slots updated from enq/deq calls */
+
+ /* Queue identifier */
+ uint64_t id; /**< queue id */
+ uint64_t qidx; /**< queue index */
+ uint64_t qsize; /**< queue size */
+
+ /* Queue address */
+ struct ccp_desc *qbase_desc;
+ void *qbase_addr;
+ phys_addr_t qbase_phys_addr;
+ /**< queue-page registers addr */
+ void *reg_base;
+
+ uint32_t qcontrol;
+ /**< queue ctrl reg */
+
+ int lsb;
+ /**< lsb region assigned to queue */
+ unsigned long lsbmask;
+ /**< lsb regions queue can access */
+ unsigned long lsbmap[CCP_BITMAP_SIZE(LSB_SIZE)];
+ /**< all lsb resources which queue is using */
+ uint32_t sb_key;
+ /**< lsb assigned for queue */
+ uint32_t sb_iv;
+ /**< lsb assigned for iv */
+ uint32_t sb_sha;
+ /**< lsb assigned for sha ctx */
+ uint32_t sb_hmac;
+ /**< lsb assigned for hmac ctx */
+} __rte_cache_aligned;
+
+/**
+ * A structure describing a CCP device.
+ */
+struct ccp_device {
+ TAILQ_ENTRY(ccp_device) next;
+ int id;
+ /**< ccp dev id on platform */
+ struct ccp_queue cmd_q[MAX_HW_QUEUES];
+ /**< ccp queue */
+ int cmd_q_count;
+ /**< no. of ccp Queues */
+ struct rte_pci_device pci;
+ /**< ccp pci identifier */
+ unsigned long lsbmap[CCP_BITMAP_SIZE(SLSB_MAP_SIZE)];
+ /**< shared lsb mask of ccp */
+ rte_spinlock_t lsb_lock;
+ /**< protection for shared lsb region allocation */
+ int qidx;
+ /**< current queue index */
+ int hwrng_retries;
+ /**< retry counter for CCP TRNG */
+} __rte_cache_aligned;
+
+/**< CCP H/W engine related */
+/**
+ * ccp_engine - CCP operation identifiers
+ *
+ * @CCP_ENGINE_AES: AES operation
+ * @CCP_ENGINE_XTS_AES: 128-bit XTS AES operation
+ * @CCP_ENGINE_3DES: DES/3DES operation
+ * @CCP_ENGINE_SHA: SHA operation
+ * @CCP_ENGINE_RSA: RSA operation
+ * @CCP_ENGINE_PASSTHRU: pass-through operation
+ * @CCP_ENGINE_ZLIB_DECOMPRESS: unused
+ * @CCP_ENGINE_ECC: ECC operation
+ */
+enum ccp_engine {
+ CCP_ENGINE_AES = 0,
+ CCP_ENGINE_XTS_AES_128,
+ CCP_ENGINE_3DES,
+ CCP_ENGINE_SHA,
+ CCP_ENGINE_RSA,
+ CCP_ENGINE_PASSTHRU,
+ CCP_ENGINE_ZLIB_DECOMPRESS,
+ CCP_ENGINE_ECC,
+ CCP_ENGINE__LAST,
+};
+
+/* Passthru engine */
+/**
+ * ccp_passthru_bitwise - type of bitwise passthru operation
+ *
+ * @CCP_PASSTHRU_BITWISE_NOOP: no bitwise operation performed
+ * @CCP_PASSTHRU_BITWISE_AND: perform bitwise AND of src with mask
+ * @CCP_PASSTHRU_BITWISE_OR: perform bitwise OR of src with mask
+ * @CCP_PASSTHRU_BITWISE_XOR: perform bitwise XOR of src with mask
+ * @CCP_PASSTHRU_BITWISE_MASK: overwrite with mask
+ */
+enum ccp_passthru_bitwise {
+ CCP_PASSTHRU_BITWISE_NOOP = 0,
+ CCP_PASSTHRU_BITWISE_AND,
+ CCP_PASSTHRU_BITWISE_OR,
+ CCP_PASSTHRU_BITWISE_XOR,
+ CCP_PASSTHRU_BITWISE_MASK,
+ CCP_PASSTHRU_BITWISE__LAST,
+};
+
+/**
+ * ccp_passthru_byteswap - type of byteswap passthru operation
+ *
+ * @CCP_PASSTHRU_BYTESWAP_NOOP: no byte swapping performed
+ * @CCP_PASSTHRU_BYTESWAP_32BIT: swap bytes within 32-bit words
+ * @CCP_PASSTHRU_BYTESWAP_256BIT: swap bytes within 256-bit words
+ */
+enum ccp_passthru_byteswap {
+ CCP_PASSTHRU_BYTESWAP_NOOP = 0,
+ CCP_PASSTHRU_BYTESWAP_32BIT,
+ CCP_PASSTHRU_BYTESWAP_256BIT,
+ CCP_PASSTHRU_BYTESWAP__LAST,
+};
+
+/**
+ * CCP passthru
+ */
+struct ccp_passthru {
+ phys_addr_t src_addr;
+ phys_addr_t dest_addr;
+ enum ccp_passthru_bitwise bit_mod;
+ enum ccp_passthru_byteswap byte_swap;
+ int len;
+ int dir;
+};
+
+/* CCP version 5: Union to define the function field (cmd_reg1/dword0) */
+union ccp_function {
+ struct {
+ uint16_t size:7;
+ uint16_t encrypt:1;
+ uint16_t mode:5;
+ uint16_t type:2;
+ } aes;
+ struct {
+ uint16_t size:7;
+ uint16_t encrypt:1;
+ uint16_t mode:5;
+ uint16_t type:2;
+ } des;
+ struct {
+ uint16_t size:7;
+ uint16_t encrypt:1;
+ uint16_t rsvd:5;
+ uint16_t type:2;
+ } aes_xts;
+ struct {
+ uint16_t rsvd1:10;
+ uint16_t type:4;
+ uint16_t rsvd2:1;
+ } sha;
+ struct {
+ uint16_t mode:3;
+ uint16_t size:12;
+ } rsa;
+ struct {
+ uint16_t byteswap:2;
+ uint16_t bitwise:3;
+ uint16_t reflect:2;
+ uint16_t rsvd:8;
+ } pt;
+ struct {
+ uint16_t rsvd:13;
+ } zlib;
+ struct {
+ uint16_t size:10;
+ uint16_t type:2;
+ uint16_t mode:3;
+ } ecc;
+ uint16_t raw;
+};
+
+
+/**
+ * descriptor for version 5 CCP commands
+ * 8 32-bit words:
+ * word 0: function; engine; control bits
+ * word 1: length of source data
+ * word 2: low 32 bits of source pointer
+ * word 3: upper 16 bits of source pointer; source memory type
+ * word 4: low 32 bits of destination pointer
+ * word 5: upper 16 bits of destination pointer; destination memory
+ * type
+ * word 6: low 32 bits of key pointer
+ * word 7: upper 16 bits of key pointer; key memory type
+ */
+struct dword0 {
+ uint32_t soc:1;
+ uint32_t ioc:1;
+ uint32_t rsvd1:1;
+ uint32_t init:1;
+ uint32_t eom:1;
+ uint32_t function:15;
+ uint32_t engine:4;
+ uint32_t prot:1;
+ uint32_t rsvd2:7;
+};
+
+struct dword3 {
+ uint32_t src_hi:16;
+ uint32_t src_mem:2;
+ uint32_t lsb_cxt_id:8;
+ uint32_t rsvd1:5;
+ uint32_t fixed:1;
+};
+
+union dword4 {
+ uint32_t dst_lo; /* NON-SHA */
+ uint32_t sha_len_lo; /* SHA */
+};
+
+union dword5 {
+ struct {
+ uint32_t dst_hi:16;
+ uint32_t dst_mem:2;
+ uint32_t rsvd1:13;
+ uint32_t fixed:1;
+	} fields;
+ uint32_t sha_len_hi;
+};
+
+struct dword7 {
+ uint32_t key_hi:16;
+ uint32_t key_mem:2;
+ uint32_t rsvd1:14;
+};
+
+struct ccp_desc {
+ struct dword0 dw0;
+ uint32_t length;
+ uint32_t src_lo;
+ struct dword3 dw3;
+ union dword4 dw4;
+ union dword5 dw5;
+ uint32_t key_lo;
+ struct dword7 dw7;
+};
+
+/**
+ * ccp memory type
+ */
+enum ccp_memtype {
+ CCP_MEMTYPE_SYSTEM = 0,
+ CCP_MEMTYPE_SB,
+ CCP_MEMTYPE_LOCAL,
+ CCP_MEMTYPE_LAST,
+};
+
+/**
+ * cmd id to follow order
+ */
+enum ccp_cmd_order {
+ CCP_CMD_CIPHER = 0,
+ CCP_CMD_AUTH,
+ CCP_CMD_CIPHER_HASH,
+ CCP_CMD_HASH_CIPHER,
+ CCP_CMD_COMBINED,
+ CCP_CMD_NOT_SUPPORTED,
+};
+
+static inline uint32_t
+low32_value(unsigned long addr)
+{
+ return ((uint64_t)addr) & 0x0ffffffff;
+}
+
+static inline uint32_t
+high32_value(unsigned long addr)
+{
+ return ((uint64_t)addr >> 32) & 0x00000ffff;
+}
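+
+/*
+ * Example: for addr = 0x123456789, low32_value() returns 0x23456789 and
+ * high32_value() returns 0x0001; only 16 high bits are kept because the
+ * descriptor format above carries the upper half of a pointer in a
+ * 16-bit field (see struct dword3 and struct dword7).
+ */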
+
+/*
+ * Start CCP device
+ */
+int ccp_dev_start(struct rte_cryptodev *dev);
+
+/**
+ * Detect ccp platform and initialize all ccp devices
+ *
+ * @param ccp_id rte_pci_id list for supported CCP devices
+ * @return no. of successfully initialized CCP devices
+ */
+int ccp_probe_devices(const struct rte_pci_id *ccp_id);
+
+/**
+ * allocate a ccp command queue
+ *
+ * @param dev rte crypto device
+ * @param slot_req number of slots required
+ * @return allotted CCP queue on success otherwise NULL
+ */
+struct ccp_queue *ccp_allot_queue(struct rte_cryptodev *dev, int slot_req);
+
+/**
+ * read hwrng value
+ *
+ * @param trng_value data pointer to write RNG value
+ * @return 0 on success otherwise -1
+ */
+int ccp_read_hwrng(uint32_t *trng_value);
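+
+/*
+ * Illustrative sketch only (names hypothetical): polling the TRNG.
+ *
+ *	uint32_t rnd;
+ *
+ *	if (ccp_read_hwrng(&rnd) == 0)
+ *		consume_entropy(rnd);            // hypothetical consumer
+ *	else
+ *		CCP_LOG_ERR("TRNG gave no data after retries");
+ */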
+
+#endif /* _CCP_DEV_H_ */
diff --git a/src/spdk/dpdk/drivers/crypto/ccp/ccp_pci.c b/src/spdk/dpdk/drivers/crypto/ccp/ccp_pci.c
new file mode 100644
index 00000000..59152ca5
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/ccp/ccp_pci.c
@@ -0,0 +1,236 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#include <dirent.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <rte_string_fns.h>
+
+#include "ccp_pci.h"
+
+static const char * const uio_module_names[] = {
+ "igb_uio",
+ "uio_pci_generic",
+};
+
+int
+ccp_check_pci_uio_module(void)
+{
+ FILE *fp;
+ int i;
+ char buf[BUFSIZ];
+
+ fp = fopen(PROC_MODULES, "r");
+ if (fp == NULL)
+ return -1;
+ i = 0;
+	while (uio_module_names[i] != NULL) {
+		while (fgets(buf, sizeof(buf), fp) != NULL) {
+			if (!strncmp(buf, uio_module_names[i],
+				     strlen(uio_module_names[i]))) {
+				fclose(fp);
+				return i;
+			}
+		}
+		i++;
+		rewind(fp);
+	}
+	fclose(fp);
+	printf("Insert igb_uio or uio_pci_generic kernel module(s)\n");
+	return -1; /* uio not inserted */
+}
+
+/*
+ * split up a pci address into its constituent parts.
+ */
+int
+ccp_parse_pci_addr_format(const char *buf, int bufsize, uint16_t *domain,
+ uint8_t *bus, uint8_t *devid, uint8_t *function)
+{
+ /* first split on ':' */
+ union splitaddr {
+ struct {
+ char *domain;
+ char *bus;
+ char *devid;
+ char *function;
+ };
+ char *str[PCI_FMT_NVAL];
+ /* last element-separator is "." not ":" */
+ } splitaddr;
+
+ char *buf_copy = strndup(buf, bufsize);
+
+ if (buf_copy == NULL)
+ return -1;
+
+ if (rte_strsplit(buf_copy, bufsize, splitaddr.str, PCI_FMT_NVAL, ':')
+ != PCI_FMT_NVAL - 1)
+ goto error;
+ /* final split is on '.' between devid and function */
+ splitaddr.function = strchr(splitaddr.devid, '.');
+ if (splitaddr.function == NULL)
+ goto error;
+ *splitaddr.function++ = '\0';
+
+ /* now convert to int values */
+ errno = 0;
+	*domain = (uint16_t)strtoul(splitaddr.domain, NULL, 16);
+ *bus = (uint8_t)strtoul(splitaddr.bus, NULL, 16);
+ *devid = (uint8_t)strtoul(splitaddr.devid, NULL, 16);
+ *function = (uint8_t)strtoul(splitaddr.function, NULL, 10);
+ if (errno != 0)
+ goto error;
+
+	free(buf_copy); /* free the copy made with strndup */
+ return 0;
+error:
+ free(buf_copy);
+ return -1;
+}
+
+int
+ccp_pci_parse_sysfs_value(const char *filename, unsigned long *val)
+{
+ FILE *f;
+ char buf[BUFSIZ];
+ char *end = NULL;
+
+ f = fopen(filename, "r");
+ if (f == NULL)
+ return -1;
+ if (fgets(buf, sizeof(buf), f) == NULL) {
+ fclose(f);
+ return -1;
+ }
+ *val = strtoul(buf, &end, 0);
+ if ((buf[0] == '\0') || (end == NULL) || (*end != '\n')) {
+ fclose(f);
+ return -1;
+ }
+ fclose(f);
+ return 0;
+}
+
+/** IO resource type: */
+#define IORESOURCE_IO 0x00000100
+#define IORESOURCE_MEM 0x00000200
+
+/* parse one line of the "resource" sysfs file (note that the 'line'
+ * string is modified)
+ */
+static int
+ccp_pci_parse_one_sysfs_resource(char *line, size_t len, uint64_t *phys_addr,
+ uint64_t *end_addr, uint64_t *flags)
+{
+ union pci_resource_info {
+ struct {
+ char *phys_addr;
+ char *end_addr;
+ char *flags;
+ };
+ char *ptrs[PCI_RESOURCE_FMT_NVAL];
+ } res_info;
+
+ if (rte_strsplit(line, len, res_info.ptrs, 3, ' ') != 3)
+ return -1;
+ errno = 0;
+ *phys_addr = strtoull(res_info.phys_addr, NULL, 16);
+ *end_addr = strtoull(res_info.end_addr, NULL, 16);
+ *flags = strtoull(res_info.flags, NULL, 16);
+ if (errno != 0)
+ return -1;
+
+ return 0;
+}
+
+/* parse the "resource" sysfs file */
+int
+ccp_pci_parse_sysfs_resource(const char *filename, struct rte_pci_device *dev)
+{
+ FILE *fp;
+ char buf[BUFSIZ];
+ int i;
+ uint64_t phys_addr, end_addr, flags;
+
+ fp = fopen(filename, "r");
+ if (fp == NULL)
+ return -1;
+
+ for (i = 0; i < PCI_MAX_RESOURCE; i++) {
+ if (fgets(buf, sizeof(buf), fp) == NULL)
+ goto error;
+ if (ccp_pci_parse_one_sysfs_resource(buf, sizeof(buf),
+ &phys_addr, &end_addr, &flags) < 0)
+ goto error;
+
+ if (flags & IORESOURCE_MEM) {
+ dev->mem_resource[i].phys_addr = phys_addr;
+ dev->mem_resource[i].len = end_addr - phys_addr + 1;
+ /* not mapped for now */
+ dev->mem_resource[i].addr = NULL;
+ }
+ }
+ fclose(fp);
+ return 0;
+
+error:
+ fclose(fp);
+ return -1;
+}
+
+int
+ccp_find_uio_devname(const char *dirname)
+{
+
+ DIR *dir;
+ struct dirent *e;
+ char dirname_uio[PATH_MAX];
+ unsigned int uio_num;
+ int ret = -1;
+
+ /* depending on kernel version, uio can be located in uio/uioX
+ * or uio:uioX
+ */
+ snprintf(dirname_uio, sizeof(dirname_uio), "%s/uio", dirname);
+ dir = opendir(dirname_uio);
+ if (dir == NULL) {
+		/* retry with the parent dir; the layout differs across kernels */
+ dir = opendir(dirname);
+ if (dir == NULL)
+ return -1;
+ }
+
+ /* take the first file starting with "uio" */
+ while ((e = readdir(dir)) != NULL) {
+ /* format could be uio%d ...*/
+ int shortprefix_len = sizeof("uio") - 1;
+ /* ... or uio:uio%d */
+ int longprefix_len = sizeof("uio:uio") - 1;
+ char *endptr;
+
+ if (strncmp(e->d_name, "uio", 3) != 0)
+ continue;
+
+ /* first try uio%d */
+ errno = 0;
+ uio_num = strtoull(e->d_name + shortprefix_len, &endptr, 10);
+ if (errno == 0 && endptr != (e->d_name + shortprefix_len)) {
+ ret = uio_num;
+ break;
+ }
+
+ /* then try uio:uio%d */
+ errno = 0;
+ uio_num = strtoull(e->d_name + longprefix_len, &endptr, 10);
+ if (errno == 0 && endptr != (e->d_name + longprefix_len)) {
+ ret = uio_num;
+ break;
+ }
+ }
+ closedir(dir);
+ return ret;
+}
diff --git a/src/spdk/dpdk/drivers/crypto/ccp/ccp_pci.h b/src/spdk/dpdk/drivers/crypto/ccp/ccp_pci.h
new file mode 100644
index 00000000..7ed3bac4
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/ccp/ccp_pci.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#ifndef _CCP_PCI_H_
+#define _CCP_PCI_H_
+
+#include <stdint.h>
+
+#include <rte_bus_pci.h>
+
+#define SYSFS_PCI_DEVICES "/sys/bus/pci/devices"
+#define PROC_MODULES "/proc/modules"
+
+int ccp_check_pci_uio_module(void);
+
+int ccp_parse_pci_addr_format(const char *buf, int bufsize, uint16_t *domain,
+ uint8_t *bus, uint8_t *devid, uint8_t *function);
+
+int ccp_pci_parse_sysfs_value(const char *filename, unsigned long *val);
+
+int ccp_pci_parse_sysfs_resource(const char *filename,
+ struct rte_pci_device *dev);
+
+int ccp_find_uio_devname(const char *dirname);
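+
+/*
+ * Illustrative flow (assumed, simplified): ccp_probe_devices() in
+ * ccp_dev.c combines these helpers roughly as
+ *
+ *	ccp_check_pci_uio_module();              // igb_uio/uio_pci_generic?
+ *	ccp_parse_pci_addr_format(name, len, &dom, &bus, &dev, &fn);
+ *	ccp_pci_parse_sysfs_value("<sysfs>/vendor", &val);
+ *	ccp_pci_parse_sysfs_resource("<sysfs>/resource", pci_dev);
+ *	ccp_find_uio_devname(dirname);           // -> /dev/uioN, mmap BARs
+ */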
+
+#endif /* _CCP_PCI_H_ */
diff --git a/src/spdk/dpdk/drivers/crypto/ccp/ccp_pmd_ops.c b/src/spdk/dpdk/drivers/crypto/ccp/ccp_pmd_ops.c
new file mode 100644
index 00000000..6984913f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/ccp/ccp_pmd_ops.c
@@ -0,0 +1,833 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_malloc.h>
+
+#include "ccp_pmd_private.h"
+#include "ccp_dev.h"
+#include "ccp_crypto.h"
+
+#define CCP_BASE_SYM_CRYPTO_CAPABILITIES \
+ { /* SHA1 */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA1, \
+ .block_size = 64, \
+ .key_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 20, \
+ .max = 20, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA1 HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA1_HMAC, \
+ .block_size = 64, \
+ .key_size = { \
+ .min = 1, \
+ .max = 64, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 20, \
+ .max = 20, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA224 */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA224, \
+ .block_size = 64, \
+ .key_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 28, \
+ .max = 28, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA224 HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA224_HMAC, \
+ .block_size = 64, \
+ .key_size = { \
+ .min = 1, \
+ .max = 64, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 28, \
+ .max = 28, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA3-224 */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA3_224, \
+ .block_size = 144, \
+ .key_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 28, \
+ .max = 28, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA3-224 HMAC*/ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA3_224_HMAC, \
+ .block_size = 144, \
+ .key_size = { \
+ .min = 1, \
+ .max = 144, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 28, \
+ .max = 28, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA256 */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA256, \
+ .block_size = 64, \
+ .key_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 32, \
+ .max = 32, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA256 HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA256_HMAC, \
+ .block_size = 64, \
+ .key_size = { \
+ .min = 1, \
+ .max = 64, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 32, \
+ .max = 32, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA3-256 */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA3_256, \
+ .block_size = 136, \
+ .key_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 32, \
+ .max = 32, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA3-256-HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA3_256_HMAC, \
+ .block_size = 136, \
+ .key_size = { \
+ .min = 1, \
+ .max = 136, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 32, \
+ .max = 32, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA384 */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA384, \
+ .block_size = 128, \
+ .key_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 48, \
+ .max = 48, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA384 HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA384_HMAC, \
+ .block_size = 128, \
+ .key_size = { \
+ .min = 1, \
+ .max = 128, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 48, \
+ .max = 48, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA3-384 */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA3_384, \
+ .block_size = 104, \
+ .key_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 48, \
+ .max = 48, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA3-384-HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA3_384_HMAC, \
+ .block_size = 104, \
+ .key_size = { \
+ .min = 1, \
+ .max = 104, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 48, \
+ .max = 48, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA512 */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA512, \
+ .block_size = 128, \
+ .key_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 64, \
+ .max = 64, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA512 HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA512_HMAC, \
+ .block_size = 128, \
+ .key_size = { \
+ .min = 1, \
+ .max = 128, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 64, \
+ .max = 64, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA3-512 */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA3_512, \
+ .block_size = 72, \
+ .key_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 64, \
+ .max = 64, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA3-512-HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA3_512_HMAC, \
+ .block_size = 72, \
+ .key_size = { \
+ .min = 1, \
+ .max = 72, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 64, \
+ .max = 64, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /*AES-CMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_AES_CMAC, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 32, \
+ .increment = 8 \
+ }, \
+ .digest_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ }, \
+ }, } \
+ }, } \
+ }, \
+ { /* AES ECB */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_AES_ECB, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 32, \
+ .increment = 8 \
+ }, \
+ .iv_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* AES CBC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_AES_CBC, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 32, \
+ .increment = 8 \
+ }, \
+ .iv_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* AES CTR */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_AES_CTR, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 32, \
+ .increment = 8 \
+ }, \
+ .iv_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* 3DES CBC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_3DES_CBC, \
+ .block_size = 8, \
+ .key_size = { \
+ .min = 16, \
+ .max = 24, \
+ .increment = 8 \
+ }, \
+ .iv_size = { \
+ .min = 8, \
+ .max = 8, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* AES GCM */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD, \
+ {.aead = { \
+ .algo = RTE_CRYPTO_AEAD_AES_GCM, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 32, \
+ .increment = 8 \
+ }, \
+ .digest_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ }, \
+ .aad_size = { \
+ .min = 0, \
+ .max = 65535, \
+ .increment = 1 \
+ }, \
+ .iv_size = { \
+ .min = 12, \
+ .max = 16, \
+ .increment = 4 \
+ }, \
+ }, } \
+ }, } \
+ }
+
+#define CCP_EXTRA_SYM_CRYPTO_CAPABILITIES \
+ { /* MD5 HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_MD5_HMAC, \
+ .block_size = 64, \
+ .key_size = { \
+ .min = 1, \
+ .max = 64, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }
+
+static const struct rte_cryptodev_capabilities ccp_crypto_cap[] = {
+ CCP_BASE_SYM_CRYPTO_CAPABILITIES,
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+static const struct rte_cryptodev_capabilities ccp_crypto_cap_complete[] = {
+ CCP_EXTRA_SYM_CRYPTO_CAPABILITIES,
+ CCP_BASE_SYM_CRYPTO_CAPABILITIES,
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
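+/*
+ * Illustrative sketch (not part of this driver): an application can check
+ * whether a given algorithm and key/IV size fall within the capabilities
+ * advertised above through the generic cryptodev API. Here dev_id is
+ * assumed to be the id of an initialised crypto_ccp device:
+ *
+ *	struct rte_cryptodev_sym_capability_idx idx = {
+ *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ *		.algo.cipher = RTE_CRYPTO_CIPHER_AES_CBC,
+ *	};
+ *	const struct rte_cryptodev_symmetric_capability *cap =
+ *		rte_cryptodev_sym_capability_get(dev_id, &idx);
+ *
+ *	if (cap != NULL &&
+ *	    rte_cryptodev_sym_capability_check_cipher(cap, 16, 16) == 0)
+ *		printf("AES-128-CBC with a 16-byte IV is supported\n");
+ */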
+
+static int
+ccp_pmd_config(struct rte_cryptodev *dev __rte_unused,
+ struct rte_cryptodev_config *config __rte_unused)
+{
+ return 0;
+}
+
+static int
+ccp_pmd_start(struct rte_cryptodev *dev)
+{
+ return ccp_dev_start(dev);
+}
+
+static void
+ccp_pmd_stop(struct rte_cryptodev *dev __rte_unused)
+{
+
+}
+
+static int
+ccp_pmd_close(struct rte_cryptodev *dev __rte_unused)
+{
+ return 0;
+}
+
+static void
+ccp_pmd_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct ccp_qp *qp = dev->data->queue_pairs[qp_id];
+
+ stats->enqueued_count += qp->qp_stats.enqueued_count;
+ stats->dequeued_count += qp->qp_stats.dequeued_count;
+
+ stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
+ stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
+ }
+
+}
+
+static void
+ccp_pmd_stats_reset(struct rte_cryptodev *dev)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct ccp_qp *qp = dev->data->queue_pairs[qp_id];
+
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ }
+}
+
+static void
+ccp_pmd_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *dev_info)
+{
+ struct ccp_private *internals = dev->data->dev_private;
+
+ if (dev_info != NULL) {
+ dev_info->driver_id = dev->driver_id;
+ dev_info->feature_flags = dev->feature_flags;
+ dev_info->capabilities = ccp_crypto_cap;
+ if (internals->auth_opt == 1)
+ dev_info->capabilities = ccp_crypto_cap_complete;
+ dev_info->max_nb_queue_pairs = internals->max_nb_qpairs;
+		/* No limit on the number of sessions */
+ dev_info->sym.max_nb_sessions = 0;
+ }
+}
+
+static int
+ccp_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+ struct ccp_qp *qp;
+
+ if (dev->data->queue_pairs[qp_id] != NULL) {
+ qp = (struct ccp_qp *)dev->data->queue_pairs[qp_id];
+ rte_ring_free(qp->processed_pkts);
+ rte_mempool_free(qp->batch_mp);
+ rte_free(qp);
+ dev->data->queue_pairs[qp_id] = NULL;
+ }
+ return 0;
+}
+
+static int
+ccp_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
+ struct ccp_qp *qp)
+{
+ unsigned int n = snprintf(qp->name, sizeof(qp->name),
+ "ccp_pmd_%u_qp_%u",
+ dev->data->dev_id, qp->id);
+
+	if (n >= sizeof(qp->name))
+ return -1;
+
+ return 0;
+}
+
+static struct rte_ring *
+ccp_pmd_qp_create_batch_info_ring(struct ccp_qp *qp,
+ unsigned int ring_size, int socket_id)
+{
+ struct rte_ring *r;
+
+ r = rte_ring_lookup(qp->name);
+ if (r) {
+ if (r->size >= ring_size) {
+ CCP_LOG_INFO(
+ "Reusing ring %s for processed packets",
+ qp->name);
+ return r;
+ }
+ CCP_LOG_INFO(
+ "Unable to reuse ring %s for processed packets",
+ qp->name);
+ return NULL;
+ }
+
+ return rte_ring_create(qp->name, ring_size, socket_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+}
+
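+/*
+ * Queue pair setup: allocate the ccp_qp structure, give it a unique name,
+ * create the ring used to hand processed batches from enqueue to dequeue,
+ * and create the per-queue-pair batch-info mempool.
+ */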
+static int
+ccp_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id, struct rte_mempool *session_pool)
+{
+ struct ccp_private *internals = dev->data->dev_private;
+ struct ccp_qp *qp;
+ int retval = 0;
+
+ if (qp_id >= internals->max_nb_qpairs) {
+ CCP_LOG_ERR("Invalid qp_id %u, should be less than %u",
+ qp_id, internals->max_nb_qpairs);
+ return (-EINVAL);
+ }
+
+ /* Free memory prior to re-allocation if needed. */
+ if (dev->data->queue_pairs[qp_id] != NULL)
+ ccp_pmd_qp_release(dev, qp_id);
+
+ /* Allocate the queue pair data structure. */
+ qp = rte_zmalloc_socket("CCP Crypto PMD Queue Pair", sizeof(*qp),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (qp == NULL) {
+ CCP_LOG_ERR("Failed to allocate queue pair memory");
+ return (-ENOMEM);
+ }
+
+ qp->dev = dev;
+ qp->id = qp_id;
+ dev->data->queue_pairs[qp_id] = qp;
+
+ retval = ccp_pmd_qp_set_unique_name(dev, qp);
+ if (retval) {
+ CCP_LOG_ERR("Failed to create unique name for ccp qp");
+ goto qp_setup_cleanup;
+ }
+
+ qp->processed_pkts = ccp_pmd_qp_create_batch_info_ring(qp,
+ qp_conf->nb_descriptors, socket_id);
+ if (qp->processed_pkts == NULL) {
+ CCP_LOG_ERR("Failed to create batch info ring");
+ goto qp_setup_cleanup;
+ }
+
+ qp->sess_mp = session_pool;
+
+ /* mempool for batch info */
+ qp->batch_mp = rte_mempool_create(
+ qp->name,
+ qp_conf->nb_descriptors,
+ sizeof(struct ccp_batch_info),
+ RTE_CACHE_LINE_SIZE,
+ 0, NULL, NULL, NULL, NULL,
+ SOCKET_ID_ANY, 0);
+ if (qp->batch_mp == NULL)
+ goto qp_setup_cleanup;
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ return 0;
+
+qp_setup_cleanup:
+ dev->data->queue_pairs[qp_id] = NULL;
+ if (qp)
+ rte_free(qp);
+ return -1;
+}
+
+static uint32_t
+ccp_pmd_qp_count(struct rte_cryptodev *dev)
+{
+ return dev->data->nb_queue_pairs;
+}
+
+static unsigned
+ccp_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+ return sizeof(struct ccp_session);
+}
+
+static int
+ccp_pmd_sym_session_configure(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mempool)
+{
+ int ret;
+ void *sess_private_data;
+ struct ccp_private *internals;
+
+ if (unlikely(sess == NULL || xform == NULL)) {
+ CCP_LOG_ERR("Invalid session struct or xform");
+ return -ENOMEM;
+ }
+
+ if (rte_mempool_get(mempool, &sess_private_data)) {
+ CCP_LOG_ERR("Couldn't get object from session mempool");
+ return -ENOMEM;
+ }
+ internals = (struct ccp_private *)dev->data->dev_private;
+ ret = ccp_set_session_parameters(sess_private_data, xform, internals);
+ if (ret != 0) {
+		CCP_LOG_ERR("failed to configure session parameters");
+
+ /* Return session to mempool */
+ rte_mempool_put(mempool, sess_private_data);
+ return ret;
+ }
+ set_sym_session_private_data(sess, dev->driver_id,
+ sess_private_data);
+
+ return 0;
+}
+
+static void
+ccp_pmd_sym_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
+{
+ uint8_t index = dev->driver_id;
+ void *sess_priv = get_sym_session_private_data(sess, index);
+
+ if (sess_priv) {
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+
+ rte_mempool_put(sess_mp, sess_priv);
+ memset(sess_priv, 0, sizeof(struct ccp_session));
+ set_sym_session_private_data(sess, index, NULL);
+ }
+}
+
+struct rte_cryptodev_ops ccp_ops = {
+ .dev_configure = ccp_pmd_config,
+ .dev_start = ccp_pmd_start,
+ .dev_stop = ccp_pmd_stop,
+ .dev_close = ccp_pmd_close,
+
+ .stats_get = ccp_pmd_stats_get,
+ .stats_reset = ccp_pmd_stats_reset,
+
+ .dev_infos_get = ccp_pmd_info_get,
+
+ .queue_pair_setup = ccp_pmd_qp_setup,
+ .queue_pair_release = ccp_pmd_qp_release,
+ .queue_pair_count = ccp_pmd_qp_count,
+
+ .sym_session_get_size = ccp_pmd_sym_session_get_size,
+ .sym_session_configure = ccp_pmd_sym_session_configure,
+ .sym_session_clear = ccp_pmd_sym_session_clear,
+};
+
+struct rte_cryptodev_ops *ccp_pmd_ops = &ccp_ops;
diff --git a/src/spdk/dpdk/drivers/crypto/ccp/ccp_pmd_private.h b/src/spdk/dpdk/drivers/crypto/ccp/ccp_pmd_private.h
new file mode 100644
index 00000000..79752f68
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/ccp/ccp_pmd_private.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#ifndef _CCP_PMD_PRIVATE_H_
+#define _CCP_PMD_PRIVATE_H_
+
+#include <rte_cryptodev.h>
+#include "ccp_crypto.h"
+
+#define CRYPTODEV_NAME_CCP_PMD crypto_ccp
+
+#define CCP_LOG_ERR(fmt, args...) \
+ RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_CCP_PMD), \
+ __func__, __LINE__, ## args)
+
+#ifdef RTE_LIBRTE_CCP_DEBUG
+#define CCP_LOG_INFO(fmt, args...) \
+ RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_CCP_PMD), \
+ __func__, __LINE__, ## args)
+
+#define CCP_LOG_DBG(fmt, args...) \
+ RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_CCP_PMD), \
+ __func__, __LINE__, ## args)
+#else
+#define CCP_LOG_INFO(fmt, args...)
+#define CCP_LOG_DBG(fmt, args...)
+#endif
+
+/** Maximum number of queue pairs supported by the CCP PMD */
+#define CCP_PMD_MAX_QUEUE_PAIRS 1
+#define CCP_NB_MAX_DESCRIPTORS 1024
+#define CCP_MAX_BURST 64
+
+#include "ccp_dev.h"
+
+/* private data structure for each CCP crypto device */
+struct ccp_private {
+ unsigned int max_nb_qpairs; /**< Max number of queue pairs */
+ uint8_t crypto_num_dev; /**< Number of working crypto devices */
+ bool auth_opt; /**< Authentication offload option */
+ struct ccp_device *last_dev; /**< Last working crypto device */
+};
+
+/* CCP batch info */
+struct ccp_batch_info {
+ struct rte_crypto_op *op[CCP_MAX_BURST];
+	/**< op table populated at enqueue time from the application */
+ int op_idx;
+ struct ccp_queue *cmd_q;
+ uint16_t opcnt;
+	/**< no. of crypto ops in the batch */
+	int desccnt;
+	/**< no. of ccp queue descriptors */
+	uint32_t head_offset;
+	/**< ccp queue head/tail offsets at the time of enqueue */
+ uint32_t tail_offset;
+ uint8_t lsb_buf[CCP_SB_BYTES * CCP_MAX_BURST];
+ phys_addr_t lsb_buf_phys;
+ /**< LSB intermediate buf for passthru */
+ int lsb_buf_idx;
+ uint16_t auth_ctr;
+	/**< auth-only ops in the batch for CPU-based auth */
+} __rte_cache_aligned;
+
+/** CCP crypto queue pair */
+struct ccp_qp {
+ uint16_t id;
+ /**< Queue Pair Identifier */
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ /**< Unique Queue Pair Name */
+ struct rte_ring *processed_pkts;
+	/**< Ring for placing processed packets */
+ struct rte_mempool *sess_mp;
+ /**< Session Mempool */
+ struct rte_mempool *batch_mp;
+ /**< Session Mempool for batch info */
+ struct rte_cryptodev_stats qp_stats;
+ /**< Queue pair statistics */
+ struct ccp_batch_info *b_info;
+ /**< Store ops pulled out of queue */
+ struct rte_cryptodev *dev;
+ /**< rte crypto device to which this qp belongs */
+ uint8_t temp_digest[DIGEST_LENGTH_MAX];
+ /**< Buffer used to store the digest generated
+ * by the driver when verifying a digest provided
+ * by the user (using authentication verify operation)
+ */
+} __rte_cache_aligned;
+
+
+/** Device-specific operations function pointer structure */
+extern struct rte_cryptodev_ops *ccp_pmd_ops;
+
+uint16_t
+ccp_cpu_pmd_enqueue_burst(void *queue_pair,
+ struct rte_crypto_op **ops,
+ uint16_t nb_ops);
+uint16_t
+ccp_cpu_pmd_dequeue_burst(void *queue_pair,
+ struct rte_crypto_op **ops,
+ uint16_t nb_ops);
+
+#endif /* _CCP_PMD_PRIVATE_H_ */
diff --git a/src/spdk/dpdk/drivers/crypto/ccp/meson.build b/src/spdk/dpdk/drivers/crypto/ccp/meson.build
new file mode 100644
index 00000000..e43b0059
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/ccp/meson.build
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+
+if host_machine.system() != 'linux'
+ build = false
+endif
+dep = dependency('libcrypto', required: false)
+if not dep.found()
+ build = false
+endif
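+# The CCP PMD is built only on Linux and only when libcrypto is found.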
+deps += 'bus_vdev'
+deps += 'bus_pci'
+
+sources = files('rte_ccp_pmd.c',
+ 'ccp_crypto.c',
+ 'ccp_dev.c',
+ 'ccp_pci.c',
+ 'ccp_pmd_ops.c')
+
+ext_deps += dep
+pkgconfig_extra_libs += '-lcrypto'
diff --git a/src/spdk/dpdk/drivers/crypto/ccp/rte_ccp_pmd.c b/src/spdk/dpdk/drivers/crypto/ccp/rte_ccp_pmd.c
new file mode 100644
index 00000000..92d8a955
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/ccp/rte_ccp_pmd.c
@@ -0,0 +1,397 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#include <rte_bus_pci.h>
+#include <rte_bus_vdev.h>
+#include <rte_common.h>
+#include <rte_config.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_pci.h>
+#include <rte_dev.h>
+#include <rte_malloc.h>
+
+#include "ccp_crypto.h"
+#include "ccp_dev.h"
+#include "ccp_pmd_private.h"
+
+/**
+ * Global static flag used to check whether the CCP device has already been initialized.
+ */
+static unsigned int ccp_pmd_init_done;
+uint8_t ccp_cryptodev_driver_id;
+
+struct ccp_pmd_init_params {
+ struct rte_cryptodev_pmd_init_params def_p;
+ bool auth_opt;
+};
+
+#define CCP_CRYPTODEV_PARAM_NAME ("name")
+#define CCP_CRYPTODEV_PARAM_SOCKET_ID ("socket_id")
+#define CCP_CRYPTODEV_PARAM_MAX_NB_QP ("max_nb_queue_pairs")
+#define CCP_CRYPTODEV_PARAM_AUTH_OPT ("ccp_auth_opt")
+
+const char *ccp_pmd_valid_params[] = {
+ CCP_CRYPTODEV_PARAM_NAME,
+ CCP_CRYPTODEV_PARAM_SOCKET_ID,
+ CCP_CRYPTODEV_PARAM_MAX_NB_QP,
+ CCP_CRYPTODEV_PARAM_AUTH_OPT,
+};
+
+/** ccp pmd auth option */
+enum ccp_pmd_auth_opt {
+ CCP_PMD_AUTH_OPT_CCP = 0,
+ CCP_PMD_AUTH_OPT_CPU,
+};
+
+/** parse integer from integer argument */
+static int
+parse_integer_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ int *i = (int *) extra_args;
+
+ *i = atoi(value);
+ if (*i < 0) {
+		CCP_LOG_ERR("Argument has to be non-negative.");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/** parse name argument */
+static int
+parse_name_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ struct rte_cryptodev_pmd_init_params *params = extra_args;
+
+ if (strlen(value) >= RTE_CRYPTODEV_NAME_MAX_LEN - 1) {
+ CCP_LOG_ERR("Invalid name %s, should be less than "
+ "%u bytes.\n", value,
+ RTE_CRYPTODEV_NAME_MAX_LEN - 1);
+ return -EINVAL;
+ }
+
+ strncpy(params->name, value, RTE_CRYPTODEV_NAME_MAX_LEN);
+
+ return 0;
+}
+
+/** parse authentication operation option */
+static int
+parse_auth_opt_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ struct ccp_pmd_init_params *params = extra_args;
+ int i;
+
+ i = atoi(value);
+ if (i < CCP_PMD_AUTH_OPT_CCP || i > CCP_PMD_AUTH_OPT_CPU) {
+ CCP_LOG_ERR("Invalid ccp pmd auth option. "
+ "0->auth on CCP(default), "
+ "1->auth on CPU\n");
+ return -EINVAL;
+ }
+ params->auth_opt = i;
+ return 0;
+}
+
+static int
+ccp_pmd_parse_input_args(struct ccp_pmd_init_params *params,
+ const char *input_args)
+{
+ struct rte_kvargs *kvlist = NULL;
+ int ret = 0;
+
+ if (params == NULL)
+ return -EINVAL;
+
+ if (input_args) {
+ kvlist = rte_kvargs_parse(input_args,
+ ccp_pmd_valid_params);
+ if (kvlist == NULL)
+ return -1;
+
+ ret = rte_kvargs_process(kvlist,
+ CCP_CRYPTODEV_PARAM_MAX_NB_QP,
+ &parse_integer_arg,
+ &params->def_p.max_nb_queue_pairs);
+ if (ret < 0)
+ goto free_kvlist;
+
+ ret = rte_kvargs_process(kvlist,
+ CCP_CRYPTODEV_PARAM_SOCKET_ID,
+ &parse_integer_arg,
+ &params->def_p.socket_id);
+ if (ret < 0)
+ goto free_kvlist;
+
+ ret = rte_kvargs_process(kvlist,
+ CCP_CRYPTODEV_PARAM_NAME,
+ &parse_name_arg,
+ &params->def_p);
+ if (ret < 0)
+ goto free_kvlist;
+
+ ret = rte_kvargs_process(kvlist,
+ CCP_CRYPTODEV_PARAM_AUTH_OPT,
+ &parse_auth_opt_arg,
+ params);
+ if (ret < 0)
+ goto free_kvlist;
+
+ }
+
+free_kvlist:
+ rte_kvargs_free(kvlist);
+ return ret;
+}
+
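+/*
+ * Resolve the ccp_session for an op: session-based ops use the private data
+ * attached to the rte_cryptodev_sym_session, while sessionless ops get a
+ * temporary session allocated from the queue pair's session mempool and
+ * configured from op->sym->xform.
+ */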
+static struct ccp_session *
+get_ccp_session(struct ccp_qp *qp, struct rte_crypto_op *op)
+{
+ struct ccp_session *sess = NULL;
+
+ if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+ if (unlikely(op->sym->session == NULL))
+ return NULL;
+
+ sess = (struct ccp_session *)
+ get_sym_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+ } else if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+ void *_sess;
+ void *_sess_private_data = NULL;
+ struct ccp_private *internals;
+
+ if (rte_mempool_get(qp->sess_mp, &_sess))
+ return NULL;
+ if (rte_mempool_get(qp->sess_mp, (void **)&_sess_private_data))
+ return NULL;
+
+ sess = (struct ccp_session *)_sess_private_data;
+
+ internals = (struct ccp_private *)qp->dev->data->dev_private;
+ if (unlikely(ccp_set_session_parameters(sess, op->sym->xform,
+ internals) != 0)) {
+ rte_mempool_put(qp->sess_mp, _sess);
+ rte_mempool_put(qp->sess_mp, _sess_private_data);
+ sess = NULL;
+ }
+ op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
+ set_sym_session_private_data(op->sym->session,
+ ccp_cryptodev_driver_id,
+ _sess_private_data);
+ }
+
+ return sess;
+}
+
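+/*
+ * Enqueue burst: resolve the session for each op, add up the number of CCP
+ * descriptor slots the burst requires, select a hardware command queue for
+ * that many slots via ccp_allot_queue() and submit the ops to it.
+ */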
+static uint16_t
+ccp_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct ccp_session *sess = NULL;
+ struct ccp_qp *qp = queue_pair;
+ struct ccp_queue *cmd_q;
+ struct rte_cryptodev *dev = qp->dev;
+ uint16_t i, enq_cnt = 0, slots_req = 0;
+
+ if (nb_ops == 0)
+ return 0;
+
+ if (unlikely(rte_ring_full(qp->processed_pkts) != 0))
+ return 0;
+
+ for (i = 0; i < nb_ops; i++) {
+ sess = get_ccp_session(qp, ops[i]);
+ if (unlikely(sess == NULL) && (i == 0)) {
+ qp->qp_stats.enqueue_err_count++;
+ return 0;
+ } else if (sess == NULL) {
+ nb_ops = i;
+ break;
+ }
+ slots_req += ccp_compute_slot_count(sess);
+ }
+
+ cmd_q = ccp_allot_queue(dev, slots_req);
+ if (unlikely(cmd_q == NULL))
+ return 0;
+
+ enq_cnt = process_ops_to_enqueue(qp, ops, cmd_q, nb_ops, slots_req);
+ qp->qp_stats.enqueued_count += enq_cnt;
+ return enq_cnt;
+}
+
+static uint16_t
+ccp_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct ccp_qp *qp = queue_pair;
+ uint16_t nb_dequeued = 0, i;
+
+ nb_dequeued = process_ops_to_dequeue(qp, ops, nb_ops);
+
+ /* Free session if a session-less crypto op */
+ for (i = 0; i < nb_dequeued; i++)
+ if (unlikely(ops[i]->sess_type ==
+ RTE_CRYPTO_OP_SESSIONLESS)) {
+ rte_mempool_put(qp->sess_mp,
+ ops[i]->sym->session);
+ ops[i]->sym->session = NULL;
+ }
+ qp->qp_stats.dequeued_count += nb_dequeued;
+
+ return nb_dequeued;
+}
+
+/*
+ * The set of PCI devices this driver supports
+ */
+static struct rte_pci_id ccp_pci_id[] = {
+ {
+ RTE_PCI_DEVICE(0x1022, 0x1456), /* AMD CCP-5a */
+ },
+ {
+ RTE_PCI_DEVICE(0x1022, 0x1468), /* AMD CCP-5b */
+ },
+ {.device_id = 0},
+};
+
+/** Remove ccp pmd */
+static int
+cryptodev_ccp_remove(struct rte_vdev_device *dev)
+{
+ const char *name;
+
+ ccp_pmd_init_done = 0;
+ name = rte_vdev_device_name(dev);
+ if (name == NULL)
+ return -EINVAL;
+
+ RTE_LOG(INFO, PMD, "Closing ccp device %s on numa socket %u\n",
+ name, rte_socket_id());
+
+ return 0;
+}
+
+/** Create crypto device */
+static int
+cryptodev_ccp_create(const char *name,
+ struct rte_vdev_device *vdev,
+ struct ccp_pmd_init_params *init_params)
+{
+ struct rte_cryptodev *dev;
+ struct ccp_private *internals;
+ uint8_t cryptodev_cnt = 0;
+
+ if (init_params->def_p.name[0] == '\0')
+ snprintf(init_params->def_p.name,
+ sizeof(init_params->def_p.name),
+ "%s", name);
+
+ dev = rte_cryptodev_pmd_create(init_params->def_p.name,
+ &vdev->device,
+ &init_params->def_p);
+ if (dev == NULL) {
+ CCP_LOG_ERR("failed to create cryptodev vdev");
+ goto init_error;
+ }
+
+ cryptodev_cnt = ccp_probe_devices(ccp_pci_id);
+
+ if (cryptodev_cnt == 0) {
+ CCP_LOG_ERR("failed to detect CCP crypto device");
+ goto init_error;
+ }
+
+ printf("CCP : Crypto device count = %d\n", cryptodev_cnt);
+ dev->driver_id = ccp_cryptodev_driver_id;
+
+ /* register rx/tx burst functions for data path */
+ dev->dev_ops = ccp_pmd_ops;
+ dev->enqueue_burst = ccp_pmd_enqueue_burst;
+ dev->dequeue_burst = ccp_pmd_dequeue_burst;
+
+ dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_HW_ACCELERATED |
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;
+
+ internals = dev->data->dev_private;
+
+ internals->max_nb_qpairs = init_params->def_p.max_nb_queue_pairs;
+ internals->auth_opt = init_params->auth_opt;
+ internals->crypto_num_dev = cryptodev_cnt;
+
+ return 0;
+
+init_error:
+ CCP_LOG_ERR("driver %s: %s() failed",
+ init_params->def_p.name, __func__);
+ cryptodev_ccp_remove(vdev);
+
+ return -EFAULT;
+}
+
+/** Probe ccp pmd */
+static int
+cryptodev_ccp_probe(struct rte_vdev_device *vdev)
+{
+ int rc = 0;
+ const char *name;
+ struct ccp_pmd_init_params init_params = {
+ .def_p = {
+ "",
+ sizeof(struct ccp_private),
+ rte_socket_id(),
+ CCP_PMD_MAX_QUEUE_PAIRS
+ },
+ .auth_opt = CCP_PMD_AUTH_OPT_CCP,
+ };
+ const char *input_args;
+
+ if (ccp_pmd_init_done) {
+ RTE_LOG(INFO, PMD, "CCP PMD already initialized\n");
+ return -EFAULT;
+ }
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+
+ input_args = rte_vdev_device_args(vdev);
+ ccp_pmd_parse_input_args(&init_params, input_args);
+ init_params.def_p.max_nb_queue_pairs = CCP_PMD_MAX_QUEUE_PAIRS;
+
+ RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
+ init_params.def_p.socket_id);
+ RTE_LOG(INFO, PMD, "Max number of queue pairs = %d\n",
+ init_params.def_p.max_nb_queue_pairs);
+ RTE_LOG(INFO, PMD, "Authentication offload to %s\n",
+ ((init_params.auth_opt == 0) ? "CCP" : "CPU"));
+
+ rc = cryptodev_ccp_create(name, vdev, &init_params);
+ if (rc)
+ return rc;
+ ccp_pmd_init_done = 1;
+ return 0;
+}
+
+static struct rte_vdev_driver cryptodev_ccp_pmd_drv = {
+ .probe = cryptodev_ccp_probe,
+ .remove = cryptodev_ccp_remove
+};
+
+static struct cryptodev_driver ccp_crypto_drv;
+
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_CCP_PMD, cryptodev_ccp_pmd_drv);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_CCP_PMD,
+ "max_nb_queue_pairs=<int> "
+ "socket_id=<int> "
+ "ccp_auth_opt=<int>");
+RTE_PMD_REGISTER_CRYPTO_DRIVER(ccp_crypto_drv, cryptodev_ccp_pmd_drv.driver,
+ ccp_cryptodev_driver_id);
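+/*
+ * Minimal usage sketch (illustrative only): the PMD can be instantiated
+ * with the devargs registered above, either on the EAL command line:
+ *
+ *	--vdev "crypto_ccp,max_nb_queue_pairs=1,ccp_auth_opt=1"
+ *
+ * or programmatically, assuming EAL has already been initialised:
+ *
+ *	rte_vdev_init("crypto_ccp",
+ *		      "max_nb_queue_pairs=1,socket_id=0,ccp_auth_opt=1");
+ */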
diff --git a/src/spdk/dpdk/drivers/crypto/ccp/rte_pmd_ccp_version.map b/src/spdk/dpdk/drivers/crypto/ccp/rte_pmd_ccp_version.map
new file mode 100644
index 00000000..9b9ab1a4
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/ccp/rte_pmd_ccp_version.map
@@ -0,0 +1,4 @@
+DPDK_18.05 {
+
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/crypto/dpaa2_sec/Makefile b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/Makefile
new file mode 100644
index 00000000..da3d8f84
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/Makefile
@@ -0,0 +1,55 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+# Copyright 2016 NXP
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+ifneq ($(MAKECMDGOALS),clean)
+ifneq ($(CONFIG_RTE_LIBRTE_SECURITY),y)
+$(error "RTE_LIBRTE_SECURITY is required to build RTE_LIBRTE_PMD_DPAA2_SEC")
+endif
+endif
+
+#
+# library name
+#
+LIB = librte_pmd_dpaa2_sec.a
+
+# build flags
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -D _GNU_SOURCE
+
+ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
+ifeq ($(shell test $(GCC_VERSION) -gt 70 && echo 1), 1)
+CFLAGS += -Wno-implicit-fallthrough
+endif
+endif
+
+CFLAGS += -I$(RTE_SDK)/drivers/crypto/dpaa2_sec/
+CFLAGS += -I$(RTE_SDK)/drivers/crypto/dpaa2_sec/mc
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/qbman/include
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/mc
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/portal
+CFLAGS += -I$(RTE_SDK)/drivers/mempool/dpaa2/
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal
+
+# versioning export map
+EXPORT_MAP := rte_pmd_dpaa2_sec_version.map
+
+# library version
+LIBABIVER := 1
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC) += dpaa2_sec_dpseci.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC) += mc/dpseci.c
+
+LDLIBS += -lrte_bus_fslmc
+LDLIBS += -lrte_mempool_dpaa2
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_cryptodev
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
new file mode 100644
index 00000000..2a3c61c6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -0,0 +1,2931 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright 2016 NXP
+ *
+ */
+
+#include <time.h>
+#include <net/if.h>
+
+#include <rte_mbuf.h>
+#include <rte_cryptodev.h>
+#include <rte_security_driver.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_string_fns.h>
+#include <rte_cycles.h>
+#include <rte_kvargs.h>
+#include <rte_dev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_common.h>
+#include <rte_fslmc.h>
+#include <fslmc_vfio.h>
+#include <dpaa2_hw_pvt.h>
+#include <dpaa2_hw_dpio.h>
+#include <dpaa2_hw_mempool.h>
+#include <fsl_dpseci.h>
+#include <fsl_mc_sys.h>
+
+#include "dpaa2_sec_priv.h"
+#include "dpaa2_sec_logs.h"
+
+/* Required types */
+typedef uint64_t dma_addr_t;
+
+/* RTA header files */
+#include <hw/desc/ipsec.h>
+#include <hw/desc/algo.h>
+
+/* Minimum job descriptor consists of a one-word job descriptor HEADER and
+ * a pointer to the shared descriptor
+ */
+#define MIN_JOB_DESC_SIZE (CAAM_CMD_SZ + CAAM_PTR_SZ)
+#define FSL_VENDOR_ID 0x1957
+#define FSL_DEVICE_ID 0x410
+#define FSL_SUBSYSTEM_SEC 1
+#define FSL_MC_DPSECI_DEVID 3
+
+#define NO_PREFETCH 0
+/* FLE_POOL_NUM_BUFS is set as per the ipsec-secgw application */
+#define FLE_POOL_NUM_BUFS 32000
+#define FLE_POOL_BUF_SIZE 256
+#define FLE_POOL_CACHE_SIZE 512
+#define FLE_SG_MEM_SIZE 2048
+#define SEC_FLC_DHR_OUTBOUND -114
+#define SEC_FLC_DHR_INBOUND 0
+
+enum rta_sec_era rta_sec_era = RTA_SEC_ERA_8;
+
+static uint8_t cryptodev_driver_id;
+
+int dpaa2_logtype_sec;
+
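+/*
+ * Build a simple (non-compound) frame descriptor for protocol offload
+ * (IPsec) sessions. The crypto op pointer is stashed in the mbuf's buf_iova
+ * field (its original value is parked in aead.digest.phys_addr) so that the
+ * op can be recovered on the Rx side by sec_simple_fd_to_mbuf().
+ */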
+static inline int
+build_proto_fd(dpaa2_sec_session *sess,
+ struct rte_crypto_op *op,
+ struct qbman_fd *fd, uint16_t bpid)
+{
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ struct ctxt_priv *priv = sess->ctxt;
+ struct sec_flow_context *flc;
+ struct rte_mbuf *mbuf = sym_op->m_src;
+
+ if (likely(bpid < MAX_BPID))
+ DPAA2_SET_FD_BPID(fd, bpid);
+ else
+ DPAA2_SET_FD_IVP(fd);
+
+ /* Save the shared descriptor */
+ flc = &priv->flc_desc[0].flc;
+
+ DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
+ DPAA2_SET_FD_OFFSET(fd, sym_op->m_src->data_off);
+ DPAA2_SET_FD_LEN(fd, sym_op->m_src->pkt_len);
+ DPAA2_SET_FD_FLC(fd, (ptrdiff_t)flc);
+
+ /* save physical address of mbuf */
+ op->sym->aead.digest.phys_addr = mbuf->buf_iova;
+ mbuf->buf_iova = (size_t)op;
+
+ return 0;
+}
+
+static inline int
+build_authenc_gcm_sg_fd(dpaa2_sec_session *sess,
+ struct rte_crypto_op *op,
+ struct qbman_fd *fd, __rte_unused uint16_t bpid)
+{
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ struct ctxt_priv *priv = sess->ctxt;
+ struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
+ struct sec_flow_context *flc;
+ uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
+ int icv_len = sess->digest_length;
+ uint8_t *old_icv;
+ struct rte_mbuf *mbuf;
+ uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ sess->iv.offset);
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (sym_op->m_dst)
+ mbuf = sym_op->m_dst;
+ else
+ mbuf = sym_op->m_src;
+
+ /* first FLE entry used to store mbuf and session ctxt */
+ fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
+ RTE_CACHE_LINE_SIZE);
+ if (unlikely(!fle)) {
+ DPAA2_SEC_ERR("GCM SG: Memory alloc failed for SGE");
+ return -1;
+ }
+ memset(fle, 0, FLE_SG_MEM_SIZE);
+ DPAA2_SET_FLE_ADDR(fle, (size_t)op);
+ DPAA2_FLE_SAVE_CTXT(fle, (size_t)priv);
+
+ op_fle = fle + 1;
+ ip_fle = fle + 2;
+ sge = fle + 3;
+
+ /* Save the shared descriptor */
+ flc = &priv->flc_desc[0].flc;
+
+ /* Configure FD as a FRAME LIST */
+ DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
+ DPAA2_SET_FD_COMPOUND_FMT(fd);
+ DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+
+ DPAA2_SEC_DP_DEBUG("GCM SG: auth_off: 0x%x/length %d, digest-len=%d\n"
+ "iv-len=%d data_off: 0x%x\n",
+ sym_op->aead.data.offset,
+ sym_op->aead.data.length,
+ sess->digest_length,
+ sess->iv.length,
+ sym_op->m_src->data_off);
+
+ /* Configure Output FLE with Scatter/Gather Entry */
+ DPAA2_SET_FLE_SG_EXT(op_fle);
+ DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
+
+ if (auth_only_len)
+ DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);
+
+ op_fle->length = (sess->dir == DIR_ENC) ?
+ (sym_op->aead.data.length + icv_len + auth_only_len) :
+ sym_op->aead.data.length + auth_only_len;
+
+ /* Configure Output SGE for Encap/Decap */
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+ DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->aead.data.offset -
+ auth_only_len);
+ sge->length = mbuf->data_len - sym_op->aead.data.offset + auth_only_len;
+
+ mbuf = mbuf->next;
+ /* o/p segs */
+ while (mbuf) {
+ sge++;
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+ DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
+ sge->length = mbuf->data_len;
+ mbuf = mbuf->next;
+ }
+ sge->length -= icv_len;
+
+ if (sess->dir == DIR_ENC) {
+ sge++;
+ DPAA2_SET_FLE_ADDR(sge,
+ DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
+ sge->length = icv_len;
+ }
+ DPAA2_SET_FLE_FIN(sge);
+
+ sge++;
+ mbuf = sym_op->m_src;
+
+ /* Configure Input FLE with Scatter/Gather Entry */
+ DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
+ DPAA2_SET_FLE_SG_EXT(ip_fle);
+ DPAA2_SET_FLE_FIN(ip_fle);
+ ip_fle->length = (sess->dir == DIR_ENC) ?
+ (sym_op->aead.data.length + sess->iv.length + auth_only_len) :
+ (sym_op->aead.data.length + sess->iv.length + auth_only_len +
+ icv_len);
+
+ /* Configure Input SGE for Encap/Decap */
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
+ sge->length = sess->iv.length;
+
+ sge++;
+ if (auth_only_len) {
+ DPAA2_SET_FLE_ADDR(sge,
+ DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
+ sge->length = auth_only_len;
+ sge++;
+ }
+
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+ DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
+ mbuf->data_off);
+ sge->length = mbuf->data_len - sym_op->aead.data.offset;
+
+ mbuf = mbuf->next;
+ /* i/p segs */
+ while (mbuf) {
+ sge++;
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+ DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
+ sge->length = mbuf->data_len;
+ mbuf = mbuf->next;
+ }
+
+ if (sess->dir == DIR_DEC) {
+ sge++;
+ old_icv = (uint8_t *)(sge + 1);
+ memcpy(old_icv, sym_op->aead.digest.data, icv_len);
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
+ sge->length = icv_len;
+ }
+
+ DPAA2_SET_FLE_FIN(sge);
+ if (auth_only_len) {
+ DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
+ DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
+ }
+ DPAA2_SET_FD_LEN(fd, ip_fle->length);
+
+ return 0;
+}
+
+static inline int
+build_authenc_gcm_fd(dpaa2_sec_session *sess,
+ struct rte_crypto_op *op,
+ struct qbman_fd *fd, uint16_t bpid)
+{
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ struct ctxt_priv *priv = sess->ctxt;
+ struct qbman_fle *fle, *sge;
+ struct sec_flow_context *flc;
+ uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
+ int icv_len = sess->digest_length, retval;
+ uint8_t *old_icv;
+ struct rte_mbuf *dst;
+ uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ sess->iv.offset);
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (sym_op->m_dst)
+ dst = sym_op->m_dst;
+ else
+ dst = sym_op->m_src;
+
+	/* TODO: we are using the first FLE entry to store the mbuf and session
+	 * ctxt. Currently we do not know which FLE has the mbuf stored, so while
+	 * retrieving we can go back 1 FLE from the FD ADDR to get the mbuf
+	 * address from the previous FLE. A better approach would be to use the
+	 * inline mbuf.
+	 */
+ retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
+ if (retval) {
+ DPAA2_SEC_ERR("GCM: Memory alloc failed for SGE");
+ return -1;
+ }
+ memset(fle, 0, FLE_POOL_BUF_SIZE);
+ DPAA2_SET_FLE_ADDR(fle, (size_t)op);
+ DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
+ fle = fle + 1;
+ sge = fle + 2;
+ if (likely(bpid < MAX_BPID)) {
+ DPAA2_SET_FD_BPID(fd, bpid);
+ DPAA2_SET_FLE_BPID(fle, bpid);
+ DPAA2_SET_FLE_BPID(fle + 1, bpid);
+ DPAA2_SET_FLE_BPID(sge, bpid);
+ DPAA2_SET_FLE_BPID(sge + 1, bpid);
+ DPAA2_SET_FLE_BPID(sge + 2, bpid);
+ DPAA2_SET_FLE_BPID(sge + 3, bpid);
+ } else {
+ DPAA2_SET_FD_IVP(fd);
+ DPAA2_SET_FLE_IVP(fle);
+ DPAA2_SET_FLE_IVP((fle + 1));
+ DPAA2_SET_FLE_IVP(sge);
+ DPAA2_SET_FLE_IVP((sge + 1));
+ DPAA2_SET_FLE_IVP((sge + 2));
+ DPAA2_SET_FLE_IVP((sge + 3));
+ }
+
+ /* Save the shared descriptor */
+ flc = &priv->flc_desc[0].flc;
+ /* Configure FD as a FRAME LIST */
+ DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
+ DPAA2_SET_FD_COMPOUND_FMT(fd);
+ DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+
+ DPAA2_SEC_DP_DEBUG("GCM: auth_off: 0x%x/length %d, digest-len=%d\n"
+ "iv-len=%d data_off: 0x%x\n",
+ sym_op->aead.data.offset,
+ sym_op->aead.data.length,
+ sess->digest_length,
+ sess->iv.length,
+ sym_op->m_src->data_off);
+
+ /* Configure Output FLE with Scatter/Gather Entry */
+ DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
+ if (auth_only_len)
+ DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
+ fle->length = (sess->dir == DIR_ENC) ?
+ (sym_op->aead.data.length + icv_len + auth_only_len) :
+ sym_op->aead.data.length + auth_only_len;
+
+ DPAA2_SET_FLE_SG_EXT(fle);
+
+ /* Configure Output SGE for Encap/Decap */
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
+ DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
+ dst->data_off - auth_only_len);
+ sge->length = sym_op->aead.data.length + auth_only_len;
+
+ if (sess->dir == DIR_ENC) {
+ sge++;
+ DPAA2_SET_FLE_ADDR(sge,
+ DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
+ sge->length = sess->digest_length;
+ DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length +
+ sess->iv.length + auth_only_len));
+ }
+ DPAA2_SET_FLE_FIN(sge);
+
+ sge++;
+ fle++;
+
+ /* Configure Input FLE with Scatter/Gather Entry */
+ DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
+ DPAA2_SET_FLE_SG_EXT(fle);
+ DPAA2_SET_FLE_FIN(fle);
+ fle->length = (sess->dir == DIR_ENC) ?
+ (sym_op->aead.data.length + sess->iv.length + auth_only_len) :
+ (sym_op->aead.data.length + sess->iv.length + auth_only_len +
+ sess->digest_length);
+
+ /* Configure Input SGE for Encap/Decap */
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
+ sge->length = sess->iv.length;
+ sge++;
+ if (auth_only_len) {
+ DPAA2_SET_FLE_ADDR(sge,
+ DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
+ sge->length = auth_only_len;
+ DPAA2_SET_FLE_BPID(sge, bpid);
+ sge++;
+ }
+
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
+ DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
+ sym_op->m_src->data_off);
+ sge->length = sym_op->aead.data.length;
+ if (sess->dir == DIR_DEC) {
+ sge++;
+ old_icv = (uint8_t *)(sge + 1);
+ memcpy(old_icv, sym_op->aead.digest.data,
+ sess->digest_length);
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
+ sge->length = sess->digest_length;
+ DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length +
+ sess->digest_length +
+ sess->iv.length +
+ auth_only_len));
+ }
+ DPAA2_SET_FLE_FIN(sge);
+
+ if (auth_only_len) {
+ DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
+ DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
+ }
+
+ return 0;
+}
+
+static inline int
+build_authenc_sg_fd(dpaa2_sec_session *sess,
+ struct rte_crypto_op *op,
+ struct qbman_fd *fd, __rte_unused uint16_t bpid)
+{
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ struct ctxt_priv *priv = sess->ctxt;
+ struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
+ struct sec_flow_context *flc;
+ uint32_t auth_only_len = sym_op->auth.data.length -
+ sym_op->cipher.data.length;
+ int icv_len = sess->digest_length;
+ uint8_t *old_icv;
+ struct rte_mbuf *mbuf;
+ uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ sess->iv.offset);
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (sym_op->m_dst)
+ mbuf = sym_op->m_dst;
+ else
+ mbuf = sym_op->m_src;
+
+ /* first FLE entry used to store mbuf and session ctxt */
+ fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
+ RTE_CACHE_LINE_SIZE);
+ if (unlikely(!fle)) {
+ DPAA2_SEC_ERR("AUTHENC SG: Memory alloc failed for SGE");
+ return -1;
+ }
+ memset(fle, 0, FLE_SG_MEM_SIZE);
+ DPAA2_SET_FLE_ADDR(fle, (size_t)op);
+ DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
+
+ op_fle = fle + 1;
+ ip_fle = fle + 2;
+ sge = fle + 3;
+
+ /* Save the shared descriptor */
+ flc = &priv->flc_desc[0].flc;
+
+ /* Configure FD as a FRAME LIST */
+ DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
+ DPAA2_SET_FD_COMPOUND_FMT(fd);
+ DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+
+ DPAA2_SEC_DP_DEBUG(
+ "AUTHENC SG: auth_off: 0x%x/length %d, digest-len=%d\n"
+ "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
+ sym_op->auth.data.offset,
+ sym_op->auth.data.length,
+ sess->digest_length,
+ sym_op->cipher.data.offset,
+ sym_op->cipher.data.length,
+ sess->iv.length,
+ sym_op->m_src->data_off);
+
+ /* Configure Output FLE with Scatter/Gather Entry */
+ DPAA2_SET_FLE_SG_EXT(op_fle);
+ DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
+
+ if (auth_only_len)
+ DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);
+
+ op_fle->length = (sess->dir == DIR_ENC) ?
+ (sym_op->cipher.data.length + icv_len) :
+ sym_op->cipher.data.length;
+
+ /* Configure Output SGE for Encap/Decap */
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+ DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->auth.data.offset);
+ sge->length = mbuf->data_len - sym_op->auth.data.offset;
+
+ mbuf = mbuf->next;
+ /* o/p segs */
+ while (mbuf) {
+ sge++;
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+ DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
+ sge->length = mbuf->data_len;
+ mbuf = mbuf->next;
+ }
+ sge->length -= icv_len;
+
+ if (sess->dir == DIR_ENC) {
+ sge++;
+ DPAA2_SET_FLE_ADDR(sge,
+ DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
+ sge->length = icv_len;
+ }
+ DPAA2_SET_FLE_FIN(sge);
+
+ sge++;
+ mbuf = sym_op->m_src;
+
+ /* Configure Input FLE with Scatter/Gather Entry */
+ DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
+ DPAA2_SET_FLE_SG_EXT(ip_fle);
+ DPAA2_SET_FLE_FIN(ip_fle);
+ ip_fle->length = (sess->dir == DIR_ENC) ?
+ (sym_op->auth.data.length + sess->iv.length) :
+ (sym_op->auth.data.length + sess->iv.length +
+ icv_len);
+
+ /* Configure Input SGE for Encap/Decap */
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
+ sge->length = sess->iv.length;
+
+ sge++;
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+ DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
+ mbuf->data_off);
+ sge->length = mbuf->data_len - sym_op->auth.data.offset;
+
+ mbuf = mbuf->next;
+ /* i/p segs */
+ while (mbuf) {
+ sge++;
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+ DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
+ sge->length = mbuf->data_len;
+ mbuf = mbuf->next;
+ }
+ sge->length -= icv_len;
+
+ if (sess->dir == DIR_DEC) {
+ sge++;
+ old_icv = (uint8_t *)(sge + 1);
+ memcpy(old_icv, sym_op->auth.digest.data,
+ icv_len);
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
+ sge->length = icv_len;
+ }
+
+ DPAA2_SET_FLE_FIN(sge);
+ if (auth_only_len) {
+ DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
+ DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
+ }
+ DPAA2_SET_FD_LEN(fd, ip_fle->length);
+
+ return 0;
+}
+
+static inline int
+build_authenc_fd(dpaa2_sec_session *sess,
+ struct rte_crypto_op *op,
+ struct qbman_fd *fd, uint16_t bpid)
+{
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ struct ctxt_priv *priv = sess->ctxt;
+ struct qbman_fle *fle, *sge;
+ struct sec_flow_context *flc;
+ uint32_t auth_only_len = sym_op->auth.data.length -
+ sym_op->cipher.data.length;
+ int icv_len = sess->digest_length, retval;
+ uint8_t *old_icv;
+ uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ sess->iv.offset);
+ struct rte_mbuf *dst;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (sym_op->m_dst)
+ dst = sym_op->m_dst;
+ else
+ dst = sym_op->m_src;
+
+	/* We are using the first FLE entry to store the mbuf. Currently we do
+	 * not know which FLE has the mbuf stored, so while retrieving we can go
+	 * back 1 FLE from the FD ADDR to get the mbuf address from the previous
+	 * FLE. A better approach would be to use the inline mbuf.
+	 */
+ retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
+ if (retval) {
+ DPAA2_SEC_ERR("Memory alloc failed for SGE");
+ return -1;
+ }
+ memset(fle, 0, FLE_POOL_BUF_SIZE);
+ DPAA2_SET_FLE_ADDR(fle, (size_t)op);
+ DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
+ fle = fle + 1;
+ sge = fle + 2;
+ if (likely(bpid < MAX_BPID)) {
+ DPAA2_SET_FD_BPID(fd, bpid);
+ DPAA2_SET_FLE_BPID(fle, bpid);
+ DPAA2_SET_FLE_BPID(fle + 1, bpid);
+ DPAA2_SET_FLE_BPID(sge, bpid);
+ DPAA2_SET_FLE_BPID(sge + 1, bpid);
+ DPAA2_SET_FLE_BPID(sge + 2, bpid);
+ DPAA2_SET_FLE_BPID(sge + 3, bpid);
+ } else {
+ DPAA2_SET_FD_IVP(fd);
+ DPAA2_SET_FLE_IVP(fle);
+ DPAA2_SET_FLE_IVP((fle + 1));
+ DPAA2_SET_FLE_IVP(sge);
+ DPAA2_SET_FLE_IVP((sge + 1));
+ DPAA2_SET_FLE_IVP((sge + 2));
+ DPAA2_SET_FLE_IVP((sge + 3));
+ }
+
+ /* Save the shared descriptor */
+ flc = &priv->flc_desc[0].flc;
+ /* Configure FD as a FRAME LIST */
+ DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
+ DPAA2_SET_FD_COMPOUND_FMT(fd);
+ DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+
+ DPAA2_SEC_DP_DEBUG(
+ "AUTHENC: auth_off: 0x%x/length %d, digest-len=%d\n"
+ "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
+ sym_op->auth.data.offset,
+ sym_op->auth.data.length,
+ sess->digest_length,
+ sym_op->cipher.data.offset,
+ sym_op->cipher.data.length,
+ sess->iv.length,
+ sym_op->m_src->data_off);
+
+ /* Configure Output FLE with Scatter/Gather Entry */
+ DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
+ if (auth_only_len)
+ DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
+ fle->length = (sess->dir == DIR_ENC) ?
+ (sym_op->cipher.data.length + icv_len) :
+ sym_op->cipher.data.length;
+
+ DPAA2_SET_FLE_SG_EXT(fle);
+
+ /* Configure Output SGE for Encap/Decap */
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
+ DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
+ dst->data_off);
+ sge->length = sym_op->cipher.data.length;
+
+ if (sess->dir == DIR_ENC) {
+ sge++;
+ DPAA2_SET_FLE_ADDR(sge,
+ DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
+ sge->length = sess->digest_length;
+ DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
+ sess->iv.length));
+ }
+ DPAA2_SET_FLE_FIN(sge);
+
+ sge++;
+ fle++;
+
+ /* Configure Input FLE with Scatter/Gather Entry */
+ DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
+ DPAA2_SET_FLE_SG_EXT(fle);
+ DPAA2_SET_FLE_FIN(fle);
+ fle->length = (sess->dir == DIR_ENC) ?
+ (sym_op->auth.data.length + sess->iv.length) :
+ (sym_op->auth.data.length + sess->iv.length +
+ sess->digest_length);
+
+ /* Configure Input SGE for Encap/Decap */
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
+ sge->length = sess->iv.length;
+ sge++;
+
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
+ DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
+ sym_op->m_src->data_off);
+ sge->length = sym_op->auth.data.length;
+ if (sess->dir == DIR_DEC) {
+ sge++;
+ old_icv = (uint8_t *)(sge + 1);
+ memcpy(old_icv, sym_op->auth.digest.data,
+ sess->digest_length);
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
+ sge->length = sess->digest_length;
+ DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
+ sess->digest_length +
+ sess->iv.length));
+ }
+ DPAA2_SET_FLE_FIN(sge);
+ if (auth_only_len) {
+ DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
+ DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
+ }
+ return 0;
+}
+
+static inline int build_auth_sg_fd(
+ dpaa2_sec_session *sess,
+ struct rte_crypto_op *op,
+ struct qbman_fd *fd,
+ __rte_unused uint16_t bpid)
+{
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
+ struct sec_flow_context *flc;
+ struct ctxt_priv *priv = sess->ctxt;
+ uint8_t *old_digest;
+ struct rte_mbuf *mbuf;
+
+ PMD_INIT_FUNC_TRACE();
+
+ mbuf = sym_op->m_src;
+ fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
+ RTE_CACHE_LINE_SIZE);
+ if (unlikely(!fle)) {
+ DPAA2_SEC_ERR("AUTH SG: Memory alloc failed for SGE");
+ return -1;
+ }
+ memset(fle, 0, FLE_SG_MEM_SIZE);
+ /* first FLE entry used to store mbuf and session ctxt */
+ DPAA2_SET_FLE_ADDR(fle, (size_t)op);
+ DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
+ op_fle = fle + 1;
+ ip_fle = fle + 2;
+ sge = fle + 3;
+
+ flc = &priv->flc_desc[DESC_INITFINAL].flc;
+ /* sg FD */
+ DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+ DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
+ DPAA2_SET_FD_COMPOUND_FMT(fd);
+
+ /* o/p fle */
+ DPAA2_SET_FLE_ADDR(op_fle,
+ DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
+ op_fle->length = sess->digest_length;
+
+ /* i/p fle */
+ DPAA2_SET_FLE_SG_EXT(ip_fle);
+ DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
+ /* i/p 1st seg */
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+ DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset + mbuf->data_off);
+ sge->length = mbuf->data_len - sym_op->auth.data.offset;
+
+ /* i/p segs */
+ mbuf = mbuf->next;
+ while (mbuf) {
+ sge++;
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+ DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
+ sge->length = mbuf->data_len;
+ mbuf = mbuf->next;
+ }
+ if (sess->dir == DIR_ENC) {
+ /* Digest calculation case */
+ sge->length -= sess->digest_length;
+ ip_fle->length = sym_op->auth.data.length;
+ } else {
+ /* Digest verification case */
+ sge++;
+ old_digest = (uint8_t *)(sge + 1);
+ rte_memcpy(old_digest, sym_op->auth.digest.data,
+ sess->digest_length);
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
+ sge->length = sess->digest_length;
+ ip_fle->length = sym_op->auth.data.length +
+ sess->digest_length;
+ }
+ DPAA2_SET_FLE_FIN(sge);
+ DPAA2_SET_FLE_FIN(ip_fle);
+ DPAA2_SET_FD_LEN(fd, ip_fle->length);
+
+ return 0;
+}
+
+static inline int
+build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
+ struct qbman_fd *fd, uint16_t bpid)
+{
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ struct qbman_fle *fle, *sge;
+ struct sec_flow_context *flc;
+ struct ctxt_priv *priv = sess->ctxt;
+ uint8_t *old_digest;
+ int retval;
+
+ PMD_INIT_FUNC_TRACE();
+
+ retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
+ if (retval) {
+ DPAA2_SEC_ERR("AUTH Memory alloc failed for SGE");
+ return -1;
+ }
+ memset(fle, 0, FLE_POOL_BUF_SIZE);
+	/* TODO: we are using the first FLE entry to store the mbuf. Currently we
+	 * do not know which FLE has the mbuf stored, so while retrieving we can
+	 * go back 1 FLE from the FD ADDR to get the mbuf address from the
+	 * previous FLE. A better approach would be to use the inline mbuf.
+	 */
+ DPAA2_SET_FLE_ADDR(fle, (size_t)op);
+ DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
+ fle = fle + 1;
+
+ if (likely(bpid < MAX_BPID)) {
+ DPAA2_SET_FD_BPID(fd, bpid);
+ DPAA2_SET_FLE_BPID(fle, bpid);
+ DPAA2_SET_FLE_BPID(fle + 1, bpid);
+ } else {
+ DPAA2_SET_FD_IVP(fd);
+ DPAA2_SET_FLE_IVP(fle);
+ DPAA2_SET_FLE_IVP((fle + 1));
+ }
+ flc = &priv->flc_desc[DESC_INITFINAL].flc;
+ DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+
+ DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
+ fle->length = sess->digest_length;
+
+ DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
+ DPAA2_SET_FD_COMPOUND_FMT(fd);
+ fle++;
+
+ if (sess->dir == DIR_ENC) {
+ DPAA2_SET_FLE_ADDR(fle,
+ DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
+ DPAA2_SET_FLE_OFFSET(fle, sym_op->auth.data.offset +
+ sym_op->m_src->data_off);
+ DPAA2_SET_FD_LEN(fd, sym_op->auth.data.length);
+ fle->length = sym_op->auth.data.length;
+ } else {
+ sge = fle + 2;
+ DPAA2_SET_FLE_SG_EXT(fle);
+ DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
+
+ if (likely(bpid < MAX_BPID)) {
+ DPAA2_SET_FLE_BPID(sge, bpid);
+ DPAA2_SET_FLE_BPID(sge + 1, bpid);
+ } else {
+ DPAA2_SET_FLE_IVP(sge);
+ DPAA2_SET_FLE_IVP((sge + 1));
+ }
+ DPAA2_SET_FLE_ADDR(sge,
+ DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
+ DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
+ sym_op->m_src->data_off);
+
+ DPAA2_SET_FD_LEN(fd, sym_op->auth.data.length +
+ sess->digest_length);
+ sge->length = sym_op->auth.data.length;
+ sge++;
+ old_digest = (uint8_t *)(sge + 1);
+ rte_memcpy(old_digest, sym_op->auth.digest.data,
+ sess->digest_length);
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
+ sge->length = sess->digest_length;
+ fle->length = sym_op->auth.data.length +
+ sess->digest_length;
+ DPAA2_SET_FLE_FIN(sge);
+ }
+ DPAA2_SET_FLE_FIN(fle);
+
+ return 0;
+}
+
+static int
+build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
+ struct qbman_fd *fd, __rte_unused uint16_t bpid)
+{
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ struct qbman_fle *ip_fle, *op_fle, *sge, *fle;
+ struct sec_flow_context *flc;
+ struct ctxt_priv *priv = sess->ctxt;
+ struct rte_mbuf *mbuf;
+ uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ sess->iv.offset);
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (sym_op->m_dst)
+ mbuf = sym_op->m_dst;
+ else
+ mbuf = sym_op->m_src;
+
+ fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
+ RTE_CACHE_LINE_SIZE);
+ if (!fle) {
+ DPAA2_SEC_ERR("CIPHER SG: Memory alloc failed for SGE");
+ return -1;
+ }
+ memset(fle, 0, FLE_SG_MEM_SIZE);
+ /* first FLE entry used to store mbuf and session ctxt */
+ DPAA2_SET_FLE_ADDR(fle, (size_t)op);
+ DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
+
+ op_fle = fle + 1;
+ ip_fle = fle + 2;
+ sge = fle + 3;
+
+ flc = &priv->flc_desc[0].flc;
+
+ DPAA2_SEC_DP_DEBUG(
+ "CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d"
+ " data_off: 0x%x\n",
+ sym_op->cipher.data.offset,
+ sym_op->cipher.data.length,
+ sess->iv.length,
+ sym_op->m_src->data_off);
+
+ /* o/p fle */
+ DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
+ op_fle->length = sym_op->cipher.data.length;
+ DPAA2_SET_FLE_SG_EXT(op_fle);
+
+ /* o/p 1st seg */
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+ DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset + mbuf->data_off);
+ sge->length = mbuf->data_len - sym_op->cipher.data.offset;
+
+ mbuf = mbuf->next;
+ /* o/p segs */
+ while (mbuf) {
+ sge++;
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+ DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
+ sge->length = mbuf->data_len;
+ mbuf = mbuf->next;
+ }
+ DPAA2_SET_FLE_FIN(sge);
+
+ DPAA2_SEC_DP_DEBUG(
+ "CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d\n",
+ flc, fle, fle->addr_hi, fle->addr_lo,
+ fle->length);
+
+ /* i/p fle */
+ mbuf = sym_op->m_src;
+ sge++;
+ DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
+ ip_fle->length = sess->iv.length + sym_op->cipher.data.length;
+ DPAA2_SET_FLE_SG_EXT(ip_fle);
+
+ /* i/p IV */
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
+ DPAA2_SET_FLE_OFFSET(sge, 0);
+ sge->length = sess->iv.length;
+
+ sge++;
+
+ /* i/p 1st seg */
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+ DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
+ mbuf->data_off);
+ sge->length = mbuf->data_len - sym_op->cipher.data.offset;
+
+ mbuf = mbuf->next;
+ /* i/p segs */
+ while (mbuf) {
+ sge++;
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+ DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
+ sge->length = mbuf->data_len;
+ mbuf = mbuf->next;
+ }
+ DPAA2_SET_FLE_FIN(sge);
+ DPAA2_SET_FLE_FIN(ip_fle);
+
+ /* sg fd */
+ DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
+ DPAA2_SET_FD_LEN(fd, ip_fle->length);
+ DPAA2_SET_FD_COMPOUND_FMT(fd);
+ DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+
+ DPAA2_SEC_DP_DEBUG(
+ "CIPHER SG: fdaddr =%" PRIx64 " bpid =%d meta =%d"
+ " off =%d, len =%d\n",
+ DPAA2_GET_FD_ADDR(fd),
+ DPAA2_GET_FD_BPID(fd),
+ rte_dpaa2_bpid_info[bpid].meta_data_size,
+ DPAA2_GET_FD_OFFSET(fd),
+ DPAA2_GET_FD_LEN(fd));
+ return 0;
+}
+
+static int
+build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
+ struct qbman_fd *fd, uint16_t bpid)
+{
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ struct qbman_fle *fle, *sge;
+ int retval;
+ struct sec_flow_context *flc;
+ struct ctxt_priv *priv = sess->ctxt;
+ uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ sess->iv.offset);
+ struct rte_mbuf *dst;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (sym_op->m_dst)
+ dst = sym_op->m_dst;
+ else
+ dst = sym_op->m_src;
+
+ retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
+ if (retval) {
+ DPAA2_SEC_ERR("CIPHER: Memory alloc failed for SGE");
+ return -1;
+ }
+ memset(fle, 0, FLE_POOL_BUF_SIZE);
+	/* TODO: we are using the first FLE entry to store the mbuf. Currently we
+	 * do not know which FLE has the mbuf stored, so while retrieving we can
+	 * go back 1 FLE from the FD ADDR to get the mbuf address from the
+	 * previous FLE. A better approach would be to use the inline mbuf.
+	 */
+ DPAA2_SET_FLE_ADDR(fle, (size_t)op);
+ DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
+ fle = fle + 1;
+ sge = fle + 2;
+
+ if (likely(bpid < MAX_BPID)) {
+ DPAA2_SET_FD_BPID(fd, bpid);
+ DPAA2_SET_FLE_BPID(fle, bpid);
+ DPAA2_SET_FLE_BPID(fle + 1, bpid);
+ DPAA2_SET_FLE_BPID(sge, bpid);
+ DPAA2_SET_FLE_BPID(sge + 1, bpid);
+ } else {
+ DPAA2_SET_FD_IVP(fd);
+ DPAA2_SET_FLE_IVP(fle);
+ DPAA2_SET_FLE_IVP((fle + 1));
+ DPAA2_SET_FLE_IVP(sge);
+ DPAA2_SET_FLE_IVP((sge + 1));
+ }
+
+ flc = &priv->flc_desc[0].flc;
+ DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
+ DPAA2_SET_FD_LEN(fd, sym_op->cipher.data.length +
+ sess->iv.length);
+ DPAA2_SET_FD_COMPOUND_FMT(fd);
+ DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+
+ DPAA2_SEC_DP_DEBUG(
+ "CIPHER: cipher_off: 0x%x/length %d, ivlen=%d,"
+ " data_off: 0x%x\n",
+ sym_op->cipher.data.offset,
+ sym_op->cipher.data.length,
+ sess->iv.length,
+ sym_op->m_src->data_off);
+
+ DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(dst));
+ DPAA2_SET_FLE_OFFSET(fle, sym_op->cipher.data.offset +
+ dst->data_off);
+
+ fle->length = sym_op->cipher.data.length + sess->iv.length;
+
+ DPAA2_SEC_DP_DEBUG(
+ "CIPHER: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d\n",
+ flc, fle, fle->addr_hi, fle->addr_lo,
+ fle->length);
+
+ fle++;
+
+ DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
+ fle->length = sym_op->cipher.data.length + sess->iv.length;
+
+ DPAA2_SET_FLE_SG_EXT(fle);
+
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
+ sge->length = sess->iv.length;
+
+ sge++;
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
+ DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
+ sym_op->m_src->data_off);
+
+ sge->length = sym_op->cipher.data.length;
+ DPAA2_SET_FLE_FIN(sge);
+ DPAA2_SET_FLE_FIN(fle);
+
+ DPAA2_SEC_DP_DEBUG(
+ "CIPHER: fdaddr =%" PRIx64 " bpid =%d meta =%d"
+ " off =%d, len =%d\n",
+ DPAA2_GET_FD_ADDR(fd),
+ DPAA2_GET_FD_BPID(fd),
+ rte_dpaa2_bpid_info[bpid].meta_data_size,
+ DPAA2_GET_FD_OFFSET(fd),
+ DPAA2_GET_FD_LEN(fd));
+
+ return 0;
+}
+
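+/*
+ * Dispatch helper: pick the frame-descriptor builder that matches the
+ * session context type, using the scatter/gather variants when the source
+ * mbuf is not contiguous.
+ */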
+static inline int
+build_sec_fd(struct rte_crypto_op *op,
+ struct qbman_fd *fd, uint16_t bpid)
+{
+ int ret = -1;
+ dpaa2_sec_session *sess;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
+ sess = (dpaa2_sec_session *)get_sym_session_private_data(
+ op->sym->session, cryptodev_driver_id);
+ else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
+ sess = (dpaa2_sec_session *)get_sec_session_private_data(
+ op->sym->sec_session);
+ else
+ return -1;
+
+ /* Segmented buffer */
+ if (unlikely(!rte_pktmbuf_is_contiguous(op->sym->m_src))) {
+ switch (sess->ctxt_type) {
+ case DPAA2_SEC_CIPHER:
+ ret = build_cipher_sg_fd(sess, op, fd, bpid);
+ break;
+ case DPAA2_SEC_AUTH:
+ ret = build_auth_sg_fd(sess, op, fd, bpid);
+ break;
+ case DPAA2_SEC_AEAD:
+ ret = build_authenc_gcm_sg_fd(sess, op, fd, bpid);
+ break;
+ case DPAA2_SEC_CIPHER_HASH:
+ ret = build_authenc_sg_fd(sess, op, fd, bpid);
+ break;
+ case DPAA2_SEC_HASH_CIPHER:
+ default:
+ DPAA2_SEC_ERR("error: Unsupported session");
+ }
+ } else {
+ switch (sess->ctxt_type) {
+ case DPAA2_SEC_CIPHER:
+ ret = build_cipher_fd(sess, op, fd, bpid);
+ break;
+ case DPAA2_SEC_AUTH:
+ ret = build_auth_fd(sess, op, fd, bpid);
+ break;
+ case DPAA2_SEC_AEAD:
+ ret = build_authenc_gcm_fd(sess, op, fd, bpid);
+ break;
+ case DPAA2_SEC_CIPHER_HASH:
+ ret = build_authenc_fd(sess, op, fd, bpid);
+ break;
+ case DPAA2_SEC_IPSEC:
+ ret = build_proto_fd(sess, op, fd, bpid);
+ break;
+ case DPAA2_SEC_HASH_CIPHER:
+ default:
+ DPAA2_SEC_ERR("error: Unsupported session");
+ }
+ }
+ return ret;
+}
+
+static uint16_t
+dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+	/* Function to transmit the frames to the given device and VQ */
+ uint32_t loop;
+ int32_t ret;
+ struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
+ uint32_t frames_to_send;
+ struct qbman_eq_desc eqdesc;
+ struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
+ struct qbman_swp *swp;
+ uint16_t num_tx = 0;
+	/* TODO: need to support multiple buffer pools */
+ uint16_t bpid;
+ struct rte_mempool *mb_pool;
+
+ if (unlikely(nb_ops == 0))
+ return 0;
+
+ if (ops[0]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+ DPAA2_SEC_ERR("sessionless crypto op not supported");
+ return 0;
+ }
+	/* Prepare enqueue descriptor */
+ qbman_eq_desc_clear(&eqdesc);
+ qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
+ qbman_eq_desc_set_response(&eqdesc, 0, 0);
+ qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);
+
+ if (!DPAA2_PER_LCORE_DPIO) {
+ ret = dpaa2_affine_qbman_swp();
+ if (ret) {
+ DPAA2_SEC_ERR("Failure in affining portal");
+ return 0;
+ }
+ }
+ swp = DPAA2_PER_LCORE_PORTAL;
+
+ while (nb_ops) {
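+		/* Enqueue at most MAX_TX_RING_SLOTS frames per iteration */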
+ frames_to_send = (nb_ops >> 3) ? MAX_TX_RING_SLOTS : nb_ops;
+
+ for (loop = 0; loop < frames_to_send; loop++) {
+			/* Clear the unused FD fields before sending */
+ memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
+ mb_pool = (*ops)->sym->m_src->pool;
+ bpid = mempool_to_bpid(mb_pool);
+ ret = build_sec_fd(*ops, &fd_arr[loop], bpid);
+ if (ret) {
+ DPAA2_SEC_ERR("error: Improper packet contents"
+ " for crypto operation");
+ goto skip_tx;
+ }
+ ops++;
+ }
+ loop = 0;
+ while (loop < frames_to_send) {
+ loop += qbman_swp_enqueue_multiple(swp, &eqdesc,
+ &fd_arr[loop],
+ NULL,
+ frames_to_send - loop);
+ }
+
+ num_tx += frames_to_send;
+ nb_ops -= frames_to_send;
+ }
+skip_tx:
+ dpaa2_qp->tx_vq.tx_pkts += num_tx;
+ dpaa2_qp->tx_vq.err_pkts += nb_ops;
+ return num_tx;
+}
+
+static inline struct rte_crypto_op *
+sec_simple_fd_to_mbuf(const struct qbman_fd *fd, __rte_unused uint8_t id)
+{
+ struct rte_crypto_op *op;
+ uint16_t len = DPAA2_GET_FD_LEN(fd);
+ uint16_t diff = 0;
+ dpaa2_sec_session *sess_priv;
+
+ struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
+ DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
+ rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
+
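+	/* The enqueue side (presumably build_proto_fd, not part of this hunk)
+	 * stashes the crypto op pointer in the mbuf's buf_iova and the
+	 * original buf_iova in the AEAD digest phys_addr; restore both here.
+	 */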
+ op = (struct rte_crypto_op *)(size_t)mbuf->buf_iova;
+ mbuf->buf_iova = op->sym->aead.digest.phys_addr;
+ op->sym->aead.digest.phys_addr = 0L;
+
+ sess_priv = (dpaa2_sec_session *)get_sec_session_private_data(
+ op->sym->sec_session);
+ if (sess_priv->dir == DIR_ENC)
+ mbuf->data_off += SEC_FLC_DHR_OUTBOUND;
+ else
+ mbuf->data_off += SEC_FLC_DHR_INBOUND;
+ diff = len - mbuf->pkt_len;
+ mbuf->pkt_len += diff;
+ mbuf->data_len += diff;
+
+ return op;
+}
+
+static inline struct rte_crypto_op *
+sec_fd_to_mbuf(const struct qbman_fd *fd, uint8_t driver_id)
+{
+ struct qbman_fle *fle;
+ struct rte_crypto_op *op;
+ struct ctxt_priv *priv;
+ struct rte_mbuf *dst, *src;
+
+ if (DPAA2_FD_GET_FORMAT(fd) == qbman_fd_single)
+ return sec_simple_fd_to_mbuf(fd, driver_id);
+
+ fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
+
+ DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x\n",
+ fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);
+
+	/* The FLE just before the one addressed by the FD stores the crypto
+	 * op pointer, so step back one FLE from the FD address to retrieve
+	 * it. A better approach would be to use the inline mbuf.
+	 */
+
+ if (unlikely(DPAA2_GET_FD_IVP(fd))) {
+ /* TODO complete it. */
+ DPAA2_SEC_ERR("error: non inline buffer");
+ return NULL;
+ }
+ op = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1));
+
+	/* Prefetch op */
+ src = op->sym->m_src;
+ rte_prefetch0(src);
+
+ if (op->sym->m_dst) {
+ dst = op->sym->m_dst;
+ rte_prefetch0(dst);
+ } else
+ dst = src;
+
+ DPAA2_SEC_DP_DEBUG("mbuf %p BMAN buf addr %p,"
+ " fdaddr =%" PRIx64 " bpid =%d meta =%d off =%d, len =%d\n",
+ (void *)dst,
+ dst->buf_addr,
+ DPAA2_GET_FD_ADDR(fd),
+ DPAA2_GET_FD_BPID(fd),
+ rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
+ DPAA2_GET_FD_OFFSET(fd),
+ DPAA2_GET_FD_LEN(fd));
+
+	/* Free the FLE memory: FLEs for contiguous buffers come from the
+	 * per-device FLE pool, while scatter-gather FLEs were heap allocated.
+	 */
+ if (likely(rte_pktmbuf_is_contiguous(src))) {
+ priv = (struct ctxt_priv *)(size_t)DPAA2_GET_FLE_CTXT(fle - 1);
+ rte_mempool_put(priv->fle_pool, (void *)(fle-1));
+ } else
+ rte_free((void *)(fle-1));
+
+ return op;
+}
+
+static uint16_t
+dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+	/* Receive frames for the given device and VQ */
+ struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
+ struct rte_cryptodev *dev =
+ (struct rte_cryptodev *)(dpaa2_qp->rx_vq.dev);
+ struct qbman_result *dq_storage;
+ uint32_t fqid = dpaa2_qp->rx_vq.fqid;
+ int ret, num_rx = 0;
+ uint8_t is_last = 0, status;
+ struct qbman_swp *swp;
+ const struct qbman_fd *fd;
+ struct qbman_pull_desc pulldesc;
+
+ if (!DPAA2_PER_LCORE_DPIO) {
+ ret = dpaa2_affine_qbman_swp();
+ if (ret) {
+ DPAA2_SEC_ERR("Failure in affining portal");
+ return 0;
+ }
+ }
+ swp = DPAA2_PER_LCORE_PORTAL;
+ dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];
+
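+	/* Prepare a volatile dequeue (pull) for up to DPAA2_DQRR_RING_SIZE
+	 * frames, with results delivered to this queue's dequeue storage.
+	 */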
+ qbman_pull_desc_clear(&pulldesc);
+ qbman_pull_desc_set_numframes(&pulldesc,
+ (nb_ops > DPAA2_DQRR_RING_SIZE) ?
+ DPAA2_DQRR_RING_SIZE : nb_ops);
+ qbman_pull_desc_set_fq(&pulldesc, fqid);
+ qbman_pull_desc_set_storage(&pulldesc, dq_storage,
+ (dma_addr_t)DPAA2_VADDR_TO_IOVA(dq_storage),
+ 1);
+
+ /*Issue a volatile dequeue command. */
+ while (1) {
+ if (qbman_swp_pull(swp, &pulldesc)) {
+ DPAA2_SEC_WARN(
+				"SEC VDQ command is not issued: QBMAN busy");
+ /* Portal was busy, try again */
+ continue;
+ }
+ break;
+ };
+
+	/* Receive packets until the last dequeue entry of the above issued
+	 * pull command is found.
+	 */
+ while (!is_last) {
+		/* Check if the previously issued command has completed.
+		 * Note that the SWP is shared between the Ethernet and SEC
+		 * drivers.
+		 */
+ while (!qbman_check_command_complete(dq_storage))
+ ;
+
+ /* Loop until the dq_storage is updated with
+ * new token by QBMAN
+ */
+ while (!qbman_check_new_result(dq_storage))
+ ;
+		/* Check whether the last pull command has expired and set the
+		 * loop termination condition.
+		 */
+ if (qbman_result_DQ_is_pull_complete(dq_storage)) {
+ is_last = 1;
+ /* Check for valid frame. */
+ status = (uint8_t)qbman_result_DQ_flags(dq_storage);
+ if (unlikely(
+ (status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
+ DPAA2_SEC_DP_DEBUG("No frame is delivered\n");
+ continue;
+ }
+ }
+
+ fd = qbman_result_DQ_fd(dq_storage);
+ ops[num_rx] = sec_fd_to_mbuf(fd, dev->driver_id);
+
+ if (unlikely(fd->simple.frc)) {
+ /* TODO Parse SEC errors */
+ DPAA2_SEC_ERR("SEC returned Error - %x",
+ fd->simple.frc);
+ ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ } else {
+ ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ }
+
+ num_rx++;
+ dq_storage++;
+ } /* End of Packet Rx loop */
+
+ dpaa2_qp->rx_vq.rx_pkts += num_rx;
+
+ DPAA2_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
+ /*Return the total number of packets received to DPAA2 app*/
+ return num_rx;
+}
+
+/** Release queue pair */
+static int
+dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
+{
+ struct dpaa2_sec_qp *qp =
+ (struct dpaa2_sec_qp *)dev->data->queue_pairs[queue_pair_id];
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (qp->rx_vq.q_storage) {
+ dpaa2_free_dq_storage(qp->rx_vq.q_storage);
+ rte_free(qp->rx_vq.q_storage);
+ }
+ rte_free(qp);
+
+ dev->data->queue_pairs[queue_pair_id] = NULL;
+
+ return 0;
+}
+
+/** Setup a queue pair */
+static int
+dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+ __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
+ __rte_unused int socket_id,
+ __rte_unused struct rte_mempool *session_pool)
+{
+ struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
+ struct dpaa2_sec_qp *qp;
+ struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
+ struct dpseci_rx_queue_cfg cfg;
+ int32_t retcode;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* If qp is already in use free ring memory and qp metadata. */
+ if (dev->data->queue_pairs[qp_id] != NULL) {
+ DPAA2_SEC_INFO("QP already setup");
+ return 0;
+ }
+
+ DPAA2_SEC_DEBUG("dev =%p, queue =%d, conf =%p",
+ dev, qp_id, qp_conf);
+
+ memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
+
+ qp = rte_malloc(NULL, sizeof(struct dpaa2_sec_qp),
+ RTE_CACHE_LINE_SIZE);
+ if (!qp) {
+ DPAA2_SEC_ERR("malloc failed for rx/tx queues");
+ return -1;
+ }
+
+ qp->rx_vq.dev = dev;
+ qp->tx_vq.dev = dev;
+ qp->rx_vq.q_storage = rte_malloc("sec dq storage",
+ sizeof(struct queue_storage_info_t),
+ RTE_CACHE_LINE_SIZE);
+ if (!qp->rx_vq.q_storage) {
+ DPAA2_SEC_ERR("malloc failed for q_storage");
+ return -1;
+ }
+ memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t));
+
+ if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) {
+ DPAA2_SEC_ERR("Unable to allocate dequeue storage");
+ return -1;
+ }
+
+ dev->data->queue_pairs[qp_id] = qp;
+
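+	/* Register the rx_vq address as the per-queue user context so that
+	 * dequeued results can be attributed back to this queue pair.
+	 */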
+ cfg.options = cfg.options | DPSECI_QUEUE_OPT_USER_CTX;
+ cfg.user_ctx = (size_t)(&qp->rx_vq);
+ retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
+ qp_id, &cfg);
+ return retcode;
+}
+
+/** Return the number of allocated queue pairs */
+static uint32_t
+dpaa2_sec_queue_pair_count(struct rte_cryptodev *dev)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ return dev->data->nb_queue_pairs;
+}
+
+/** Return the size of the DPAA2 SEC session structure */
+static unsigned int
+dpaa2_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ return sizeof(dpaa2_sec_session);
+}
+
+static int
+dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ dpaa2_sec_session *session)
+{
+ struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
+ struct alginfo cipherdata;
+ int bufsize, i;
+ struct ctxt_priv *priv;
+ struct sec_flow_context *flc;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* For SEC CIPHER only one descriptor is required. */
+ priv = (struct ctxt_priv *)rte_zmalloc(NULL,
+ sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
+ RTE_CACHE_LINE_SIZE);
+ if (priv == NULL) {
+ DPAA2_SEC_ERR("No Memory for priv CTXT");
+ return -1;
+ }
+
+ priv->fle_pool = dev_priv->fle_pool;
+
+ flc = &priv->flc_desc[0].flc;
+
+ session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->cipher_key.data == NULL) {
+ DPAA2_SEC_ERR("No Memory for cipher key");
+ rte_free(priv);
+ return -1;
+ }
+ session->cipher_key.length = xform->cipher.key.length;
+
+ memcpy(session->cipher_key.data, xform->cipher.key.data,
+ xform->cipher.key.length);
+ cipherdata.key = (size_t)session->cipher_key.data;
+ cipherdata.keylen = session->cipher_key.length;
+ cipherdata.key_enc_flags = 0;
+ cipherdata.key_type = RTA_DATA_IMM;
+
+ /* Set IV parameters */
+ session->iv.offset = xform->cipher.iv.offset;
+ session->iv.length = xform->cipher.iv.length;
+
+ switch (xform->cipher.algo) {
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ cipherdata.algtype = OP_ALG_ALGSEL_AES;
+ cipherdata.algmode = OP_ALG_AAI_CBC;
+ session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ cipherdata.algtype = OP_ALG_ALGSEL_3DES;
+ cipherdata.algmode = OP_ALG_AAI_CBC;
+ session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ cipherdata.algtype = OP_ALG_ALGSEL_AES;
+ cipherdata.algmode = OP_ALG_AAI_CTR;
+ session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_CTR:
+ case RTE_CRYPTO_CIPHER_AES_ECB:
+ case RTE_CRYPTO_CIPHER_3DES_ECB:
+ case RTE_CRYPTO_CIPHER_AES_XTS:
+ case RTE_CRYPTO_CIPHER_AES_F8:
+ case RTE_CRYPTO_CIPHER_ARC4:
+ case RTE_CRYPTO_CIPHER_KASUMI_F8:
+ case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
+ case RTE_CRYPTO_CIPHER_ZUC_EEA3:
+ case RTE_CRYPTO_CIPHER_NULL:
+ DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
+ xform->cipher.algo);
+ goto error_out;
+ default:
+ DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
+ xform->cipher.algo);
+ goto error_out;
+ }
+ session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
+ DIR_ENC : DIR_DEC;
+
+ bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
+ &cipherdata, NULL, session->iv.length,
+ session->dir);
+ if (bufsize < 0) {
+ DPAA2_SEC_ERR("Crypto: Descriptor build failed");
+ goto error_out;
+ }
+ flc->dhr = 0;
+ flc->bpv0 = 0x1;
+ flc->mode_bits = 0x8000;
+
+ flc->word1_sdl = (uint8_t)bufsize;
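+	/* Store the address of queue pair 0's rx_vq in the RFLC words of the
+	 * flow context, matching the user context set at queue setup.
+	 */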
+ flc->word2_rflc_31_0 = lower_32_bits(
+ (size_t)&(((struct dpaa2_sec_qp *)
+ dev->data->queue_pairs[0])->rx_vq));
+ flc->word3_rflc_63_32 = upper_32_bits(
+ (size_t)&(((struct dpaa2_sec_qp *)
+ dev->data->queue_pairs[0])->rx_vq));
+ session->ctxt = priv;
+
+ for (i = 0; i < bufsize; i++)
+ DPAA2_SEC_DEBUG("DESC[%d]:0x%x", i, priv->flc_desc[0].desc[i]);
+
+ return 0;
+
+error_out:
+ rte_free(session->cipher_key.data);
+ rte_free(priv);
+ return -1;
+}
+
+static int
+dpaa2_sec_auth_init(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ dpaa2_sec_session *session)
+{
+ struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
+ struct alginfo authdata;
+ int bufsize, i;
+ struct ctxt_priv *priv;
+ struct sec_flow_context *flc;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* For SEC AUTH three descriptors are required for various stages */
+ priv = (struct ctxt_priv *)rte_zmalloc(NULL,
+ sizeof(struct ctxt_priv) + 3 *
+ sizeof(struct sec_flc_desc),
+ RTE_CACHE_LINE_SIZE);
+ if (priv == NULL) {
+ DPAA2_SEC_ERR("No Memory for priv CTXT");
+ return -1;
+ }
+
+ priv->fle_pool = dev_priv->fle_pool;
+ flc = &priv->flc_desc[DESC_INITFINAL].flc;
+
+ session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->auth_key.data == NULL) {
+ DPAA2_SEC_ERR("Unable to allocate memory for auth key");
+ rte_free(priv);
+ return -1;
+ }
+ session->auth_key.length = xform->auth.key.length;
+
+ memcpy(session->auth_key.data, xform->auth.key.data,
+ xform->auth.key.length);
+ authdata.key = (size_t)session->auth_key.data;
+ authdata.keylen = session->auth_key.length;
+ authdata.key_enc_flags = 0;
+ authdata.key_type = RTA_DATA_IMM;
+
+ session->digest_length = xform->auth.digest_length;
+
+ switch (xform->auth.algo) {
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ authdata.algtype = OP_ALG_ALGSEL_SHA1;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_MD5_HMAC:
+ authdata.algtype = OP_ALG_ALGSEL_MD5;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ authdata.algtype = OP_ALG_ALGSEL_SHA256;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA384_HMAC:
+ authdata.algtype = OP_ALG_ALGSEL_SHA384;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA512_HMAC:
+ authdata.algtype = OP_ALG_ALGSEL_SHA512;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA224_HMAC:
+ authdata.algtype = OP_ALG_ALGSEL_SHA224;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+ case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
+ case RTE_CRYPTO_AUTH_NULL:
+ case RTE_CRYPTO_AUTH_SHA1:
+ case RTE_CRYPTO_AUTH_SHA256:
+ case RTE_CRYPTO_AUTH_SHA512:
+ case RTE_CRYPTO_AUTH_SHA224:
+ case RTE_CRYPTO_AUTH_SHA384:
+ case RTE_CRYPTO_AUTH_MD5:
+ case RTE_CRYPTO_AUTH_AES_GMAC:
+ case RTE_CRYPTO_AUTH_KASUMI_F9:
+ case RTE_CRYPTO_AUTH_AES_CMAC:
+ case RTE_CRYPTO_AUTH_AES_CBC_MAC:
+ case RTE_CRYPTO_AUTH_ZUC_EIA3:
+		DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
+ xform->auth.algo);
+ goto error_out;
+ default:
+ DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
+ xform->auth.algo);
+ goto error_out;
+ }
+ session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
+ DIR_ENC : DIR_DEC;
+
+ bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
+ 1, 0, &authdata, !session->dir,
+ session->digest_length);
+ if (bufsize < 0) {
+		DPAA2_SEC_ERR("Crypto: Descriptor build failed");
+ goto error_out;
+ }
+
+ flc->word1_sdl = (uint8_t)bufsize;
+ flc->word2_rflc_31_0 = lower_32_bits(
+ (size_t)&(((struct dpaa2_sec_qp *)
+ dev->data->queue_pairs[0])->rx_vq));
+ flc->word3_rflc_63_32 = upper_32_bits(
+ (size_t)&(((struct dpaa2_sec_qp *)
+ dev->data->queue_pairs[0])->rx_vq));
+ session->ctxt = priv;
+ for (i = 0; i < bufsize; i++)
+ DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
+ i, priv->flc_desc[DESC_INITFINAL].desc[i]);
+
+
+ return 0;
+
+error_out:
+ rte_free(session->auth_key.data);
+ rte_free(priv);
+ return -1;
+}
+
+static int
+dpaa2_sec_aead_init(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ dpaa2_sec_session *session)
+{
+ struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
+ struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
+ struct alginfo aeaddata;
+ int bufsize, i;
+ struct ctxt_priv *priv;
+ struct sec_flow_context *flc;
+ struct rte_crypto_aead_xform *aead_xform = &xform->aead;
+ int err;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Set IV parameters */
+ session->iv.offset = aead_xform->iv.offset;
+ session->iv.length = aead_xform->iv.length;
+ session->ctxt_type = DPAA2_SEC_AEAD;
+
+ /* For SEC AEAD only one descriptor is required */
+ priv = (struct ctxt_priv *)rte_zmalloc(NULL,
+ sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
+ RTE_CACHE_LINE_SIZE);
+ if (priv == NULL) {
+ DPAA2_SEC_ERR("No Memory for priv CTXT");
+ return -1;
+ }
+
+ priv->fle_pool = dev_priv->fle_pool;
+ flc = &priv->flc_desc[0].flc;
+
+ session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
+ DPAA2_SEC_ERR("No Memory for aead key");
+ rte_free(priv);
+ return -1;
+ }
+ memcpy(session->aead_key.data, aead_xform->key.data,
+ aead_xform->key.length);
+
+ session->digest_length = aead_xform->digest_length;
+ session->aead_key.length = aead_xform->key.length;
+ ctxt->auth_only_len = aead_xform->aad_length;
+
+ aeaddata.key = (size_t)session->aead_key.data;
+ aeaddata.keylen = session->aead_key.length;
+ aeaddata.key_enc_flags = 0;
+ aeaddata.key_type = RTA_DATA_IMM;
+
+ switch (aead_xform->algo) {
+ case RTE_CRYPTO_AEAD_AES_GCM:
+ aeaddata.algtype = OP_ALG_ALGSEL_AES;
+ aeaddata.algmode = OP_ALG_AAI_GCM;
+ session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
+ break;
+ case RTE_CRYPTO_AEAD_AES_CCM:
+ DPAA2_SEC_ERR("Crypto: Unsupported AEAD alg %u",
+ aead_xform->algo);
+ goto error_out;
+ default:
+ DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
+ aead_xform->algo);
+ goto error_out;
+ }
+ session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
+ DIR_ENC : DIR_DEC;
+
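+	/* Ask RTA whether the key fits inline (immediate) in the shared
+	 * descriptor; bit 0 of desc[1] reports the result, otherwise the key
+	 * is referenced by pointer.
+	 */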
+ priv->flc_desc[0].desc[0] = aeaddata.keylen;
+ err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
+ MIN_JOB_DESC_SIZE,
+ (unsigned int *)priv->flc_desc[0].desc,
+ &priv->flc_desc[0].desc[1], 1);
+
+ if (err < 0) {
+ DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
+ goto error_out;
+ }
+ if (priv->flc_desc[0].desc[1] & 1) {
+ aeaddata.key_type = RTA_DATA_IMM;
+ } else {
+ aeaddata.key = DPAA2_VADDR_TO_IOVA(aeaddata.key);
+ aeaddata.key_type = RTA_DATA_PTR;
+ }
+ priv->flc_desc[0].desc[0] = 0;
+ priv->flc_desc[0].desc[1] = 0;
+
+ if (session->dir == DIR_ENC)
+ bufsize = cnstr_shdsc_gcm_encap(
+ priv->flc_desc[0].desc, 1, 0,
+ &aeaddata, session->iv.length,
+ session->digest_length);
+ else
+ bufsize = cnstr_shdsc_gcm_decap(
+ priv->flc_desc[0].desc, 1, 0,
+ &aeaddata, session->iv.length,
+ session->digest_length);
+ if (bufsize < 0) {
+		DPAA2_SEC_ERR("Crypto: Descriptor build failed");
+ goto error_out;
+ }
+
+ flc->word1_sdl = (uint8_t)bufsize;
+ flc->word2_rflc_31_0 = lower_32_bits(
+ (size_t)&(((struct dpaa2_sec_qp *)
+ dev->data->queue_pairs[0])->rx_vq));
+ flc->word3_rflc_63_32 = upper_32_bits(
+ (size_t)&(((struct dpaa2_sec_qp *)
+ dev->data->queue_pairs[0])->rx_vq));
+ session->ctxt = priv;
+ for (i = 0; i < bufsize; i++)
+ DPAA2_SEC_DEBUG("DESC[%d]:0x%x\n",
+ i, priv->flc_desc[0].desc[i]);
+
+ return 0;
+
+error_out:
+ rte_free(session->aead_key.data);
+ rte_free(priv);
+ return -1;
+}
+
+
+static int
+dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ dpaa2_sec_session *session)
+{
+ struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
+ struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
+ struct alginfo authdata, cipherdata;
+ int bufsize, i;
+ struct ctxt_priv *priv;
+ struct sec_flow_context *flc;
+ struct rte_crypto_cipher_xform *cipher_xform;
+ struct rte_crypto_auth_xform *auth_xform;
+ int err;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (session->ext_params.aead_ctxt.auth_cipher_text) {
+ cipher_xform = &xform->cipher;
+ auth_xform = &xform->next->auth;
+ session->ctxt_type =
+ (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
+ DPAA2_SEC_CIPHER_HASH : DPAA2_SEC_HASH_CIPHER;
+ } else {
+ cipher_xform = &xform->next->cipher;
+ auth_xform = &xform->auth;
+ session->ctxt_type =
+ (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
+ DPAA2_SEC_HASH_CIPHER : DPAA2_SEC_CIPHER_HASH;
+ }
+
+ /* Set IV parameters */
+ session->iv.offset = cipher_xform->iv.offset;
+ session->iv.length = cipher_xform->iv.length;
+
+	/* For the chained cipher+auth case only one descriptor is required */
+ priv = (struct ctxt_priv *)rte_zmalloc(NULL,
+ sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
+ RTE_CACHE_LINE_SIZE);
+ if (priv == NULL) {
+ DPAA2_SEC_ERR("No Memory for priv CTXT");
+ return -1;
+ }
+
+ priv->fle_pool = dev_priv->fle_pool;
+ flc = &priv->flc_desc[0].flc;
+
+ session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
+ DPAA2_SEC_ERR("No Memory for cipher key");
+ rte_free(priv);
+ return -1;
+ }
+ session->cipher_key.length = cipher_xform->key.length;
+ session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
+ DPAA2_SEC_ERR("No Memory for auth key");
+ rte_free(session->cipher_key.data);
+ rte_free(priv);
+ return -1;
+ }
+ session->auth_key.length = auth_xform->key.length;
+ memcpy(session->cipher_key.data, cipher_xform->key.data,
+ cipher_xform->key.length);
+ memcpy(session->auth_key.data, auth_xform->key.data,
+ auth_xform->key.length);
+
+ authdata.key = (size_t)session->auth_key.data;
+ authdata.keylen = session->auth_key.length;
+ authdata.key_enc_flags = 0;
+ authdata.key_type = RTA_DATA_IMM;
+
+ session->digest_length = auth_xform->digest_length;
+
+ switch (auth_xform->algo) {
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ authdata.algtype = OP_ALG_ALGSEL_SHA1;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_MD5_HMAC:
+ authdata.algtype = OP_ALG_ALGSEL_MD5;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA224_HMAC:
+ authdata.algtype = OP_ALG_ALGSEL_SHA224;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ authdata.algtype = OP_ALG_ALGSEL_SHA256;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA384_HMAC:
+ authdata.algtype = OP_ALG_ALGSEL_SHA384;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA512_HMAC:
+ authdata.algtype = OP_ALG_ALGSEL_SHA512;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+ case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
+ case RTE_CRYPTO_AUTH_NULL:
+ case RTE_CRYPTO_AUTH_SHA1:
+ case RTE_CRYPTO_AUTH_SHA256:
+ case RTE_CRYPTO_AUTH_SHA512:
+ case RTE_CRYPTO_AUTH_SHA224:
+ case RTE_CRYPTO_AUTH_SHA384:
+ case RTE_CRYPTO_AUTH_MD5:
+ case RTE_CRYPTO_AUTH_AES_GMAC:
+ case RTE_CRYPTO_AUTH_KASUMI_F9:
+ case RTE_CRYPTO_AUTH_AES_CMAC:
+ case RTE_CRYPTO_AUTH_AES_CBC_MAC:
+ case RTE_CRYPTO_AUTH_ZUC_EIA3:
+ DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
+ auth_xform->algo);
+ goto error_out;
+ default:
+ DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
+ auth_xform->algo);
+ goto error_out;
+ }
+ cipherdata.key = (size_t)session->cipher_key.data;
+ cipherdata.keylen = session->cipher_key.length;
+ cipherdata.key_enc_flags = 0;
+ cipherdata.key_type = RTA_DATA_IMM;
+
+ switch (cipher_xform->algo) {
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ cipherdata.algtype = OP_ALG_ALGSEL_AES;
+ cipherdata.algmode = OP_ALG_AAI_CBC;
+ session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ cipherdata.algtype = OP_ALG_ALGSEL_3DES;
+ cipherdata.algmode = OP_ALG_AAI_CBC;
+ session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ cipherdata.algtype = OP_ALG_ALGSEL_AES;
+ cipherdata.algmode = OP_ALG_AAI_CTR;
+ session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
+ break;
+ case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
+ case RTE_CRYPTO_CIPHER_NULL:
+ case RTE_CRYPTO_CIPHER_3DES_ECB:
+ case RTE_CRYPTO_CIPHER_AES_ECB:
+ case RTE_CRYPTO_CIPHER_KASUMI_F8:
+ DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
+ cipher_xform->algo);
+ goto error_out;
+ default:
+ DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
+ cipher_xform->algo);
+ goto error_out;
+ }
+ session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
+ DIR_ENC : DIR_DEC;
+
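+	/* As in the AEAD case, query which of the two keys fit inline in the
+	 * shared descriptor; bits 0 and 1 of desc[2] report the cipher and
+	 * auth key results respectively.
+	 */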
+ priv->flc_desc[0].desc[0] = cipherdata.keylen;
+ priv->flc_desc[0].desc[1] = authdata.keylen;
+ err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
+ MIN_JOB_DESC_SIZE,
+ (unsigned int *)priv->flc_desc[0].desc,
+ &priv->flc_desc[0].desc[2], 2);
+
+ if (err < 0) {
+ DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
+ goto error_out;
+ }
+ if (priv->flc_desc[0].desc[2] & 1) {
+ cipherdata.key_type = RTA_DATA_IMM;
+ } else {
+ cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
+ cipherdata.key_type = RTA_DATA_PTR;
+ }
+ if (priv->flc_desc[0].desc[2] & (1 << 1)) {
+ authdata.key_type = RTA_DATA_IMM;
+ } else {
+ authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key);
+ authdata.key_type = RTA_DATA_PTR;
+ }
+ priv->flc_desc[0].desc[0] = 0;
+ priv->flc_desc[0].desc[1] = 0;
+ priv->flc_desc[0].desc[2] = 0;
+
+ if (session->ctxt_type == DPAA2_SEC_CIPHER_HASH) {
+ bufsize = cnstr_shdsc_authenc(priv->flc_desc[0].desc, 1,
+ 0, &cipherdata, &authdata,
+ session->iv.length,
+ ctxt->auth_only_len,
+ session->digest_length,
+ session->dir);
+ if (bufsize < 0) {
+			DPAA2_SEC_ERR("Crypto: Descriptor build failed");
+ goto error_out;
+ }
+ } else {
+ DPAA2_SEC_ERR("Hash before cipher not supported");
+ goto error_out;
+ }
+
+ flc->word1_sdl = (uint8_t)bufsize;
+ flc->word2_rflc_31_0 = lower_32_bits(
+ (size_t)&(((struct dpaa2_sec_qp *)
+ dev->data->queue_pairs[0])->rx_vq));
+ flc->word3_rflc_63_32 = upper_32_bits(
+ (size_t)&(((struct dpaa2_sec_qp *)
+ dev->data->queue_pairs[0])->rx_vq));
+ session->ctxt = priv;
+ for (i = 0; i < bufsize; i++)
+ DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
+ i, priv->flc_desc[0].desc[i]);
+
+ return 0;
+
+error_out:
+ rte_free(session->cipher_key.data);
+ rte_free(session->auth_key.data);
+ rte_free(priv);
+ return -1;
+}
+
+static int
+dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform, void *sess)
+{
+ dpaa2_sec_session *session = sess;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (unlikely(sess == NULL)) {
+ DPAA2_SEC_ERR("Invalid session struct");
+ return -1;
+ }
+
+ /* Default IV length = 0 */
+ session->iv.length = 0;
+
+ /* Cipher Only */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
+ session->ctxt_type = DPAA2_SEC_CIPHER;
+ dpaa2_sec_cipher_init(dev, xform, session);
+
+ /* Authentication Only */
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next == NULL) {
+ session->ctxt_type = DPAA2_SEC_AUTH;
+ dpaa2_sec_auth_init(dev, xform, session);
+
+ /* Cipher then Authenticate */
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ session->ext_params.aead_ctxt.auth_cipher_text = true;
+ dpaa2_sec_aead_chain_init(dev, xform, session);
+
+ /* Authenticate then Cipher */
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ session->ext_params.aead_ctxt.auth_cipher_text = false;
+ dpaa2_sec_aead_chain_init(dev, xform, session);
+
+	/* AEAD operation for AES-GCM style algorithms */
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
+ xform->next == NULL) {
+ dpaa2_sec_aead_init(dev, xform, session);
+
+ } else {
+ DPAA2_SEC_ERR("Invalid crypto type");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
+ struct rte_security_session_conf *conf,
+ void *sess)
+{
+ struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
+ struct rte_crypto_auth_xform *auth_xform;
+ struct rte_crypto_cipher_xform *cipher_xform;
+ dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
+ struct ctxt_priv *priv;
+ struct ipsec_encap_pdb encap_pdb;
+ struct ipsec_decap_pdb decap_pdb;
+ struct alginfo authdata, cipherdata;
+ int bufsize;
+ struct sec_flow_context *flc;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
+ cipher_xform = &conf->crypto_xform->cipher;
+ auth_xform = &conf->crypto_xform->next->auth;
+ } else {
+ auth_xform = &conf->crypto_xform->auth;
+ cipher_xform = &conf->crypto_xform->next->cipher;
+ }
+ priv = (struct ctxt_priv *)rte_zmalloc(NULL,
+ sizeof(struct ctxt_priv) +
+ sizeof(struct sec_flc_desc),
+ RTE_CACHE_LINE_SIZE);
+
+ if (priv == NULL) {
+ DPAA2_SEC_ERR("No memory for priv CTXT");
+ return -ENOMEM;
+ }
+
+ flc = &priv->flc_desc[0].flc;
+
+ session->ctxt_type = DPAA2_SEC_IPSEC;
+ session->cipher_key.data = rte_zmalloc(NULL,
+ cipher_xform->key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->cipher_key.data == NULL &&
+ cipher_xform->key.length > 0) {
+ DPAA2_SEC_ERR("No Memory for cipher key");
+ rte_free(priv);
+ return -ENOMEM;
+ }
+
+ session->cipher_key.length = cipher_xform->key.length;
+ session->auth_key.data = rte_zmalloc(NULL,
+ auth_xform->key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->auth_key.data == NULL &&
+ auth_xform->key.length > 0) {
+ DPAA2_SEC_ERR("No Memory for auth key");
+ rte_free(session->cipher_key.data);
+ rte_free(priv);
+ return -ENOMEM;
+ }
+ session->auth_key.length = auth_xform->key.length;
+ memcpy(session->cipher_key.data, cipher_xform->key.data,
+ cipher_xform->key.length);
+ memcpy(session->auth_key.data, auth_xform->key.data,
+ auth_xform->key.length);
+
+ authdata.key = (size_t)session->auth_key.data;
+ authdata.keylen = session->auth_key.length;
+ authdata.key_enc_flags = 0;
+ authdata.key_type = RTA_DATA_IMM;
+ switch (auth_xform->algo) {
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ authdata.algtype = OP_PCL_IPSEC_HMAC_SHA1_96;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_MD5_HMAC:
+ authdata.algtype = OP_PCL_IPSEC_HMAC_MD5_96;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ authdata.algtype = OP_PCL_IPSEC_HMAC_SHA2_256_128;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA384_HMAC:
+ authdata.algtype = OP_PCL_IPSEC_HMAC_SHA2_384_192;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA512_HMAC:
+ authdata.algtype = OP_PCL_IPSEC_HMAC_SHA2_512_256;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_AES_CMAC:
+ authdata.algtype = OP_PCL_IPSEC_AES_CMAC_96;
+ session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
+ break;
+ case RTE_CRYPTO_AUTH_NULL:
+ authdata.algtype = OP_PCL_IPSEC_HMAC_NULL;
+ session->auth_alg = RTE_CRYPTO_AUTH_NULL;
+ break;
+ case RTE_CRYPTO_AUTH_SHA224_HMAC:
+ case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+ case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
+ case RTE_CRYPTO_AUTH_SHA1:
+ case RTE_CRYPTO_AUTH_SHA256:
+ case RTE_CRYPTO_AUTH_SHA512:
+ case RTE_CRYPTO_AUTH_SHA224:
+ case RTE_CRYPTO_AUTH_SHA384:
+ case RTE_CRYPTO_AUTH_MD5:
+ case RTE_CRYPTO_AUTH_AES_GMAC:
+ case RTE_CRYPTO_AUTH_KASUMI_F9:
+ case RTE_CRYPTO_AUTH_AES_CBC_MAC:
+ case RTE_CRYPTO_AUTH_ZUC_EIA3:
+ DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
+ auth_xform->algo);
+ goto out;
+ default:
+ DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
+ auth_xform->algo);
+ goto out;
+ }
+ cipherdata.key = (size_t)session->cipher_key.data;
+ cipherdata.keylen = session->cipher_key.length;
+ cipherdata.key_enc_flags = 0;
+ cipherdata.key_type = RTA_DATA_IMM;
+
+ switch (cipher_xform->algo) {
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ cipherdata.algtype = OP_PCL_IPSEC_AES_CBC;
+ cipherdata.algmode = OP_ALG_AAI_CBC;
+ session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ cipherdata.algtype = OP_PCL_IPSEC_3DES;
+ cipherdata.algmode = OP_ALG_AAI_CBC;
+ session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ cipherdata.algtype = OP_PCL_IPSEC_AES_CTR;
+ cipherdata.algmode = OP_ALG_AAI_CTR;
+ session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
+ break;
+ case RTE_CRYPTO_CIPHER_NULL:
+ cipherdata.algtype = OP_PCL_IPSEC_NULL;
+ break;
+ case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
+ case RTE_CRYPTO_CIPHER_3DES_ECB:
+ case RTE_CRYPTO_CIPHER_AES_ECB:
+ case RTE_CRYPTO_CIPHER_KASUMI_F8:
+ DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
+ cipher_xform->algo);
+ goto out;
+ default:
+ DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
+ cipher_xform->algo);
+ goto out;
+ }
+
+ if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
+ struct ip ip4_hdr;
+
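+		/* Build the outer IPv4 tunnel header that the SEC engine
+		 * prepends on encapsulation; it is embedded inline in the
+		 * encap PDB below (PDBOPTS_ESP_OIHI_PDB_INL).
+		 */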
+ flc->dhr = SEC_FLC_DHR_OUTBOUND;
+ ip4_hdr.ip_v = IPVERSION;
+ ip4_hdr.ip_hl = 5;
+ ip4_hdr.ip_len = rte_cpu_to_be_16(sizeof(ip4_hdr));
+ ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
+ ip4_hdr.ip_id = 0;
+ ip4_hdr.ip_off = 0;
+ ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
+ ip4_hdr.ip_p = 0x32;
+ ip4_hdr.ip_sum = 0;
+ ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
+ ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
+ ip4_hdr.ip_sum = calc_chksum((uint16_t *)(void *)&ip4_hdr,
+ sizeof(struct ip));
+
+ /* For Sec Proto only one descriptor is required. */
+ memset(&encap_pdb, 0, sizeof(struct ipsec_encap_pdb));
+ encap_pdb.options = (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
+ PDBOPTS_ESP_OIHI_PDB_INL |
+ PDBOPTS_ESP_IVSRC |
+ PDBHMO_ESP_ENCAP_DTTL;
+ encap_pdb.spi = ipsec_xform->spi;
+ encap_pdb.ip_hdr_len = sizeof(struct ip);
+
+ session->dir = DIR_ENC;
+ bufsize = cnstr_shdsc_ipsec_new_encap(priv->flc_desc[0].desc,
+ 1, 0, &encap_pdb,
+ (uint8_t *)&ip4_hdr,
+ &cipherdata, &authdata);
+ } else if (ipsec_xform->direction ==
+ RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
+ flc->dhr = SEC_FLC_DHR_INBOUND;
+ memset(&decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
+ decap_pdb.options = sizeof(struct ip) << 16;
+ session->dir = DIR_DEC;
+ bufsize = cnstr_shdsc_ipsec_new_decap(priv->flc_desc[0].desc,
+ 1, 0, &decap_pdb, &cipherdata, &authdata);
+ } else
+ goto out;
+
+ if (bufsize < 0) {
+		DPAA2_SEC_ERR("Crypto: Descriptor build failed");
+ goto out;
+ }
+
+ flc->word1_sdl = (uint8_t)bufsize;
+
+ /* Enable the stashing control bit */
+ DPAA2_SET_FLC_RSC(flc);
+ flc->word2_rflc_31_0 = lower_32_bits(
+ (size_t)&(((struct dpaa2_sec_qp *)
+ dev->data->queue_pairs[0])->rx_vq) | 0x14);
+ flc->word3_rflc_63_32 = upper_32_bits(
+ (size_t)&(((struct dpaa2_sec_qp *)
+ dev->data->queue_pairs[0])->rx_vq));
+
+ /* Set EWS bit i.e. enable write-safe */
+ DPAA2_SET_FLC_EWS(flc);
+	/* Set BS = 1, i.e. reuse input buffers as output buffers */
+ DPAA2_SET_FLC_REUSE_BS(flc);
+ /* Set FF = 10; reuse input buffers if they provide sufficient space */
+ DPAA2_SET_FLC_REUSE_FF(flc);
+
+ session->ctxt = priv;
+
+ return 0;
+out:
+ rte_free(session->auth_key.data);
+ rte_free(session->cipher_key.data);
+ rte_free(priv);
+ return -1;
+}
+
+static int
+dpaa2_sec_security_session_create(void *dev,
+ struct rte_security_session_conf *conf,
+ struct rte_security_session *sess,
+ struct rte_mempool *mempool)
+{
+ void *sess_private_data;
+ struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
+ int ret;
+
+ if (rte_mempool_get(mempool, &sess_private_data)) {
+ DPAA2_SEC_ERR("Couldn't get object from session mempool");
+ return -ENOMEM;
+ }
+
+ switch (conf->protocol) {
+ case RTE_SECURITY_PROTOCOL_IPSEC:
+ ret = dpaa2_sec_set_ipsec_session(cdev, conf,
+ sess_private_data);
+ break;
+ case RTE_SECURITY_PROTOCOL_MACSEC:
+ return -ENOTSUP;
+ default:
+ return -EINVAL;
+ }
+ if (ret != 0) {
+ DPAA2_SEC_ERR("Failed to configure session parameters");
+ /* Return session to mempool */
+ rte_mempool_put(mempool, sess_private_data);
+ return ret;
+ }
+
+ set_sec_session_private_data(sess, sess_private_data);
+
+ return ret;
+}
+
+/** Clear the memory of session so it doesn't leave key material behind */
+static int
+dpaa2_sec_security_session_destroy(void *dev __rte_unused,
+ struct rte_security_session *sess)
+{
+ PMD_INIT_FUNC_TRACE();
+ void *sess_priv = get_sec_session_private_data(sess);
+
+ dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;
+
+ if (sess_priv) {
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+
+ rte_free(s->ctxt);
+ rte_free(s->cipher_key.data);
+ rte_free(s->auth_key.data);
+ memset(sess, 0, sizeof(dpaa2_sec_session));
+ set_sec_session_private_data(sess, NULL);
+ rte_mempool_put(sess_mp, sess_priv);
+ }
+ return 0;
+}
+
+static int
+dpaa2_sec_sym_session_configure(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mempool)
+{
+ void *sess_private_data;
+ int ret;
+
+ if (rte_mempool_get(mempool, &sess_private_data)) {
+ DPAA2_SEC_ERR("Couldn't get object from session mempool");
+ return -ENOMEM;
+ }
+
+ ret = dpaa2_sec_set_session_parameters(dev, xform, sess_private_data);
+ if (ret != 0) {
+ DPAA2_SEC_ERR("Failed to configure session parameters");
+ /* Return session to mempool */
+ rte_mempool_put(mempool, sess_private_data);
+ return ret;
+ }
+
+ set_sym_session_private_data(sess, dev->driver_id,
+ sess_private_data);
+
+ return 0;
+}
+
+/** Clear the memory of session so it doesn't leave key material behind */
+static void
+dpaa2_sec_sym_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
+{
+ PMD_INIT_FUNC_TRACE();
+ uint8_t index = dev->driver_id;
+ void *sess_priv = get_sym_session_private_data(sess, index);
+ dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;
+
+ if (sess_priv) {
+ rte_free(s->ctxt);
+ rte_free(s->cipher_key.data);
+ rte_free(s->auth_key.data);
+ memset(sess, 0, sizeof(dpaa2_sec_session));
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+ set_sym_session_private_data(sess, index, NULL);
+ rte_mempool_put(sess_mp, sess_priv);
+ }
+}
+
+static int
+dpaa2_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
+ struct rte_cryptodev_config *config __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ return 0;
+}
+
+static int
+dpaa2_sec_dev_start(struct rte_cryptodev *dev)
+{
+ struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
+ struct dpseci_attr attr;
+ struct dpaa2_queue *dpaa2_q;
+ struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
+ dev->data->queue_pairs;
+ struct dpseci_rx_queue_attr rx_attr;
+ struct dpseci_tx_queue_attr tx_attr;
+ int ret, i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ memset(&attr, 0, sizeof(struct dpseci_attr));
+
+ ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token);
+ if (ret) {
+ DPAA2_SEC_ERR("DPSECI with HW_ID = %d ENABLE FAILED",
+ priv->hw_id);
+ goto get_attr_failure;
+ }
+ ret = dpseci_get_attributes(dpseci, CMD_PRI_LOW, priv->token, &attr);
+ if (ret) {
+ DPAA2_SEC_ERR("DPSEC ATTRIBUTE READ FAILED, disabling DPSEC");
+ goto get_attr_failure;
+ }
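+	/* Cache the hardware frame queue IDs of each configured queue pair so
+	 * that the enqueue/dequeue fast paths can address them directly.
+	 */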
+ for (i = 0; i < attr.num_rx_queues && qp[i]; i++) {
+ dpaa2_q = &qp[i]->rx_vq;
+ dpseci_get_rx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
+ &rx_attr);
+ dpaa2_q->fqid = rx_attr.fqid;
+ DPAA2_SEC_DEBUG("rx_fqid: %d", dpaa2_q->fqid);
+ }
+ for (i = 0; i < attr.num_tx_queues && qp[i]; i++) {
+ dpaa2_q = &qp[i]->tx_vq;
+ dpseci_get_tx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
+ &tx_attr);
+ dpaa2_q->fqid = tx_attr.fqid;
+ DPAA2_SEC_DEBUG("tx_fqid: %d", dpaa2_q->fqid);
+ }
+
+ return 0;
+get_attr_failure:
+ dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
+ return -1;
+}
+
+static void
+dpaa2_sec_dev_stop(struct rte_cryptodev *dev)
+{
+ struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ ret = dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
+ if (ret) {
+ DPAA2_SEC_ERR("Failure in disabling dpseci %d device",
+ priv->hw_id);
+ return;
+ }
+
+ ret = dpseci_reset(dpseci, CMD_PRI_LOW, priv->token);
+ if (ret < 0) {
+		DPAA2_SEC_ERR("SEC device cannot be reset: Error = %0x", ret);
+ return;
+ }
+}
+
+static int
+dpaa2_sec_dev_close(struct rte_cryptodev *dev)
+{
+ struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Function is reverse of dpaa2_sec_dev_init.
+ * It does the following:
+ * 1. Detach a DPSECI from attached resources i.e. buffer pools, dpbp_id
+ * 2. Close the DPSECI device
+ * 3. Free the allocated resources.
+ */
+
+ /*Close the device at underlying layer*/
+ ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token);
+ if (ret) {
+ DPAA2_SEC_ERR("Failure closing dpseci device: err(%d)", ret);
+ return -1;
+ }
+
+	/* Release the dpseci handle allocated in dev_init */
+ priv->hw = NULL;
+ rte_free(dpseci);
+
+ return 0;
+}
+
+static void
+dpaa2_sec_dev_infos_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *info)
+{
+ struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+ if (info != NULL) {
+ info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
+ info->feature_flags = dev->feature_flags;
+ info->capabilities = dpaa2_sec_capabilities;
+ /* No limit of number of sessions */
+ info->sym.max_nb_sessions = 0;
+ info->driver_id = cryptodev_driver_id;
+ }
+}
+
+static
+void dpaa2_sec_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
+ struct dpseci_sec_counters counters = {0};
+ struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
+ dev->data->queue_pairs;
+ int ret, i;
+
+ PMD_INIT_FUNC_TRACE();
+ if (stats == NULL) {
+ DPAA2_SEC_ERR("Invalid stats ptr NULL");
+ return;
+ }
+ for (i = 0; i < dev->data->nb_queue_pairs; i++) {
+ if (qp[i] == NULL) {
+ DPAA2_SEC_DEBUG("Uninitialised queue pair");
+ continue;
+ }
+
+ stats->enqueued_count += qp[i]->tx_vq.tx_pkts;
+ stats->dequeued_count += qp[i]->rx_vq.rx_pkts;
+ stats->enqueue_err_count += qp[i]->tx_vq.err_pkts;
+ stats->dequeue_err_count += qp[i]->rx_vq.err_pkts;
+ }
+
+ ret = dpseci_get_sec_counters(dpseci, CMD_PRI_LOW, priv->token,
+ &counters);
+ if (ret) {
+ DPAA2_SEC_ERR("SEC counters failed");
+ } else {
+ DPAA2_SEC_INFO("dpseci hardware stats:"
+ "\n\tNum of Requests Dequeued = %" PRIu64
+ "\n\tNum of Outbound Encrypt Requests = %" PRIu64
+ "\n\tNum of Inbound Decrypt Requests = %" PRIu64
+ "\n\tNum of Outbound Bytes Encrypted = %" PRIu64
+ "\n\tNum of Outbound Bytes Protected = %" PRIu64
+ "\n\tNum of Inbound Bytes Decrypted = %" PRIu64
+ "\n\tNum of Inbound Bytes Validated = %" PRIu64,
+ counters.dequeued_requests,
+ counters.ob_enc_requests,
+ counters.ib_dec_requests,
+ counters.ob_enc_bytes,
+ counters.ob_prot_bytes,
+ counters.ib_dec_bytes,
+ counters.ib_valid_bytes);
+ }
+}
+
+static
+void dpaa2_sec_stats_reset(struct rte_cryptodev *dev)
+{
+ int i;
+ struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
+ (dev->data->queue_pairs);
+
+ PMD_INIT_FUNC_TRACE();
+
+ for (i = 0; i < dev->data->nb_queue_pairs; i++) {
+ if (qp[i] == NULL) {
+ DPAA2_SEC_DEBUG("Uninitialised queue pair");
+ continue;
+ }
+ qp[i]->tx_vq.rx_pkts = 0;
+ qp[i]->tx_vq.tx_pkts = 0;
+ qp[i]->tx_vq.err_pkts = 0;
+ qp[i]->rx_vq.rx_pkts = 0;
+ qp[i]->rx_vq.tx_pkts = 0;
+ qp[i]->rx_vq.err_pkts = 0;
+ }
+}
+
+static struct rte_cryptodev_ops crypto_ops = {
+ .dev_configure = dpaa2_sec_dev_configure,
+ .dev_start = dpaa2_sec_dev_start,
+ .dev_stop = dpaa2_sec_dev_stop,
+ .dev_close = dpaa2_sec_dev_close,
+ .dev_infos_get = dpaa2_sec_dev_infos_get,
+ .stats_get = dpaa2_sec_stats_get,
+ .stats_reset = dpaa2_sec_stats_reset,
+ .queue_pair_setup = dpaa2_sec_queue_pair_setup,
+ .queue_pair_release = dpaa2_sec_queue_pair_release,
+ .queue_pair_count = dpaa2_sec_queue_pair_count,
+ .sym_session_get_size = dpaa2_sec_sym_session_get_size,
+ .sym_session_configure = dpaa2_sec_sym_session_configure,
+ .sym_session_clear = dpaa2_sec_sym_session_clear,
+};
+
+static const struct rte_security_capability *
+dpaa2_sec_capabilities_get(void *device __rte_unused)
+{
+ return dpaa2_sec_security_cap;
+}
+
+struct rte_security_ops dpaa2_sec_security_ops = {
+ .session_create = dpaa2_sec_security_session_create,
+ .session_update = NULL,
+ .session_stats_get = NULL,
+ .session_destroy = dpaa2_sec_security_session_destroy,
+ .set_pkt_metadata = NULL,
+ .capabilities_get = dpaa2_sec_capabilities_get
+};
+
+static int
+dpaa2_sec_uninit(const struct rte_cryptodev *dev)
+{
+ struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
+
+ rte_free(dev->security_ctx);
+
+ rte_mempool_free(internals->fle_pool);
+
+ DPAA2_SEC_INFO("Closing DPAA2_SEC device %s on numa socket %u",
+ dev->data->name, rte_socket_id());
+
+ return 0;
+}
+
+static int
+dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
+{
+ struct dpaa2_sec_dev_private *internals;
+ struct rte_device *dev = cryptodev->device;
+ struct rte_dpaa2_device *dpaa2_dev;
+ struct rte_security_ctx *security_instance;
+ struct fsl_mc_io *dpseci;
+ uint16_t token;
+ struct dpseci_attr attr;
+ int retcode, hw_id;
+ char str[20];
+
+ PMD_INIT_FUNC_TRACE();
+ dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
+ if (dpaa2_dev == NULL) {
+ DPAA2_SEC_ERR("DPAA2 SEC device not found");
+ return -1;
+ }
+ hw_id = dpaa2_dev->object_id;
+
+ cryptodev->driver_id = cryptodev_driver_id;
+ cryptodev->dev_ops = &crypto_ops;
+
+ cryptodev->enqueue_burst = dpaa2_sec_enqueue_burst;
+ cryptodev->dequeue_burst = dpaa2_sec_dequeue_burst;
+ cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_HW_ACCELERATED |
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+ RTE_CRYPTODEV_FF_SECURITY |
+ RTE_CRYPTODEV_FF_IN_PLACE_SGL |
+ RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
+ RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
+ RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
+ RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
+
+ internals = cryptodev->data->dev_private;
+
+ /*
+ * For secondary processes, we don't initialise any further as primary
+ * has already done this work. Only check we don't need a different
+ * RX function
+ */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ DPAA2_SEC_DEBUG("Device already init by primary process");
+ return 0;
+ }
+
+ /* Initialize security_ctx only for primary process*/
+ security_instance = rte_malloc("rte_security_instances_ops",
+ sizeof(struct rte_security_ctx), 0);
+ if (security_instance == NULL)
+ return -ENOMEM;
+ security_instance->device = (void *)cryptodev;
+ security_instance->ops = &dpaa2_sec_security_ops;
+ security_instance->sess_cnt = 0;
+ cryptodev->security_ctx = security_instance;
+
+ /*Open the rte device via MC and save the handle for further use*/
+ dpseci = (struct fsl_mc_io *)rte_calloc(NULL, 1,
+ sizeof(struct fsl_mc_io), 0);
+ if (!dpseci) {
+ DPAA2_SEC_ERR(
+ "Error in allocating the memory for dpsec object");
+ return -1;
+ }
+ dpseci->regs = rte_mcp_ptr_list[0];
+
+ retcode = dpseci_open(dpseci, CMD_PRI_LOW, hw_id, &token);
+ if (retcode != 0) {
+ DPAA2_SEC_ERR("Cannot open the dpsec device: Error = %x",
+ retcode);
+ goto init_error;
+ }
+ retcode = dpseci_get_attributes(dpseci, CMD_PRI_LOW, token, &attr);
+ if (retcode != 0) {
+ DPAA2_SEC_ERR(
+			     "Cannot get dpsec device attributes: Error = %x",
+ retcode);
+ goto init_error;
+ }
+ sprintf(cryptodev->data->name, "dpsec-%u", hw_id);
+
+ internals->max_nb_queue_pairs = attr.num_tx_queues;
+ cryptodev->data->nb_queue_pairs = internals->max_nb_queue_pairs;
+ internals->hw = dpseci;
+ internals->token = token;
+
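+	/* Create a per-device pool of FLE scratch buffers used by the data
+	 * path to build compound frame descriptors.
+	 */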
+ sprintf(str, "fle_pool_%d", cryptodev->data->dev_id);
+ internals->fle_pool = rte_mempool_create((const char *)str,
+ FLE_POOL_NUM_BUFS,
+ FLE_POOL_BUF_SIZE,
+ FLE_POOL_CACHE_SIZE, 0,
+ NULL, NULL, NULL, NULL,
+ SOCKET_ID_ANY, 0);
+ if (!internals->fle_pool) {
+ DPAA2_SEC_ERR("Mempool (%s) creation failed", str);
+ goto init_error;
+ }
+
+ DPAA2_SEC_INFO("driver %s: created", cryptodev->data->name);
+ return 0;
+
+init_error:
+ DPAA2_SEC_ERR("driver %s: create failed", cryptodev->data->name);
+
+ /* dpaa2_sec_uninit(crypto_dev_name); */
+ return -EFAULT;
+}
+
+static int
+cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv,
+ struct rte_dpaa2_device *dpaa2_dev)
+{
+ struct rte_cryptodev *cryptodev;
+ char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+
+ int retval;
+
+ sprintf(cryptodev_name, "dpsec-%d", dpaa2_dev->object_id);
+
+ cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
+ if (cryptodev == NULL)
+ return -ENOMEM;
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ cryptodev->data->dev_private = rte_zmalloc_socket(
+ "cryptodev private structure",
+ sizeof(struct dpaa2_sec_dev_private),
+ RTE_CACHE_LINE_SIZE,
+ rte_socket_id());
+
+ if (cryptodev->data->dev_private == NULL)
+ rte_panic("Cannot allocate memzone for private "
+ "device data");
+ }
+
+ dpaa2_dev->cryptodev = cryptodev;
+ cryptodev->device = &dpaa2_dev->device;
+ cryptodev->device->driver = &dpaa2_drv->driver;
+
+ /* init user callbacks */
+ TAILQ_INIT(&(cryptodev->link_intr_cbs));
+
+ /* Invoke PMD device initialization function */
+ retval = dpaa2_sec_dev_init(cryptodev);
+ if (retval == 0)
+ return 0;
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ rte_free(cryptodev->data->dev_private);
+
+ cryptodev->attached = RTE_CRYPTODEV_DETACHED;
+
+ return -ENXIO;
+}
+
+static int
+cryptodev_dpaa2_sec_remove(struct rte_dpaa2_device *dpaa2_dev)
+{
+ struct rte_cryptodev *cryptodev;
+ int ret;
+
+ cryptodev = dpaa2_dev->cryptodev;
+ if (cryptodev == NULL)
+ return -ENODEV;
+
+ ret = dpaa2_sec_uninit(cryptodev);
+ if (ret)
+ return ret;
+
+ return rte_cryptodev_pmd_destroy(cryptodev);
+}
+
+static struct rte_dpaa2_driver rte_dpaa2_sec_driver = {
+ .drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA,
+ .drv_type = DPAA2_CRYPTO,
+ .driver = {
+ .name = "DPAA2 SEC PMD"
+ },
+ .probe = cryptodev_dpaa2_sec_probe,
+ .remove = cryptodev_dpaa2_sec_remove,
+};
+
+static struct cryptodev_driver dpaa2_sec_crypto_drv;
+
+RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver);
+RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv,
+ rte_dpaa2_sec_driver.driver, cryptodev_driver_id);
+
+RTE_INIT(dpaa2_sec_init_log)
+{
+	/* Crypto PMD level logs */
+ dpaa2_logtype_sec = rte_log_register("pmd.crypto.dpaa2");
+ if (dpaa2_logtype_sec >= 0)
+ rte_log_set_level(dpaa2_logtype_sec, RTE_LOG_NOTICE);
+}
diff --git a/src/spdk/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_logs.h b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_logs.h
new file mode 100644
index 00000000..8a990442
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_logs.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright 2016 NXP
+ *
+ */
+
+#ifndef _DPAA2_SEC_LOGS_H_
+#define _DPAA2_SEC_LOGS_H_
+
+extern int dpaa2_logtype_sec;
+
+#define DPAA2_SEC_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, dpaa2_logtype_sec, "dpaa2_sec: " \
+ fmt "\n", ##args)
+
+#define DPAA2_SEC_DEBUG(fmt, args...) \
+ rte_log(RTE_LOG_DEBUG, dpaa2_logtype_sec, "dpaa2_sec: %s(): " \
+ fmt "\n", __func__, ##args)
+
+#define PMD_INIT_FUNC_TRACE() DPAA2_SEC_DEBUG(">>")
+
+#define DPAA2_SEC_INFO(fmt, args...) \
+ DPAA2_SEC_LOG(INFO, fmt, ## args)
+#define DPAA2_SEC_ERR(fmt, args...) \
+ DPAA2_SEC_LOG(ERR, fmt, ## args)
+#define DPAA2_SEC_WARN(fmt, args...) \
+ DPAA2_SEC_LOG(WARNING, fmt, ## args)
+
+/* DP Logs, toggled out at compile time if level lower than current level */
+#define DPAA2_SEC_DP_LOG(level, fmt, args...) \
+ RTE_LOG_DP(level, PMD, fmt, ## args)
+
+#define DPAA2_SEC_DP_DEBUG(fmt, args...) \
+ DPAA2_SEC_DP_LOG(DEBUG, fmt, ## args)
+#define DPAA2_SEC_DP_INFO(fmt, args...) \
+ DPAA2_SEC_DP_LOG(INFO, fmt, ## args)
+#define DPAA2_SEC_DP_WARN(fmt, args...) \
+ DPAA2_SEC_DP_LOG(WARNING, fmt, ## args)
+
+
+#endif /* _DPAA2_SEC_LOGS_H_ */
diff --git a/src/spdk/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
new file mode 100644
index 00000000..d015be1e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
@@ -0,0 +1,449 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright 2016 NXP
+ *
+ */
+
+#ifndef _RTE_DPAA2_SEC_PMD_PRIVATE_H_
+#define _RTE_DPAA2_SEC_PMD_PRIVATE_H_
+
+#define CRYPTODEV_NAME_DPAA2_SEC_PMD crypto_dpaa2_sec
+/**< NXP DPAA2 - SEC PMD device name */
+
+#define MAX_QUEUES 64
+#define MAX_DESC_SIZE 64
+/** private data structure for each DPAA2_SEC device */
+struct dpaa2_sec_dev_private {
+ void *mc_portal; /**< MC Portal for configuring this device */
+	void *hw; /**< Hardware handle for this device. Used by NADK framework */
+ struct rte_mempool *fle_pool; /* per device memory pool for FLE */
+	int32_t hw_id; /**< A unique ID of this device instance */
+ int32_t vfio_fd; /**< File descriptor received via VFIO */
+ uint16_t token; /**< Token required by DPxxx objects */
+ unsigned int max_nb_queue_pairs;
+ /**< Max number of queue pairs supported by device */
+};
+
+struct dpaa2_sec_qp {
+ struct dpaa2_queue rx_vq;
+ struct dpaa2_queue tx_vq;
+};
+
+enum shr_desc_type {
+ DESC_UPDATE,
+ DESC_FINAL,
+ DESC_INITFINAL,
+};
+
+#define DIR_ENC 1
+#define DIR_DEC 0
+
+#define DPAA2_SET_FLC_EWS(flc) (flc->word1_bits23_16 |= 0x1)
+#define DPAA2_SET_FLC_RSC(flc) (flc->word1_bits31_24 |= 0x1)
+#define DPAA2_SET_FLC_REUSE_BS(flc) (flc->mode_bits |= 0x8000)
+#define DPAA2_SET_FLC_REUSE_FF(flc) (flc->mode_bits |= 0x2000)
+
+/* SEC Flow Context Descriptor */
+struct sec_flow_context {
+ /* word 0 */
+ uint16_t word0_sdid; /* 11-0 SDID */
+ uint16_t word0_res; /* 31-12 reserved */
+
+ /* word 1 */
+ uint8_t word1_sdl; /* 5-0 SDL */
+ /* 7-6 reserved */
+
+ uint8_t word1_bits_15_8; /* 11-8 CRID */
+ /* 14-12 reserved */
+ /* 15 CRJD */
+
+ uint8_t word1_bits23_16; /* 16 EWS */
+ /* 17 DAC */
+ /* 18,19,20 ? */
+ /* 23-21 reserved */
+
+ uint8_t word1_bits31_24; /* 24 RSC */
+ /* 25 RBMT */
+ /* 31-26 reserved */
+
+ /* word 2 RFLC[31-0] */
+ uint32_t word2_rflc_31_0;
+
+ /* word 3 RFLC[63-32] */
+ uint32_t word3_rflc_63_32;
+
+ /* word 4 */
+ uint16_t word4_iicid; /* 15-0 IICID */
+ uint16_t word4_oicid; /* 31-16 OICID */
+
+ /* word 5 */
+ uint32_t word5_ofqid:24; /* 23-0 OFQID */
+ uint32_t word5_31_24:8;
+ /* 24 OSC */
+ /* 25 OBMT */
+ /* 29-26 reserved */
+ /* 31-30 ICR */
+
+ /* word 6 */
+ uint32_t word6_oflc_31_0;
+
+ /* word 7 */
+ uint32_t word7_oflc_63_32;
+
+ /* Word 8-15 storage profiles */
+ uint16_t dl; /**< DataLength(correction) */
+ uint16_t reserved; /**< reserved */
+ uint16_t dhr; /**< DataHeadRoom(correction) */
+ uint16_t mode_bits; /**< mode bits */
+ uint16_t bpv0; /**< buffer pool0 valid */
+ uint16_t bpid0; /**< Bypass Memory Translation */
+ uint16_t bpv1; /**< buffer pool1 valid */
+ uint16_t bpid1; /**< Bypass Memory Translation */
+ uint64_t word_12_15[2]; /**< word 12-15 are reserved */
+};
+
+struct sec_flc_desc {
+ struct sec_flow_context flc;
+ uint32_t desc[MAX_DESC_SIZE];
+};
+
+struct ctxt_priv {
+ struct rte_mempool *fle_pool; /* per device memory pool for FLE */
+ struct sec_flc_desc flc_desc[0];
+};
+
+enum dpaa2_sec_op_type {
+ DPAA2_SEC_NONE, /*!< No Cipher operations*/
+ DPAA2_SEC_CIPHER,/*!< CIPHER operations */
+ DPAA2_SEC_AUTH, /*!< Authentication Operations */
+ DPAA2_SEC_AEAD, /*!< AEAD (AES-GCM/CCM) type operations */
+	DPAA2_SEC_CIPHER_HASH, /*!< Chained cipher then
+				* authenticate operations
+				*/
+	DPAA2_SEC_HASH_CIPHER, /*!< Chained authenticate then
+				* cipher operations
+				*/
+ DPAA2_SEC_IPSEC, /*!< IPSEC protocol operations*/
+ DPAA2_SEC_PDCP, /*!< PDCP protocol operations*/
+ DPAA2_SEC_PKC, /*!< Public Key Cryptographic Operations */
+ DPAA2_SEC_MAX
+};
+
+struct dpaa2_sec_aead_ctxt {
+ uint16_t auth_only_len; /*!< Length of data for Auth only */
+ uint8_t auth_cipher_text; /**< Authenticate/cipher ordering */
+};
+
+typedef struct dpaa2_sec_session_entry {
+ void *ctxt;
+ uint8_t ctxt_type;
+ uint8_t dir; /*!< Operation Direction */
+ enum rte_crypto_cipher_algorithm cipher_alg; /*!< Cipher Algorithm*/
+ enum rte_crypto_auth_algorithm auth_alg; /*!< Authentication Algorithm*/
+ enum rte_crypto_aead_algorithm aead_alg; /*!< AEAD Algorithm*/
+ union {
+ struct {
+ uint8_t *data; /**< pointer to key data */
+ size_t length; /**< key length in bytes */
+ } aead_key;
+ struct {
+ struct {
+ uint8_t *data; /**< pointer to key data */
+ size_t length; /**< key length in bytes */
+ } cipher_key;
+ struct {
+ uint8_t *data; /**< pointer to key data */
+ size_t length; /**< key length in bytes */
+ } auth_key;
+ };
+ };
+ struct {
+ uint16_t length; /**< IV length in bytes */
+ uint16_t offset; /**< IV offset in bytes */
+ } iv;
+ uint16_t digest_length;
+ uint8_t status;
+ union {
+ struct dpaa2_sec_aead_ctxt aead_ctxt;
+ } ext_params;
+} dpaa2_sec_session;
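+
+/*
+ * Illustrative sketch, not part of the upstream driver: a minimal way a
+ * cipher-only AES-CBC session entry could be populated. The IV offset and
+ * direction chosen here are assumptions for the example only; the real
+ * values come from the rte_crypto transform supplied at session setup.
+ */
+static inline void
+dpaa2_sec_session_example_fill(dpaa2_sec_session *sess,
+                               uint8_t *key, size_t keylen)
+{
+        sess->ctxt_type = DPAA2_SEC_CIPHER;
+        sess->dir = DIR_ENC;
+        sess->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
+        sess->cipher_key.data = key;
+        sess->cipher_key.length = keylen;
+        sess->iv.offset = 0;  /* offset within the crypto op; assumption */
+        sess->iv.length = 16; /* AES block size */
+}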
+
+static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
+ { /* MD5 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_MD5_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 1,
+ .max = 16,
+ .increment = 1
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA1 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 1,
+ .max = 20,
+ .increment = 1
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA224 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA224_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 1,
+ .max = 28,
+ .increment = 1
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA256 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 1,
+ .max = 32,
+ .increment = 1
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA384 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
+ .block_size = 128,
+ .key_size = {
+ .min = 1,
+ .max = 128,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 1,
+ .max = 48,
+ .increment = 1
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA512 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
+ .block_size = 128,
+ .key_size = {
+ .min = 1,
+ .max = 128,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* AES GCM */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+ {.aead = {
+ .algo = RTE_CRYPTO_AEAD_AES_GCM,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .digest_size = {
+ .min = 8,
+ .max = 16,
+ .increment = 4
+ },
+ .aad_size = {
+ .min = 0,
+ .max = 240,
+ .increment = 1
+ },
+ .iv_size = {
+ .min = 12,
+ .max = 12,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* AES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* AES CTR */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_CTR,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* 3DES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_3DES_CBC,
+ .block_size = 8,
+ .key_size = {
+ .min = 16,
+ .max = 24,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+static const struct rte_security_capability dpaa2_sec_security_cap[] = {
+ { /* IPsec Lookaside Protocol offload ESP Tunnel Egress */
+ .action = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
+ .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+ .ipsec = {
+ .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+ .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+ .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
+ .options = { 0 }
+ },
+ .crypto_capabilities = dpaa2_sec_capabilities
+ },
+ { /* IPsec Lookaside Protocol offload ESP Tunnel Ingress */
+ .action = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
+ .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+ .ipsec = {
+ .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+ .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+ .direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
+ .options = { 0 }
+ },
+ .crypto_capabilities = dpaa2_sec_capabilities
+ },
+ {
+ .action = RTE_SECURITY_ACTION_TYPE_NONE
+ }
+};
+
+/**
+ * Checksum
+ *
+ * @param buffer buffer over which the checksum is calculated
+ * @param len buffer length in bytes
+ *
+ * @return checksum value in host cpu order
+ */
+static inline uint16_t
+calc_chksum(void *buffer, int len)
+{
+ uint16_t *buf = (uint16_t *)buffer;
+ uint32_t sum = 0;
+ uint16_t result;
+
+ for (sum = 0; len > 1; len -= 2)
+ sum += *buf++;
+
+ if (len == 1)
+ sum += *(unsigned char *)buf;
+
+ sum = (sum >> 16) + (sum & 0xFFFF);
+ sum += (sum >> 16);
+ result = ~sum;
+
+ return result;
+}
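+
+/*
+ * Illustrative usage sketch, not part of the upstream driver: filling the
+ * checksum field (16-bit word 5) of a 20-byte IPv4-style header template.
+ * The field must be zeroed before the checksum is computed over the header.
+ */
+static inline void
+example_fill_ipv4_hdr_chksum(uint16_t hdr_words[10])
+{
+        hdr_words[5] = 0;
+        hdr_words[5] = calc_chksum(hdr_words, 20);
+}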
+
+#endif /* _RTE_DPAA2_SEC_PMD_PRIVATE_H_ */
diff --git a/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/compat.h b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/compat.h
new file mode 100644
index 00000000..ce946ccb
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/compat.h
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ */
+
+#ifndef __RTA_COMPAT_H__
+#define __RTA_COMPAT_H__
+
+#include <stdint.h>
+#include <errno.h>
+
+#ifdef __GLIBC__
+#include <string.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdbool.h>
+
+#include <rte_byteorder.h>
+#include <rte_common.h>
+
+#ifndef __BYTE_ORDER__
+#error "Undefined endianness"
+#endif
+
+#else
+#error Environment not supported!
+#endif
+
+#ifndef __always_inline
+#define __always_inline __rte_always_inline
+#endif
+
+#ifndef __always_unused
+#define __always_unused __attribute__((unused))
+#endif
+
+#ifndef __maybe_unused
+#define __maybe_unused __attribute__((unused))
+#endif
+
+#if defined(__GLIBC__) && !defined(pr_debug)
+#if !defined(SUPPRESS_PRINTS) && defined(RTA_DEBUG)
+#define pr_debug(fmt, ...) \
+ RTE_LOG(DEBUG, PMD, "%s(): " fmt "\n", __func__, ##__VA_ARGS__)
+#else
+#define pr_debug(fmt, ...) do { } while (0)
+#endif
+#endif /* pr_debug */
+
+#if defined(__GLIBC__) && !defined(pr_err)
+#if !defined(SUPPRESS_PRINTS)
+#define pr_err(fmt, ...) \
+ RTE_LOG(ERR, PMD, "%s(): " fmt "\n", __func__, ##__VA_ARGS__)
+#else
+#define pr_err(fmt, ...) do { } while (0)
+#endif
+#endif /* pr_err */
+
+#if defined(__GLIBC__) && !defined(pr_warn)
+#if !defined(SUPPRESS_PRINTS)
+#define pr_warn(fmt, ...) \
+ RTE_LOG(WARNING, PMD, "%s(): " fmt "\n", __func__, ##__VA_ARGS__)
+#else
+#define pr_warn(fmt, ...) do { } while (0)
+#endif
+#endif /* pr_warn */
+
+/**
+ * ARRAY_SIZE - returns the number of elements in an array
+ * @x: array
+ */
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#endif
+
+#ifndef ALIGN
+#define ALIGN(x, a) (((x) + ((__typeof__(x))(a) - 1)) & \
+ ~((__typeof__(x))(a) - 1))
+#endif
+
+#ifndef BIT
+#define BIT(nr) (1UL << (nr))
+#endif
+
+#ifndef upper_32_bits
+/**
+ * upper_32_bits - return bits 32-63 of a number
+ * @n: the number we're accessing
+ */
+#define upper_32_bits(n) ((uint32_t)(((n) >> 16) >> 16))
+#endif
+
+#ifndef lower_32_bits
+/**
+ * lower_32_bits - return bits 0-31 of a number
+ * @n: the number we're accessing
+ */
+#define lower_32_bits(n) ((uint32_t)(n))
+#endif
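+
+/*
+ * Illustrative sketch, not part of the upstream header: splitting a 64-bit
+ * bus address into the high/low 32-bit words that descriptor pointer fields
+ * are written as. The helper name is only an example.
+ */
+static inline void
+example_split_addr64(uint64_t addr, uint32_t *hi, uint32_t *lo)
+{
+        *hi = upper_32_bits(addr);
+        *lo = lower_32_bits(addr);
+}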
+
+/* Use Linux naming convention */
+#ifdef __GLIBC__
+ #define swab16(x) rte_bswap16(x)
+ #define swab32(x) rte_bswap32(x)
+ #define swab64(x) rte_bswap64(x)
+ /* Define cpu_to_be32 macro if not defined in the build environment */
+ #if !defined(cpu_to_be32)
+ #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ #define cpu_to_be32(x) (x)
+ #else
+ #define cpu_to_be32(x) swab32(x)
+ #endif
+ #endif
+ /* Define cpu_to_le32 macro if not defined in the build environment */
+ #if !defined(cpu_to_le32)
+ #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ #define cpu_to_le32(x) swab32(x)
+ #else
+ #define cpu_to_le32(x) (x)
+ #endif
+ #endif
+#endif
+
+#endif /* __RTA_COMPAT_H__ */
diff --git a/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/desc.h b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/desc.h
new file mode 100644
index 00000000..e9255832
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/desc.h
@@ -0,0 +1,2568 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ */
+
+/*
+ * SEC descriptor composition header.
+ * Definitions to support SEC descriptor instruction generation
+ */
+
+#ifndef __RTA_DESC_H__
+#define __RTA_DESC_H__
+
+/* hw/compat.h is not delivered in kernel */
+#ifndef __KERNEL__
+#include "hw/compat.h"
+#endif
+
+/* Max size of any SEC descriptor in 32-bit words, inclusive of header */
+#define MAX_CAAM_DESCSIZE 64
+
+#define CAAM_CMD_SZ sizeof(uint32_t)
+#define CAAM_PTR_SZ sizeof(dma_addr_t)
+#define CAAM_DESC_BYTES_MAX (CAAM_CMD_SZ * MAX_CAAM_DESCSIZE)
+#define DESC_JOB_IO_LEN (CAAM_CMD_SZ * 5 + CAAM_PTR_SZ * 3)
+
+/* Block size of any entity covered/uncovered with a KEK/TKEK */
+#define KEK_BLOCKSIZE 16
+
+/*
+ * Supported descriptor command types as they show up
+ * inside a descriptor command word.
+ */
+#define CMD_SHIFT 27
+#define CMD_MASK (0x1f << CMD_SHIFT)
+
+#define CMD_KEY (0x00 << CMD_SHIFT)
+#define CMD_SEQ_KEY (0x01 << CMD_SHIFT)
+#define CMD_LOAD (0x02 << CMD_SHIFT)
+#define CMD_SEQ_LOAD (0x03 << CMD_SHIFT)
+#define CMD_FIFO_LOAD (0x04 << CMD_SHIFT)
+#define CMD_SEQ_FIFO_LOAD (0x05 << CMD_SHIFT)
+#define CMD_MOVEDW (0x06 << CMD_SHIFT)
+#define CMD_MOVEB (0x07 << CMD_SHIFT)
+#define CMD_STORE (0x0a << CMD_SHIFT)
+#define CMD_SEQ_STORE (0x0b << CMD_SHIFT)
+#define CMD_FIFO_STORE (0x0c << CMD_SHIFT)
+#define CMD_SEQ_FIFO_STORE (0x0d << CMD_SHIFT)
+#define CMD_MOVE_LEN (0x0e << CMD_SHIFT)
+#define CMD_MOVE (0x0f << CMD_SHIFT)
+#define CMD_OPERATION ((uint32_t)(0x10 << CMD_SHIFT))
+#define CMD_SIGNATURE ((uint32_t)(0x12 << CMD_SHIFT))
+#define CMD_JUMP ((uint32_t)(0x14 << CMD_SHIFT))
+#define CMD_MATH ((uint32_t)(0x15 << CMD_SHIFT))
+#define CMD_DESC_HDR ((uint32_t)(0x16 << CMD_SHIFT))
+#define CMD_SHARED_DESC_HDR ((uint32_t)(0x17 << CMD_SHIFT))
+#define CMD_MATHI ((uint32_t)(0x1d << CMD_SHIFT))
+#define CMD_SEQ_IN_PTR ((uint32_t)(0x1e << CMD_SHIFT))
+#define CMD_SEQ_OUT_PTR ((uint32_t)(0x1f << CMD_SHIFT))
+
+/* General-purpose class selector for all commands */
+#define CLASS_SHIFT 25
+#define CLASS_MASK (0x03 << CLASS_SHIFT)
+
+#define CLASS_NONE (0x00 << CLASS_SHIFT)
+#define CLASS_1 (0x01 << CLASS_SHIFT)
+#define CLASS_2 (0x02 << CLASS_SHIFT)
+#define CLASS_BOTH (0x03 << CLASS_SHIFT)
+
+/* ICV Check bits for Algo Operation command */
+#define ICV_CHECK_DISABLE 0
+#define ICV_CHECK_ENABLE 1
+
+/* Encap Mode check bits for Algo Operation command */
+#define DIR_ENC 1
+#define DIR_DEC 0
+
+/*
+ * Descriptor header command constructs
+ * Covers shared, job, and trusted descriptor headers
+ */
+
+/*
+ * Extended Job Descriptor Header
+ */
+#define HDR_EXT BIT(24)
+
+/*
+ * Read input frame as soon as possible (SHR HDR)
+ */
+#define HDR_RIF BIT(25)
+
+/*
+ * Require SEQ LIODN to be the Same (JOB HDR)
+ */
+#define HDR_RSLS BIT(25)
+
+/*
+ * Do Not Run - marks a descriptor not executable if there was
+ * a preceding error somewhere
+ */
+#define HDR_DNR BIT(24)
+
+/*
+ * ONE - should always be set. Combination of ONE (always
+ * set) and ZRO (always clear) forms an endianness sanity check
+ */
+#define HDR_ONE BIT(23)
+#define HDR_ZRO BIT(15)
+
+/* Start Index or SharedDesc Length */
+#define HDR_START_IDX_SHIFT 16
+#define HDR_START_IDX_MASK (0x3f << HDR_START_IDX_SHIFT)
+
+/* If shared descriptor header, 6-bit length */
+#define HDR_DESCLEN_SHR_MASK 0x3f
+
+/* If non-shared header, 7-bit length */
+#define HDR_DESCLEN_MASK 0x7f
+
+/* This is a TrustedDesc (if not SharedDesc) */
+#define HDR_TRUSTED BIT(14)
+
+/* Make into TrustedDesc (if not SharedDesc) */
+#define HDR_MAKE_TRUSTED BIT(13)
+
+/* Clear Input FiFO (if SharedDesc) */
+#define HDR_CLEAR_IFIFO BIT(13)
+
+/* Save context if self-shared (if SharedDesc) */
+#define HDR_SAVECTX BIT(12)
+
+/* Next item points to SharedDesc */
+#define HDR_SHARED BIT(12)
+
+/*
+ * Reverse Execution Order - execute JobDesc first, then
+ * execute SharedDesc (normally SharedDesc goes first).
+ */
+#define HDR_REVERSE BIT(11)
+
+/* Propagate DNR property to SharedDesc */
+#define HDR_PROP_DNR BIT(11)
+
+/* DECO Select Valid */
+#define HDR_EXT_DSEL_VALID BIT(7)
+
+/* Fake trusted descriptor */
+#define HDR_EXT_FTD BIT(8)
+
+/* JobDesc/SharedDesc share property */
+#define HDR_SD_SHARE_SHIFT 8
+#define HDR_SD_SHARE_MASK (0x03 << HDR_SD_SHARE_SHIFT)
+#define HDR_JD_SHARE_SHIFT 8
+#define HDR_JD_SHARE_MASK (0x07 << HDR_JD_SHARE_SHIFT)
+
+#define HDR_SHARE_NEVER (0x00 << HDR_SD_SHARE_SHIFT)
+#define HDR_SHARE_WAIT (0x01 << HDR_SD_SHARE_SHIFT)
+#define HDR_SHARE_SERIAL (0x02 << HDR_SD_SHARE_SHIFT)
+#define HDR_SHARE_ALWAYS (0x03 << HDR_SD_SHARE_SHIFT)
+#define HDR_SHARE_DEFER (0x04 << HDR_SD_SHARE_SHIFT)
+
+/* JobDesc/SharedDesc descriptor length */
+#define HDR_JD_LENGTH_MASK 0x7f
+#define HDR_SD_LENGTH_MASK 0x3f
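+
+/*
+ * Illustrative sketch, not part of the upstream header: composing a shared
+ * descriptor header word from the fields above. The driver builds headers
+ * through the RTA (runtime assembler) helpers, so the exact flag set used
+ * here is an assumption for the example only.
+ */
+static inline uint32_t
+example_shr_hdr_word(unsigned int start_idx, unsigned int desclen)
+{
+        return CMD_SHARED_DESC_HDR | HDR_ONE | HDR_SHARE_SERIAL |
+               ((start_idx << HDR_START_IDX_SHIFT) & HDR_START_IDX_MASK) |
+               (desclen & HDR_SD_LENGTH_MASK);
+}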
+
+/*
+ * KEY/SEQ_KEY Command Constructs
+ */
+
+/* Key Destination Class: 01 = Class 1, 02 = Class 2 */
+#define KEY_DEST_CLASS_SHIFT 25
+#define KEY_DEST_CLASS_MASK (0x03 << KEY_DEST_CLASS_SHIFT)
+#define KEY_DEST_CLASS1 (1 << KEY_DEST_CLASS_SHIFT)
+#define KEY_DEST_CLASS2 (2 << KEY_DEST_CLASS_SHIFT)
+
+/* Scatter-Gather Table/Variable Length Field */
+#define KEY_SGF BIT(24)
+#define KEY_VLF BIT(24)
+
+/* Immediate - Key follows command in the descriptor */
+#define KEY_IMM BIT(23)
+
+/*
+ * Already in Input Data FIFO - the Input Data Sequence is not read, since it is
+ * already in the Input Data FIFO.
+ */
+#define KEY_AIDF BIT(23)
+
+/*
+ * Encrypted - Key is encrypted either with the KEK, or
+ * with the TDKEK if this descriptor is trusted
+ */
+#define KEY_ENC BIT(22)
+
+/*
+ * No Write Back - Do not allow key to be FIFO STOREd
+ */
+#define KEY_NWB BIT(21)
+
+/*
+ * Enhanced Encryption of Key
+ */
+#define KEY_EKT BIT(20)
+
+/*
+ * Encrypted with Trusted Key
+ */
+#define KEY_TK BIT(15)
+
+/*
+ * Plaintext Store
+ */
+#define KEY_PTS BIT(14)
+
+/*
+ * KDEST - Key Destination: 0 - class key register,
+ * 1 - PKHA 'e', 2 - AFHA Sbox, 3 - MDHA split key
+ */
+#define KEY_DEST_SHIFT 16
+#define KEY_DEST_MASK (0x03 << KEY_DEST_SHIFT)
+
+#define KEY_DEST_CLASS_REG (0x00 << KEY_DEST_SHIFT)
+#define KEY_DEST_PKHA_E (0x01 << KEY_DEST_SHIFT)
+#define KEY_DEST_AFHA_SBOX (0x02 << KEY_DEST_SHIFT)
+#define KEY_DEST_MDHA_SPLIT (0x03 << KEY_DEST_SHIFT)
+
+/* Length in bytes */
+#define KEY_LENGTH_MASK 0x000003ff
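+
+/*
+ * Illustrative sketch, not part of the upstream header: composing a KEY
+ * command word for an immediate class 1 key of "keylen" bytes. The driver
+ * generates these words through the RTA helpers; this only shows how the
+ * bit fields above combine.
+ */
+static inline uint32_t
+example_key_cmd_word(unsigned int keylen)
+{
+        return CMD_KEY | KEY_DEST_CLASS1 | KEY_IMM |
+               (keylen & KEY_LENGTH_MASK);
+}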
+
+/*
+ * LOAD/SEQ_LOAD/STORE/SEQ_STORE Command Constructs
+ */
+
+/*
+ * Load/Store Destination: 0 = class independent CCB,
+ * 1 = class 1 CCB, 2 = class 2 CCB, 3 = DECO
+ */
+#define LDST_CLASS_SHIFT 25
+#define LDST_CLASS_MASK (0x03 << LDST_CLASS_SHIFT)
+#define LDST_CLASS_IND_CCB (0x00 << LDST_CLASS_SHIFT)
+#define LDST_CLASS_1_CCB (0x01 << LDST_CLASS_SHIFT)
+#define LDST_CLASS_2_CCB (0x02 << LDST_CLASS_SHIFT)
+#define LDST_CLASS_DECO (0x03 << LDST_CLASS_SHIFT)
+
+/* Scatter-Gather Table/Variable Length Field */
+#define LDST_SGF BIT(24)
+#define LDST_VLF BIT(24)
+
+/* Immediate - Key follows this command in descriptor */
+#define LDST_IMM_MASK 1
+#define LDST_IMM_SHIFT 23
+#define LDST_IMM BIT(23)
+
+/* SRC/DST - Destination for LOAD, Source for STORE */
+#define LDST_SRCDST_SHIFT 16
+#define LDST_SRCDST_MASK (0x7f << LDST_SRCDST_SHIFT)
+
+#define LDST_SRCDST_BYTE_CONTEXT (0x20 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_BYTE_KEY (0x40 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_BYTE_INFIFO (0x7c << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_BYTE_OUTFIFO (0x7e << LDST_SRCDST_SHIFT)
+
+#define LDST_SRCDST_WORD_MODE_REG (0x00 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_JQCTRL (0x00 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_KEYSZ_REG (0x01 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_JQDAR (0x01 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DATASZ_REG (0x02 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_STAT (0x02 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_ICVSZ_REG (0x03 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_BYTE_DCHKSM (0x03 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_PID (0x04 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_CHACTRL (0x06 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECOCTRL (0x06 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_IRQCTRL (0x07 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_PCLOVRD (0x07 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_CLRW (0x08 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_MATH0 (0x08 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_STAT (0x09 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_MATH1 (0x09 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_MATH2 (0x0a << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_AAD_SZ (0x0b << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_MATH3 (0x0b << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_CLASS1_IV_SZ (0x0c << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_ALTDS_CLASS1 (0x0f << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_PKHA_A_SZ (0x10 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_GTR (0x10 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_PKHA_B_SZ (0x11 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_PKHA_N_SZ (0x12 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_PKHA_E_SZ (0x13 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_CLASS_CTX (0x20 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_STR (0x20 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DESCBUF (0x40 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DESCBUF_JOB (0x41 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DESCBUF_SHARED (0x42 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DESCBUF_JOB_WE (0x45 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DESCBUF_SHARED_WE (0x46 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_INFO_FIFO_SZL (0x70 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_INFO_FIFO_SZM (0x71 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_INFO_FIFO_L (0x72 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_INFO_FIFO_M (0x73 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_SZL (0x74 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_SZM (0x75 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_IFNSR (0x76 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_OFNSR (0x77 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_BYTE_ALTSOURCE (0x78 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_INFO_FIFO (0x7a << LDST_SRCDST_SHIFT)
+
+/* Offset in source/destination */
+#define LDST_OFFSET_SHIFT 8
+#define LDST_OFFSET_MASK (0xff << LDST_OFFSET_SHIFT)
+
+/* LDOFF definitions used when DST = LDST_SRCDST_WORD_DECOCTRL */
+/* These could also be shifted by LDST_OFFSET_SHIFT - this reads better */
+#define LDOFF_CHG_SHARE_SHIFT 0
+#define LDOFF_CHG_SHARE_MASK (0x3 << LDOFF_CHG_SHARE_SHIFT)
+#define LDOFF_CHG_SHARE_NEVER (0x1 << LDOFF_CHG_SHARE_SHIFT)
+#define LDOFF_CHG_SHARE_OK_PROP (0x2 << LDOFF_CHG_SHARE_SHIFT)
+#define LDOFF_CHG_SHARE_OK_NO_PROP (0x3 << LDOFF_CHG_SHARE_SHIFT)
+
+#define LDOFF_ENABLE_AUTO_NFIFO BIT(2)
+#define LDOFF_DISABLE_AUTO_NFIFO BIT(3)
+
+#define LDOFF_CHG_NONSEQLIODN_SHIFT 4
+#define LDOFF_CHG_NONSEQLIODN_MASK (0x3 << LDOFF_CHG_NONSEQLIODN_SHIFT)
+#define LDOFF_CHG_NONSEQLIODN_SEQ (0x1 << LDOFF_CHG_NONSEQLIODN_SHIFT)
+#define LDOFF_CHG_NONSEQLIODN_NON_SEQ (0x2 << LDOFF_CHG_NONSEQLIODN_SHIFT)
+#define LDOFF_CHG_NONSEQLIODN_TRUSTED (0x3 << LDOFF_CHG_NONSEQLIODN_SHIFT)
+
+#define LDOFF_CHG_SEQLIODN_SHIFT 6
+#define LDOFF_CHG_SEQLIODN_MASK (0x3 << LDOFF_CHG_SEQLIODN_SHIFT)
+#define LDOFF_CHG_SEQLIODN_SEQ (0x1 << LDOFF_CHG_SEQLIODN_SHIFT)
+#define LDOFF_CHG_SEQLIODN_NON_SEQ (0x2 << LDOFF_CHG_SEQLIODN_SHIFT)
+#define LDOFF_CHG_SEQLIODN_TRUSTED (0x3 << LDOFF_CHG_SEQLIODN_SHIFT)
+
+/* Data length in bytes */
+#define LDST_LEN_SHIFT 0
+#define LDST_LEN_MASK (0xff << LDST_LEN_SHIFT)
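+
+/*
+ * Illustrative sketch, not part of the upstream header: a LOAD command word
+ * that would load "len" bytes into the class 1 context register at byte
+ * "offset". Flag choices are an assumption for the example only.
+ */
+static inline uint32_t
+example_load_class1_ctx_word(unsigned int offset, unsigned int len)
+{
+        return CMD_LOAD | LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT |
+               ((offset << LDST_OFFSET_SHIFT) & LDST_OFFSET_MASK) |
+               (len & LDST_LEN_MASK);
+}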
+
+/* Special Length definitions when dst=deco-ctrl */
+#define LDLEN_ENABLE_OSL_COUNT BIT(7)
+#define LDLEN_RST_CHA_OFIFO_PTR BIT(6)
+#define LDLEN_RST_OFIFO BIT(5)
+#define LDLEN_SET_OFIFO_OFF_VALID BIT(4)
+#define LDLEN_SET_OFIFO_OFF_RSVD BIT(3)
+#define LDLEN_SET_OFIFO_OFFSET_SHIFT 0
+#define LDLEN_SET_OFIFO_OFFSET_MASK (3 << LDLEN_SET_OFIFO_OFFSET_SHIFT)
+
+/* CCB Clear Written Register bits */
+#define CLRW_CLR_C1MODE BIT(0)
+#define CLRW_CLR_C1DATAS BIT(2)
+#define CLRW_CLR_C1ICV BIT(3)
+#define CLRW_CLR_C1CTX BIT(5)
+#define CLRW_CLR_C1KEY BIT(6)
+#define CLRW_CLR_PK_A BIT(12)
+#define CLRW_CLR_PK_B BIT(13)
+#define CLRW_CLR_PK_N BIT(14)
+#define CLRW_CLR_PK_E BIT(15)
+#define CLRW_CLR_C2MODE BIT(16)
+#define CLRW_CLR_C2KEYS BIT(17)
+#define CLRW_CLR_C2DATAS BIT(18)
+#define CLRW_CLR_C2CTX BIT(21)
+#define CLRW_CLR_C2KEY BIT(22)
+#define CLRW_RESET_CLS2_DONE BIT(26) /* era 4 */
+#define CLRW_RESET_CLS1_DONE BIT(27) /* era 4 */
+#define CLRW_RESET_CLS2_CHA BIT(28) /* era 4 */
+#define CLRW_RESET_CLS1_CHA BIT(29) /* era 4 */
+#define CLRW_RESET_OFIFO BIT(30) /* era 3 */
+#define CLRW_RESET_IFIFO_DFIFO BIT(31) /* era 3 */
+
+/* CHA Control Register bits */
+#define CCTRL_RESET_CHA_ALL BIT(0)
+#define CCTRL_RESET_CHA_AESA BIT(1)
+#define CCTRL_RESET_CHA_DESA BIT(2)
+#define CCTRL_RESET_CHA_AFHA BIT(3)
+#define CCTRL_RESET_CHA_KFHA BIT(4)
+#define CCTRL_RESET_CHA_SF8A BIT(5)
+#define CCTRL_RESET_CHA_PKHA BIT(6)
+#define CCTRL_RESET_CHA_MDHA BIT(7)
+#define CCTRL_RESET_CHA_CRCA BIT(8)
+#define CCTRL_RESET_CHA_RNG BIT(9)
+#define CCTRL_RESET_CHA_SF9A BIT(10)
+#define CCTRL_RESET_CHA_ZUCE BIT(11)
+#define CCTRL_RESET_CHA_ZUCA BIT(12)
+#define CCTRL_UNLOAD_PK_A0 BIT(16)
+#define CCTRL_UNLOAD_PK_A1 BIT(17)
+#define CCTRL_UNLOAD_PK_A2 BIT(18)
+#define CCTRL_UNLOAD_PK_A3 BIT(19)
+#define CCTRL_UNLOAD_PK_B0 BIT(20)
+#define CCTRL_UNLOAD_PK_B1 BIT(21)
+#define CCTRL_UNLOAD_PK_B2 BIT(22)
+#define CCTRL_UNLOAD_PK_B3 BIT(23)
+#define CCTRL_UNLOAD_PK_N BIT(24)
+#define CCTRL_UNLOAD_PK_A BIT(26)
+#define CCTRL_UNLOAD_PK_B BIT(27)
+#define CCTRL_UNLOAD_SBOX BIT(28)
+
+/* IRQ Control Register (CxCIRQ) bits */
+#define CIRQ_ADI BIT(1)
+#define CIRQ_DDI BIT(2)
+#define CIRQ_RCDI BIT(3)
+#define CIRQ_KDI BIT(4)
+#define CIRQ_S8DI BIT(5)
+#define CIRQ_PDI BIT(6)
+#define CIRQ_MDI BIT(7)
+#define CIRQ_CDI BIT(8)
+#define CIRQ_RNDI BIT(9)
+#define CIRQ_S9DI BIT(10)
+#define CIRQ_ZEDI BIT(11) /* valid for Era 5 or higher */
+#define CIRQ_ZADI BIT(12) /* valid for Era 5 or higher */
+#define CIRQ_AEI BIT(17)
+#define CIRQ_DEI BIT(18)
+#define CIRQ_RCEI BIT(19)
+#define CIRQ_KEI BIT(20)
+#define CIRQ_S8EI BIT(21)
+#define CIRQ_PEI BIT(22)
+#define CIRQ_MEI BIT(23)
+#define CIRQ_CEI BIT(24)
+#define CIRQ_RNEI BIT(25)
+#define CIRQ_S9EI BIT(26)
+#define CIRQ_ZEEI BIT(27) /* valid for Era 5 or higher */
+#define CIRQ_ZAEI BIT(28) /* valid for Era 5 or higher */
+
+/*
+ * FIFO_LOAD/FIFO_STORE/SEQ_FIFO_LOAD/SEQ_FIFO_STORE
+ * Command Constructs
+ */
+
+/*
+ * Load Destination: 0 = skip (SEQ_FIFO_LOAD only),
+ * 1 = Load for Class1, 2 = Load for Class2, 3 = Load both
+ * Store Source: 0 = normal, 1 = Class1key, 2 = Class2key
+ */
+#define FIFOLD_CLASS_SHIFT 25
+#define FIFOLD_CLASS_MASK (0x03 << FIFOLD_CLASS_SHIFT)
+#define FIFOLD_CLASS_SKIP (0x00 << FIFOLD_CLASS_SHIFT)
+#define FIFOLD_CLASS_CLASS1 (0x01 << FIFOLD_CLASS_SHIFT)
+#define FIFOLD_CLASS_CLASS2 (0x02 << FIFOLD_CLASS_SHIFT)
+#define FIFOLD_CLASS_BOTH (0x03 << FIFOLD_CLASS_SHIFT)
+
+#define FIFOST_CLASS_SHIFT 25
+#define FIFOST_CLASS_MASK (0x03 << FIFOST_CLASS_SHIFT)
+#define FIFOST_CLASS_NORMAL (0x00 << FIFOST_CLASS_SHIFT)
+#define FIFOST_CLASS_CLASS1KEY (0x01 << FIFOST_CLASS_SHIFT)
+#define FIFOST_CLASS_CLASS2KEY (0x02 << FIFOST_CLASS_SHIFT)
+#define FIFOST_CLASS_BOTH (0x03 << FIFOST_CLASS_SHIFT)
+
+/*
+ * Scatter-Gather Table/Variable Length Field
+ * If set for FIFO_LOAD, refers to a SG table. Within
+ * SEQ_FIFO_LOAD, is variable input sequence
+ */
+#define FIFOLDST_SGF_SHIFT 24
+#define FIFOLDST_SGF_MASK (1 << FIFOLDST_SGF_SHIFT)
+#define FIFOLDST_VLF_MASK (1 << FIFOLDST_SGF_SHIFT)
+#define FIFOLDST_SGF BIT(24)
+#define FIFOLDST_VLF BIT(24)
+
+/*
+ * Immediate - Data follows command in descriptor
+ * AIDF - Already in Input Data FIFO
+ */
+#define FIFOLD_IMM_SHIFT 23
+#define FIFOLD_IMM_MASK (1 << FIFOLD_IMM_SHIFT)
+#define FIFOLD_AIDF_MASK (1 << FIFOLD_IMM_SHIFT)
+#define FIFOLD_IMM BIT(23)
+#define FIFOLD_AIDF BIT(23)
+
+#define FIFOST_IMM_SHIFT 23
+#define FIFOST_IMM_MASK (1 << FIFOST_IMM_SHIFT)
+#define FIFOST_IMM BIT(23)
+
+/* Continue - Not the last FIFO store to come */
+#define FIFOST_CONT_SHIFT 23
+#define FIFOST_CONT_MASK (1 << FIFOST_CONT_SHIFT)
+#define FIFOST_CONT BIT(23)
+
+/*
+ * Extended Length - use 32-bit extended length that
+ * follows the pointer field. Illegal with IMM set
+ */
+#define FIFOLDST_EXT_SHIFT 22
+#define FIFOLDST_EXT_MASK (1 << FIFOLDST_EXT_SHIFT)
+#define FIFOLDST_EXT BIT(22)
+
+/* Input data type.*/
+#define FIFOLD_TYPE_SHIFT 16
+#define FIFOLD_CONT_TYPE_SHIFT 19 /* shift past last-flush bits */
+#define FIFOLD_TYPE_MASK (0x3f << FIFOLD_TYPE_SHIFT)
+
+/* PK types */
+#define FIFOLD_TYPE_PK (0x00 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_MASK (0x30 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_TYPEMASK (0x0f << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_A0 (0x00 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_A1 (0x01 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_A2 (0x02 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_A3 (0x03 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_B0 (0x04 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_B1 (0x05 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_B2 (0x06 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_B3 (0x07 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_N (0x08 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_A (0x0c << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_B (0x0d << FIFOLD_TYPE_SHIFT)
+
+/* Other types. Need to OR in last/flush bits as desired */
+#define FIFOLD_TYPE_MSG_MASK (0x38 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_MSG (0x10 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_MSG1OUT2 (0x18 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_IV (0x20 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_BITDATA (0x28 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_AAD (0x30 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_ICV (0x38 << FIFOLD_TYPE_SHIFT)
+
+/* Last/Flush bits for use with "other" types above */
+#define FIFOLD_TYPE_ACT_MASK (0x07 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_NOACTION (0x00 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_FLUSH1 (0x01 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LAST1 (0x02 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LAST2FLUSH (0x03 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LAST2 (0x04 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LAST2FLUSH1 (0x05 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LASTBOTH (0x06 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LASTBOTHFL (0x07 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_NOINFOFIFO (0x0f << FIFOLD_TYPE_SHIFT)
+
+#define FIFOLDST_LEN_MASK 0xffff
+#define FIFOLDST_EXT_LEN_MASK 0xffffffff
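+
+/*
+ * Illustrative sketch, not part of the upstream header: a FIFO LOAD command
+ * word feeding "len" bytes of message data to class 1 and marking it as the
+ * last class 1 data. Flag choices are an assumption for the example only.
+ */
+static inline uint32_t
+example_fifo_load_msg_word(unsigned int len)
+{
+        return CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 |
+               FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1 |
+               (len & FIFOLDST_LEN_MASK);
+}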
+
+/* Output data types */
+#define FIFOST_TYPE_SHIFT 16
+#define FIFOST_TYPE_MASK (0x3f << FIFOST_TYPE_SHIFT)
+
+#define FIFOST_TYPE_PKHA_A0 (0x00 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_A1 (0x01 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_A2 (0x02 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_A3 (0x03 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_B0 (0x04 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_B1 (0x05 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_B2 (0x06 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_B3 (0x07 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_N (0x08 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_A (0x0c << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_B (0x0d << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_AF_SBOX_JKEK (0x20 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_AF_SBOX_TKEK (0x21 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_E_JKEK (0x22 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_E_TKEK (0x23 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_KEY_KEK (0x24 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_KEY_TKEK (0x25 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_SPLIT_KEK (0x26 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_SPLIT_TKEK (0x27 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_OUTFIFO_KEK (0x28 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_OUTFIFO_TKEK (0x29 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_MESSAGE_DATA (0x30 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_MESSAGE_DATA2 (0x31 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_RNGSTORE (0x34 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_RNGFIFO (0x35 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_METADATA (0x3e << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_SKIP (0x3f << FIFOST_TYPE_SHIFT)
+
+/*
+ * OPERATION Command Constructs
+ */
+
+/* Operation type selectors - OP TYPE */
+#define OP_TYPE_SHIFT 24
+#define OP_TYPE_MASK (0x07 << OP_TYPE_SHIFT)
+
+#define OP_TYPE_UNI_PROTOCOL (0x00 << OP_TYPE_SHIFT)
+#define OP_TYPE_PK (0x01 << OP_TYPE_SHIFT)
+#define OP_TYPE_CLASS1_ALG (0x02 << OP_TYPE_SHIFT)
+#define OP_TYPE_CLASS2_ALG (0x04 << OP_TYPE_SHIFT)
+#define OP_TYPE_DECAP_PROTOCOL (0x06 << OP_TYPE_SHIFT)
+#define OP_TYPE_ENCAP_PROTOCOL (0x07 << OP_TYPE_SHIFT)
+
+/* ProtocolID selectors - PROTID */
+#define OP_PCLID_SHIFT 16
+#define OP_PCLID_MASK (0xff << OP_PCLID_SHIFT)
+
+/* Assuming OP_TYPE = OP_TYPE_UNI_PROTOCOL */
+#define OP_PCLID_IKEV1_PRF (0x01 << OP_PCLID_SHIFT)
+#define OP_PCLID_IKEV2_PRF (0x02 << OP_PCLID_SHIFT)
+#define OP_PCLID_SSL30_PRF (0x08 << OP_PCLID_SHIFT)
+#define OP_PCLID_TLS10_PRF (0x09 << OP_PCLID_SHIFT)
+#define OP_PCLID_TLS11_PRF (0x0a << OP_PCLID_SHIFT)
+#define OP_PCLID_TLS12_PRF (0x0b << OP_PCLID_SHIFT)
+#define OP_PCLID_DTLS10_PRF (0x0c << OP_PCLID_SHIFT)
+#define OP_PCLID_PUBLICKEYPAIR (0x14 << OP_PCLID_SHIFT)
+#define OP_PCLID_DSASIGN (0x15 << OP_PCLID_SHIFT)
+#define OP_PCLID_DSAVERIFY (0x16 << OP_PCLID_SHIFT)
+#define OP_PCLID_DIFFIEHELLMAN (0x17 << OP_PCLID_SHIFT)
+#define OP_PCLID_RSAENCRYPT (0x18 << OP_PCLID_SHIFT)
+#define OP_PCLID_RSADECRYPT (0x19 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_MD5 (0x20 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_SHA1 (0x21 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_SHA224 (0x22 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_SHA256 (0x23 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_SHA384 (0x24 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_SHA512 (0x25 << OP_PCLID_SHIFT)
+
+/* Assuming OP_TYPE = OP_TYPE_DECAP_PROTOCOL/ENCAP_PROTOCOL */
+#define OP_PCLID_IPSEC (0x01 << OP_PCLID_SHIFT)
+#define OP_PCLID_SRTP (0x02 << OP_PCLID_SHIFT)
+#define OP_PCLID_MACSEC (0x03 << OP_PCLID_SHIFT)
+#define OP_PCLID_WIFI (0x04 << OP_PCLID_SHIFT)
+#define OP_PCLID_WIMAX (0x05 << OP_PCLID_SHIFT)
+#define OP_PCLID_SSL30 (0x08 << OP_PCLID_SHIFT)
+#define OP_PCLID_TLS10 (0x09 << OP_PCLID_SHIFT)
+#define OP_PCLID_TLS11 (0x0a << OP_PCLID_SHIFT)
+#define OP_PCLID_TLS12 (0x0b << OP_PCLID_SHIFT)
+#define OP_PCLID_DTLS10 (0x0c << OP_PCLID_SHIFT)
+#define OP_PCLID_BLOB (0x0d << OP_PCLID_SHIFT)
+#define OP_PCLID_IPSEC_NEW (0x11 << OP_PCLID_SHIFT)
+#define OP_PCLID_3G_DCRC (0x31 << OP_PCLID_SHIFT)
+#define OP_PCLID_3G_RLC_PDU (0x32 << OP_PCLID_SHIFT)
+#define OP_PCLID_3G_RLC_SDU (0x33 << OP_PCLID_SHIFT)
+#define OP_PCLID_LTE_PDCP_USER (0x42 << OP_PCLID_SHIFT)
+#define OP_PCLID_LTE_PDCP_CTRL (0x43 << OP_PCLID_SHIFT)
+#define OP_PCLID_LTE_PDCP_CTRL_MIXED (0x44 << OP_PCLID_SHIFT)
+
+/*
+ * ProtocolInfo selectors
+ */
+#define OP_PCLINFO_MASK 0xffff
+
+/* for OP_PCLID_IPSEC */
+#define OP_PCL_IPSEC_CIPHER_MASK 0xff00
+#define OP_PCL_IPSEC_AUTH_MASK 0x00ff
+
+#define OP_PCL_IPSEC_DES_IV64 0x0100
+#define OP_PCL_IPSEC_DES 0x0200
+#define OP_PCL_IPSEC_3DES 0x0300
+#define OP_PCL_IPSEC_NULL 0x0B00
+#define OP_PCL_IPSEC_AES_CBC 0x0c00
+#define OP_PCL_IPSEC_AES_CTR 0x0d00
+#define OP_PCL_IPSEC_AES_XTS 0x1600
+#define OP_PCL_IPSEC_AES_CCM8 0x0e00
+#define OP_PCL_IPSEC_AES_CCM12 0x0f00
+#define OP_PCL_IPSEC_AES_CCM16 0x1000
+#define OP_PCL_IPSEC_AES_GCM8 0x1200
+#define OP_PCL_IPSEC_AES_GCM12 0x1300
+#define OP_PCL_IPSEC_AES_GCM16 0x1400
+#define OP_PCL_IPSEC_AES_NULL_WITH_GMAC 0x1500
+
+#define OP_PCL_IPSEC_HMAC_NULL 0x0000
+#define OP_PCL_IPSEC_HMAC_MD5_96 0x0001
+#define OP_PCL_IPSEC_HMAC_SHA1_96 0x0002
+#define OP_PCL_IPSEC_AES_XCBC_MAC_96 0x0005
+#define OP_PCL_IPSEC_HMAC_MD5_128 0x0006
+#define OP_PCL_IPSEC_HMAC_SHA1_160 0x0007
+#define OP_PCL_IPSEC_AES_CMAC_96 0x0008
+#define OP_PCL_IPSEC_HMAC_SHA2_256_128 0x000c
+#define OP_PCL_IPSEC_HMAC_SHA2_384_192 0x000d
+#define OP_PCL_IPSEC_HMAC_SHA2_512_256 0x000e
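+
+/*
+ * Illustrative sketch, not part of the upstream header: a protocol OPERATION
+ * command word for ESP encapsulation with AES-CBC and HMAC-SHA1-96, built
+ * from the PROTID and protinfo fields above. The cipher/auth pairing is an
+ * assumption for the example only.
+ */
+static inline uint32_t
+example_ipsec_encap_op_word(void)
+{
+        return CMD_OPERATION | OP_TYPE_ENCAP_PROTOCOL | OP_PCLID_IPSEC |
+               OP_PCL_IPSEC_AES_CBC | OP_PCL_IPSEC_HMAC_SHA1_96;
+}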
+
+/* For SRTP - OP_PCLID_SRTP */
+#define OP_PCL_SRTP_CIPHER_MASK 0xff00
+#define OP_PCL_SRTP_AUTH_MASK 0x00ff
+
+#define OP_PCL_SRTP_AES_CTR 0x0d00
+
+#define OP_PCL_SRTP_HMAC_SHA1_160 0x0007
+
+/* For SSL 3.0 - OP_PCLID_SSL30 */
+#define OP_PCL_SSL30_AES_128_CBC_SHA 0x002f
+#define OP_PCL_SSL30_AES_128_CBC_SHA_2 0x0030
+#define OP_PCL_SSL30_AES_128_CBC_SHA_3 0x0031
+#define OP_PCL_SSL30_AES_128_CBC_SHA_4 0x0032
+#define OP_PCL_SSL30_AES_128_CBC_SHA_5 0x0033
+#define OP_PCL_SSL30_AES_128_CBC_SHA_6 0x0034
+#define OP_PCL_SSL30_AES_128_CBC_SHA_7 0x008c
+#define OP_PCL_SSL30_AES_128_CBC_SHA_8 0x0090
+#define OP_PCL_SSL30_AES_128_CBC_SHA_9 0x0094
+#define OP_PCL_SSL30_AES_128_CBC_SHA_10 0xc004
+#define OP_PCL_SSL30_AES_128_CBC_SHA_11 0xc009
+#define OP_PCL_SSL30_AES_128_CBC_SHA_12 0xc00e
+#define OP_PCL_SSL30_AES_128_CBC_SHA_13 0xc013
+#define OP_PCL_SSL30_AES_128_CBC_SHA_14 0xc018
+#define OP_PCL_SSL30_AES_128_CBC_SHA_15 0xc01d
+#define OP_PCL_SSL30_AES_128_CBC_SHA_16 0xc01e
+#define OP_PCL_SSL30_AES_128_CBC_SHA_17 0xc01f
+
+#define OP_PCL_SSL30_AES_256_CBC_SHA 0x0035
+#define OP_PCL_SSL30_AES_256_CBC_SHA_2 0x0036
+#define OP_PCL_SSL30_AES_256_CBC_SHA_3 0x0037
+#define OP_PCL_SSL30_AES_256_CBC_SHA_4 0x0038
+#define OP_PCL_SSL30_AES_256_CBC_SHA_5 0x0039
+#define OP_PCL_SSL30_AES_256_CBC_SHA_6 0x003a
+#define OP_PCL_SSL30_AES_256_CBC_SHA_7 0x008d
+#define OP_PCL_SSL30_AES_256_CBC_SHA_8 0x0091
+#define OP_PCL_SSL30_AES_256_CBC_SHA_9 0x0095
+#define OP_PCL_SSL30_AES_256_CBC_SHA_10 0xc005
+#define OP_PCL_SSL30_AES_256_CBC_SHA_11 0xc00a
+#define OP_PCL_SSL30_AES_256_CBC_SHA_12 0xc00f
+#define OP_PCL_SSL30_AES_256_CBC_SHA_13 0xc014
+#define OP_PCL_SSL30_AES_256_CBC_SHA_14 0xc019
+#define OP_PCL_SSL30_AES_256_CBC_SHA_15 0xc020
+#define OP_PCL_SSL30_AES_256_CBC_SHA_16 0xc021
+#define OP_PCL_SSL30_AES_256_CBC_SHA_17 0xc022
+
+#define OP_PCL_SSL30_AES_128_GCM_SHA256_1 0x009C
+#define OP_PCL_SSL30_AES_256_GCM_SHA384_1 0x009D
+#define OP_PCL_SSL30_AES_128_GCM_SHA256_2 0x009E
+#define OP_PCL_SSL30_AES_256_GCM_SHA384_2 0x009F
+#define OP_PCL_SSL30_AES_128_GCM_SHA256_3 0x00A0
+#define OP_PCL_SSL30_AES_256_GCM_SHA384_3 0x00A1
+#define OP_PCL_SSL30_AES_128_GCM_SHA256_4 0x00A2
+#define OP_PCL_SSL30_AES_256_GCM_SHA384_4 0x00A3
+#define OP_PCL_SSL30_AES_128_GCM_SHA256_5 0x00A4
+#define OP_PCL_SSL30_AES_256_GCM_SHA384_5 0x00A5
+#define OP_PCL_SSL30_AES_128_GCM_SHA256_6 0x00A6
+
+#define OP_PCL_TLS_DH_ANON_AES_256_GCM_SHA384 0x00A7
+#define OP_PCL_TLS_PSK_AES_128_GCM_SHA256 0x00A8
+#define OP_PCL_TLS_PSK_AES_256_GCM_SHA384 0x00A9
+#define OP_PCL_TLS_DHE_PSK_AES_128_GCM_SHA256 0x00AA
+#define OP_PCL_TLS_DHE_PSK_AES_256_GCM_SHA384 0x00AB
+#define OP_PCL_TLS_RSA_PSK_AES_128_GCM_SHA256 0x00AC
+#define OP_PCL_TLS_RSA_PSK_AES_256_GCM_SHA384 0x00AD
+#define OP_PCL_TLS_PSK_AES_128_CBC_SHA256 0x00AE
+#define OP_PCL_TLS_PSK_AES_256_CBC_SHA384 0x00AF
+#define OP_PCL_TLS_DHE_PSK_AES_128_CBC_SHA256 0x00B2
+#define OP_PCL_TLS_DHE_PSK_AES_256_CBC_SHA384 0x00B3
+#define OP_PCL_TLS_RSA_PSK_AES_128_CBC_SHA256 0x00B6
+#define OP_PCL_TLS_RSA_PSK_AES_256_CBC_SHA384 0x00B7
+
+#define OP_PCL_SSL30_3DES_EDE_CBC_MD5 0x0023
+
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA 0x001f
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_2 0x008b
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_3 0x008f
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_4 0x0093
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_5 0x000a
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_6 0x000d
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_7 0x0010
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_8 0x0013
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_9 0x0016
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_10 0x001b
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_11 0xc003
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_12 0xc008
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_13 0xc00d
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_14 0xc012
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_15 0xc017
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_16 0xc01a
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_17 0xc01b
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_18 0xc01c
+
+#define OP_PCL_SSL30_DES40_CBC_MD5 0x0029
+
+#define OP_PCL_SSL30_DES_CBC_MD5 0x0022
+
+#define OP_PCL_SSL30_DES40_CBC_SHA 0x0008
+#define OP_PCL_SSL30_DES40_CBC_SHA_2 0x000b
+#define OP_PCL_SSL30_DES40_CBC_SHA_3 0x000e
+#define OP_PCL_SSL30_DES40_CBC_SHA_4 0x0011
+#define OP_PCL_SSL30_DES40_CBC_SHA_5 0x0014
+#define OP_PCL_SSL30_DES40_CBC_SHA_6 0x0019
+#define OP_PCL_SSL30_DES40_CBC_SHA_7 0x0026
+
+#define OP_PCL_SSL30_DES_CBC_SHA 0x001e
+#define OP_PCL_SSL30_DES_CBC_SHA_2 0x0009
+#define OP_PCL_SSL30_DES_CBC_SHA_3 0x000c
+#define OP_PCL_SSL30_DES_CBC_SHA_4 0x000f
+#define OP_PCL_SSL30_DES_CBC_SHA_5 0x0012
+#define OP_PCL_SSL30_DES_CBC_SHA_6 0x0015
+#define OP_PCL_SSL30_DES_CBC_SHA_7 0x001a
+
+#define OP_PCL_SSL30_RC4_128_MD5 0x0024
+#define OP_PCL_SSL30_RC4_128_MD5_2 0x0004
+#define OP_PCL_SSL30_RC4_128_MD5_3 0x0018
+
+#define OP_PCL_SSL30_RC4_40_MD5 0x002b
+#define OP_PCL_SSL30_RC4_40_MD5_2 0x0003
+#define OP_PCL_SSL30_RC4_40_MD5_3 0x0017
+
+#define OP_PCL_SSL30_RC4_128_SHA 0x0020
+#define OP_PCL_SSL30_RC4_128_SHA_2 0x008a
+#define OP_PCL_SSL30_RC4_128_SHA_3 0x008e
+#define OP_PCL_SSL30_RC4_128_SHA_4 0x0092
+#define OP_PCL_SSL30_RC4_128_SHA_5 0x0005
+#define OP_PCL_SSL30_RC4_128_SHA_6 0xc002
+#define OP_PCL_SSL30_RC4_128_SHA_7 0xc007
+#define OP_PCL_SSL30_RC4_128_SHA_8 0xc00c
+#define OP_PCL_SSL30_RC4_128_SHA_9 0xc011
+#define OP_PCL_SSL30_RC4_128_SHA_10 0xc016
+
+#define OP_PCL_SSL30_RC4_40_SHA 0x0028
+
+/* For TLS 1.0 - OP_PCLID_TLS10 */
+#define OP_PCL_TLS10_AES_128_CBC_SHA 0x002f
+#define OP_PCL_TLS10_AES_128_CBC_SHA_2 0x0030
+#define OP_PCL_TLS10_AES_128_CBC_SHA_3 0x0031
+#define OP_PCL_TLS10_AES_128_CBC_SHA_4 0x0032
+#define OP_PCL_TLS10_AES_128_CBC_SHA_5 0x0033
+#define OP_PCL_TLS10_AES_128_CBC_SHA_6 0x0034
+#define OP_PCL_TLS10_AES_128_CBC_SHA_7 0x008c
+#define OP_PCL_TLS10_AES_128_CBC_SHA_8 0x0090
+#define OP_PCL_TLS10_AES_128_CBC_SHA_9 0x0094
+#define OP_PCL_TLS10_AES_128_CBC_SHA_10 0xc004
+#define OP_PCL_TLS10_AES_128_CBC_SHA_11 0xc009
+#define OP_PCL_TLS10_AES_128_CBC_SHA_12 0xc00e
+#define OP_PCL_TLS10_AES_128_CBC_SHA_13 0xc013
+#define OP_PCL_TLS10_AES_128_CBC_SHA_14 0xc018
+#define OP_PCL_TLS10_AES_128_CBC_SHA_15 0xc01d
+#define OP_PCL_TLS10_AES_128_CBC_SHA_16 0xc01e
+#define OP_PCL_TLS10_AES_128_CBC_SHA_17 0xc01f
+
+#define OP_PCL_TLS10_AES_256_CBC_SHA 0x0035
+#define OP_PCL_TLS10_AES_256_CBC_SHA_2 0x0036
+#define OP_PCL_TLS10_AES_256_CBC_SHA_3 0x0037
+#define OP_PCL_TLS10_AES_256_CBC_SHA_4 0x0038
+#define OP_PCL_TLS10_AES_256_CBC_SHA_5 0x0039
+#define OP_PCL_TLS10_AES_256_CBC_SHA_6 0x003a
+#define OP_PCL_TLS10_AES_256_CBC_SHA_7 0x008d
+#define OP_PCL_TLS10_AES_256_CBC_SHA_8 0x0091
+#define OP_PCL_TLS10_AES_256_CBC_SHA_9 0x0095
+#define OP_PCL_TLS10_AES_256_CBC_SHA_10 0xc005
+#define OP_PCL_TLS10_AES_256_CBC_SHA_11 0xc00a
+#define OP_PCL_TLS10_AES_256_CBC_SHA_12 0xc00f
+#define OP_PCL_TLS10_AES_256_CBC_SHA_13 0xc014
+#define OP_PCL_TLS10_AES_256_CBC_SHA_14 0xc019
+#define OP_PCL_TLS10_AES_256_CBC_SHA_15 0xc020
+#define OP_PCL_TLS10_AES_256_CBC_SHA_16 0xc021
+#define OP_PCL_TLS10_AES_256_CBC_SHA_17 0xc022
+
+#define OP_PCL_TLS_ECDHE_ECDSA_AES_128_CBC_SHA256 0xC023
+#define OP_PCL_TLS_ECDHE_ECDSA_AES_256_CBC_SHA384 0xC024
+#define OP_PCL_TLS_ECDH_ECDSA_AES_128_CBC_SHA256 0xC025
+#define OP_PCL_TLS_ECDH_ECDSA_AES_256_CBC_SHA384 0xC026
+#define OP_PCL_TLS_ECDHE_RSA_AES_128_CBC_SHA256 0xC027
+#define OP_PCL_TLS_ECDHE_RSA_AES_256_CBC_SHA384 0xC028
+#define OP_PCL_TLS_ECDH_RSA_AES_128_CBC_SHA256 0xC029
+#define OP_PCL_TLS_ECDH_RSA_AES_256_CBC_SHA384 0xC02A
+#define OP_PCL_TLS_ECDHE_ECDSA_AES_128_GCM_SHA256 0xC02B
+#define OP_PCL_TLS_ECDHE_ECDSA_AES_256_GCM_SHA384 0xC02C
+#define OP_PCL_TLS_ECDH_ECDSA_AES_128_GCM_SHA256 0xC02D
+#define OP_PCL_TLS_ECDH_ECDSA_AES_256_GCM_SHA384 0xC02E
+#define OP_PCL_TLS_ECDHE_RSA_AES_128_GCM_SHA256 0xC02F
+#define OP_PCL_TLS_ECDHE_RSA_AES_256_GCM_SHA384 0xC030
+#define OP_PCL_TLS_ECDH_RSA_AES_128_GCM_SHA256 0xC031
+#define OP_PCL_TLS_ECDH_RSA_AES_256_GCM_SHA384 0xC032
+#define OP_PCL_TLS_ECDHE_PSK_RC4_128_SHA 0xC033
+#define OP_PCL_TLS_ECDHE_PSK_3DES_EDE_CBC_SHA 0xC034
+#define OP_PCL_TLS_ECDHE_PSK_AES_128_CBC_SHA 0xC035
+#define OP_PCL_TLS_ECDHE_PSK_AES_256_CBC_SHA 0xC036
+#define OP_PCL_TLS_ECDHE_PSK_AES_128_CBC_SHA256 0xC037
+#define OP_PCL_TLS_ECDHE_PSK_AES_256_CBC_SHA384 0xC038
+
+/* #define OP_PCL_TLS10_3DES_EDE_CBC_MD5 0x0023 */
+
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA 0x001f
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_2 0x008b
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_3 0x008f
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_4 0x0093
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_5 0x000a
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_6 0x000d
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_7 0x0010
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_8 0x0013
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_9 0x0016
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_10 0x001b
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_11 0xc003
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_12 0xc008
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_13 0xc00d
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_14 0xc012
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_15 0xc017
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_16 0xc01a
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_17 0xc01b
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_18 0xc01c
+
+#define OP_PCL_TLS10_DES40_CBC_MD5 0x0029
+
+#define OP_PCL_TLS10_DES_CBC_MD5 0x0022
+
+#define OP_PCL_TLS10_DES40_CBC_SHA 0x0008
+#define OP_PCL_TLS10_DES40_CBC_SHA_2 0x000b
+#define OP_PCL_TLS10_DES40_CBC_SHA_3 0x000e
+#define OP_PCL_TLS10_DES40_CBC_SHA_4 0x0011
+#define OP_PCL_TLS10_DES40_CBC_SHA_5 0x0014
+#define OP_PCL_TLS10_DES40_CBC_SHA_6 0x0019
+#define OP_PCL_TLS10_DES40_CBC_SHA_7 0x0026
+
+#define OP_PCL_TLS10_DES_CBC_SHA 0x001e
+#define OP_PCL_TLS10_DES_CBC_SHA_2 0x0009
+#define OP_PCL_TLS10_DES_CBC_SHA_3 0x000c
+#define OP_PCL_TLS10_DES_CBC_SHA_4 0x000f
+#define OP_PCL_TLS10_DES_CBC_SHA_5 0x0012
+#define OP_PCL_TLS10_DES_CBC_SHA_6 0x0015
+#define OP_PCL_TLS10_DES_CBC_SHA_7 0x001a
+
+#define OP_PCL_TLS10_RC4_128_MD5 0x0024
+#define OP_PCL_TLS10_RC4_128_MD5_2 0x0004
+#define OP_PCL_TLS10_RC4_128_MD5_3 0x0018
+
+#define OP_PCL_TLS10_RC4_40_MD5 0x002b
+#define OP_PCL_TLS10_RC4_40_MD5_2 0x0003
+#define OP_PCL_TLS10_RC4_40_MD5_3 0x0017
+
+#define OP_PCL_TLS10_RC4_128_SHA 0x0020
+#define OP_PCL_TLS10_RC4_128_SHA_2 0x008a
+#define OP_PCL_TLS10_RC4_128_SHA_3 0x008e
+#define OP_PCL_TLS10_RC4_128_SHA_4 0x0092
+#define OP_PCL_TLS10_RC4_128_SHA_5 0x0005
+#define OP_PCL_TLS10_RC4_128_SHA_6 0xc002
+#define OP_PCL_TLS10_RC4_128_SHA_7 0xc007
+#define OP_PCL_TLS10_RC4_128_SHA_8 0xc00c
+#define OP_PCL_TLS10_RC4_128_SHA_9 0xc011
+#define OP_PCL_TLS10_RC4_128_SHA_10 0xc016
+
+#define OP_PCL_TLS10_RC4_40_SHA 0x0028
+
+#define OP_PCL_TLS10_3DES_EDE_CBC_MD5 0xff23
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA160 0xff30
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA224 0xff34
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA256 0xff36
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA384 0xff33
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA512 0xff35
+#define OP_PCL_TLS10_AES_128_CBC_SHA160 0xff80
+#define OP_PCL_TLS10_AES_128_CBC_SHA224 0xff84
+#define OP_PCL_TLS10_AES_128_CBC_SHA256 0xff86
+#define OP_PCL_TLS10_AES_128_CBC_SHA384 0xff83
+#define OP_PCL_TLS10_AES_128_CBC_SHA512 0xff85
+#define OP_PCL_TLS10_AES_192_CBC_SHA160 0xff20
+#define OP_PCL_TLS10_AES_192_CBC_SHA224 0xff24
+#define OP_PCL_TLS10_AES_192_CBC_SHA256 0xff26
+#define OP_PCL_TLS10_AES_192_CBC_SHA384 0xff23
+#define OP_PCL_TLS10_AES_192_CBC_SHA512 0xff25
+#define OP_PCL_TLS10_AES_256_CBC_SHA160 0xff60
+#define OP_PCL_TLS10_AES_256_CBC_SHA224 0xff64
+#define OP_PCL_TLS10_AES_256_CBC_SHA256 0xff66
+#define OP_PCL_TLS10_AES_256_CBC_SHA384 0xff63
+#define OP_PCL_TLS10_AES_256_CBC_SHA512 0xff65
+
+#define OP_PCL_TLS_PVT_AES_192_CBC_SHA160 0xff90
+#define OP_PCL_TLS_PVT_AES_192_CBC_SHA384 0xff93
+#define OP_PCL_TLS_PVT_AES_192_CBC_SHA224 0xff94
+#define OP_PCL_TLS_PVT_AES_192_CBC_SHA512 0xff95
+#define OP_PCL_TLS_PVT_AES_192_CBC_SHA256 0xff96
+#define OP_PCL_TLS_PVT_MASTER_SECRET_PRF_FE 0xfffe
+#define OP_PCL_TLS_PVT_MASTER_SECRET_PRF_FF 0xffff
+
+/* For TLS 1.1 - OP_PCLID_TLS11 */
+#define OP_PCL_TLS11_AES_128_CBC_SHA 0x002f
+#define OP_PCL_TLS11_AES_128_CBC_SHA_2 0x0030
+#define OP_PCL_TLS11_AES_128_CBC_SHA_3 0x0031
+#define OP_PCL_TLS11_AES_128_CBC_SHA_4 0x0032
+#define OP_PCL_TLS11_AES_128_CBC_SHA_5 0x0033
+#define OP_PCL_TLS11_AES_128_CBC_SHA_6 0x0034
+#define OP_PCL_TLS11_AES_128_CBC_SHA_7 0x008c
+#define OP_PCL_TLS11_AES_128_CBC_SHA_8 0x0090
+#define OP_PCL_TLS11_AES_128_CBC_SHA_9 0x0094
+#define OP_PCL_TLS11_AES_128_CBC_SHA_10 0xc004
+#define OP_PCL_TLS11_AES_128_CBC_SHA_11 0xc009
+#define OP_PCL_TLS11_AES_128_CBC_SHA_12 0xc00e
+#define OP_PCL_TLS11_AES_128_CBC_SHA_13 0xc013
+#define OP_PCL_TLS11_AES_128_CBC_SHA_14 0xc018
+#define OP_PCL_TLS11_AES_128_CBC_SHA_15 0xc01d
+#define OP_PCL_TLS11_AES_128_CBC_SHA_16 0xc01e
+#define OP_PCL_TLS11_AES_128_CBC_SHA_17 0xc01f
+
+#define OP_PCL_TLS11_AES_256_CBC_SHA 0x0035
+#define OP_PCL_TLS11_AES_256_CBC_SHA_2 0x0036
+#define OP_PCL_TLS11_AES_256_CBC_SHA_3 0x0037
+#define OP_PCL_TLS11_AES_256_CBC_SHA_4 0x0038
+#define OP_PCL_TLS11_AES_256_CBC_SHA_5 0x0039
+#define OP_PCL_TLS11_AES_256_CBC_SHA_6 0x003a
+#define OP_PCL_TLS11_AES_256_CBC_SHA_7 0x008d
+#define OP_PCL_TLS11_AES_256_CBC_SHA_8 0x0091
+#define OP_PCL_TLS11_AES_256_CBC_SHA_9 0x0095
+#define OP_PCL_TLS11_AES_256_CBC_SHA_10 0xc005
+#define OP_PCL_TLS11_AES_256_CBC_SHA_11 0xc00a
+#define OP_PCL_TLS11_AES_256_CBC_SHA_12 0xc00f
+#define OP_PCL_TLS11_AES_256_CBC_SHA_13 0xc014
+#define OP_PCL_TLS11_AES_256_CBC_SHA_14 0xc019
+#define OP_PCL_TLS11_AES_256_CBC_SHA_15 0xc020
+#define OP_PCL_TLS11_AES_256_CBC_SHA_16 0xc021
+#define OP_PCL_TLS11_AES_256_CBC_SHA_17 0xc022
+
+/* #define OP_PCL_TLS11_3DES_EDE_CBC_MD5 0x0023 */
+
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA 0x001f
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_2 0x008b
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_3 0x008f
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_4 0x0093
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_5 0x000a
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_6 0x000d
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_7 0x0010
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_8 0x0013
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_9 0x0016
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_10 0x001b
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_11 0xc003
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_12 0xc008
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_13 0xc00d
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_14 0xc012
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_15 0xc017
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_16 0xc01a
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_17 0xc01b
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_18 0xc01c
+
+#define OP_PCL_TLS11_DES40_CBC_MD5 0x0029
+
+#define OP_PCL_TLS11_DES_CBC_MD5 0x0022
+
+#define OP_PCL_TLS11_DES40_CBC_SHA 0x0008
+#define OP_PCL_TLS11_DES40_CBC_SHA_2 0x000b
+#define OP_PCL_TLS11_DES40_CBC_SHA_3 0x000e
+#define OP_PCL_TLS11_DES40_CBC_SHA_4 0x0011
+#define OP_PCL_TLS11_DES40_CBC_SHA_5 0x0014
+#define OP_PCL_TLS11_DES40_CBC_SHA_6 0x0019
+#define OP_PCL_TLS11_DES40_CBC_SHA_7 0x0026
+
+#define OP_PCL_TLS11_DES_CBC_SHA 0x001e
+#define OP_PCL_TLS11_DES_CBC_SHA_2 0x0009
+#define OP_PCL_TLS11_DES_CBC_SHA_3 0x000c
+#define OP_PCL_TLS11_DES_CBC_SHA_4 0x000f
+#define OP_PCL_TLS11_DES_CBC_SHA_5 0x0012
+#define OP_PCL_TLS11_DES_CBC_SHA_6 0x0015
+#define OP_PCL_TLS11_DES_CBC_SHA_7 0x001a
+
+#define OP_PCL_TLS11_RC4_128_MD5 0x0024
+#define OP_PCL_TLS11_RC4_128_MD5_2 0x0004
+#define OP_PCL_TLS11_RC4_128_MD5_3 0x0018
+
+#define OP_PCL_TLS11_RC4_40_MD5 0x002b
+#define OP_PCL_TLS11_RC4_40_MD5_2 0x0003
+#define OP_PCL_TLS11_RC4_40_MD5_3 0x0017
+
+#define OP_PCL_TLS11_RC4_128_SHA 0x0020
+#define OP_PCL_TLS11_RC4_128_SHA_2 0x008a
+#define OP_PCL_TLS11_RC4_128_SHA_3 0x008e
+#define OP_PCL_TLS11_RC4_128_SHA_4 0x0092
+#define OP_PCL_TLS11_RC4_128_SHA_5 0x0005
+#define OP_PCL_TLS11_RC4_128_SHA_6 0xc002
+#define OP_PCL_TLS11_RC4_128_SHA_7 0xc007
+#define OP_PCL_TLS11_RC4_128_SHA_8 0xc00c
+#define OP_PCL_TLS11_RC4_128_SHA_9 0xc011
+#define OP_PCL_TLS11_RC4_128_SHA_10 0xc016
+
+#define OP_PCL_TLS11_RC4_40_SHA 0x0028
+
+#define OP_PCL_TLS11_3DES_EDE_CBC_MD5 0xff23
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA160 0xff30
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA224 0xff34
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA256 0xff36
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA384 0xff33
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA512 0xff35
+#define OP_PCL_TLS11_AES_128_CBC_SHA160 0xff80
+#define OP_PCL_TLS11_AES_128_CBC_SHA224 0xff84
+#define OP_PCL_TLS11_AES_128_CBC_SHA256 0xff86
+#define OP_PCL_TLS11_AES_128_CBC_SHA384 0xff83
+#define OP_PCL_TLS11_AES_128_CBC_SHA512 0xff85
+#define OP_PCL_TLS11_AES_192_CBC_SHA160 0xff20
+#define OP_PCL_TLS11_AES_192_CBC_SHA224 0xff24
+#define OP_PCL_TLS11_AES_192_CBC_SHA256 0xff26
+#define OP_PCL_TLS11_AES_192_CBC_SHA384 0xff23
+#define OP_PCL_TLS11_AES_192_CBC_SHA512 0xff25
+#define OP_PCL_TLS11_AES_256_CBC_SHA160 0xff60
+#define OP_PCL_TLS11_AES_256_CBC_SHA224 0xff64
+#define OP_PCL_TLS11_AES_256_CBC_SHA256 0xff66
+#define OP_PCL_TLS11_AES_256_CBC_SHA384 0xff63
+#define OP_PCL_TLS11_AES_256_CBC_SHA512 0xff65
+
+
+/* For TLS 1.2 - OP_PCLID_TLS12 */
+#define OP_PCL_TLS12_AES_128_CBC_SHA 0x002f
+#define OP_PCL_TLS12_AES_128_CBC_SHA_2 0x0030
+#define OP_PCL_TLS12_AES_128_CBC_SHA_3 0x0031
+#define OP_PCL_TLS12_AES_128_CBC_SHA_4 0x0032
+#define OP_PCL_TLS12_AES_128_CBC_SHA_5 0x0033
+#define OP_PCL_TLS12_AES_128_CBC_SHA_6 0x0034
+#define OP_PCL_TLS12_AES_128_CBC_SHA_7 0x008c
+#define OP_PCL_TLS12_AES_128_CBC_SHA_8 0x0090
+#define OP_PCL_TLS12_AES_128_CBC_SHA_9 0x0094
+#define OP_PCL_TLS12_AES_128_CBC_SHA_10 0xc004
+#define OP_PCL_TLS12_AES_128_CBC_SHA_11 0xc009
+#define OP_PCL_TLS12_AES_128_CBC_SHA_12 0xc00e
+#define OP_PCL_TLS12_AES_128_CBC_SHA_13 0xc013
+#define OP_PCL_TLS12_AES_128_CBC_SHA_14 0xc018
+#define OP_PCL_TLS12_AES_128_CBC_SHA_15 0xc01d
+#define OP_PCL_TLS12_AES_128_CBC_SHA_16 0xc01e
+#define OP_PCL_TLS12_AES_128_CBC_SHA_17 0xc01f
+
+#define OP_PCL_TLS12_AES_256_CBC_SHA 0x0035
+#define OP_PCL_TLS12_AES_256_CBC_SHA_2 0x0036
+#define OP_PCL_TLS12_AES_256_CBC_SHA_3 0x0037
+#define OP_PCL_TLS12_AES_256_CBC_SHA_4 0x0038
+#define OP_PCL_TLS12_AES_256_CBC_SHA_5 0x0039
+#define OP_PCL_TLS12_AES_256_CBC_SHA_6 0x003a
+#define OP_PCL_TLS12_AES_256_CBC_SHA_7 0x008d
+#define OP_PCL_TLS12_AES_256_CBC_SHA_8 0x0091
+#define OP_PCL_TLS12_AES_256_CBC_SHA_9 0x0095
+#define OP_PCL_TLS12_AES_256_CBC_SHA_10 0xc005
+#define OP_PCL_TLS12_AES_256_CBC_SHA_11 0xc00a
+#define OP_PCL_TLS12_AES_256_CBC_SHA_12 0xc00f
+#define OP_PCL_TLS12_AES_256_CBC_SHA_13 0xc014
+#define OP_PCL_TLS12_AES_256_CBC_SHA_14 0xc019
+#define OP_PCL_TLS12_AES_256_CBC_SHA_15 0xc020
+#define OP_PCL_TLS12_AES_256_CBC_SHA_16 0xc021
+#define OP_PCL_TLS12_AES_256_CBC_SHA_17 0xc022
+
+/* #define OP_PCL_TLS12_3DES_EDE_CBC_MD5 0x0023 */
+
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA 0x001f
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_2 0x008b
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_3 0x008f
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_4 0x0093
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_5 0x000a
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_6 0x000d
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_7 0x0010
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_8 0x0013
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_9 0x0016
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_10 0x001b
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_11 0xc003
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_12 0xc008
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_13 0xc00d
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_14 0xc012
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_15 0xc017
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_16 0xc01a
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_17 0xc01b
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_18 0xc01c
+
+#define OP_PCL_TLS12_DES40_CBC_MD5 0x0029
+
+#define OP_PCL_TLS12_DES_CBC_MD5 0x0022
+
+#define OP_PCL_TLS12_DES40_CBC_SHA 0x0008
+#define OP_PCL_TLS12_DES40_CBC_SHA_2 0x000b
+#define OP_PCL_TLS12_DES40_CBC_SHA_3 0x000e
+#define OP_PCL_TLS12_DES40_CBC_SHA_4 0x0011
+#define OP_PCL_TLS12_DES40_CBC_SHA_5 0x0014
+#define OP_PCL_TLS12_DES40_CBC_SHA_6 0x0019
+#define OP_PCL_TLS12_DES40_CBC_SHA_7 0x0026
+
+#define OP_PCL_TLS12_DES_CBC_SHA 0x001e
+#define OP_PCL_TLS12_DES_CBC_SHA_2 0x0009
+#define OP_PCL_TLS12_DES_CBC_SHA_3 0x000c
+#define OP_PCL_TLS12_DES_CBC_SHA_4 0x000f
+#define OP_PCL_TLS12_DES_CBC_SHA_5 0x0012
+#define OP_PCL_TLS12_DES_CBC_SHA_6 0x0015
+#define OP_PCL_TLS12_DES_CBC_SHA_7 0x001a
+
+#define OP_PCL_TLS12_RC4_128_MD5 0x0024
+#define OP_PCL_TLS12_RC4_128_MD5_2 0x0004
+#define OP_PCL_TLS12_RC4_128_MD5_3 0x0018
+
+#define OP_PCL_TLS12_RC4_40_MD5 0x002b
+#define OP_PCL_TLS12_RC4_40_MD5_2 0x0003
+#define OP_PCL_TLS12_RC4_40_MD5_3 0x0017
+
+#define OP_PCL_TLS12_RC4_128_SHA 0x0020
+#define OP_PCL_TLS12_RC4_128_SHA_2 0x008a
+#define OP_PCL_TLS12_RC4_128_SHA_3 0x008e
+#define OP_PCL_TLS12_RC4_128_SHA_4 0x0092
+#define OP_PCL_TLS12_RC4_128_SHA_5 0x0005
+#define OP_PCL_TLS12_RC4_128_SHA_6 0xc002
+#define OP_PCL_TLS12_RC4_128_SHA_7 0xc007
+#define OP_PCL_TLS12_RC4_128_SHA_8 0xc00c
+#define OP_PCL_TLS12_RC4_128_SHA_9 0xc011
+#define OP_PCL_TLS12_RC4_128_SHA_10 0xc016
+
+#define OP_PCL_TLS12_RC4_40_SHA 0x0028
+
+/* #define OP_PCL_TLS12_AES_128_CBC_SHA256 0x003c */
+#define OP_PCL_TLS12_AES_128_CBC_SHA256_2 0x003e
+#define OP_PCL_TLS12_AES_128_CBC_SHA256_3 0x003f
+#define OP_PCL_TLS12_AES_128_CBC_SHA256_4 0x0040
+#define OP_PCL_TLS12_AES_128_CBC_SHA256_5 0x0067
+#define OP_PCL_TLS12_AES_128_CBC_SHA256_6 0x006c
+
+/* #define OP_PCL_TLS12_AES_256_CBC_SHA256 0x003d */
+#define OP_PCL_TLS12_AES_256_CBC_SHA256_2 0x0068
+#define OP_PCL_TLS12_AES_256_CBC_SHA256_3 0x0069
+#define OP_PCL_TLS12_AES_256_CBC_SHA256_4 0x006a
+#define OP_PCL_TLS12_AES_256_CBC_SHA256_5 0x006b
+#define OP_PCL_TLS12_AES_256_CBC_SHA256_6 0x006d
+
+/* AEAD_AES_xxx_CCM/GCM remain to be defined... */
+
+#define OP_PCL_TLS12_3DES_EDE_CBC_MD5 0xff23
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA160 0xff30
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA224 0xff34
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA256 0xff36
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA384 0xff33
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA512 0xff35
+#define OP_PCL_TLS12_AES_128_CBC_SHA160 0xff80
+#define OP_PCL_TLS12_AES_128_CBC_SHA224 0xff84
+#define OP_PCL_TLS12_AES_128_CBC_SHA256 0xff86
+#define OP_PCL_TLS12_AES_128_CBC_SHA384 0xff83
+#define OP_PCL_TLS12_AES_128_CBC_SHA512 0xff85
+#define OP_PCL_TLS12_AES_192_CBC_SHA160 0xff20
+#define OP_PCL_TLS12_AES_192_CBC_SHA224 0xff24
+#define OP_PCL_TLS12_AES_192_CBC_SHA256 0xff26
+#define OP_PCL_TLS12_AES_192_CBC_SHA384 0xff23
+#define OP_PCL_TLS12_AES_192_CBC_SHA512 0xff25
+#define OP_PCL_TLS12_AES_256_CBC_SHA160 0xff60
+#define OP_PCL_TLS12_AES_256_CBC_SHA224 0xff64
+#define OP_PCL_TLS12_AES_256_CBC_SHA256 0xff66
+#define OP_PCL_TLS12_AES_256_CBC_SHA384 0xff63
+#define OP_PCL_TLS12_AES_256_CBC_SHA512 0xff65
+
+/* For DTLS - OP_PCLID_DTLS */
+
+#define OP_PCL_DTLS_AES_128_CBC_SHA 0x002f
+#define OP_PCL_DTLS_AES_128_CBC_SHA_2 0x0030
+#define OP_PCL_DTLS_AES_128_CBC_SHA_3 0x0031
+#define OP_PCL_DTLS_AES_128_CBC_SHA_4 0x0032
+#define OP_PCL_DTLS_AES_128_CBC_SHA_5 0x0033
+#define OP_PCL_DTLS_AES_128_CBC_SHA_6 0x0034
+#define OP_PCL_DTLS_AES_128_CBC_SHA_7 0x008c
+#define OP_PCL_DTLS_AES_128_CBC_SHA_8 0x0090
+#define OP_PCL_DTLS_AES_128_CBC_SHA_9 0x0094
+#define OP_PCL_DTLS_AES_128_CBC_SHA_10 0xc004
+#define OP_PCL_DTLS_AES_128_CBC_SHA_11 0xc009
+#define OP_PCL_DTLS_AES_128_CBC_SHA_12 0xc00e
+#define OP_PCL_DTLS_AES_128_CBC_SHA_13 0xc013
+#define OP_PCL_DTLS_AES_128_CBC_SHA_14 0xc018
+#define OP_PCL_DTLS_AES_128_CBC_SHA_15 0xc01d
+#define OP_PCL_DTLS_AES_128_CBC_SHA_16 0xc01e
+#define OP_PCL_DTLS_AES_128_CBC_SHA_17 0xc01f
+
+#define OP_PCL_DTLS_AES_256_CBC_SHA 0x0035
+#define OP_PCL_DTLS_AES_256_CBC_SHA_2 0x0036
+#define OP_PCL_DTLS_AES_256_CBC_SHA_3 0x0037
+#define OP_PCL_DTLS_AES_256_CBC_SHA_4 0x0038
+#define OP_PCL_DTLS_AES_256_CBC_SHA_5 0x0039
+#define OP_PCL_DTLS_AES_256_CBC_SHA_6 0x003a
+#define OP_PCL_DTLS_AES_256_CBC_SHA_7 0x008d
+#define OP_PCL_DTLS_AES_256_CBC_SHA_8 0x0091
+#define OP_PCL_DTLS_AES_256_CBC_SHA_9 0x0095
+#define OP_PCL_DTLS_AES_256_CBC_SHA_10 0xc005
+#define OP_PCL_DTLS_AES_256_CBC_SHA_11 0xc00a
+#define OP_PCL_DTLS_AES_256_CBC_SHA_12 0xc00f
+#define OP_PCL_DTLS_AES_256_CBC_SHA_13 0xc014
+#define OP_PCL_DTLS_AES_256_CBC_SHA_14 0xc019
+#define OP_PCL_DTLS_AES_256_CBC_SHA_15 0xc020
+#define OP_PCL_DTLS_AES_256_CBC_SHA_16 0xc021
+#define OP_PCL_DTLS_AES_256_CBC_SHA_17 0xc022
+
+/* #define OP_PCL_DTLS_3DES_EDE_CBC_MD5 0x0023 */
+
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA 0x001f
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_2 0x008b
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_3 0x008f
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_4 0x0093
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_5 0x000a
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_6 0x000d
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_7 0x0010
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_8 0x0013
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_9 0x0016
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_10 0x001b
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_11 0xc003
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_12 0xc008
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_13 0xc00d
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_14 0xc012
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_15 0xc017
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_16 0xc01a
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_17 0xc01b
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_18 0xc01c
+
+#define OP_PCL_DTLS_DES40_CBC_MD5 0x0029
+
+#define OP_PCL_DTLS_DES_CBC_MD5 0x0022
+
+#define OP_PCL_DTLS_DES40_CBC_SHA 0x0008
+#define OP_PCL_DTLS_DES40_CBC_SHA_2 0x000b
+#define OP_PCL_DTLS_DES40_CBC_SHA_3 0x000e
+#define OP_PCL_DTLS_DES40_CBC_SHA_4 0x0011
+#define OP_PCL_DTLS_DES40_CBC_SHA_5 0x0014
+#define OP_PCL_DTLS_DES40_CBC_SHA_6 0x0019
+#define OP_PCL_DTLS_DES40_CBC_SHA_7 0x0026
+
+#define OP_PCL_DTLS_DES_CBC_SHA 0x001e
+#define OP_PCL_DTLS_DES_CBC_SHA_2 0x0009
+#define OP_PCL_DTLS_DES_CBC_SHA_3 0x000c
+#define OP_PCL_DTLS_DES_CBC_SHA_4 0x000f
+#define OP_PCL_DTLS_DES_CBC_SHA_5 0x0012
+#define OP_PCL_DTLS_DES_CBC_SHA_6 0x0015
+#define OP_PCL_DTLS_DES_CBC_SHA_7 0x001a
+
+#define OP_PCL_DTLS_3DES_EDE_CBC_MD5 0xff23
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA160 0xff30
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA224 0xff34
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA256 0xff36
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA384 0xff33
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA512 0xff35
+#define OP_PCL_DTLS_AES_128_CBC_SHA160 0xff80
+#define OP_PCL_DTLS_AES_128_CBC_SHA224 0xff84
+#define OP_PCL_DTLS_AES_128_CBC_SHA256 0xff86
+#define OP_PCL_DTLS_AES_128_CBC_SHA384 0xff83
+#define OP_PCL_DTLS_AES_128_CBC_SHA512 0xff85
+#define OP_PCL_DTLS_AES_192_CBC_SHA160 0xff20
+#define OP_PCL_DTLS_AES_192_CBC_SHA224 0xff24
+#define OP_PCL_DTLS_AES_192_CBC_SHA256 0xff26
+#define OP_PCL_DTLS_AES_192_CBC_SHA384 0xff23
+#define OP_PCL_DTLS_AES_192_CBC_SHA512 0xff25
+#define OP_PCL_DTLS_AES_256_CBC_SHA160 0xff60
+#define OP_PCL_DTLS_AES_256_CBC_SHA224 0xff64
+#define OP_PCL_DTLS_AES_256_CBC_SHA256 0xff66
+#define OP_PCL_DTLS_AES_256_CBC_SHA384 0xff63
+#define OP_PCL_DTLS_AES_256_CBC_SHA512 0xff65
+
+/* 802.16 WiMAX protinfos */
+#define OP_PCL_WIMAX_OFDM 0x0201
+#define OP_PCL_WIMAX_OFDMA 0x0231
+
+/* 802.11 WiFi protinfos */
+#define OP_PCL_WIFI 0xac04
+
+/* MacSec protinfos */
+#define OP_PCL_MACSEC 0x0001
+
+/* 3G DCRC protinfos */
+#define OP_PCL_3G_DCRC_CRC7 0x0710
+#define OP_PCL_3G_DCRC_CRC11 0x0B10
+
+/* 3G RLC protinfos */
+#define OP_PCL_3G_RLC_NULL 0x0000
+#define OP_PCL_3G_RLC_KASUMI 0x0001
+#define OP_PCL_3G_RLC_SNOW 0x0002
+
+/* LTE protinfos */
+#define OP_PCL_LTE_NULL 0x0000
+#define OP_PCL_LTE_SNOW 0x0001
+#define OP_PCL_LTE_AES 0x0002
+#define OP_PCL_LTE_ZUC 0x0003
+
+/* LTE mixed protinfos */
+#define OP_PCL_LTE_MIXED_AUTH_SHIFT 0
+#define OP_PCL_LTE_MIXED_AUTH_MASK (3 << OP_PCL_LTE_MIXED_AUTH_SHIFT)
+#define OP_PCL_LTE_MIXED_ENC_SHIFT 8
+#define OP_PCL_LTE_MIXED_ENC_MASK	(3 << OP_PCL_LTE_MIXED_ENC_SHIFT)
+#define OP_PCL_LTE_MIXED_AUTH_NULL (OP_PCL_LTE_NULL << \
+ OP_PCL_LTE_MIXED_AUTH_SHIFT)
+#define OP_PCL_LTE_MIXED_AUTH_SNOW (OP_PCL_LTE_SNOW << \
+ OP_PCL_LTE_MIXED_AUTH_SHIFT)
+#define OP_PCL_LTE_MIXED_AUTH_AES (OP_PCL_LTE_AES << \
+ OP_PCL_LTE_MIXED_AUTH_SHIFT)
+#define OP_PCL_LTE_MIXED_AUTH_ZUC (OP_PCL_LTE_ZUC << \
+ OP_PCL_LTE_MIXED_AUTH_SHIFT)
+#define OP_PCL_LTE_MIXED_ENC_NULL (OP_PCL_LTE_NULL << \
+ OP_PCL_LTE_MIXED_ENC_SHIFT)
+#define OP_PCL_LTE_MIXED_ENC_SNOW (OP_PCL_LTE_SNOW << \
+ OP_PCL_LTE_MIXED_ENC_SHIFT)
+#define OP_PCL_LTE_MIXED_ENC_AES (OP_PCL_LTE_AES << \
+ OP_PCL_LTE_MIXED_ENC_SHIFT)
+#define OP_PCL_LTE_MIXED_ENC_ZUC (OP_PCL_LTE_ZUC << \
+ OP_PCL_LTE_MIXED_ENC_SHIFT)
+
+/* PKI unidirectional protocol protinfo bits */
+#define OP_PCL_PKPROT_DSA_MSG BIT(10)
+#define OP_PCL_PKPROT_HASH_SHIFT 7
+#define OP_PCL_PKPROT_HASH_MASK (7 << OP_PCL_PKPROT_HASH_SHIFT)
+#define OP_PCL_PKPROT_HASH_MD5 (0 << OP_PCL_PKPROT_HASH_SHIFT)
+#define OP_PCL_PKPROT_HASH_SHA1 (1 << OP_PCL_PKPROT_HASH_SHIFT)
+#define OP_PCL_PKPROT_HASH_SHA224 (2 << OP_PCL_PKPROT_HASH_SHIFT)
+#define OP_PCL_PKPROT_HASH_SHA256 (3 << OP_PCL_PKPROT_HASH_SHIFT)
+#define OP_PCL_PKPROT_HASH_SHA384 (4 << OP_PCL_PKPROT_HASH_SHIFT)
+#define OP_PCL_PKPROT_HASH_SHA512 (5 << OP_PCL_PKPROT_HASH_SHIFT)
+#define OP_PCL_PKPROT_EKT_Z BIT(6)
+#define OP_PCL_PKPROT_DECRYPT_Z BIT(5)
+#define OP_PCL_PKPROT_EKT_PRI BIT(4)
+#define OP_PCL_PKPROT_TEST BIT(3)
+#define OP_PCL_PKPROT_DECRYPT_PRI BIT(2)
+#define OP_PCL_PKPROT_ECC BIT(1)
+#define OP_PCL_PKPROT_F2M BIT(0)
+
+/* Blob protinfos */
+#define OP_PCL_BLOB_TKEK_SHIFT 9
+#define OP_PCL_BLOB_TKEK BIT(9)
+#define OP_PCL_BLOB_EKT_SHIFT 8
+#define OP_PCL_BLOB_EKT BIT(8)
+#define OP_PCL_BLOB_REG_SHIFT 4
+#define OP_PCL_BLOB_REG_MASK (0xF << OP_PCL_BLOB_REG_SHIFT)
+#define OP_PCL_BLOB_REG_MEMORY (0x0 << OP_PCL_BLOB_REG_SHIFT)
+#define OP_PCL_BLOB_REG_KEY1 (0x1 << OP_PCL_BLOB_REG_SHIFT)
+#define OP_PCL_BLOB_REG_KEY2 (0x3 << OP_PCL_BLOB_REG_SHIFT)
+#define OP_PCL_BLOB_AFHA_SBOX (0x5 << OP_PCL_BLOB_REG_SHIFT)
+#define OP_PCL_BLOB_REG_SPLIT (0x7 << OP_PCL_BLOB_REG_SHIFT)
+#define OP_PCL_BLOB_REG_PKE (0x9 << OP_PCL_BLOB_REG_SHIFT)
+#define OP_PCL_BLOB_SEC_MEM_SHIFT 3
+#define OP_PCL_BLOB_SEC_MEM BIT(3)
+#define OP_PCL_BLOB_BLACK BIT(2)
+#define OP_PCL_BLOB_FORMAT_SHIFT 0
+#define OP_PCL_BLOB_FORMAT_MASK 0x3
+#define OP_PCL_BLOB_FORMAT_NORMAL 0
+#define OP_PCL_BLOB_FORMAT_MASTER_VER 2
+#define OP_PCL_BLOB_FORMAT_TEST 3
+
+/* IKE / IKEv2 protinfos */
+#define OP_PCL_IKE_HMAC_MD5 0x0100
+#define OP_PCL_IKE_HMAC_SHA1 0x0200
+#define OP_PCL_IKE_HMAC_AES128_CBC 0x0400
+#define OP_PCL_IKE_HMAC_SHA256 0x0500
+#define OP_PCL_IKE_HMAC_SHA384 0x0600
+#define OP_PCL_IKE_HMAC_SHA512 0x0700
+#define OP_PCL_IKE_HMAC_AES128_CMAC 0x0800
+
+/* PKI unidirectional protocol protinfo bits */
+#define OP_PCL_PKPROT_TEST BIT(3)
+#define OP_PCL_PKPROT_DECRYPT BIT(2)
+#define OP_PCL_PKPROT_ECC BIT(1)
+#define OP_PCL_PKPROT_F2M BIT(0)
+
+/* RSA Protinfo */
+#define OP_PCL_RSAPROT_OP_MASK 3
+#define OP_PCL_RSAPROT_OP_ENC_F_IN 0
+#define OP_PCL_RSAPROT_OP_ENC_F_OUT 1
+#define OP_PCL_RSAPROT_OP_DEC_ND 0
+#define OP_PCL_RSAPROT_OP_DEC_PQD 1
+#define OP_PCL_RSAPROT_OP_DEC_PQDPDQC 2
+#define OP_PCL_RSAPROT_FFF_SHIFT 4
+#define OP_PCL_RSAPROT_FFF_MASK (7 << OP_PCL_RSAPROT_FFF_SHIFT)
+#define OP_PCL_RSAPROT_FFF_RED (0 << OP_PCL_RSAPROT_FFF_SHIFT)
+#define OP_PCL_RSAPROT_FFF_ENC (1 << OP_PCL_RSAPROT_FFF_SHIFT)
+#define OP_PCL_RSAPROT_FFF_TK_ENC (5 << OP_PCL_RSAPROT_FFF_SHIFT)
+#define OP_PCL_RSAPROT_FFF_EKT (3 << OP_PCL_RSAPROT_FFF_SHIFT)
+#define OP_PCL_RSAPROT_FFF_TK_EKT (7 << OP_PCL_RSAPROT_FFF_SHIFT)
+#define OP_PCL_RSAPROT_PPP_SHIFT 8
+#define OP_PCL_RSAPROT_PPP_MASK (7 << OP_PCL_RSAPROT_PPP_SHIFT)
+#define OP_PCL_RSAPROT_PPP_RED (0 << OP_PCL_RSAPROT_PPP_SHIFT)
+#define OP_PCL_RSAPROT_PPP_ENC (1 << OP_PCL_RSAPROT_PPP_SHIFT)
+#define OP_PCL_RSAPROT_PPP_TK_ENC (5 << OP_PCL_RSAPROT_PPP_SHIFT)
+#define OP_PCL_RSAPROT_PPP_EKT (3 << OP_PCL_RSAPROT_PPP_SHIFT)
+#define OP_PCL_RSAPROT_PPP_TK_EKT (7 << OP_PCL_RSAPROT_PPP_SHIFT)
+#define OP_PCL_RSAPROT_FMT_PKCSV15 BIT(12)
+
+/* Derived Key Protocol (DKP) Protinfo */
+#define OP_PCL_DKP_SRC_SHIFT 14
+#define OP_PCL_DKP_SRC_MASK (3 << OP_PCL_DKP_SRC_SHIFT)
+#define OP_PCL_DKP_SRC_IMM (0 << OP_PCL_DKP_SRC_SHIFT)
+#define OP_PCL_DKP_SRC_SEQ (1 << OP_PCL_DKP_SRC_SHIFT)
+#define OP_PCL_DKP_SRC_PTR (2 << OP_PCL_DKP_SRC_SHIFT)
+#define OP_PCL_DKP_SRC_SGF (3 << OP_PCL_DKP_SRC_SHIFT)
+#define OP_PCL_DKP_DST_SHIFT 12
+#define OP_PCL_DKP_DST_MASK (3 << OP_PCL_DKP_DST_SHIFT)
+#define OP_PCL_DKP_DST_IMM (0 << OP_PCL_DKP_DST_SHIFT)
+#define OP_PCL_DKP_DST_SEQ (1 << OP_PCL_DKP_DST_SHIFT)
+#define OP_PCL_DKP_DST_PTR (2 << OP_PCL_DKP_DST_SHIFT)
+#define OP_PCL_DKP_DST_SGF (3 << OP_PCL_DKP_DST_SHIFT)
+#define OP_PCL_DKP_KEY_SHIFT 0
+#define OP_PCL_DKP_KEY_MASK (0xfff << OP_PCL_DKP_KEY_SHIFT)
+
+/* For non-protocol/alg-only op commands */
+#define OP_ALG_TYPE_SHIFT 24
+#define OP_ALG_TYPE_MASK (0x7 << OP_ALG_TYPE_SHIFT)
+#define OP_ALG_TYPE_CLASS1 (0x2 << OP_ALG_TYPE_SHIFT)
+#define OP_ALG_TYPE_CLASS2 (0x4 << OP_ALG_TYPE_SHIFT)
+
+#define OP_ALG_ALGSEL_SHIFT 16
+#define OP_ALG_ALGSEL_MASK (0xff << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SUBMASK (0x0f << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_AES (0x10 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_DES (0x20 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_3DES (0x21 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_ARC4 (0x30 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_MD5 (0x40 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SHA1 (0x41 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SHA224 (0x42 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SHA256 (0x43 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SHA384 (0x44 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SHA512 (0x45 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_RNG (0x50 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SNOW_F8 (0x60 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_KASUMI (0x70 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_CRC (0x90 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SNOW_F9 (0xA0 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_ZUCE (0xB0 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_ZUCA (0xC0 << OP_ALG_ALGSEL_SHIFT)
+
+#define OP_ALG_AAI_SHIFT 4
+#define OP_ALG_AAI_MASK (0x3ff << OP_ALG_AAI_SHIFT)
+
+/* block cipher AAI set */
+#define OP_ALG_AESA_MODE_MASK (0xF0 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR (0x00 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD128 (0x00 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD8 (0x01 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD16 (0x02 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD24 (0x03 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD32 (0x04 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD40 (0x05 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD48 (0x06 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD56 (0x07 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD64 (0x08 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD72 (0x09 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD80 (0x0a << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD88 (0x0b << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD96 (0x0c << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD104 (0x0d << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD112 (0x0e << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD120 (0x0f << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CBC (0x10 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_ECB (0x20 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CFB (0x30 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_OFB (0x40 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_XTS (0x50 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CMAC (0x60 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_XCBC_MAC (0x70 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CCM (0x80 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_GCM (0x90 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CBC_XCBCMAC (0xa0 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_XCBCMAC (0xb0 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CBC_CMAC (0xc0 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_CMAC_LTE (0xd0 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_CMAC (0xe0 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CHECKODD (0x80 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_DK (0x100 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_C2K (0x200 << OP_ALG_AAI_SHIFT)
+
+/* randomizer AAI set */
+#define OP_ALG_RNG_MODE_MASK (0x30 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG (0x00 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG_NZB (0x10 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG_OBP (0x20 << OP_ALG_AAI_SHIFT)
+
+/* RNG4 AAI set */
+#define OP_ALG_AAI_RNG4_SH_SHIFT OP_ALG_AAI_SHIFT
+#define OP_ALG_AAI_RNG4_SH_MASK (0x03 << OP_ALG_AAI_RNG4_SH_SHIFT)
+#define OP_ALG_AAI_RNG4_SH_0 (0x00 << OP_ALG_AAI_RNG4_SH_SHIFT)
+#define OP_ALG_AAI_RNG4_SH_1 (0x01 << OP_ALG_AAI_RNG4_SH_SHIFT)
+#define OP_ALG_AAI_RNG4_PS (0x40 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG4_AI (0x80 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG4_SK (0x100 << OP_ALG_AAI_SHIFT)
+
+/* hmac/smac AAI set */
+#define OP_ALG_AAI_HASH (0x00 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_HMAC (0x01 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_SMAC (0x02 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_HMAC_PRECOMP (0x04 << OP_ALG_AAI_SHIFT)
+
+/* CRC AAI set */
+#define OP_ALG_CRC_POLY_MASK (0x07 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_802 (0x01 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_3385 (0x02 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CUST_POLY (0x04 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_DIS (0x10 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_DOS (0x20 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_DOC (0x40 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_IVZ (0x80 << OP_ALG_AAI_SHIFT)
+
+/* Kasumi/SNOW/ZUC AAI set */
+#define OP_ALG_AAI_F8 (0xc0 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_F9 (0xc8 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_GSM (0x10 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_EDGE (0x20 << OP_ALG_AAI_SHIFT)
+
+#define OP_ALG_AS_SHIFT 2
+#define OP_ALG_AS_MASK (0x3 << OP_ALG_AS_SHIFT)
+#define OP_ALG_AS_UPDATE (0 << OP_ALG_AS_SHIFT)
+#define OP_ALG_AS_INIT (1 << OP_ALG_AS_SHIFT)
+#define OP_ALG_AS_FINALIZE (2 << OP_ALG_AS_SHIFT)
+#define OP_ALG_AS_INITFINAL (3 << OP_ALG_AS_SHIFT)
+
+#define OP_ALG_ICV_SHIFT 1
+#define OP_ALG_ICV_MASK (1 << OP_ALG_ICV_SHIFT)
+#define OP_ALG_ICV_OFF 0
+#define OP_ALG_ICV_ON BIT(1)
+
+#define OP_ALG_DIR_SHIFT 0
+#define OP_ALG_DIR_MASK 1
+#define OP_ALG_DECRYPT 0
+#define OP_ALG_ENCRYPT BIT(0)
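+
+/*
+ * Editorial illustration (not part of the original header): the algorithm
+ * portion of an OPERATION command word is the OR of one value from each
+ * field group above (type, algorithm select, AAI, algorithm state, ICV and
+ * direction); the RTA constructors supply the remaining command-type bits.
+ * The macro name below is only an example.
+ */
+#define EXAMPLE_OP_ALG_AES_CBC_ENCRYPT \
+		(OP_ALG_TYPE_CLASS1 | OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC | \
+		 OP_ALG_AS_INITFINAL | OP_ALG_ICV_OFF | OP_ALG_ENCRYPT)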
+
+/* PKHA algorithm type set */
+#define OP_ALG_PK 0x00800000
+#define OP_ALG_PK_FUN_MASK 0x3f /* clrmem, modmath, or cpymem */
+
+/* PKHA mode clear memory functions */
+#define OP_ALG_PKMODE_A_RAM BIT(19)
+#define OP_ALG_PKMODE_B_RAM BIT(18)
+#define OP_ALG_PKMODE_E_RAM BIT(17)
+#define OP_ALG_PKMODE_N_RAM BIT(16)
+#define OP_ALG_PKMODE_CLEARMEM BIT(0)
+
+/* PKHA mode clear memory function combinations */
+#define OP_ALG_PKMODE_CLEARMEM_ALL (OP_ALG_PKMODE_CLEARMEM | \
+ OP_ALG_PKMODE_A_RAM | \
+ OP_ALG_PKMODE_B_RAM | \
+ OP_ALG_PKMODE_N_RAM | \
+ OP_ALG_PKMODE_E_RAM)
+#define OP_ALG_PKMODE_CLEARMEM_ABE (OP_ALG_PKMODE_CLEARMEM | \
+ OP_ALG_PKMODE_A_RAM | \
+ OP_ALG_PKMODE_B_RAM | \
+ OP_ALG_PKMODE_E_RAM)
+#define OP_ALG_PKMODE_CLEARMEM_ABN (OP_ALG_PKMODE_CLEARMEM | \
+ OP_ALG_PKMODE_A_RAM | \
+ OP_ALG_PKMODE_B_RAM | \
+ OP_ALG_PKMODE_N_RAM)
+#define OP_ALG_PKMODE_CLEARMEM_AB (OP_ALG_PKMODE_CLEARMEM | \
+ OP_ALG_PKMODE_A_RAM | \
+ OP_ALG_PKMODE_B_RAM)
+#define OP_ALG_PKMODE_CLEARMEM_AEN (OP_ALG_PKMODE_CLEARMEM | \
+ OP_ALG_PKMODE_A_RAM | \
+ OP_ALG_PKMODE_E_RAM | \
+ OP_ALG_PKMODE_N_RAM)
+#define OP_ALG_PKMODE_CLEARMEM_AE (OP_ALG_PKMODE_CLEARMEM | \
+ OP_ALG_PKMODE_A_RAM | \
+ OP_ALG_PKMODE_E_RAM)
+#define OP_ALG_PKMODE_CLEARMEM_AN (OP_ALG_PKMODE_CLEARMEM | \
+ OP_ALG_PKMODE_A_RAM | \
+ OP_ALG_PKMODE_N_RAM)
+#define OP_ALG_PKMODE_CLEARMEM_A (OP_ALG_PKMODE_CLEARMEM | \
+ OP_ALG_PKMODE_A_RAM)
+#define OP_ALG_PKMODE_CLEARMEM_BEN (OP_ALG_PKMODE_CLEARMEM | \
+ OP_ALG_PKMODE_B_RAM | \
+ OP_ALG_PKMODE_E_RAM | \
+ OP_ALG_PKMODE_N_RAM)
+#define OP_ALG_PKMODE_CLEARMEM_BE (OP_ALG_PKMODE_CLEARMEM | \
+ OP_ALG_PKMODE_B_RAM | \
+ OP_ALG_PKMODE_E_RAM)
+#define OP_ALG_PKMODE_CLEARMEM_BN (OP_ALG_PKMODE_CLEARMEM | \
+ OP_ALG_PKMODE_B_RAM | \
+ OP_ALG_PKMODE_N_RAM)
+#define OP_ALG_PKMODE_CLEARMEM_B (OP_ALG_PKMODE_CLEARMEM | \
+ OP_ALG_PKMODE_B_RAM)
+#define OP_ALG_PKMODE_CLEARMEM_EN (OP_ALG_PKMODE_CLEARMEM | \
+ OP_ALG_PKMODE_E_RAM | \
+ OP_ALG_PKMODE_N_RAM)
+#define OP_ALG_PKMODE_CLEARMEM_E (OP_ALG_PKMODE_CLEARMEM | \
+ OP_ALG_PKMODE_E_RAM)
+#define OP_ALG_PKMODE_CLEARMEM_N (OP_ALG_PKMODE_CLEARMEM | \
+ OP_ALG_PKMODE_N_RAM)
+
+/* PKHA mode modular-arithmetic functions */
+#define OP_ALG_PKMODE_MOD_IN_MONTY BIT(19)
+#define OP_ALG_PKMODE_MOD_OUT_MONTY BIT(18)
+#define OP_ALG_PKMODE_MOD_F2M BIT(17)
+#define OP_ALG_PKMODE_MOD_R2_IN BIT(16)
+#define OP_ALG_PKMODE_PRJECTV BIT(11)
+#define OP_ALG_PKMODE_TIME_EQ BIT(10)
+
+#define OP_ALG_PKMODE_OUT_B 0x000
+#define OP_ALG_PKMODE_OUT_A 0x100
+
+/*
+ * PKHA mode modular-arithmetic integer functions
+ * Can be ORed with OP_ALG_PKMODE_OUT_A to change destination from B
+ */
+#define OP_ALG_PKMODE_MOD_ADD 0x002
+#define OP_ALG_PKMODE_MOD_SUB_AB 0x003
+#define OP_ALG_PKMODE_MOD_SUB_BA 0x004
+#define OP_ALG_PKMODE_MOD_MULT 0x005
+#define OP_ALG_PKMODE_MOD_MULT_IM (0x005 | OP_ALG_PKMODE_MOD_IN_MONTY)
+#define OP_ALG_PKMODE_MOD_MULT_IM_OM (0x005 | OP_ALG_PKMODE_MOD_IN_MONTY \
+ | OP_ALG_PKMODE_MOD_OUT_MONTY)
+#define OP_ALG_PKMODE_MOD_EXPO 0x006
+#define OP_ALG_PKMODE_MOD_EXPO_TEQ (0x006 | OP_ALG_PKMODE_TIME_EQ)
+#define OP_ALG_PKMODE_MOD_EXPO_IM (0x006 | OP_ALG_PKMODE_MOD_IN_MONTY)
+#define OP_ALG_PKMODE_MOD_EXPO_IM_TEQ (0x006 | OP_ALG_PKMODE_MOD_IN_MONTY \
+ | OP_ALG_PKMODE_TIME_EQ)
+#define OP_ALG_PKMODE_MOD_REDUCT 0x007
+#define OP_ALG_PKMODE_MOD_INV 0x008
+#define OP_ALG_PKMODE_MOD_ECC_ADD 0x009
+#define OP_ALG_PKMODE_MOD_ECC_DBL 0x00a
+#define OP_ALG_PKMODE_MOD_ECC_MULT 0x00b
+#define OP_ALG_PKMODE_MOD_MONT_CNST 0x00c
+#define OP_ALG_PKMODE_MOD_CRT_CNST 0x00d
+#define OP_ALG_PKMODE_MOD_GCD 0x00e
+#define OP_ALG_PKMODE_MOD_PRIMALITY 0x00f
+#define OP_ALG_PKMODE_MOD_SML_EXP 0x016
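+
+/*
+ * Editorial illustration (not part of the original header): as noted above,
+ * any of these function codes may be OR-ed with OP_ALG_PKMODE_OUT_A to
+ * direct the result to register A instead of B. Example name only.
+ */
+#define EXAMPLE_PKMODE_MOD_EXPO_OUT_A \
+		(OP_ALG_PKMODE_MOD_EXPO | OP_ALG_PKMODE_OUT_A)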
+
+/*
+ * PKHA mode modular-arithmetic F2m functions
+ * Can be ORed with OP_ALG_PKMODE_OUT_A to change destination from B
+ */
+#define OP_ALG_PKMODE_F2M_ADD (0x002 | OP_ALG_PKMODE_MOD_F2M)
+#define OP_ALG_PKMODE_F2M_MUL (0x005 | OP_ALG_PKMODE_MOD_F2M)
+#define OP_ALG_PKMODE_F2M_MUL_IM (0x005 | OP_ALG_PKMODE_MOD_F2M \
+ | OP_ALG_PKMODE_MOD_IN_MONTY)
+#define OP_ALG_PKMODE_F2M_MUL_IM_OM (0x005 | OP_ALG_PKMODE_MOD_F2M \
+ | OP_ALG_PKMODE_MOD_IN_MONTY \
+ | OP_ALG_PKMODE_MOD_OUT_MONTY)
+#define OP_ALG_PKMODE_F2M_EXP (0x006 | OP_ALG_PKMODE_MOD_F2M)
+#define OP_ALG_PKMODE_F2M_EXP_TEQ (0x006 | OP_ALG_PKMODE_MOD_F2M \
+ | OP_ALG_PKMODE_TIME_EQ)
+#define OP_ALG_PKMODE_F2M_AMODN (0x007 | OP_ALG_PKMODE_MOD_F2M)
+#define OP_ALG_PKMODE_F2M_INV (0x008 | OP_ALG_PKMODE_MOD_F2M)
+#define OP_ALG_PKMODE_F2M_R2 (0x00c | OP_ALG_PKMODE_MOD_F2M)
+#define OP_ALG_PKMODE_F2M_GCD (0x00e | OP_ALG_PKMODE_MOD_F2M)
+#define OP_ALG_PKMODE_F2M_SML_EXP (0x016 | OP_ALG_PKMODE_MOD_F2M)
+
+/*
+ * PKHA mode ECC Integer arithmetic functions
+ * Can be ORed with OP_ALG_PKMODE_OUT_A to change destination from B
+ */
+#define OP_ALG_PKMODE_ECC_MOD_ADD 0x009
+#define OP_ALG_PKMODE_ECC_MOD_ADD_IM_OM_PROJ \
+ (0x009 | OP_ALG_PKMODE_MOD_IN_MONTY \
+ | OP_ALG_PKMODE_MOD_OUT_MONTY \
+ | OP_ALG_PKMODE_PRJECTV)
+#define OP_ALG_PKMODE_ECC_MOD_DBL 0x00a
+#define OP_ALG_PKMODE_ECC_MOD_DBL_IM_OM_PROJ \
+ (0x00a | OP_ALG_PKMODE_MOD_IN_MONTY \
+ | OP_ALG_PKMODE_MOD_OUT_MONTY \
+ | OP_ALG_PKMODE_PRJECTV)
+#define OP_ALG_PKMODE_ECC_MOD_MUL 0x00b
+#define OP_ALG_PKMODE_ECC_MOD_MUL_TEQ (0x00b | OP_ALG_PKMODE_TIME_EQ)
+#define OP_ALG_PKMODE_ECC_MOD_MUL_R2 (0x00b | OP_ALG_PKMODE_MOD_R2_IN)
+#define OP_ALG_PKMODE_ECC_MOD_MUL_R2_TEQ \
+ (0x00b | OP_ALG_PKMODE_MOD_R2_IN \
+ | OP_ALG_PKMODE_TIME_EQ)
+#define OP_ALG_PKMODE_ECC_MOD_MUL_R2_PROJ \
+ (0x00b | OP_ALG_PKMODE_MOD_R2_IN \
+ | OP_ALG_PKMODE_PRJECTV)
+#define OP_ALG_PKMODE_ECC_MOD_MUL_R2_PROJ_TEQ \
+ (0x00b | OP_ALG_PKMODE_MOD_R2_IN \
+ | OP_ALG_PKMODE_PRJECTV \
+ | OP_ALG_PKMODE_TIME_EQ)
+
+/*
+ * PKHA mode ECC F2m arithmetic functions
+ * Can be ORed with OP_ALG_PKMODE_OUT_A to change destination from B
+ */
+#define OP_ALG_PKMODE_ECC_F2M_ADD (0x009 | OP_ALG_PKMODE_MOD_F2M)
+#define OP_ALG_PKMODE_ECC_F2M_ADD_IM_OM_PROJ \
+ (0x009 | OP_ALG_PKMODE_MOD_F2M \
+ | OP_ALG_PKMODE_MOD_IN_MONTY \
+ | OP_ALG_PKMODE_MOD_OUT_MONTY \
+ | OP_ALG_PKMODE_PRJECTV)
+#define OP_ALG_PKMODE_ECC_F2M_DBL (0x00a | OP_ALG_PKMODE_MOD_F2M)
+#define OP_ALG_PKMODE_ECC_F2M_DBL_IM_OM_PROJ \
+ (0x00a | OP_ALG_PKMODE_MOD_F2M \
+ | OP_ALG_PKMODE_MOD_IN_MONTY \
+ | OP_ALG_PKMODE_MOD_OUT_MONTY \
+ | OP_ALG_PKMODE_PRJECTV)
+#define OP_ALG_PKMODE_ECC_F2M_MUL (0x00b | OP_ALG_PKMODE_MOD_F2M)
+#define OP_ALG_PKMODE_ECC_F2M_MUL_TEQ \
+ (0x00b | OP_ALG_PKMODE_MOD_F2M \
+ | OP_ALG_PKMODE_TIME_EQ)
+#define OP_ALG_PKMODE_ECC_F2M_MUL_R2 \
+ (0x00b | OP_ALG_PKMODE_MOD_F2M \
+ | OP_ALG_PKMODE_MOD_R2_IN)
+#define OP_ALG_PKMODE_ECC_F2M_MUL_R2_TEQ \
+ (0x00b | OP_ALG_PKMODE_MOD_F2M \
+ | OP_ALG_PKMODE_MOD_R2_IN \
+ | OP_ALG_PKMODE_TIME_EQ)
+#define OP_ALG_PKMODE_ECC_F2M_MUL_R2_PROJ \
+ (0x00b | OP_ALG_PKMODE_MOD_F2M \
+ | OP_ALG_PKMODE_MOD_R2_IN \
+ | OP_ALG_PKMODE_PRJECTV)
+#define OP_ALG_PKMODE_ECC_F2M_MUL_R2_PROJ_TEQ \
+ (0x00b | OP_ALG_PKMODE_MOD_F2M \
+ | OP_ALG_PKMODE_MOD_R2_IN \
+ | OP_ALG_PKMODE_PRJECTV \
+ | OP_ALG_PKMODE_TIME_EQ)
+
+/* PKHA mode copy-memory functions */
+#define OP_ALG_PKMODE_SRC_REG_SHIFT 17
+#define OP_ALG_PKMODE_SRC_REG_MASK (7 << OP_ALG_PKMODE_SRC_REG_SHIFT)
+#define OP_ALG_PKMODE_DST_REG_SHIFT 10
+#define OP_ALG_PKMODE_DST_REG_MASK (7 << OP_ALG_PKMODE_DST_REG_SHIFT)
+#define OP_ALG_PKMODE_SRC_SEG_SHIFT 8
+#define OP_ALG_PKMODE_SRC_SEG_MASK (3 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
+#define OP_ALG_PKMODE_DST_SEG_SHIFT 6
+#define OP_ALG_PKMODE_DST_SEG_MASK (3 << OP_ALG_PKMODE_DST_SEG_SHIFT)
+
+#define OP_ALG_PKMODE_SRC_REG_A (0 << OP_ALG_PKMODE_SRC_REG_SHIFT)
+#define OP_ALG_PKMODE_SRC_REG_B (1 << OP_ALG_PKMODE_SRC_REG_SHIFT)
+#define OP_ALG_PKMODE_SRC_REG_N (3 << OP_ALG_PKMODE_SRC_REG_SHIFT)
+#define OP_ALG_PKMODE_DST_REG_A (0 << OP_ALG_PKMODE_DST_REG_SHIFT)
+#define OP_ALG_PKMODE_DST_REG_B (1 << OP_ALG_PKMODE_DST_REG_SHIFT)
+#define OP_ALG_PKMODE_DST_REG_E (2 << OP_ALG_PKMODE_DST_REG_SHIFT)
+#define OP_ALG_PKMODE_DST_REG_N (3 << OP_ALG_PKMODE_DST_REG_SHIFT)
+#define OP_ALG_PKMODE_SRC_SEG_0 (0 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
+#define OP_ALG_PKMODE_SRC_SEG_1 (1 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
+#define OP_ALG_PKMODE_SRC_SEG_2 (2 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
+#define OP_ALG_PKMODE_SRC_SEG_3 (3 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
+#define OP_ALG_PKMODE_DST_SEG_0 (0 << OP_ALG_PKMODE_DST_SEG_SHIFT)
+#define OP_ALG_PKMODE_DST_SEG_1 (1 << OP_ALG_PKMODE_DST_SEG_SHIFT)
+#define OP_ALG_PKMODE_DST_SEG_2 (2 << OP_ALG_PKMODE_DST_SEG_SHIFT)
+#define OP_ALG_PKMODE_DST_SEG_3 (3 << OP_ALG_PKMODE_DST_SEG_SHIFT)
+
+/* PKHA mode copy-memory functions - amount based on N SIZE */
+#define OP_ALG_PKMODE_COPY_NSZ 0x10
+#define OP_ALG_PKMODE_COPY_NSZ_A0_B0 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_DST_REG_B)
+#define OP_ALG_PKMODE_COPY_NSZ_A0_B1 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_NSZ_A0_B2 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_NSZ_A0_B3 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_NSZ_A1_B0 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_B)
+#define OP_ALG_PKMODE_COPY_NSZ_A1_B1 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_NSZ_A1_B2 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_NSZ_A1_B3 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_NSZ_A2_B0 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_B)
+#define OP_ALG_PKMODE_COPY_NSZ_A2_B1 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_NSZ_A2_B2 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_NSZ_A2_B3 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_NSZ_A3_B0 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_B)
+#define OP_ALG_PKMODE_COPY_NSZ_A3_B1 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_NSZ_A3_B2 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_NSZ_A3_B3 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_NSZ_B0_A0 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_DST_REG_A)
+#define OP_ALG_PKMODE_COPY_NSZ_B0_A1 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_NSZ_B0_A2 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_NSZ_B0_A3 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_NSZ_B1_A0 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_A)
+#define OP_ALG_PKMODE_COPY_NSZ_B1_A1 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_NSZ_B1_A2 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_NSZ_B1_A3 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_NSZ_B2_A0 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_A)
+#define OP_ALG_PKMODE_COPY_NSZ_B2_A1 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_NSZ_B2_A2 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_NSZ_B2_A3 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_NSZ_B3_A0 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_A)
+#define OP_ALG_PKMODE_COPY_NSZ_B3_A1 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_NSZ_B3_A2 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_NSZ_B3_A3 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_NSZ_A_B (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_DST_REG_B)
+#define OP_ALG_PKMODE_COPY_NSZ_A_E (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_DST_REG_E)
+#define OP_ALG_PKMODE_COPY_NSZ_A_N (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_DST_REG_N)
+#define OP_ALG_PKMODE_COPY_NSZ_B_A (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_DST_REG_A)
+#define OP_ALG_PKMODE_COPY_NSZ_B_E (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_DST_REG_E)
+#define OP_ALG_PKMODE_COPY_NSZ_B_N (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_DST_REG_N)
+#define OP_ALG_PKMODE_COPY_NSZ_N_A (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_N | \
+ OP_ALG_PKMODE_DST_REG_A)
+#define OP_ALG_PKMODE_COPY_NSZ_N_B (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_N | \
+ OP_ALG_PKMODE_DST_REG_B)
+#define OP_ALG_PKMODE_COPY_NSZ_N_E (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_N | \
+ OP_ALG_PKMODE_DST_REG_E)
+
+/* PKHA mode copy-memory functions - amount based on SRC SIZE */
+#define OP_ALG_PKMODE_COPY_SSZ 0x11
+#define OP_ALG_PKMODE_COPY_SSZ_A0_B0 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_DST_REG_B)
+#define OP_ALG_PKMODE_COPY_SSZ_A0_B1 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_SSZ_A0_B2 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_SSZ_A0_B3 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_SSZ_A1_B0 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_B)
+#define OP_ALG_PKMODE_COPY_SSZ_A1_B1 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_SSZ_A1_B2 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_SSZ_A1_B3 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_SSZ_A2_B0 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_B)
+#define OP_ALG_PKMODE_COPY_SSZ_A2_B1 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_SSZ_A2_B2 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_SSZ_A2_B3 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_SSZ_A3_B0 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_B)
+#define OP_ALG_PKMODE_COPY_SSZ_A3_B1 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_SSZ_A3_B2 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_SSZ_A3_B3 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_SSZ_B0_A0 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_DST_REG_A)
+#define OP_ALG_PKMODE_COPY_SSZ_B0_A1 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_SSZ_B0_A2 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_SSZ_B0_A3 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_SSZ_B1_A0 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_A)
+#define OP_ALG_PKMODE_COPY_SSZ_B1_A1 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_SSZ_B1_A2 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_SSZ_B1_A3 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_SSZ_B2_A0 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_A)
+#define OP_ALG_PKMODE_COPY_SSZ_B2_A1 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_SSZ_B2_A2 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_SSZ_B2_A3 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_SSZ_B3_A0 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_A)
+#define OP_ALG_PKMODE_COPY_SSZ_B3_A1 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_SSZ_B3_A2 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_SSZ_B3_A3 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_SSZ_A_B (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_DST_REG_B)
+#define OP_ALG_PKMODE_COPY_SSZ_A_E (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_DST_REG_E)
+#define OP_ALG_PKMODE_COPY_SSZ_A_N (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_DST_REG_N)
+#define OP_ALG_PKMODE_COPY_SSZ_B_A (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_DST_REG_A)
+#define OP_ALG_PKMODE_COPY_SSZ_B_E (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_DST_REG_E)
+#define OP_ALG_PKMODE_COPY_SSZ_B_N (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_DST_REG_N)
+#define OP_ALG_PKMODE_COPY_SSZ_N_A (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_N | \
+ OP_ALG_PKMODE_DST_REG_A)
+#define OP_ALG_PKMODE_COPY_SSZ_N_B (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_N | \
+ OP_ALG_PKMODE_DST_REG_B)
+#define OP_ALG_PKMODE_COPY_SSZ_N_E (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_N | \
+ OP_ALG_PKMODE_DST_REG_E)
+
+/*
+ * SEQ_IN_PTR Command Constructs
+ */
+
+/* Release Buffers */
+#define SQIN_RBS BIT(26)
+
+/* Sequence pointer is really a descriptor */
+#define SQIN_INL BIT(25)
+
+/* Sequence pointer is a scatter-gather table */
+#define SQIN_SGF BIT(24)
+
+/* Appends to a previous pointer */
+#define SQIN_PRE BIT(23)
+
+/* Use extended length following pointer */
+#define SQIN_EXT BIT(22)
+
+/* Restore sequence with pointer/length */
+#define SQIN_RTO BIT(21)
+
+/* Replace job descriptor */
+#define SQIN_RJD BIT(20)
+
+/* Sequence Out Pointer - start a new input sequence using output sequence */
+#define SQIN_SOP BIT(19)
+
+#define SQIN_LEN_SHIFT 0
+#define SQIN_LEN_MASK (0xffff << SQIN_LEN_SHIFT)
+
+/*
+ * SEQ_OUT_PTR Command Constructs
+ */
+
+/* Sequence pointer is a scatter-gather table */
+#define SQOUT_SGF BIT(24)
+
+/* Appends to a previous pointer */
+#define SQOUT_PRE BIT(23)
+
+/* Restore sequence with pointer/length */
+#define SQOUT_RTO BIT(21)
+
+/*
+ * Ignore length field, add current output frame length back to SOL register.
+ * Reset tracking length of bytes written to output frame.
+ * Must be used together with SQOUT_RTO.
+ */
+#define SQOUT_RST BIT(20)
+
+/* Allow "write safe" transactions for this Output Sequence */
+#define SQOUT_EWS BIT(19)
+
+/* Use extended length following pointer */
+#define SQOUT_EXT BIT(22)
+
+#define SQOUT_LEN_SHIFT 0
+#define SQOUT_LEN_MASK (0xffff << SQOUT_LEN_SHIFT)
+
+/*
+ * SIGNATURE Command Constructs
+ */
+
+/* TYPE field is all that's relevant */
+#define SIGN_TYPE_SHIFT 16
+#define SIGN_TYPE_MASK (0x0f << SIGN_TYPE_SHIFT)
+
+#define SIGN_TYPE_FINAL (0x00 << SIGN_TYPE_SHIFT)
+#define SIGN_TYPE_FINAL_RESTORE (0x01 << SIGN_TYPE_SHIFT)
+#define SIGN_TYPE_FINAL_NONZERO (0x02 << SIGN_TYPE_SHIFT)
+#define SIGN_TYPE_IMM_2 (0x0a << SIGN_TYPE_SHIFT)
+#define SIGN_TYPE_IMM_3 (0x0b << SIGN_TYPE_SHIFT)
+#define SIGN_TYPE_IMM_4 (0x0c << SIGN_TYPE_SHIFT)
+
+/*
+ * MOVE Command Constructs
+ */
+
+#define MOVE_AUX_SHIFT 25
+#define MOVE_AUX_MASK (3 << MOVE_AUX_SHIFT)
+#define MOVE_AUX_MS (2 << MOVE_AUX_SHIFT)
+#define MOVE_AUX_LS (1 << MOVE_AUX_SHIFT)
+
+#define MOVE_WAITCOMP_SHIFT 24
+#define MOVE_WAITCOMP_MASK (1 << MOVE_WAITCOMP_SHIFT)
+#define MOVE_WAITCOMP BIT(24)
+
+#define MOVE_SRC_SHIFT 20
+#define MOVE_SRC_MASK (0x0f << MOVE_SRC_SHIFT)
+#define MOVE_SRC_CLASS1CTX (0x00 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_CLASS2CTX (0x01 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_OUTFIFO (0x02 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_DESCBUF (0x03 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_MATH0 (0x04 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_MATH1 (0x05 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_MATH2 (0x06 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_MATH3 (0x07 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_INFIFO (0x08 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_INFIFO_CL (0x09 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_INFIFO_NO_NFIFO (0x0a << MOVE_SRC_SHIFT)
+
+#define MOVE_DEST_SHIFT 16
+#define MOVE_DEST_MASK (0x0f << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS1CTX (0x00 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS2CTX (0x01 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_OUTFIFO (0x02 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_DESCBUF (0x03 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_MATH0 (0x04 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_MATH1 (0x05 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_MATH2 (0x06 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_MATH3 (0x07 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS1INFIFO (0x08 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS2INFIFO (0x09 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_INFIFO (0x0a << MOVE_DEST_SHIFT)
+#define MOVE_DEST_PK_A (0x0c << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS1KEY (0x0d << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS2KEY (0x0e << MOVE_DEST_SHIFT)
+#define MOVE_DEST_ALTSOURCE (0x0f << MOVE_DEST_SHIFT)
+
+#define MOVE_OFFSET_SHIFT 8
+#define MOVE_OFFSET_MASK (0xff << MOVE_OFFSET_SHIFT)
+
+#define MOVE_LEN_SHIFT 0
+#define MOVE_LEN_MASK (0xff << MOVE_LEN_SHIFT)
+
+#define MOVELEN_MRSEL_SHIFT 0
+#define MOVELEN_MRSEL_MASK	(0x3 << MOVELEN_MRSEL_SHIFT)
+#define MOVELEN_MRSEL_MATH0 (0 << MOVELEN_MRSEL_SHIFT)
+#define MOVELEN_MRSEL_MATH1 (1 << MOVELEN_MRSEL_SHIFT)
+#define MOVELEN_MRSEL_MATH2 (2 << MOVELEN_MRSEL_SHIFT)
+#define MOVELEN_MRSEL_MATH3 (3 << MOVELEN_MRSEL_SHIFT)
+
+#define MOVELEN_SIZE_SHIFT 6
+#define MOVELEN_SIZE_MASK (0x3 << MOVELEN_SIZE_SHIFT)
+#define MOVELEN_SIZE_WORD (0x01 << MOVELEN_SIZE_SHIFT)
+#define MOVELEN_SIZE_BYTE (0x02 << MOVELEN_SIZE_SHIFT)
+#define MOVELEN_SIZE_DWORD (0x03 << MOVELEN_SIZE_SHIFT)
+
+/*
+ * MATH Command Constructs
+ */
+
+#define MATH_IFB_SHIFT 26
+#define MATH_IFB_MASK (1 << MATH_IFB_SHIFT)
+#define MATH_IFB BIT(26)
+
+#define MATH_NFU_SHIFT 25
+#define MATH_NFU_MASK (1 << MATH_NFU_SHIFT)
+#define MATH_NFU BIT(25)
+
+/* STL for MATH, SSEL for MATHI */
+#define MATH_STL_SHIFT 24
+#define MATH_STL_MASK (1 << MATH_STL_SHIFT)
+#define MATH_STL BIT(24)
+
+#define MATH_SSEL_SHIFT 24
+#define MATH_SSEL_MASK (1 << MATH_SSEL_SHIFT)
+#define MATH_SSEL BIT(24)
+
+#define MATH_SWP_SHIFT 0
+#define MATH_SWP_MASK (1 << MATH_SWP_SHIFT)
+#define MATH_SWP BIT(0)
+
+/* Function selectors */
+#define MATH_FUN_SHIFT 20
+#define MATH_FUN_MASK (0x0f << MATH_FUN_SHIFT)
+#define MATH_FUN_ADD (0x00 << MATH_FUN_SHIFT)
+#define MATH_FUN_ADDC (0x01 << MATH_FUN_SHIFT)
+#define MATH_FUN_SUB (0x02 << MATH_FUN_SHIFT)
+#define MATH_FUN_SUBB (0x03 << MATH_FUN_SHIFT)
+#define MATH_FUN_OR (0x04 << MATH_FUN_SHIFT)
+#define MATH_FUN_AND (0x05 << MATH_FUN_SHIFT)
+#define MATH_FUN_XOR (0x06 << MATH_FUN_SHIFT)
+#define MATH_FUN_LSHIFT (0x07 << MATH_FUN_SHIFT)
+#define MATH_FUN_RSHIFT (0x08 << MATH_FUN_SHIFT)
+#define MATH_FUN_SHLD (0x09 << MATH_FUN_SHIFT)
+#define MATH_FUN_ZBYT (0x0a << MATH_FUN_SHIFT) /* ZBYT is for MATH */
+#define MATH_FUN_FBYT (0x0a << MATH_FUN_SHIFT) /* FBYT is for MATHI */
+#define MATH_FUN_BSWAP (0x0b << MATH_FUN_SHIFT)
+
+/* Source 0 selectors */
+#define MATH_SRC0_SHIFT 16
+#define MATH_SRC0_MASK (0x0f << MATH_SRC0_SHIFT)
+#define MATH_SRC0_REG0 (0x00 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_REG1 (0x01 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_REG2 (0x02 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_REG3 (0x03 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_IMM (0x04 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_DPOVRD (0x07 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_SEQINLEN (0x08 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_SEQOUTLEN (0x09 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_VARSEQINLEN (0x0a << MATH_SRC0_SHIFT)
+#define MATH_SRC0_VARSEQOUTLEN (0x0b << MATH_SRC0_SHIFT)
+#define MATH_SRC0_ZERO (0x0c << MATH_SRC0_SHIFT)
+#define MATH_SRC0_ONE (0x0f << MATH_SRC0_SHIFT)
+
+/* Source 1 selectors */
+#define MATH_SRC1_SHIFT 12
+#define MATHI_SRC1_SHIFT 16
+#define MATH_SRC1_MASK (0x0f << MATH_SRC1_SHIFT)
+#define MATH_SRC1_REG0 (0x00 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_REG1 (0x01 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_REG2 (0x02 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_REG3 (0x03 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_IMM (0x04 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_DPOVRD (0x07 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_VARSEQINLEN (0x08 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_VARSEQOUTLEN (0x09 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_INFIFO (0x0a << MATH_SRC1_SHIFT)
+#define MATH_SRC1_OUTFIFO (0x0b << MATH_SRC1_SHIFT)
+#define MATH_SRC1_ONE (0x0c << MATH_SRC1_SHIFT)
+#define MATH_SRC1_JOBSOURCE (0x0d << MATH_SRC1_SHIFT)
+#define MATH_SRC1_ZERO (0x0f << MATH_SRC1_SHIFT)
+
+/* Destination selectors */
+#define MATH_DEST_SHIFT 8
+#define MATHI_DEST_SHIFT 12
+#define MATH_DEST_MASK (0x0f << MATH_DEST_SHIFT)
+#define MATH_DEST_REG0 (0x00 << MATH_DEST_SHIFT)
+#define MATH_DEST_REG1 (0x01 << MATH_DEST_SHIFT)
+#define MATH_DEST_REG2 (0x02 << MATH_DEST_SHIFT)
+#define MATH_DEST_REG3 (0x03 << MATH_DEST_SHIFT)
+#define MATH_DEST_DPOVRD (0x07 << MATH_DEST_SHIFT)
+#define MATH_DEST_SEQINLEN (0x08 << MATH_DEST_SHIFT)
+#define MATH_DEST_SEQOUTLEN (0x09 << MATH_DEST_SHIFT)
+#define MATH_DEST_VARSEQINLEN (0x0a << MATH_DEST_SHIFT)
+#define MATH_DEST_VARSEQOUTLEN (0x0b << MATH_DEST_SHIFT)
+#define MATH_DEST_NONE (0x0f << MATH_DEST_SHIFT)
+
+/* MATHI Immediate value */
+#define MATHI_IMM_SHIFT 4
+#define MATHI_IMM_MASK (0xff << MATHI_IMM_SHIFT)
+
+/* Length selectors */
+#define MATH_LEN_SHIFT 0
+#define MATH_LEN_MASK (0x0f << MATH_LEN_SHIFT)
+#define MATH_LEN_1BYTE 0x01
+#define MATH_LEN_2BYTE 0x02
+#define MATH_LEN_4BYTE 0x04
+#define MATH_LEN_8BYTE 0x08
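+
+/*
+ * Editorial illustration (not part of the original header): the field
+ * portion of a MATH command is the OR of one function, SRC0, SRC1,
+ * destination and length selector; the RTA MATHB() constructor adds the
+ * remaining command bits. The example below mirrors the
+ * "VSEQINSZ = SEQINSZ - MATH2" operation used by the shared descriptors in
+ * desc/algo.h. Macro name is illustrative only.
+ */
+#define EXAMPLE_MATH_VSEQINSZ_FROM_SEQINSZ \
+		(MATH_FUN_SUB | MATH_SRC0_SEQINLEN | MATH_SRC1_REG2 | \
+		 MATH_DEST_VARSEQINLEN | MATH_LEN_4BYTE)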
+
+/*
+ * JUMP Command Constructs
+ */
+
+#define JUMP_CLASS_SHIFT 25
+#define JUMP_CLASS_MASK (3 << JUMP_CLASS_SHIFT)
+#define JUMP_CLASS_NONE 0
+#define JUMP_CLASS_CLASS1 (1 << JUMP_CLASS_SHIFT)
+#define JUMP_CLASS_CLASS2 (2 << JUMP_CLASS_SHIFT)
+#define JUMP_CLASS_BOTH (3 << JUMP_CLASS_SHIFT)
+
+#define JUMP_JSL_SHIFT 24
+#define JUMP_JSL_MASK (1 << JUMP_JSL_SHIFT)
+#define JUMP_JSL BIT(24)
+
+#define JUMP_TYPE_SHIFT 20
+#define JUMP_TYPE_MASK (0x0f << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_LOCAL (0x00 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_LOCAL_INC (0x01 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_GOSUB (0x02 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_LOCAL_DEC (0x03 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_NONLOCAL (0x04 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_RETURN (0x06 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_HALT (0x08 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_HALT_USER (0x0c << JUMP_TYPE_SHIFT)
+
+#define JUMP_TEST_SHIFT 16
+#define JUMP_TEST_MASK (0x03 << JUMP_TEST_SHIFT)
+#define JUMP_TEST_ALL (0x00 << JUMP_TEST_SHIFT)
+#define JUMP_TEST_INVALL (0x01 << JUMP_TEST_SHIFT)
+#define JUMP_TEST_ANY (0x02 << JUMP_TEST_SHIFT)
+#define JUMP_TEST_INVANY (0x03 << JUMP_TEST_SHIFT)
+
+/* Condition codes. JSL bit is factored in */
+#define JUMP_COND_SHIFT 8
+#define JUMP_COND_MASK ((0xff << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_PK_0 BIT(15)
+#define JUMP_COND_PK_GCD_1 BIT(14)
+#define JUMP_COND_PK_PRIME BIT(13)
+#define JUMP_COND_MATH_N BIT(11)
+#define JUMP_COND_MATH_Z BIT(10)
+#define JUMP_COND_MATH_C BIT(9)
+#define JUMP_COND_MATH_NV BIT(8)
+
+#define JUMP_COND_JQP (BIT(15) | JUMP_JSL)
+#define JUMP_COND_SHRD (BIT(14) | JUMP_JSL)
+#define JUMP_COND_SELF (BIT(13) | JUMP_JSL)
+#define JUMP_COND_CALM (BIT(12) | JUMP_JSL)
+#define JUMP_COND_NIP (BIT(11) | JUMP_JSL)
+#define JUMP_COND_NIFP (BIT(10) | JUMP_JSL)
+#define JUMP_COND_NOP (BIT(9) | JUMP_JSL)
+#define JUMP_COND_NCP (BIT(8) | JUMP_JSL)
+
+/* Source / destination selectors */
+#define JUMP_SRC_DST_SHIFT 12
+#define JUMP_SRC_DST_MASK (0x0f << JUMP_SRC_DST_SHIFT)
+#define JUMP_SRC_DST_MATH0 (0x00 << JUMP_SRC_DST_SHIFT)
+#define JUMP_SRC_DST_MATH1 (0x01 << JUMP_SRC_DST_SHIFT)
+#define JUMP_SRC_DST_MATH2 (0x02 << JUMP_SRC_DST_SHIFT)
+#define JUMP_SRC_DST_MATH3 (0x03 << JUMP_SRC_DST_SHIFT)
+#define JUMP_SRC_DST_DPOVRD (0x07 << JUMP_SRC_DST_SHIFT)
+#define JUMP_SRC_DST_SEQINLEN (0x08 << JUMP_SRC_DST_SHIFT)
+#define JUMP_SRC_DST_SEQOUTLEN (0x09 << JUMP_SRC_DST_SHIFT)
+#define JUMP_SRC_DST_VARSEQINLEN (0x0a << JUMP_SRC_DST_SHIFT)
+#define JUMP_SRC_DST_VARSEQOUTLEN (0x0b << JUMP_SRC_DST_SHIFT)
+
+#define JUMP_OFFSET_SHIFT 0
+#define JUMP_OFFSET_MASK (0xff << JUMP_OFFSET_SHIFT)
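+
+/*
+ * Editorial illustration (not part of the original header): a local JUMP
+ * taken when key material is already present due to descriptor sharing
+ * (the SHRD condition), as used by the constructors in desc/algo.h; the RTA
+ * JUMP() constructor adds the command-type bits and patches the offset.
+ * Macro name is illustrative only.
+ */
+#define EXAMPLE_JUMP_SKIP_KEY_IF_SHARED \
+		(JUMP_CLASS_NONE | JUMP_TYPE_LOCAL | JUMP_TEST_ALL | \
+		 JUMP_COND_SHRD)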
+
+/*
+ * NFIFO ENTRY Data Constructs
+ */
+#define NFIFOENTRY_DEST_SHIFT 30
+#define NFIFOENTRY_DEST_MASK ((uint32_t)(3 << NFIFOENTRY_DEST_SHIFT))
+#define NFIFOENTRY_DEST_DECO (0 << NFIFOENTRY_DEST_SHIFT)
+#define NFIFOENTRY_DEST_CLASS1 (1 << NFIFOENTRY_DEST_SHIFT)
+#define NFIFOENTRY_DEST_CLASS2 ((uint32_t)(2 << NFIFOENTRY_DEST_SHIFT))
+#define NFIFOENTRY_DEST_BOTH ((uint32_t)(3 << NFIFOENTRY_DEST_SHIFT))
+
+#define NFIFOENTRY_LC2_SHIFT 29
+#define NFIFOENTRY_LC2_MASK (1 << NFIFOENTRY_LC2_SHIFT)
+#define NFIFOENTRY_LC2 BIT(29)
+
+#define NFIFOENTRY_LC1_SHIFT 28
+#define NFIFOENTRY_LC1_MASK (1 << NFIFOENTRY_LC1_SHIFT)
+#define NFIFOENTRY_LC1 BIT(28)
+
+#define NFIFOENTRY_FC2_SHIFT 27
+#define NFIFOENTRY_FC2_MASK (1 << NFIFOENTRY_FC2_SHIFT)
+#define NFIFOENTRY_FC2 BIT(27)
+
+#define NFIFOENTRY_FC1_SHIFT 26
+#define NFIFOENTRY_FC1_MASK (1 << NFIFOENTRY_FC1_SHIFT)
+#define NFIFOENTRY_FC1 BIT(26)
+
+#define NFIFOENTRY_STYPE_SHIFT 24
+#define NFIFOENTRY_STYPE_MASK (3 << NFIFOENTRY_STYPE_SHIFT)
+#define NFIFOENTRY_STYPE_DFIFO (0 << NFIFOENTRY_STYPE_SHIFT)
+#define NFIFOENTRY_STYPE_OFIFO (1 << NFIFOENTRY_STYPE_SHIFT)
+#define NFIFOENTRY_STYPE_PAD (2 << NFIFOENTRY_STYPE_SHIFT)
+#define NFIFOENTRY_STYPE_SNOOP (3 << NFIFOENTRY_STYPE_SHIFT)
+#define NFIFOENTRY_STYPE_ALTSOURCE ((0 << NFIFOENTRY_STYPE_SHIFT) \
+ | NFIFOENTRY_AST)
+#define NFIFOENTRY_STYPE_OFIFO_SYNC ((1 << NFIFOENTRY_STYPE_SHIFT) \
+ | NFIFOENTRY_AST)
+#define NFIFOENTRY_STYPE_SNOOP_ALT ((3 << NFIFOENTRY_STYPE_SHIFT) \
+ | NFIFOENTRY_AST)
+
+#define NFIFOENTRY_DTYPE_SHIFT 20
+#define NFIFOENTRY_DTYPE_MASK (0xF << NFIFOENTRY_DTYPE_SHIFT)
+
+#define NFIFOENTRY_DTYPE_SBOX (0x0 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_AAD (0x1 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_IV (0x2 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_SAD (0x3 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_ICV (0xA << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_SKIP (0xE << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_MSG (0xF << NFIFOENTRY_DTYPE_SHIFT)
+
+#define NFIFOENTRY_DTYPE_PK_A0 (0x0 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_A1 (0x1 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_A2 (0x2 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_A3 (0x3 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_B0 (0x4 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_B1 (0x5 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_B2 (0x6 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_B3 (0x7 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_N (0x8 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_E (0x9 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_A (0xC << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_B (0xD << NFIFOENTRY_DTYPE_SHIFT)
+
+#define NFIFOENTRY_BND_SHIFT 19
+#define NFIFOENTRY_BND_MASK (1 << NFIFOENTRY_BND_SHIFT)
+#define NFIFOENTRY_BND BIT(19)
+
+#define NFIFOENTRY_PTYPE_SHIFT 16
+#define NFIFOENTRY_PTYPE_MASK (0x7 << NFIFOENTRY_PTYPE_SHIFT)
+
+#define NFIFOENTRY_PTYPE_ZEROS (0x0 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_RND_NOZEROS (0x1 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_INCREMENT (0x2 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_RND (0x3 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_ZEROS_NZ (0x4 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_RND_NZ_LZ (0x5 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_N (0x6 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_RND_NZ_N (0x7 << NFIFOENTRY_PTYPE_SHIFT)
+
+#define NFIFOENTRY_OC_SHIFT 15
+#define NFIFOENTRY_OC_MASK (1 << NFIFOENTRY_OC_SHIFT)
+#define NFIFOENTRY_OC BIT(15)
+
+#define NFIFOENTRY_PR_SHIFT 15
+#define NFIFOENTRY_PR_MASK (1 << NFIFOENTRY_PR_SHIFT)
+#define NFIFOENTRY_PR BIT(15)
+
+#define NFIFOENTRY_AST_SHIFT 14
+#define NFIFOENTRY_AST_MASK (1 << NFIFOENTRY_AST_SHIFT)
+#define NFIFOENTRY_AST BIT(14)
+
+#define NFIFOENTRY_BM_SHIFT 11
+#define NFIFOENTRY_BM_MASK (1 << NFIFOENTRY_BM_SHIFT)
+#define NFIFOENTRY_BM BIT(11)
+
+#define NFIFOENTRY_PS_SHIFT 10
+#define NFIFOENTRY_PS_MASK (1 << NFIFOENTRY_PS_SHIFT)
+#define NFIFOENTRY_PS BIT(10)
+
+#define NFIFOENTRY_DLEN_SHIFT 0
+#define NFIFOENTRY_DLEN_MASK (0xFFF << NFIFOENTRY_DLEN_SHIFT)
+
+#define NFIFOENTRY_PLEN_SHIFT 0
+#define NFIFOENTRY_PLEN_MASK (0xFF << NFIFOENTRY_PLEN_SHIFT)
+
+/* Append Load Immediate Command */
+#define FD_CMD_APPEND_LOAD_IMMEDIATE BIT(31)
+
+/* Set SEQ LIODN equal to the Non-SEQ LIODN for the job */
+#define FD_CMD_SET_SEQ_LIODN_EQUAL_NONSEQ_LIODN BIT(30)
+
+/* Frame Descriptor Command for Replacement Job Descriptor */
+#define FD_CMD_REPLACE_JOB_DESC BIT(29)
+
+#endif /* __RTA_DESC_H__ */
diff --git a/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/desc/algo.h b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/desc/algo.h
new file mode 100644
index 00000000..91f3e067
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/desc/algo.h
@@ -0,0 +1,646 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ */
+
+#ifndef __DESC_ALGO_H__
+#define __DESC_ALGO_H__
+
+#include "hw/rta.h"
+#include "common.h"
+
+/**
+ * DOC: Algorithms - Shared Descriptor Constructors
+ *
+ * Shared descriptors for algorithms (i.e. not for protocols).
+ */
+
+/**
+ * cnstr_shdsc_snow_f8 - SNOW/f8 (UEA2) as a shared descriptor
+ * @descbuf: pointer to descriptor-under-construction buffer
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @cipherdata: pointer to block cipher transform definitions
+ * @dir: Cipher direction (DIR_ENC/DIR_DEC)
+ * @count: UEA2 count value (32 bits)
+ * @bearer: UEA2 bearer ID (5 bits)
+ * @direction: UEA2 direction (1 bit)
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_snow_f8(uint32_t *descbuf, bool ps, bool swap,
+ struct alginfo *cipherdata, uint8_t dir,
+ uint32_t count, uint8_t bearer, uint8_t direction)
+{
+ struct program prg;
+ struct program *p = &prg;
+ uint32_t ct = count;
+ uint8_t br = bearer;
+ uint8_t dr = direction;
+ uint32_t context[2] = {ct, (br << 27) | (dr << 26)};
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap) {
+ PROGRAM_SET_BSWAP(p);
+
+ context[0] = swab32(context[0]);
+ context[1] = swab32(context[1]);
+ }
+
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+ SHR_HDR(p, SHR_ALWAYS, 1, 0);
+
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+ MATHB(p, SEQINSZ, SUB, MATH2, VSEQINSZ, 4, 0);
+ MATHB(p, SEQINSZ, SUB, MATH2, VSEQOUTSZ, 4, 0);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_SNOW_F8, OP_ALG_AAI_F8,
+ OP_ALG_AS_INITFINAL, 0, dir);
+ LOAD(p, (uintptr_t)context, CONTEXT1, 0, 8, IMMED | COPY);
+ SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1);
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+
+ return PROGRAM_FINALIZE(p);
+}
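+
+/*
+ * Editorial usage sketch (not part of the original file): one way a caller
+ * might build a SNOW f8 shared descriptor with the helper above. The
+ * function name, 16-byte key length and zeroed count/bearer/direction are
+ * illustrative assumptions; real callers take them from the session, and
+ * struct alginfo is assumed to carry the key address as a 64-bit value.
+ */
+static inline int
+example_build_snow_f8_shdesc(uint32_t *descbuf, uint8_t *key)
+{
+	struct alginfo cipherdata = {0};
+
+	cipherdata.key = (uint64_t)(uintptr_t)key;	/* 16-byte UEA2 key */
+	cipherdata.keylen = 16;
+	cipherdata.key_enc_flags = 0;			/* plain-text key */
+
+	/* no 36-bit addressing, no byte swap, encrypt direction */
+	return cnstr_shdsc_snow_f8(descbuf, false, false, &cipherdata,
+				   DIR_ENC, 0 /* count */, 0 /* bearer */,
+				   0 /* direction */);
+}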
+
+/**
+ * cnstr_shdsc_snow_f9 - SNOW/f9 (UIA2) as a shared descriptor
+ * @descbuf: pointer to descriptor-under-construction buffer
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @authdata: pointer to authentication transform definitions
+ * @dir: cipher direction (DIR_ENC/DIR_DEC)
+ * @count: UIA2 count value (32 bits)
+ * @fresh: UIA2 fresh value (32 bits)
+ * @direction: UIA2 direction (1 bit)
+ * @datalen: size of data
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_snow_f9(uint32_t *descbuf, bool ps, bool swap,
+ struct alginfo *authdata, uint8_t dir, uint32_t count,
+ uint32_t fresh, uint8_t direction, uint32_t datalen)
+{
+ struct program prg;
+ struct program *p = &prg;
+ uint64_t ct = count;
+ uint64_t fr = fresh;
+ uint64_t dr = direction;
+ uint64_t context[2];
+
+ context[0] = (ct << 32) | (dr << 26);
+ context[1] = fr << 32;
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap) {
+ PROGRAM_SET_BSWAP(p);
+
+ context[0] = swab64(context[0]);
+ context[1] = swab64(context[1]);
+ }
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+ SHR_HDR(p, SHR_ALWAYS, 1, 0);
+
+ KEY(p, KEY2, authdata->key_enc_flags, authdata->key, authdata->keylen,
+ INLINE_KEY(authdata));
+ MATHB(p, SEQINSZ, SUB, MATH2, VSEQINSZ, 4, 0);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_SNOW_F9, OP_ALG_AAI_F9,
+ OP_ALG_AS_INITFINAL, 0, dir);
+ LOAD(p, (uintptr_t)context, CONTEXT2, 0, 16, IMMED | COPY);
+ SEQFIFOLOAD(p, BIT_DATA, datalen, CLASS2 | LAST2);
+ /* Save lower half of MAC out into a 32-bit sequence */
+ SEQSTORE(p, CONTEXT2, 0, 4, 0);
+
+ return PROGRAM_FINALIZE(p);
+}
+
+/**
+ * cnstr_shdsc_blkcipher - block cipher transformation
+ * @descbuf: pointer to descriptor-under-construction buffer
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @cipherdata: pointer to block cipher transform definitions
+ *		Valid algorithm values: one of OP_ALG_ALGSEL_* {DES, 3DES, AES}
+ * Valid modes for:
+ * AES: OP_ALG_AAI_* {CBC, CTR}
+ * DES, 3DES: OP_ALG_AAI_CBC
+ * @iv: IV data; if NULL, "ivlen" bytes from the input frame will be read as IV
+ * @ivlen: IV length
+ * @dir: DIR_ENC/DIR_DEC
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_blkcipher(uint32_t *descbuf, bool ps, bool swap,
+ struct alginfo *cipherdata, uint8_t *iv,
+ uint32_t ivlen, uint8_t dir)
+{
+ struct program prg;
+ struct program *p = &prg;
+ uint32_t iv_off = 0;
+ const bool need_dk = (dir == DIR_DEC) &&
+ (cipherdata->algtype == OP_ALG_ALGSEL_AES) &&
+ (cipherdata->algmode == OP_ALG_AAI_CBC);
+ LABEL(keyjmp);
+ LABEL(skipdk);
+ REFERENCE(pkeyjmp);
+ REFERENCE(pskipdk);
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+ SHR_HDR(p, SHR_SERIAL, 1, SC);
+
+ pkeyjmp = JUMP(p, keyjmp, LOCAL_JUMP, ALL_TRUE, SHRD);
+ /* Insert Key */
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+
+ if (need_dk) {
+ ALG_OPERATION(p, cipherdata->algtype, cipherdata->algmode,
+ OP_ALG_AS_INITFINAL, ICV_CHECK_DISABLE, dir);
+
+ pskipdk = JUMP(p, skipdk, LOCAL_JUMP, ALL_TRUE, 0);
+ }
+ SET_LABEL(p, keyjmp);
+
+ if (need_dk) {
+ ALG_OPERATION(p, OP_ALG_ALGSEL_AES, cipherdata->algmode |
+ OP_ALG_AAI_DK, OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE, dir);
+ SET_LABEL(p, skipdk);
+ } else {
+ ALG_OPERATION(p, cipherdata->algtype, cipherdata->algmode,
+ OP_ALG_AS_INITFINAL, ICV_CHECK_DISABLE, dir);
+ }
+
+ if (cipherdata->algmode == OP_ALG_AAI_CTR)
+ iv_off = 16;
+
+ if (iv)
+ /* IV load, convert size */
+ LOAD(p, (uintptr_t)iv, CONTEXT1, iv_off, ivlen, IMMED | COPY);
+ else
+ /* IV precedes the message in the input frame; read it via the sequence */
+ SEQLOAD(p, CONTEXT1, iv_off, ivlen, 0);
+
+ MATHB(p, SEQINSZ, SUB, MATH2, VSEQINSZ, 4, 0);
+ MATHB(p, SEQINSZ, SUB, MATH2, VSEQOUTSZ, 4, 0);
+
+ /* Insert sequence load/store with VLF */
+ SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1);
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+
+ PATCH_JUMP(p, pkeyjmp, keyjmp);
+ if (need_dk)
+ PATCH_JUMP(p, pskipdk, skipdk);
+
+ return PROGRAM_FINALIZE(p);
+}
+
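+/*
+ * Example: a minimal usage sketch for cnstr_shdsc_blkcipher() building an
+ * AES-CBC encryption descriptor. The buffer size and 16-byte key are
+ * placeholder assumptions; passing a NULL IV makes the descriptor read the
+ * "ivlen" bytes of IV from the start of each input frame, as documented above.
+ *
+ *	uint32_t shdesc[64];
+ *	uint8_t aes_key[16];
+ *	struct alginfo cipherdata = {
+ *		.algtype = OP_ALG_ALGSEL_AES,
+ *		.algmode = OP_ALG_AAI_CBC,
+ *		.key = (uintptr_t)aes_key,
+ *		.keylen = sizeof(aes_key),
+ *		.key_enc_flags = 0,
+ *		.key_type = RTA_DATA_IMM,
+ *	};
+ *	int len = cnstr_shdsc_blkcipher(shdesc, false, false, &cipherdata,
+ *					NULL, 16, DIR_ENC);
+ */
+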
+/**
+ * cnstr_shdsc_hmac - HMAC as a shared descriptor
+ * @descbuf: pointer to descriptor-under-construction buffer
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @authdata: pointer to authentication transform definitions;
+ * message digest algorithm: one of OP_ALG_ALGSEL_{MD5, SHA1, SHA224,
+ * SHA256, SHA384, SHA512}.
+ * @do_icv: 0 if ICV checking is not desired, any other value if ICV checking
+ * is needed for all the packets processed by this shared descriptor
+ * @trunc_len: Length of the truncated ICV to be written in the output buffer, 0
+ * if no truncation is needed
+ *
+ * Note: There's no support for keys longer than the block size of the
+ * underlying hash function, according to the selected algorithm.
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_hmac(uint32_t *descbuf, bool ps, bool swap,
+ struct alginfo *authdata, uint8_t do_icv,
+ uint8_t trunc_len)
+{
+ struct program prg;
+ struct program *p = &prg;
+ uint8_t storelen, opicv, dir;
+ LABEL(keyjmp);
+ LABEL(jmpprecomp);
+ REFERENCE(pkeyjmp);
+ REFERENCE(pjmpprecomp);
+
+ /* Compute fixed-size store based on alg selection */
+ switch (authdata->algtype) {
+ case OP_ALG_ALGSEL_MD5:
+ storelen = 16;
+ break;
+ case OP_ALG_ALGSEL_SHA1:
+ storelen = 20;
+ break;
+ case OP_ALG_ALGSEL_SHA224:
+ storelen = 28;
+ break;
+ case OP_ALG_ALGSEL_SHA256:
+ storelen = 32;
+ break;
+ case OP_ALG_ALGSEL_SHA384:
+ storelen = 48;
+ break;
+ case OP_ALG_ALGSEL_SHA512:
+ storelen = 64;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ trunc_len = trunc_len && (trunc_len < storelen) ? trunc_len : storelen;
+
+ opicv = do_icv ? ICV_CHECK_ENABLE : ICV_CHECK_DISABLE;
+ dir = do_icv ? DIR_DEC : DIR_ENC;
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+ SHR_HDR(p, SHR_SERIAL, 1, SC);
+
+ pkeyjmp = JUMP(p, keyjmp, LOCAL_JUMP, ALL_TRUE, SHRD);
+ KEY(p, KEY2, authdata->key_enc_flags, authdata->key, authdata->keylen,
+ INLINE_KEY(authdata));
+
+ /* Do operation */
+ ALG_OPERATION(p, authdata->algtype, OP_ALG_AAI_HMAC,
+ OP_ALG_AS_INITFINAL, opicv, dir);
+
+ pjmpprecomp = JUMP(p, jmpprecomp, LOCAL_JUMP, ALL_TRUE, 0);
+ SET_LABEL(p, keyjmp);
+
+ ALG_OPERATION(p, authdata->algtype, OP_ALG_AAI_HMAC_PRECOMP,
+ OP_ALG_AS_INITFINAL, opicv, dir);
+
+ SET_LABEL(p, jmpprecomp);
+
+ /* compute sequences */
+ if (opicv == ICV_CHECK_ENABLE)
+ MATHB(p, SEQINSZ, SUB, trunc_len, VSEQINSZ, 4, IMMED2);
+ else
+ MATHB(p, SEQINSZ, SUB, MATH2, VSEQINSZ, 4, 0);
+
+ /* Do load (variable length) */
+ SEQFIFOLOAD(p, MSG2, 0, VLF | LAST2);
+
+ if (opicv == ICV_CHECK_ENABLE)
+ SEQFIFOLOAD(p, ICV2, trunc_len, LAST2);
+ else
+ SEQSTORE(p, CONTEXT2, 0, trunc_len, 0);
+
+ PATCH_JUMP(p, pkeyjmp, keyjmp);
+ PATCH_JUMP(p, pjmpprecomp, jmpprecomp);
+
+ return PROGRAM_FINALIZE(p);
+}
+
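+/*
+ * Example: a minimal usage sketch for cnstr_shdsc_hmac() with HMAC-SHA-256.
+ * The buffer size and 32-byte key are placeholder assumptions; with
+ * do_icv = 0 and trunc_len = 0 the untruncated 32-byte MAC is written to the
+ * output sequence.
+ *
+ *	uint32_t shdesc[64];
+ *	uint8_t hmac_key[32];
+ *	struct alginfo authdata = {
+ *		.algtype = OP_ALG_ALGSEL_SHA256,
+ *		.key = (uintptr_t)hmac_key,
+ *		.keylen = sizeof(hmac_key),
+ *		.key_enc_flags = 0,
+ *		.key_type = RTA_DATA_IMM,
+ *	};
+ *	int len = cnstr_shdsc_hmac(shdesc, false, false, &authdata, 0, 0);
+ */
+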
+/**
+ * cnstr_shdsc_kasumi_f8 - KASUMI F8 (Confidentiality) as a shared descriptor
+ * (ETSI "Document 1: f8 and f9 specification")
+ * @descbuf: pointer to descriptor-under-construction buffer
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @cipherdata: pointer to block cipher transform definitions
+ * @dir: cipher direction (DIR_ENC/DIR_DEC)
+ * @count: count value (32 bits)
+ * @bearer: bearer ID (5 bits)
+ * @direction: direction (1 bit)
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_kasumi_f8(uint32_t *descbuf, bool ps, bool swap,
+ struct alginfo *cipherdata, uint8_t dir,
+ uint32_t count, uint8_t bearer, uint8_t direction)
+{
+ struct program prg;
+ struct program *p = &prg;
+ uint64_t ct = count;
+ uint64_t br = bearer;
+ uint64_t dr = direction;
+ uint32_t context[2] = { ct, (br << 27) | (dr << 26) };
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap) {
+ PROGRAM_SET_BSWAP(p);
+
+ context[0] = swab32(context[0]);
+ context[1] = swab32(context[1]);
+ }
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+ SHR_HDR(p, SHR_ALWAYS, 1, 0);
+
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+ MATHB(p, SEQINSZ, SUB, MATH2, VSEQINSZ, 4, 0);
+ MATHB(p, SEQINSZ, SUB, MATH2, VSEQOUTSZ, 4, 0);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_KASUMI, OP_ALG_AAI_F8,
+ OP_ALG_AS_INITFINAL, 0, dir);
+ LOAD(p, (uintptr_t)context, CONTEXT1, 0, 8, IMMED | COPY);
+ SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1);
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+
+ return PROGRAM_FINALIZE(p);
+}
+
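+/*
+ * Example: a minimal usage sketch for cnstr_shdsc_kasumi_f8(). The buffer
+ * size, 16-byte confidentiality key and count/bearer/direction values are
+ * placeholder assumptions only.
+ *
+ *	uint32_t shdesc[64];
+ *	uint8_t ck[16];
+ *	uint32_t count = 0;
+ *	uint8_t bearer = 0, direction = 0;
+ *	struct alginfo cipherdata = {
+ *		.key = (uintptr_t)ck,
+ *		.keylen = sizeof(ck),
+ *		.key_enc_flags = 0,
+ *		.key_type = RTA_DATA_IMM,
+ *	};
+ *	int len = cnstr_shdsc_kasumi_f8(shdesc, false, false, &cipherdata,
+ *					DIR_ENC, count, bearer, direction);
+ */
+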
+/**
+ * cnstr_shdsc_kasumi_f9 - KASUMI F9 (Integrity) as a shared descriptor
+ * (ETSI "Document 1: f8 and f9 specification")
+ * @descbuf: pointer to descriptor-under-construction buffer
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @authdata: pointer to authentication transform definitions
+ * @dir: cipher direction (DIR_ENC/DIR_DEC)
+ * @count: count value (32 bits)
+ * @fresh: fresh value ID (32 bits)
+ * @direction: direction (1 bit)
+ * @datalen: size of data
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_kasumi_f9(uint32_t *descbuf, bool ps, bool swap,
+ struct alginfo *authdata, uint8_t dir,
+ uint32_t count, uint32_t fresh, uint8_t direction,
+ uint32_t datalen)
+{
+ struct program prg;
+ struct program *p = &prg;
+ uint16_t ctx_offset = 16;
+ uint32_t context[6] = {count, direction << 26, fresh, 0, 0, 0};
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap) {
+ PROGRAM_SET_BSWAP(p);
+
+ context[0] = swab32(context[0]);
+ context[1] = swab32(context[1]);
+ context[2] = swab32(context[2]);
+ }
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+ SHR_HDR(p, SHR_ALWAYS, 1, 0);
+
+ KEY(p, KEY1, authdata->key_enc_flags, authdata->key, authdata->keylen,
+ INLINE_KEY(authdata));
+ MATHB(p, SEQINSZ, SUB, MATH2, VSEQINSZ, 4, 0);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_KASUMI, OP_ALG_AAI_F9,
+ OP_ALG_AS_INITFINAL, 0, dir);
+ LOAD(p, (uintptr_t)context, CONTEXT1, 0, 24, IMMED | COPY);
+ SEQFIFOLOAD(p, BIT_DATA, datalen, CLASS1 | LAST1);
+ /* Save the output MAC from context DWORD 2 as a 32-bit sequence store */
+ SEQSTORE(p, CONTEXT1, ctx_offset, 4, 0);
+
+ return PROGRAM_FINALIZE(p);
+}
+
+/**
+ * cnstr_shdsc_gcm_encap - AES-GCM encap as a shared descriptor
+ * @descbuf: pointer to descriptor-under-construction buffer
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @cipherdata: pointer to block cipher transform definitions
+ * Valid algorithm values - OP_ALG_ALGSEL_AES ORed with
+ * OP_ALG_AAI_GCM.
+ * @ivlen: Initialization vector length
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_gcm_encap(uint32_t *descbuf, bool ps, bool swap,
+ struct alginfo *cipherdata,
+ uint32_t ivlen, uint32_t icvsize)
+{
+ struct program prg;
+ struct program *p = &prg;
+
+ LABEL(keyjmp);
+ LABEL(zeroassocjump2);
+ LABEL(zeroassocjump1);
+ LABEL(zeropayloadjump);
+ REFERENCE(pkeyjmp);
+ REFERENCE(pzeroassocjump2);
+ REFERENCE(pzeroassocjump1);
+ REFERENCE(pzeropayloadjump);
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+
+ SHR_HDR(p, SHR_SERIAL, 1, SC);
+
+ pkeyjmp = JUMP(p, keyjmp, LOCAL_JUMP, ALL_TRUE, SELF | SHRD);
+ /* Insert Key */
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+
+ SET_LABEL(p, keyjmp);
+
+ /* class 1 operation */
+ ALG_OPERATION(p, cipherdata->algtype, cipherdata->algmode,
+ OP_ALG_AS_INITFINAL, ICV_CHECK_DISABLE, DIR_ENC);
+
+ MATHB(p, DPOVRD, AND, 0x7fffffff, MATH3, 4, IMMED2);
+
+ /* if assoclen + cryptlen is ZERO, skip to ICV write */
+ MATHB(p, SEQINSZ, SUB, ivlen, VSEQOUTSZ, 4, IMMED2);
+ pzeroassocjump2 = JUMP(p, zeroassocjump2, LOCAL_JUMP, ALL_TRUE, MATH_Z);
+
+ SEQFIFOLOAD(p, IV1, ivlen, FLUSH1);
+
+ /* if assoclen is ZERO, skip reading the assoc data */
+ MATHB(p, ZERO, ADD, MATH3, VSEQINSZ, 4, 0);
+ pzeroassocjump1 = JUMP(p, zeroassocjump1, LOCAL_JUMP, ALL_TRUE, MATH_Z);
+
+ MATHB(p, ZERO, ADD, MATH3, VSEQOUTSZ, 4, 0);
+
+ /* skip assoc data */
+ SEQFIFOSTORE(p, SKIP, 0, 0, VLF);
+
+ /* cryptlen = seqinlen - assoclen */
+ MATHB(p, SEQINSZ, SUB, MATH3, VSEQOUTSZ, 4, 0);
+
+ /* if cryptlen is ZERO jump to zero-payload commands */
+ pzeropayloadjump = JUMP(p, zeropayloadjump, LOCAL_JUMP, ALL_TRUE,
+ MATH_Z);
+
+ /* read assoc data */
+ SEQFIFOLOAD(p, AAD1, 0, CLASS1 | VLF | FLUSH1);
+ SET_LABEL(p, zeroassocjump1);
+
+ MATHB(p, SEQINSZ, SUB, MATH0, VSEQINSZ, 4, 0);
+
+ /* write encrypted data */
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+
+ /* read payload data */
+ SEQFIFOLOAD(p, MSG1, 0, CLASS1 | VLF | LAST1);
+
+ /* jump over the zero-payload commands */
+ JUMP(p, 4, LOCAL_JUMP, ALL_TRUE, 0);
+
+ /* zero-payload commands */
+ SET_LABEL(p, zeropayloadjump);
+
+ /* read assoc data */
+ SEQFIFOLOAD(p, AAD1, 0, CLASS1 | VLF | LAST1);
+
+ JUMP(p, 2, LOCAL_JUMP, ALL_TRUE, 0);
+
+ /* There is no input data */
+ SET_LABEL(p, zeroassocjump2);
+
+ SEQFIFOLOAD(p, IV1, ivlen, FLUSH1 | LAST1);
+
+ /* write ICV */
+ SEQSTORE(p, CONTEXT1, 0, icvsize, 0);
+
+ PATCH_JUMP(p, pkeyjmp, keyjmp);
+ PATCH_JUMP(p, pzeroassocjump2, zeroassocjump2);
+ PATCH_JUMP(p, pzeroassocjump1, zeroassocjump1);
+ PATCH_JUMP(p, pzeropayloadjump, zeropayloadjump);
+
+ return PROGRAM_FINALIZE(p);
+}
+
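+/*
+ * Example: a minimal usage sketch for cnstr_shdsc_gcm_encap(). The buffer
+ * size, 16-byte key, 12-byte IV and 16-byte ICV are placeholder assumptions.
+ *
+ *	uint32_t shdesc[64];
+ *	uint8_t gcm_key[16];
+ *	struct alginfo cipherdata = {
+ *		.algtype = OP_ALG_ALGSEL_AES,
+ *		.algmode = OP_ALG_AAI_GCM,
+ *		.key = (uintptr_t)gcm_key,
+ *		.keylen = sizeof(gcm_key),
+ *		.key_enc_flags = 0,
+ *		.key_type = RTA_DATA_IMM,
+ *	};
+ *	int len = cnstr_shdsc_gcm_encap(shdesc, false, false, &cipherdata,
+ *					12, 16);
+ */
+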
+/**
+ * cnstr_shdsc_gcm_decap - AES-GCM decap as a shared descriptor
+ * @descbuf: pointer to descriptor-under-construction buffer
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @cipherdata: pointer to block cipher transform definitions
+ * Valid algorithm values - OP_ALG_ALGSEL_AES ORed with
+ * OP_ALG_AAI_GCM.
+ * @ivlen: Initialization vector length
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_gcm_decap(uint32_t *descbuf, bool ps, bool swap,
+ struct alginfo *cipherdata,
+ uint32_t ivlen, uint32_t icvsize)
+{
+ struct program prg;
+ struct program *p = &prg;
+
+ LABEL(keyjmp);
+ LABEL(zeroassocjump1);
+ LABEL(zeropayloadjump);
+ REFERENCE(pkeyjmp);
+ REFERENCE(pzeroassocjump1);
+ REFERENCE(pzeropayloadjump);
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+
+ SHR_HDR(p, SHR_SERIAL, 1, SC);
+
+ pkeyjmp = JUMP(p, keyjmp, LOCAL_JUMP, ALL_TRUE, SELF | SHRD);
+ /* Insert Key */
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+
+ SET_LABEL(p, keyjmp);
+
+ /* class 1 operation */
+ ALG_OPERATION(p, cipherdata->algtype, cipherdata->algmode,
+ OP_ALG_AS_INITFINAL, ICV_CHECK_ENABLE, DIR_DEC);
+
+ MATHB(p, DPOVRD, AND, 0x7fffffff, MATH3, 4, IMMED2);
+ SEQFIFOLOAD(p, IV1, ivlen, FLUSH1);
+
+ /* if assoclen is ZERO, skip reading the assoc data */
+ MATHB(p, ZERO, ADD, MATH3, VSEQINSZ, 4, 0);
+ pzeroassocjump1 = JUMP(p, zeroassocjump1, LOCAL_JUMP, ALL_TRUE, MATH_Z);
+
+ MATHB(p, ZERO, ADD, MATH3, VSEQOUTSZ, 4, 0);
+
+ /* skip assoc data */
+ SEQFIFOSTORE(p, SKIP, 0, 0, VLF);
+
+ /* read assoc data */
+ SEQFIFOLOAD(p, AAD1, 0, CLASS1 | VLF | FLUSH1);
+
+ SET_LABEL(p, zeroassocjump1);
+
+ /* cryptlen = seqoutlen - assoclen */
+ MATHB(p, SEQOUTSZ, SUB, MATH0, VSEQINSZ, 4, 0);
+
+ /* jump to zero-payload command if cryptlen is zero */
+ pzeropayloadjump = JUMP(p, zeropayloadjump, LOCAL_JUMP, ALL_TRUE,
+ MATH_Z);
+
+ MATHB(p, SEQOUTSZ, SUB, MATH0, VSEQOUTSZ, 4, 0);
+
+ /* store encrypted data */
+ /* write decrypted data */
+
+ /* read payload data */
+ SEQFIFOLOAD(p, MSG1, 0, CLASS1 | VLF | FLUSH1);
+
+ /* zero-payload command */
+ SET_LABEL(p, zeropayloadjump);
+
+ /* read ICV */
+ SEQFIFOLOAD(p, ICV1, icvsize, CLASS1 | LAST1);
+
+ PATCH_JUMP(p, pkeyjmp, keyjmp);
+ PATCH_JUMP(p, pzeroassocjump1, zeroassocjump1);
+ PATCH_JUMP(p, pzeropayloadjump, zeropayloadjump);
+
+ return PROGRAM_FINALIZE(p);
+}
+
+/**
+ * cnstr_shdsc_crc - CRC32 Accelerator (IEEE 802 CRC32 protocol mode)
+ * @descbuf: pointer to descriptor-under-construction buffer
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_crc(uint32_t *descbuf, bool swap)
+{
+ struct program prg;
+ struct program *p = &prg;
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+
+ SHR_HDR(p, SHR_ALWAYS, 1, 0);
+
+ MATHB(p, SEQINSZ, SUB, MATH2, VSEQINSZ, 4, 0);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_CRC,
+ OP_ALG_AAI_802 | OP_ALG_AAI_DOC,
+ OP_ALG_AS_FINALIZE, 0, DIR_ENC);
+ SEQFIFOLOAD(p, MSG2, 0, VLF | LAST2);
+ SEQSTORE(p, CONTEXT2, 0, 4, 0);
+
+ return PROGRAM_FINALIZE(p);
+}
+
+#endif /* __DESC_ALGO_H__ */
diff --git a/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/desc/common.h b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/desc/common.h
new file mode 100644
index 00000000..98425d8b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/desc/common.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ */
+
+#ifndef __DESC_COMMON_H__
+#define __DESC_COMMON_H__
+
+#include "hw/rta.h"
+
+/**
+ * DOC: Shared Descriptor Constructors - shared structures
+ *
+ * Data structures shared between algorithm and protocol implementations.
+ */
+
+/**
+ * struct alginfo - Container for algorithm details
+ * @algtype: algorithm selector; for valid values, see documentation of the
+ * functions where it is used.
+ * @keylen: length of the provided algorithm key, in bytes
+ * @key: address where algorithm key resides; virtual address if key_type is
+ * RTA_DATA_IMM, physical (bus) address if key_type is RTA_DATA_PTR or
+ * RTA_DATA_IMM_DMA.
+ * @key_enc_flags: key encryption flags; see encrypt_flags parameter of KEY
+ * command for valid values.
+ * @key_type: enum rta_data_type
+ * @algmode: algorithm mode selector; for valid values, see documentation of the
+ * functions where it is used.
+ */
+struct alginfo {
+ uint32_t algtype;
+ uint32_t keylen;
+ uint64_t key;
+ uint32_t key_enc_flags;
+ enum rta_data_type key_type;
+ uint16_t algmode;
+};
+
+#define INLINE_KEY(alginfo) inline_flags(alginfo->key_type)
+
+/**
+ * rta_inline_query() - Provide indications on which data items can be inlined
+ * and which shall be referenced in a shared descriptor.
+ * @sd_base_len: Shared descriptor base length - bytes consumed by the commands,
+ * excluding the data items to be inlined (or corresponding
+ * pointer if an item is not inlined). Each cnstr_* function that
+ * generates descriptors should have a define mentioning
+ * corresponding length.
+ * @jd_len: Maximum length of the job descriptor(s) that will be used
+ * together with the shared descriptor.
+ * @data_len: Array of lengths of the data items to be inlined
+ * @inl_mask: 32bit mask with bit x = 1 if data item x can be inlined, 0
+ * otherwise.
+ * @count: Number of data items (size of @data_len array); must be <= 32
+ *
+ * Return: 0 if data can be inlined / referenced, negative value if not. If 0,
+ * check @inl_mask for details.
+ */
+static inline int
+rta_inline_query(unsigned int sd_base_len,
+ unsigned int jd_len,
+ unsigned int *data_len,
+ uint32_t *inl_mask,
+ unsigned int count)
+{
+ int rem_bytes = (int)(CAAM_DESC_BYTES_MAX - sd_base_len - jd_len);
+ unsigned int i;
+
+ *inl_mask = 0;
+ for (i = 0; (i < count) && (rem_bytes > 0); i++) {
+ if (rem_bytes - (int)(data_len[i] +
+ (count - i - 1) * CAAM_PTR_SZ) >= 0) {
+ rem_bytes -= data_len[i];
+ *inl_mask |= (1 << i);
+ } else {
+ rem_bytes -= CAAM_PTR_SZ;
+ }
+ }
+
+ return (rem_bytes >= 0) ? 0 : -1;
+}
+
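+/*
+ * Example: a minimal usage sketch for rta_inline_query() deciding whether a
+ * single 32-byte key can be inlined. The base descriptor length
+ * (16 * CAAM_CMD_SZ) and job descriptor length (7 * CAAM_CMD_SZ) are
+ * placeholder assumptions; real callers should use the *_BASE_DESC_LEN define
+ * documented for the constructor they intend to call.
+ *
+ *	unsigned int key_len[1] = { 32 };
+ *	uint32_t inl_mask = 0;
+ *	int err = rta_inline_query(16 * CAAM_CMD_SZ, 7 * CAAM_CMD_SZ,
+ *				   key_len, &inl_mask, 1);
+ *	bool can_inline = (err == 0) && (inl_mask & (1 << 0));
+ */
+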
+/**
+ * struct protcmd - Container for Protocol Operation Command fields
+ * @optype: command type
+ * @protid: protocol Identifier
+ * @protinfo: protocol Information
+ */
+struct protcmd {
+ uint32_t optype;
+ uint32_t protid;
+ uint16_t protinfo;
+};
+
+#endif /* __DESC_COMMON_H__ */
diff --git a/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/desc/ipsec.h b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/desc/ipsec.h
new file mode 100644
index 00000000..35cc02a6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/desc/ipsec.h
@@ -0,0 +1,1521 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ */
+
+#ifndef __DESC_IPSEC_H__
+#define __DESC_IPSEC_H__
+
+#include "hw/rta.h"
+#include "common.h"
+
+/**
+ * DOC: IPsec Shared Descriptor Constructors
+ *
+ * Shared descriptors for IPsec protocol.
+ */
+
+/* General IPSec ESP encap / decap PDB options */
+
+/**
+ * PDBOPTS_ESP_ESN - Extended sequence included
+ */
+#define PDBOPTS_ESP_ESN 0x10
+
+/**
+ * PDBOPTS_ESP_IPVSN - Process IPv6 header
+ *
+ * Valid only for IPsec legacy mode.
+ */
+#define PDBOPTS_ESP_IPVSN 0x02
+
+/**
+ * PDBOPTS_ESP_TUNNEL - Tunnel mode next-header byte
+ *
+ * Valid only for IPsec legacy mode.
+ */
+#define PDBOPTS_ESP_TUNNEL 0x01
+
+/* IPSec ESP Encap PDB options */
+
+/**
+ * PDBOPTS_ESP_UPDATE_CSUM - Update ip header checksum
+ *
+ * Valid only for IPsec legacy mode.
+ */
+#define PDBOPTS_ESP_UPDATE_CSUM 0x80
+
+/**
+ * PDBOPTS_ESP_DIFFSERV - Copy TOS/TC from inner iphdr
+ *
+ * Valid only for IPsec legacy mode.
+ */
+#define PDBOPTS_ESP_DIFFSERV 0x40
+
+/**
+ * PDBOPTS_ESP_IVSRC - IV comes from internal random gen
+ */
+#define PDBOPTS_ESP_IVSRC 0x20
+
+/**
+ * PDBOPTS_ESP_IPHDRSRC - IP header comes from PDB
+ *
+ * Valid only for IPsec legacy mode.
+ */
+#define PDBOPTS_ESP_IPHDRSRC 0x08
+
+/**
+ * PDBOPTS_ESP_INCIPHDR - Prepend IP header to output frame
+ *
+ * Valid only for IPsec legacy mode.
+ */
+#define PDBOPTS_ESP_INCIPHDR 0x04
+
+/**
+ * PDBOPTS_ESP_OIHI_MASK - Mask for Outer IP Header Included
+ *
+ * Valid only for IPsec new mode.
+ */
+#define PDBOPTS_ESP_OIHI_MASK 0x0c
+
+/**
+ * PDBOPTS_ESP_OIHI_PDB_INL - Prepend IP header to output frame from PDB (where
+ * it is inlined).
+ *
+ * Valid only for IPsec new mode.
+ */
+#define PDBOPTS_ESP_OIHI_PDB_INL 0x0c
+
+/**
+ * PDBOPTS_ESP_OIHI_PDB_REF - Prepend IP header to output frame from PDB
+ * (referenced by pointer).
+ *
+ * Valid only for IPsec new mode.
+ */
+#define PDBOPTS_ESP_OIHI_PDB_REF 0x08
+
+/**
+ * PDBOPTS_ESP_OIHI_IF - Prepend IP header to output frame from input frame
+ *
+ * Valid only for IPsec new mode.
+ */
+#define PDBOPTS_ESP_OIHI_IF 0x04
+
+/**
+ * PDBOPTS_ESP_NAT - Enable RFC 3948 UDP-encapsulated-ESP
+ *
+ * Valid only for IPsec new mode.
+ */
+#define PDBOPTS_ESP_NAT 0x02
+
+/**
+ * PDBOPTS_ESP_NUC - Enable NAT UDP Checksum
+ *
+ * Valid only for IPsec new mode.
+ */
+#define PDBOPTS_ESP_NUC 0x01
+
+/* IPSec ESP Decap PDB options */
+
+/**
+ * PDBOPTS_ESP_ARS_MASK - antireplay window mask
+ */
+#define PDBOPTS_ESP_ARS_MASK 0xc0
+
+/**
+ * PDBOPTS_ESP_ARSNONE - No antireplay window
+ */
+#define PDBOPTS_ESP_ARSNONE 0x00
+
+/**
+ * PDBOPTS_ESP_ARS64 - 64-entry antireplay window
+ */
+#define PDBOPTS_ESP_ARS64 0xc0
+
+/**
+ * PDBOPTS_ESP_ARS128 - 128-entry antireplay window
+ *
+ * Valid only for IPsec new mode.
+ */
+#define PDBOPTS_ESP_ARS128 0x80
+
+/**
+ * PDBOPTS_ESP_ARS32 - 32-entry antireplay window
+ */
+#define PDBOPTS_ESP_ARS32 0x40
+
+/**
+ * PDBOPTS_ESP_VERIFY_CSUM - Validate ip header checksum
+ *
+ * Valid only for IPsec legacy mode.
+ */
+#define PDBOPTS_ESP_VERIFY_CSUM 0x20
+
+/**
+ * PDBOPTS_ESP_TECN - Implement RFC 6040 ECN tunneling from outer header to
+ * inner header.
+ *
+ * Valid only for IPsec new mode.
+ */
+#define PDBOPTS_ESP_TECN 0x20
+
+/**
+ * PDBOPTS_ESP_OUTFMT - Output only decapsulation
+ *
+ * Valid only for IPsec legacy mode.
+ */
+#define PDBOPTS_ESP_OUTFMT 0x08
+
+/**
+ * PDBOPTS_ESP_AOFL - Adjust out frame len
+ *
+ * Valid only for IPsec legacy mode and for SEC >= 5.3.
+ */
+#define PDBOPTS_ESP_AOFL 0x04
+
+/**
+ * PDBOPTS_ESP_ETU - EtherType Update
+ *
+ * Add corresponding ethertype (0x0800 for IPv4, 0x86dd for IPv6) in the output
+ * frame.
+ * Valid only for IPsec new mode.
+ */
+#define PDBOPTS_ESP_ETU 0x01
+
+#define PDBHMO_ESP_DECAP_SHIFT 28
+#define PDBHMO_ESP_ENCAP_SHIFT 28
+#define PDBNH_ESP_ENCAP_SHIFT 16
+#define PDBNH_ESP_ENCAP_MASK (0xff << PDBNH_ESP_ENCAP_SHIFT)
+#define PDBHDRLEN_ESP_DECAP_SHIFT 16
+#define PDBHDRLEN_MASK (0x0fff << PDBHDRLEN_ESP_DECAP_SHIFT)
+#define PDB_NH_OFFSET_SHIFT 8
+#define PDB_NH_OFFSET_MASK (0xff << PDB_NH_OFFSET_SHIFT)
+
+/**
+ * PDBHMO_ESP_DECAP_DTTL - IPsec ESP decrement TTL (IPv4) / Hop limit (IPv6)
+ * HMO option.
+ */
+#define PDBHMO_ESP_DECAP_DTTL (0x02 << PDBHMO_ESP_DECAP_SHIFT)
+
+/**
+ * PDBHMO_ESP_ENCAP_DTTL - IPsec ESP decrement TTL (IPv4) / Hop limit (IPv6)
+ * HMO option.
+ */
+#define PDBHMO_ESP_ENCAP_DTTL (0x02 << PDBHMO_ESP_ENCAP_SHIFT)
+
+/**
+ * PDBHMO_ESP_DIFFSERV - (Decap) DiffServ Copy - Copy the IPv4 TOS or IPv6
+ * Traffic Class byte from the outer IP header to the
+ * inner IP header.
+ */
+#define PDBHMO_ESP_DIFFSERV (0x01 << PDBHMO_ESP_DECAP_SHIFT)
+
+/**
+ * PDBHMO_ESP_SNR - (Encap) - Sequence Number Rollover control
+ *
+ * Configures behaviour in case of SN / ESN rollover:
+ * error if SNR = 1, rollover allowed if SNR = 0.
+ * Valid only for IPsec new mode.
+ */
+#define PDBHMO_ESP_SNR (0x01 << PDBHMO_ESP_ENCAP_SHIFT)
+
+/**
+ * PDBHMO_ESP_DFBIT - (Encap) Copy DF bit - if an IPv4 tunnel mode outer IP
+ * header is coming from the PDB, copy the DF bit from the
+ * inner IP header to the outer IP header.
+ */
+#define PDBHMO_ESP_DFBIT (0x04 << PDBHMO_ESP_ENCAP_SHIFT)
+
+/**
+ * PDBHMO_ESP_DFV - (Decap) - DF bit value
+ *
+ * If ODF = 1, DF bit in output frame is replaced by DFV.
+ * Valid only from SEC Era 5 onwards.
+ */
+#define PDBHMO_ESP_DFV (0x04 << PDBHMO_ESP_DECAP_SHIFT)
+
+/**
+ * PDBHMO_ESP_ODF - (Decap) Override DF bit in IPv4 header of decapsulated
+ * output frame.
+ *
+ * If ODF = 1, DF is replaced with the value of DFV bit.
+ * Valid only from SEC Era 5 onwards.
+ */
+#define PDBHMO_ESP_ODF (0x08 << PDBHMO_ESP_DECAP_SHIFT)
+
+/**
+ * struct ipsec_encap_cbc - PDB part for IPsec CBC encapsulation
+ * @iv: 16-byte array initialization vector
+ */
+struct ipsec_encap_cbc {
+ uint8_t iv[16];
+};
+
+
+/**
+ * struct ipsec_encap_ctr - PDB part for IPsec CTR encapsulation
+ * @ctr_nonce: 4-byte array nonce
+ * @ctr_initial: initial count constant
+ * @iv: initialization vector
+ */
+struct ipsec_encap_ctr {
+ uint8_t ctr_nonce[4];
+ uint32_t ctr_initial;
+ uint64_t iv;
+};
+
+/**
+ * struct ipsec_encap_ccm - PDB part for IPsec CCM encapsulation
+ * @salt: 3-byte array salt (lower 24 bits)
+ * @ccm_opt: CCM algorithm options - MSB-LSB description:
+ * b0_flags (8b) - CCM B0; use 0x5B for 8-byte ICV, 0x6B for 12-byte ICV,
+ * 0x7B for 16-byte ICV (cf. RFC4309, RFC3610)
+ * ctr_flags (8b) - counter flags; constant equal to 0x3
+ * ctr_initial (16b) - initial count constant
+ * @iv: initialization vector
+ */
+struct ipsec_encap_ccm {
+ uint8_t salt[4];
+ uint32_t ccm_opt;
+ uint64_t iv;
+};
+
+/**
+ * struct ipsec_encap_gcm - PDB part for IPsec GCM encapsulation
+ * @salt: 3-byte array salt (lower 24 bits)
+ * @rsvd: reserved, do not use
+ * @iv: initialization vector
+ */
+struct ipsec_encap_gcm {
+ uint8_t salt[4];
+ uint32_t rsvd;
+ uint64_t iv;
+};
+
+/**
+ * struct ipsec_encap_pdb - PDB for IPsec encapsulation
+ * @options: MSB-LSB description (both for legacy and new modes)
+ * hmo (header manipulation options) - 4b
+ * reserved - 4b
+ * next header (legacy) / reserved (new) - 8b
+ * next header offset (legacy) / AOIPHO (actual outer IP header offset) - 8b
+ * option flags (depend on selected algorithm) - 8b
+ * @seq_num_ext_hi: (optional) IPsec Extended Sequence Number (ESN)
+ * @seq_num: IPsec sequence number
+ * @spi: IPsec SPI (Security Parameters Index)
+ * @ip_hdr_len: optional IP Header length (in bytes)
+ * reserved - 16b
+ * Opt. IP Hdr Len - 16b
+ * @ip_hdr: optional IP Header content (only for IPsec legacy mode)
+ */
+struct ipsec_encap_pdb {
+ uint32_t options;
+ uint32_t seq_num_ext_hi;
+ uint32_t seq_num;
+ union {
+ struct ipsec_encap_cbc cbc;
+ struct ipsec_encap_ctr ctr;
+ struct ipsec_encap_ccm ccm;
+ struct ipsec_encap_gcm gcm;
+ };
+ uint32_t spi;
+ uint32_t ip_hdr_len;
+ uint8_t ip_hdr[0];
+};
+
+static inline unsigned int
+__rta_copy_ipsec_encap_pdb(struct program *program,
+ struct ipsec_encap_pdb *pdb,
+ uint32_t algtype)
+{
+ unsigned int start_pc = program->current_pc;
+
+ __rta_out32(program, pdb->options);
+ __rta_out32(program, pdb->seq_num_ext_hi);
+ __rta_out32(program, pdb->seq_num);
+
+ switch (algtype & OP_PCL_IPSEC_CIPHER_MASK) {
+ case OP_PCL_IPSEC_DES_IV64:
+ case OP_PCL_IPSEC_DES:
+ case OP_PCL_IPSEC_3DES:
+ case OP_PCL_IPSEC_AES_CBC:
+ case OP_PCL_IPSEC_NULL:
+ rta_copy_data(program, pdb->cbc.iv, sizeof(pdb->cbc.iv));
+ break;
+
+ case OP_PCL_IPSEC_AES_CTR:
+ rta_copy_data(program, pdb->ctr.ctr_nonce,
+ sizeof(pdb->ctr.ctr_nonce));
+ __rta_out32(program, pdb->ctr.ctr_initial);
+ __rta_out64(program, true, pdb->ctr.iv);
+ break;
+
+ case OP_PCL_IPSEC_AES_CCM8:
+ case OP_PCL_IPSEC_AES_CCM12:
+ case OP_PCL_IPSEC_AES_CCM16:
+ rta_copy_data(program, pdb->ccm.salt, sizeof(pdb->ccm.salt));
+ __rta_out32(program, pdb->ccm.ccm_opt);
+ __rta_out64(program, true, pdb->ccm.iv);
+ break;
+
+ case OP_PCL_IPSEC_AES_GCM8:
+ case OP_PCL_IPSEC_AES_GCM12:
+ case OP_PCL_IPSEC_AES_GCM16:
+ case OP_PCL_IPSEC_AES_NULL_WITH_GMAC:
+ rta_copy_data(program, pdb->gcm.salt, sizeof(pdb->gcm.salt));
+ __rta_out32(program, pdb->gcm.rsvd);
+ __rta_out64(program, true, pdb->gcm.iv);
+ break;
+ }
+
+ __rta_out32(program, pdb->spi);
+ __rta_out32(program, pdb->ip_hdr_len);
+
+ return start_pc;
+}
+
+/**
+ * struct ipsec_decap_cbc - PDB part for IPsec CBC decapsulation
+ * @rsvd: reserved, do not use
+ */
+struct ipsec_decap_cbc {
+ uint32_t rsvd[2];
+};
+
+/**
+ * struct ipsec_decap_ctr - PDB part for IPsec CTR decapsulation
+ * @ctr_nonce: 4-byte array nonce
+ * @ctr_initial: initial count constant
+ */
+struct ipsec_decap_ctr {
+ uint8_t ctr_nonce[4];
+ uint32_t ctr_initial;
+};
+
+/**
+ * struct ipsec_decap_ccm - PDB part for IPsec CCM decapsulation
+ * @salt: 3-byte salt (lower 24 bits)
+ * @ccm_opt: CCM algorithm options - MSB-LSB description:
+ * b0_flags (8b) - CCM B0; use 0x5B for 8-byte ICV, 0x6B for 12-byte ICV,
+ * 0x7B for 16-byte ICV (cf. RFC4309, RFC3610)
+ * ctr_flags (8b) - counter flags; constant equal to 0x3
+ * ctr_initial (16b) - initial count constant
+ */
+struct ipsec_decap_ccm {
+ uint8_t salt[4];
+ uint32_t ccm_opt;
+};
+
+/**
+ * struct ipsec_decap_gcm - PDB part for IPsec GCM decapsulation
+ * @salt: 4-byte salt
+ * @rsvd: reserved, do not use
+ */
+struct ipsec_decap_gcm {
+ uint8_t salt[4];
+ uint32_t rsvd;
+};
+
+/**
+ * struct ipsec_decap_pdb - PDB for IPsec decapsulation
+ * @options: MSB-LSB description (both for legacy and new modes)
+ * hmo (header manipulation options) - 4b
+ * IP header length - 12b
+ * next header offset (legacy) / AOIPHO (actual outer IP header offset) - 8b
+ * option flags (depend on selected algorithm) - 8b
+ * @seq_num_ext_hi: (optional) IPsec Extended Sequence Number (ESN)
+ * @seq_num: IPsec sequence number
+ * @anti_replay: Anti-replay window; size depends on ARS (option flags);
+ * format must be Big Endian, irrespective of platform
+ */
+struct ipsec_decap_pdb {
+ uint32_t options;
+ union {
+ struct ipsec_decap_cbc cbc;
+ struct ipsec_decap_ctr ctr;
+ struct ipsec_decap_ccm ccm;
+ struct ipsec_decap_gcm gcm;
+ };
+ uint32_t seq_num_ext_hi;
+ uint32_t seq_num;
+ uint32_t anti_replay[4];
+};
+
+static inline unsigned int
+__rta_copy_ipsec_decap_pdb(struct program *program,
+ struct ipsec_decap_pdb *pdb,
+ uint32_t algtype)
+{
+ unsigned int start_pc = program->current_pc;
+ unsigned int i, ars;
+
+ __rta_out32(program, pdb->options);
+
+ switch (algtype & OP_PCL_IPSEC_CIPHER_MASK) {
+ case OP_PCL_IPSEC_DES_IV64:
+ case OP_PCL_IPSEC_DES:
+ case OP_PCL_IPSEC_3DES:
+ case OP_PCL_IPSEC_AES_CBC:
+ case OP_PCL_IPSEC_NULL:
+ __rta_out32(program, pdb->cbc.rsvd[0]);
+ __rta_out32(program, pdb->cbc.rsvd[1]);
+ break;
+
+ case OP_PCL_IPSEC_AES_CTR:
+ rta_copy_data(program, pdb->ctr.ctr_nonce,
+ sizeof(pdb->ctr.ctr_nonce));
+ __rta_out32(program, pdb->ctr.ctr_initial);
+ break;
+
+ case OP_PCL_IPSEC_AES_CCM8:
+ case OP_PCL_IPSEC_AES_CCM12:
+ case OP_PCL_IPSEC_AES_CCM16:
+ rta_copy_data(program, pdb->ccm.salt, sizeof(pdb->ccm.salt));
+ __rta_out32(program, pdb->ccm.ccm_opt);
+ break;
+
+ case OP_PCL_IPSEC_AES_GCM8:
+ case OP_PCL_IPSEC_AES_GCM12:
+ case OP_PCL_IPSEC_AES_GCM16:
+ case OP_PCL_IPSEC_AES_NULL_WITH_GMAC:
+ rta_copy_data(program, pdb->gcm.salt, sizeof(pdb->gcm.salt));
+ __rta_out32(program, pdb->gcm.rsvd);
+ break;
+ }
+
+ __rta_out32(program, pdb->seq_num_ext_hi);
+ __rta_out32(program, pdb->seq_num);
+
+ switch (pdb->options & PDBOPTS_ESP_ARS_MASK) {
+ case PDBOPTS_ESP_ARS128:
+ ars = 4;
+ break;
+ case PDBOPTS_ESP_ARS64:
+ ars = 2;
+ break;
+ case PDBOPTS_ESP_ARS32:
+ ars = 1;
+ break;
+ case PDBOPTS_ESP_ARSNONE:
+ default:
+ ars = 0;
+ break;
+ }
+
+ for (i = 0; i < ars; i++)
+ __rta_out_be32(program, pdb->anti_replay[i]);
+
+ return start_pc;
+}
+
+/**
+ * enum ipsec_icv_size - Type selectors for icv size in IPsec protocol
+ * @IPSEC_ICV_MD5_SIZE: full-length MD5 ICV
+ * @IPSEC_ICV_MD5_TRUNC_SIZE: truncated MD5 ICV
+ */
+enum ipsec_icv_size {
+ IPSEC_ICV_MD5_SIZE = 16,
+ IPSEC_ICV_MD5_TRUNC_SIZE = 12
+};
+
+/*
+ * IPSec ESP Datapath Protocol Override Register (DPOVRD)
+ */
+
+#define IPSEC_DECO_DPOVRD_USE 0x80
+
+struct ipsec_deco_dpovrd {
+ uint8_t ovrd_ecn;
+ uint8_t ip_hdr_len;
+ uint8_t nh_offset;
+ union {
+ uint8_t next_header; /* next header if encap */
+ uint8_t rsvd; /* reserved if decap */
+ };
+};
+
+struct ipsec_new_encap_deco_dpovrd {
+#define IPSEC_NEW_ENCAP_DECO_DPOVRD_USE 0x8000
+ uint16_t ovrd_ip_hdr_len; /* OVRD + outer IP header material
+ * length
+ */
+#define IPSEC_NEW_ENCAP_OIMIF 0x80
+ uint8_t oimif_aoipho; /* OIMIF + actual outer IP header
+ * offset
+ */
+ uint8_t rsvd;
+};
+
+struct ipsec_new_decap_deco_dpovrd {
+ uint8_t ovrd;
+ uint8_t aoipho_hi; /* upper nibble of actual outer IP
+ * header
+ */
+ uint16_t aoipho_lo_ip_hdr_len; /* lower nibble of actual outer IP
+ * header + outer IP header material
+ */
+};
+
+static inline void
+__gen_auth_key(struct program *program, struct alginfo *authdata)
+{
+ uint32_t dkp_protid;
+
+ switch (authdata->algtype & OP_PCL_IPSEC_AUTH_MASK) {
+ case OP_PCL_IPSEC_HMAC_MD5_96:
+ case OP_PCL_IPSEC_HMAC_MD5_128:
+ dkp_protid = OP_PCLID_DKP_MD5;
+ break;
+ case OP_PCL_IPSEC_HMAC_SHA1_96:
+ case OP_PCL_IPSEC_HMAC_SHA1_160:
+ dkp_protid = OP_PCLID_DKP_SHA1;
+ break;
+ case OP_PCL_IPSEC_HMAC_SHA2_256_128:
+ dkp_protid = OP_PCLID_DKP_SHA256;
+ break;
+ case OP_PCL_IPSEC_HMAC_SHA2_384_192:
+ dkp_protid = OP_PCLID_DKP_SHA384;
+ break;
+ case OP_PCL_IPSEC_HMAC_SHA2_512_256:
+ dkp_protid = OP_PCLID_DKP_SHA512;
+ break;
+ default:
+ KEY(program, KEY2, authdata->key_enc_flags, authdata->key,
+ authdata->keylen, INLINE_KEY(authdata));
+ return;
+ }
+
+ if (authdata->key_type == RTA_DATA_PTR)
+ DKP_PROTOCOL(program, dkp_protid, OP_PCL_DKP_SRC_PTR,
+ OP_PCL_DKP_DST_PTR, (uint16_t)authdata->keylen,
+ authdata->key, authdata->key_type);
+ else
+ DKP_PROTOCOL(program, dkp_protid, OP_PCL_DKP_SRC_IMM,
+ OP_PCL_DKP_DST_IMM, (uint16_t)authdata->keylen,
+ authdata->key, authdata->key_type);
+}
+
+/**
+ * cnstr_shdsc_ipsec_encap - IPSec ESP encapsulation protocol-level shared
+ * descriptor.
+ * @descbuf: pointer to buffer used for descriptor construction
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: if true, perform descriptor byte swapping on a 4-byte boundary
+ * @pdb: pointer to the PDB to be used with this descriptor
+ * This structure will be copied inline to the descriptor under
+ * construction. No error checking will be made. Refer to the
+ * block guide for details of the encapsulation PDB.
+ * @cipherdata: pointer to block cipher transform definitions
+ * Valid algorithm values - one of OP_PCL_IPSEC_*
+ * @authdata: pointer to authentication transform definitions
+ * If an authentication key is required by the protocol:
+ * -For SEC Eras 1-5, an MDHA split key must be provided;
+ * Note that the size of the split key itself must be specified.
+ * -For SEC Eras 6+, a "normal" key must be provided; DKP (Derived
+ * Key Protocol) will be used to compute MDHA on the fly in HW.
+ * Valid algorithm values - one of OP_PCL_IPSEC_*
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_ipsec_encap(uint32_t *descbuf, bool ps, bool swap,
+ struct ipsec_encap_pdb *pdb,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata)
+{
+ struct program prg;
+ struct program *p = &prg;
+
+ LABEL(keyjmp);
+ REFERENCE(pkeyjmp);
+ LABEL(hdr);
+ REFERENCE(phdr);
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+ phdr = SHR_HDR(p, SHR_SERIAL, hdr, 0);
+ __rta_copy_ipsec_encap_pdb(p, pdb, cipherdata->algtype);
+ COPY_DATA(p, pdb->ip_hdr, pdb->ip_hdr_len);
+ SET_LABEL(p, hdr);
+ pkeyjmp = JUMP(p, keyjmp, LOCAL_JUMP, ALL_TRUE, BOTH|SHRD);
+ if (authdata->keylen) {
+ if (rta_sec_era < RTA_SEC_ERA_6)
+ KEY(p, MDHA_SPLIT_KEY, authdata->key_enc_flags,
+ authdata->key, authdata->keylen,
+ INLINE_KEY(authdata));
+ else
+ __gen_auth_key(p, authdata);
+ }
+ if (cipherdata->keylen)
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+ SET_LABEL(p, keyjmp);
+ PROTOCOL(p, OP_TYPE_ENCAP_PROTOCOL,
+ OP_PCLID_IPSEC,
+ (uint16_t)(cipherdata->algtype | authdata->algtype));
+ PATCH_JUMP(p, pkeyjmp, keyjmp);
+ PATCH_HDR(p, phdr, hdr);
+ return PROGRAM_FINALIZE(p);
+}
+
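+/*
+ * Example: a minimal usage sketch for cnstr_shdsc_ipsec_encap(). The PDB
+ * option bits, SPI, sequence number, key buffers and the AES-CBC +
+ * HMAC-SHA1-96 algorithm selection are placeholder assumptions; consult the
+ * block guide for the full PDB layout, and note that on SEC Eras 1-5 the
+ * authentication key must be an MDHA split key as documented above.
+ *
+ *	uint32_t shdesc[64];
+ *	uint8_t enc_key[16], auth_key[20];
+ *	struct ipsec_encap_pdb pdb;
+ *
+ *	memset(&pdb, 0, sizeof(pdb));
+ *	pdb.options = PDBOPTS_ESP_TUNNEL;
+ *	pdb.spi = 0x1234;
+ *	pdb.seq_num = 1;
+ *
+ *	struct alginfo cipherdata = {
+ *		.algtype = OP_PCL_IPSEC_AES_CBC,
+ *		.key = (uintptr_t)enc_key,
+ *		.keylen = sizeof(enc_key),
+ *		.key_enc_flags = 0,
+ *		.key_type = RTA_DATA_IMM,
+ *	};
+ *	struct alginfo authdata = {
+ *		.algtype = OP_PCL_IPSEC_HMAC_SHA1_96,
+ *		.key = (uintptr_t)auth_key,
+ *		.keylen = sizeof(auth_key),
+ *		.key_enc_flags = 0,
+ *		.key_type = RTA_DATA_IMM,
+ *	};
+ *	int len = cnstr_shdsc_ipsec_encap(shdesc, false, false, &pdb,
+ *					  &cipherdata, &authdata);
+ */
+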
+/**
+ * cnstr_shdsc_ipsec_decap - IPSec ESP decapsulation protocol-level shared
+ * descriptor.
+ * @descbuf: pointer to buffer used for descriptor construction
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: if true, perform descriptor byte swapping on a 4-byte boundary
+ * @pdb: pointer to the PDB to be used with this descriptor
+ * This structure will be copied inline to the descriptor under
+ * construction. No error checking will be made. Refer to the
+ * block guide for details about the decapsulation PDB.
+ * @cipherdata: pointer to block cipher transform definitions.
+ * Valid algorithm values - one of OP_PCL_IPSEC_*
+ * @authdata: pointer to authentication transform definitions
+ * If an authentication key is required by the protocol:
+ * -For SEC Eras 1-5, an MDHA split key must be provided;
+ * Note that the size of the split key itself must be specified.
+ * -For SEC Eras 6+, a "normal" key must be provided; DKP (Derived
+ * Key Protocol) will be used to compute MDHA on the fly in HW.
+ * Valid algorithm values - one of OP_PCL_IPSEC_*
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_ipsec_decap(uint32_t *descbuf, bool ps, bool swap,
+ struct ipsec_decap_pdb *pdb,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata)
+{
+ struct program prg;
+ struct program *p = &prg;
+
+ LABEL(keyjmp);
+ REFERENCE(pkeyjmp);
+ LABEL(hdr);
+ REFERENCE(phdr);
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+ phdr = SHR_HDR(p, SHR_SERIAL, hdr, 0);
+ __rta_copy_ipsec_decap_pdb(p, pdb, cipherdata->algtype);
+ SET_LABEL(p, hdr);
+ pkeyjmp = JUMP(p, keyjmp, LOCAL_JUMP, ALL_TRUE, BOTH|SHRD);
+ if (authdata->keylen) {
+ if (rta_sec_era < RTA_SEC_ERA_6)
+ KEY(p, MDHA_SPLIT_KEY, authdata->key_enc_flags,
+ authdata->key, authdata->keylen,
+ INLINE_KEY(authdata));
+ else
+ __gen_auth_key(p, authdata);
+ }
+ if (cipherdata->keylen)
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+ SET_LABEL(p, keyjmp);
+ PROTOCOL(p, OP_TYPE_DECAP_PROTOCOL,
+ OP_PCLID_IPSEC,
+ (uint16_t)(cipherdata->algtype | authdata->algtype));
+ PATCH_JUMP(p, pkeyjmp, keyjmp);
+ PATCH_HDR(p, phdr, hdr);
+ return PROGRAM_FINALIZE(p);
+}
+
+/**
+ * cnstr_shdsc_ipsec_encap_des_aes_xcbc - IPSec DES-CBC/3DES-CBC and
+ * AES-XCBC-MAC-96 ESP encapsulation shared descriptor.
+ * @descbuf: pointer to buffer used for descriptor construction
+ * @pdb: pointer to the PDB to be used with this descriptor
+ * This structure will be copied inline to the descriptor under
+ * construction. No error checking will be made. Refer to the
+ * block guide for details of the encapsulation PDB.
+ * @cipherdata: pointer to block cipher transform definitions
+ * Valid algorithm values - OP_PCL_IPSEC_DES, OP_PCL_IPSEC_3DES.
+ * @authdata: pointer to authentication transform definitions
+ * Valid algorithm value: OP_PCL_IPSEC_AES_XCBC_MAC_96.
+ *
+ * Supported only for platforms with 32-bit address pointers and SEC ERA 4 or
+ * higher. The tunnel/transport mode of the IPsec ESP is supported only if the
+ * Outer/Transport IP Header is present in the encapsulation output packet.
+ * The descriptor performs DES-CBC/3DES-CBC & HMAC-MD5-96 and then rereads
+ * the input packet to do the AES-XCBC-MAC-96 calculation and to overwrite
+ * the MD5 ICV.
+ * The descriptor uses all the benefits of the built-in protocol by computing
+ * the IPsec ESP with a hardware supported algorithms combination
+ * (DES-CBC/3DES-CBC & HMAC-MD5-96). The HMAC-MD5 authentication algorithm
+ * was chosen in order to speed up the computational time for this intermediate
+ * step.
+ * Warning: The user must allocate at least 32 bytes for the authentication key
+ * (in order to use it also with HMAC-MD5-96), even when using a shorter key
+ * for the AES-XCBC-MAC-96.
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_ipsec_encap_des_aes_xcbc(uint32_t *descbuf,
+ struct ipsec_encap_pdb *pdb,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata)
+{
+ struct program prg;
+ struct program *p = &prg;
+
+ LABEL(hdr);
+ LABEL(shd_ptr);
+ LABEL(keyjump);
+ LABEL(outptr);
+ LABEL(swapped_seqin_fields);
+ LABEL(swapped_seqin_ptr);
+ REFERENCE(phdr);
+ REFERENCE(pkeyjump);
+ REFERENCE(move_outlen);
+ REFERENCE(move_seqout_ptr);
+ REFERENCE(swapped_seqin_ptr_jump);
+ REFERENCE(write_swapped_seqin_ptr);
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ phdr = SHR_HDR(p, SHR_SERIAL, hdr, 0);
+ __rta_copy_ipsec_encap_pdb(p, pdb, cipherdata->algtype);
+ COPY_DATA(p, pdb->ip_hdr, pdb->ip_hdr_len);
+ SET_LABEL(p, hdr);
+ pkeyjump = JUMP(p, keyjump, LOCAL_JUMP, ALL_TRUE, SHRD | SELF);
+ /*
+ * Hard-coded KEY arguments. The descriptor uses all the benefits of
+ * the built-in protocol by computing the IPsec ESP with a hardware
+ * supported algorithms combination (DES-CBC/3DES-CBC & HMAC-MD5-96).
+ * The HMAC-MD5 authentication algorithm was chosen with
+ * the key options below in order to speed up the computational
+ * time for this intermediate step.
+ * Warning: The user must allocate at least 32 bytes for
+ * the authentication key (in order to use it also with HMAC-MD5-96),
+ * even when using a shorter key for the AES-XCBC-MAC-96.
+ */
+ KEY(p, MDHA_SPLIT_KEY, 0, authdata->key, 32, INLINE_KEY(authdata));
+ SET_LABEL(p, keyjump);
+ LOAD(p, LDST_SRCDST_WORD_CLRW | CLRW_CLR_C1MODE | CLRW_CLR_C1DATAS |
+ CLRW_CLR_C1CTX | CLRW_CLR_C1KEY | CLRW_RESET_CLS1_CHA, CLRW, 0, 4,
+ IMMED);
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+ PROTOCOL(p, OP_TYPE_ENCAP_PROTOCOL, OP_PCLID_IPSEC,
+ (uint16_t)(cipherdata->algtype | OP_PCL_IPSEC_HMAC_MD5_96));
+ /* Swap SEQINPTR to SEQOUTPTR. */
+ move_seqout_ptr = MOVE(p, DESCBUF, 0, MATH1, 0, 16, WAITCOMP | IMMED);
+ MATHB(p, MATH1, AND, ~(CMD_SEQ_IN_PTR ^ CMD_SEQ_OUT_PTR), MATH1,
+ 8, IFB | IMMED2);
+/*
+ * TODO: RTA currently doesn't support creating a LOAD command
+ * with another command as IMM.
+ * To be changed when proper support is added in RTA.
+ */
+ LOAD(p, 0xa00000e5, MATH3, 4, 4, IMMED);
+ MATHB(p, MATH3, SHLD, MATH3, MATH3, 8, 0);
+ write_swapped_seqin_ptr = MOVE(p, MATH1, 0, DESCBUF, 0, 20, WAITCOMP |
+ IMMED);
+ swapped_seqin_ptr_jump = JUMP(p, swapped_seqin_ptr, LOCAL_JUMP,
+ ALL_TRUE, 0);
+ LOAD(p, LDST_SRCDST_WORD_CLRW | CLRW_CLR_C1MODE | CLRW_CLR_C1DATAS |
+ CLRW_CLR_C1CTX | CLRW_CLR_C1KEY | CLRW_RESET_CLS1_CHA, CLRW, 0, 4,
+ 0);
+ SEQOUTPTR(p, 0, 65535, RTO);
+ move_outlen = MOVE(p, DESCBUF, 0, MATH0, 4, 8, WAITCOMP | IMMED);
+ MATHB(p, MATH0, SUB,
+ (uint64_t)(pdb->ip_hdr_len + IPSEC_ICV_MD5_TRUNC_SIZE),
+ VSEQINSZ, 4, IMMED2);
+ MATHB(p, MATH0, SUB, IPSEC_ICV_MD5_TRUNC_SIZE, VSEQOUTSZ, 4, IMMED2);
+ KEY(p, KEY1, authdata->key_enc_flags, authdata->key, authdata->keylen,
+ 0);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_AES, OP_ALG_AAI_XCBC_MAC,
+ OP_ALG_AS_INITFINAL, ICV_CHECK_DISABLE, DIR_ENC);
+ SEQFIFOLOAD(p, SKIP, pdb->ip_hdr_len, 0);
+ SEQFIFOLOAD(p, MSG1, 0, VLF | FLUSH1 | LAST1);
+ SEQFIFOSTORE(p, SKIP, 0, 0, VLF);
+ SEQSTORE(p, CONTEXT1, 0, IPSEC_ICV_MD5_TRUNC_SIZE, 0);
+/*
+ * TODO: RTA currently doesn't support adding labels in or after Job Descriptor.
+ * To be changed when proper support is added in RTA.
+ */
+ /* Label the Shared Descriptor Pointer */
+ SET_LABEL(p, shd_ptr);
+ shd_ptr += 1;
+ /* Label the Output Pointer */
+ SET_LABEL(p, outptr);
+ outptr += 3;
+ /* Label the first word after JD */
+ SET_LABEL(p, swapped_seqin_fields);
+ swapped_seqin_fields += 8;
+ /* Label the second word after JD */
+ SET_LABEL(p, swapped_seqin_ptr);
+ swapped_seqin_ptr += 9;
+
+ PATCH_HDR(p, phdr, hdr);
+ PATCH_JUMP(p, pkeyjump, keyjump);
+ PATCH_JUMP(p, swapped_seqin_ptr_jump, swapped_seqin_ptr);
+ PATCH_MOVE(p, move_outlen, outptr);
+ PATCH_MOVE(p, move_seqout_ptr, shd_ptr);
+ PATCH_MOVE(p, write_swapped_seqin_ptr, swapped_seqin_fields);
+ return PROGRAM_FINALIZE(p);
+}
+
+/**
+ * cnstr_shdsc_ipsec_decap_des_aes_xcbc - IPSec DES-CBC/3DES-CBC and
+ * AES-XCBC-MAC-96 ESP decapsulation shared descriptor.
+ * @descbuf: pointer to buffer used for descriptor construction
+ * @pdb: pointer to the PDB to be used with this descriptor
+ * This structure will be copied inline to the descriptor under
+ * construction. No error checking will be made. Refer to the
+ * block guide for details of the encapsulation PDB.
+ * @cipherdata: pointer to block cipher transform definitions
+ * Valid algorithm values - OP_PCL_IPSEC_DES, OP_PCL_IPSEC_3DES.
+ * @authdata: pointer to authentication transform definitions
+ * Valid algorithm value: OP_PCL_IPSEC_AES_XCBC_MAC_96.
+ *
+ * Supported only for platforms with 32-bit address pointers and SEC ERA 4 or
+ * higher. The tunnel/transport mode of the IPsec ESP is supported only if the
+ * Outer/Transport IP Header is present in the decapsulation input packet.
+ * The descriptor computes the AES-XCBC-MAC-96 to check if the received ICV
+ * is correct, rereads the input packet to compute the MD5 ICV, overwrites
+ * the XCBC ICV, and then sends the modified input packet to the
+ * DES-CBC/3DES-CBC & HMAC-MD5-96 IPsec.
+ * The descriptor uses all the benefits of the built-in protocol by computing
+ * the IPsec ESP with a hardware supported algorithms combination
+ * (DES-CBC/3DES-CBC & HMAC-MD5-96). The HMAC-MD5 authentication algorithm
+ * was chosen in order to speed up the computational time for this intermediate
+ * step.
+ * Warning: The user must allocate at least 32 bytes for the authentication key
+ * (in order to use it also with HMAC-MD5-96), even when using a shorter key
+ * for the AES-XCBC-MAC-96.
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_ipsec_decap_des_aes_xcbc(uint32_t *descbuf,
+ struct ipsec_decap_pdb *pdb,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata)
+{
+ struct program prg;
+ struct program *p = &prg;
+ uint32_t ip_hdr_len = (pdb->options & PDBHDRLEN_MASK) >>
+ PDBHDRLEN_ESP_DECAP_SHIFT;
+
+ LABEL(hdr);
+ LABEL(jump_cmd);
+ LABEL(keyjump);
+ LABEL(outlen);
+ LABEL(seqin_ptr);
+ LABEL(seqout_ptr);
+ LABEL(swapped_seqout_fields);
+ LABEL(swapped_seqout_ptr);
+ REFERENCE(seqout_ptr_jump);
+ REFERENCE(phdr);
+ REFERENCE(pkeyjump);
+ REFERENCE(move_jump);
+ REFERENCE(move_jump_back);
+ REFERENCE(move_seqin_ptr);
+ REFERENCE(swapped_seqout_ptr_jump);
+ REFERENCE(write_swapped_seqout_ptr);
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ phdr = SHR_HDR(p, SHR_SERIAL, hdr, 0);
+ __rta_copy_ipsec_decap_pdb(p, pdb, cipherdata->algtype);
+ SET_LABEL(p, hdr);
+ pkeyjump = JUMP(p, keyjump, LOCAL_JUMP, ALL_TRUE, SHRD | SELF);
+ /*
+ * Hard-coded KEY arguments. The descriptor uses all the benefits of
+ * the built-in protocol by computing the IPsec ESP with a hardware
+ * supported algorithms combination (DES-CBC/3DES-CBC & HMAC-MD5-96).
+ * The HMAC-MD5 authentication algorithm was chosen with
+ * the key options below in order to speed up the computational
+ * time for this intermediate step.
+ * Warning: The user must allocate at least 32 bytes for
+ * the authentication key (in order to use it also with HMAC-MD5-96),
+ * even when using a shorter key for the AES-XCBC-MAC-96.
+ */
+ KEY(p, MDHA_SPLIT_KEY, 0, authdata->key, 32, INLINE_KEY(authdata));
+ SET_LABEL(p, keyjump);
+ LOAD(p, LDST_SRCDST_WORD_CLRW | CLRW_CLR_C1MODE | CLRW_CLR_C1DATAS |
+ CLRW_CLR_C1CTX | CLRW_CLR_C1KEY | CLRW_RESET_CLS1_CHA, CLRW, 0, 4,
+ 0);
+ KEY(p, KEY1, authdata->key_enc_flags, authdata->key, authdata->keylen,
+ INLINE_KEY(authdata));
+ MATHB(p, SEQINSZ, SUB,
+ (uint64_t)(ip_hdr_len + IPSEC_ICV_MD5_TRUNC_SIZE), MATH0, 4,
+ IMMED2);
+ MATHB(p, MATH0, SUB, ZERO, VSEQINSZ, 4, 0);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_MD5, OP_ALG_AAI_HMAC_PRECOMP,
+ OP_ALG_AS_INITFINAL, ICV_CHECK_DISABLE, DIR_ENC);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_AES, OP_ALG_AAI_XCBC_MAC,
+ OP_ALG_AS_INITFINAL, ICV_CHECK_ENABLE, DIR_DEC);
+ SEQFIFOLOAD(p, SKIP, ip_hdr_len, 0);
+ SEQFIFOLOAD(p, MSG1, 0, VLF | FLUSH1);
+ SEQFIFOLOAD(p, ICV1, IPSEC_ICV_MD5_TRUNC_SIZE, FLUSH1 | LAST1);
+ /* Swap SEQOUTPTR to SEQINPTR. */
+ move_seqin_ptr = MOVE(p, DESCBUF, 0, MATH1, 0, 16, WAITCOMP | IMMED);
+ MATHB(p, MATH1, OR, CMD_SEQ_IN_PTR ^ CMD_SEQ_OUT_PTR, MATH1, 8,
+ IFB | IMMED2);
+/*
+ * TODO: RTA currently doesn't support creating a LOAD command
+ * with another command as IMM.
+ * To be changed when proper support is added in RTA.
+ */
+ LOAD(p, 0xA00000e1, MATH3, 4, 4, IMMED);
+ MATHB(p, MATH3, SHLD, MATH3, MATH3, 8, 0);
+ write_swapped_seqout_ptr = MOVE(p, MATH1, 0, DESCBUF, 0, 20, WAITCOMP |
+ IMMED);
+ swapped_seqout_ptr_jump = JUMP(p, swapped_seqout_ptr, LOCAL_JUMP,
+ ALL_TRUE, 0);
+/*
+ * TODO: To be changed when proper support is added in RTA (a command that is
+ * also written by RTA can't currently be loaded).
+ */
+ SET_LABEL(p, jump_cmd);
+ WORD(p, 0xA00000f3);
+ SEQINPTR(p, 0, 65535, RTO);
+ MATHB(p, MATH0, SUB, ZERO, VSEQINSZ, 4, 0);
+ MATHB(p, MATH0, ADD, ip_hdr_len, VSEQOUTSZ, 4, IMMED2);
+ move_jump = MOVE(p, DESCBUF, 0, OFIFO, 0, 8, WAITCOMP | IMMED);
+ move_jump_back = MOVE(p, OFIFO, 0, DESCBUF, 0, 8, IMMED);
+ SEQFIFOLOAD(p, SKIP, ip_hdr_len, 0);
+ SEQFIFOLOAD(p, MSG2, 0, VLF | LAST2);
+ SEQFIFOSTORE(p, SKIP, 0, 0, VLF);
+ SEQSTORE(p, CONTEXT2, 0, IPSEC_ICV_MD5_TRUNC_SIZE, 0);
+ seqout_ptr_jump = JUMP(p, seqout_ptr, LOCAL_JUMP, ALL_TRUE, CALM);
+
+ LOAD(p, LDST_SRCDST_WORD_CLRW | CLRW_CLR_C1MODE | CLRW_CLR_C1DATAS |
+ CLRW_CLR_C1CTX | CLRW_CLR_C1KEY | CLRW_CLR_C2MODE |
+ CLRW_CLR_C2DATAS | CLRW_CLR_C2CTX | CLRW_RESET_CLS1_CHA, CLRW, 0,
+ 4, 0);
+ SEQINPTR(p, 0, 65535, RTO);
+ MATHB(p, MATH0, ADD,
+ (uint64_t)(ip_hdr_len + IPSEC_ICV_MD5_TRUNC_SIZE), SEQINSZ, 4,
+ IMMED2);
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+ PROTOCOL(p, OP_TYPE_DECAP_PROTOCOL, OP_PCLID_IPSEC,
+ (uint16_t)(cipherdata->algtype | OP_PCL_IPSEC_HMAC_MD5_96));
+/*
+ * TODO: RTA currently doesn't support adding labels in or after Job Descriptor.
+ * To be changed when proper support is added in RTA.
+ */
+ /* Label the SEQ OUT PTR */
+ SET_LABEL(p, seqout_ptr);
+ seqout_ptr += 2;
+ /* Label the Output Length */
+ SET_LABEL(p, outlen);
+ outlen += 4;
+ /* Label the SEQ IN PTR */
+ SET_LABEL(p, seqin_ptr);
+ seqin_ptr += 5;
+ /* Label the first word after JD */
+ SET_LABEL(p, swapped_seqout_fields);
+ swapped_seqout_fields += 8;
+ /* Label the second word after JD */
+ SET_LABEL(p, swapped_seqout_ptr);
+ swapped_seqout_ptr += 9;
+
+ PATCH_HDR(p, phdr, hdr);
+ PATCH_JUMP(p, pkeyjump, keyjump);
+ PATCH_JUMP(p, seqout_ptr_jump, seqout_ptr);
+ PATCH_JUMP(p, swapped_seqout_ptr_jump, swapped_seqout_ptr);
+ PATCH_MOVE(p, move_jump, jump_cmd);
+ PATCH_MOVE(p, move_jump_back, seqin_ptr);
+ PATCH_MOVE(p, move_seqin_ptr, outlen);
+ PATCH_MOVE(p, write_swapped_seqout_ptr, swapped_seqout_fields);
+ return PROGRAM_FINALIZE(p);
+}
+
+/**
+ * IPSEC_NEW_ENC_BASE_DESC_LEN - IPsec new mode encap shared descriptor length
+ *
+ * Accounts only for the "base" commands and is intended to be used by upper
+ * layers to determine whether Outer IP Header and/or keys can be inlined or
+ * not. To be used as first parameter of rta_inline_query().
+ */
+#define IPSEC_NEW_ENC_BASE_DESC_LEN (5 * CAAM_CMD_SZ + \
+ sizeof(struct ipsec_encap_pdb))
+
+/**
+ * IPSEC_NEW_NULL_ENC_BASE_DESC_LEN - IPsec new mode encap shared descriptor
+ * length for the case of
+ * NULL encryption / authentication
+ *
+ * Accounts only for the "base" commands and is intended to be used by upper
+ * layers to determine whether Outer IP Header and/or key can be inlined or
+ * not. To be used as first parameter of rta_inline_query().
+ */
+#define IPSEC_NEW_NULL_ENC_BASE_DESC_LEN (4 * CAAM_CMD_SZ + \
+ sizeof(struct ipsec_encap_pdb))
+
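+/*
+ * Example: a minimal sketch of combining IPSEC_NEW_ENC_BASE_DESC_LEN with
+ * rta_inline_query() to decide whether the cipher key of an IPsec new mode
+ * encap descriptor can be inlined. The job descriptor length (7 * CAAM_CMD_SZ)
+ * is a placeholder assumption, and cipherdata is assumed to be a struct
+ * alginfo prepared for cnstr_shdsc_ipsec_new_encap(). When RTA_DATA_PTR is
+ * selected, @key must hold a physical address, as documented in struct
+ * alginfo.
+ *
+ *	unsigned int data_len[1] = { cipherdata.keylen };
+ *	uint32_t inl_mask = 0;
+ *
+ *	if (rta_inline_query(IPSEC_NEW_ENC_BASE_DESC_LEN, 7 * CAAM_CMD_SZ,
+ *			     data_len, &inl_mask, 1) == 0 &&
+ *	    (inl_mask & (1 << 0)))
+ *		cipherdata.key_type = RTA_DATA_IMM;
+ *	else
+ *		cipherdata.key_type = RTA_DATA_PTR;
+ */
+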
+/**
+ * cnstr_shdsc_ipsec_new_encap - IPSec new mode ESP encapsulation
+ * protocol-level shared descriptor.
+ * @descbuf: pointer to buffer used for descriptor construction
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @pdb: pointer to the PDB to be used with this descriptor
+ * This structure will be copied inline to the descriptor under
+ * construction. No error checking will be made. Refer to the
+ * block guide for details about the encapsulation PDB.
+ * @opt_ip_hdr: pointer to Optional IP Header
+ * -if OIHI = PDBOPTS_ESP_OIHI_PDB_INL, opt_ip_hdr points to the buffer to
+ * be inlined in the PDB. Number of bytes (buffer size) copied is provided
+ * in pdb->ip_hdr_len.
+ * -if OIHI = PDBOPTS_ESP_OIHI_PDB_REF, opt_ip_hdr points to the address of
+ * the Optional IP Header. The address will be inlined in the PDB verbatim.
+ * -for other values of OIHI options field, opt_ip_hdr is not used.
+ * @cipherdata: pointer to block cipher transform definitions
+ * Valid algorithm values - one of OP_PCL_IPSEC_*
+ * @authdata: pointer to authentication transform definitions.
+ * If an authentication key is required by the protocol, a "normal"
+ * key must be provided; DKP (Derived Key Protocol) will be used to
+ * compute MDHA on the fly in HW.
+ * Valid algorithm values - one of OP_PCL_IPSEC_*
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_ipsec_new_encap(uint32_t *descbuf, bool ps,
+ bool swap,
+ struct ipsec_encap_pdb *pdb,
+ uint8_t *opt_ip_hdr,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata)
+{
+ struct program prg;
+ struct program *p = &prg;
+
+ LABEL(keyjmp);
+ REFERENCE(pkeyjmp);
+ LABEL(hdr);
+ REFERENCE(phdr);
+
+ if (rta_sec_era < RTA_SEC_ERA_8) {
+ pr_err("IPsec new mode encap: available only for Era %d or above\n",
+ USER_SEC_ERA(RTA_SEC_ERA_8));
+ return -ENOTSUP;
+ }
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+ phdr = SHR_HDR(p, SHR_SERIAL, hdr, 0);
+
+ __rta_copy_ipsec_encap_pdb(p, pdb, cipherdata->algtype);
+
+ switch (pdb->options & PDBOPTS_ESP_OIHI_MASK) {
+ case PDBOPTS_ESP_OIHI_PDB_INL:
+ COPY_DATA(p, opt_ip_hdr, pdb->ip_hdr_len);
+ break;
+ case PDBOPTS_ESP_OIHI_PDB_REF:
+ if (ps)
+ COPY_DATA(p, opt_ip_hdr, 8);
+ else
+ COPY_DATA(p, opt_ip_hdr, 4);
+ break;
+ default:
+ break;
+ }
+ SET_LABEL(p, hdr);
+
+ pkeyjmp = JUMP(p, keyjmp, LOCAL_JUMP, ALL_TRUE, SHRD);
+ if (authdata->keylen)
+ __gen_auth_key(p, authdata);
+ if (cipherdata->keylen)
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+ SET_LABEL(p, keyjmp);
+ PROTOCOL(p, OP_TYPE_ENCAP_PROTOCOL,
+ OP_PCLID_IPSEC_NEW,
+ (uint16_t)(cipherdata->algtype | authdata->algtype));
+ PATCH_JUMP(p, pkeyjmp, keyjmp);
+ PATCH_HDR(p, phdr, hdr);
+ return PROGRAM_FINALIZE(p);
+}
+
+/**
+ * IPSEC_NEW_DEC_BASE_DESC_LEN - IPsec new mode decap shared descriptor length
+ *
+ * Accounts only for the "base" commands and is intended to be used by upper
+ * layers to determine whether keys can be inlined or not. To be used as first
+ * parameter of rta_inline_query().
+ */
+#define IPSEC_NEW_DEC_BASE_DESC_LEN (5 * CAAM_CMD_SZ + \
+ sizeof(struct ipsec_decap_pdb))
+
+/**
+ * IPSEC_NEW_NULL_DEC_BASE_DESC_LEN - IPsec new mode decap shared descriptor
+ * length for the case of
+ * NULL decryption / authentication
+ *
+ * Accounts only for the "base" commands and is intended to be used by upper
+ * layers to determine whether key can be inlined or not. To be used as first
+ * parameter of rta_inline_query().
+ */
+#define IPSEC_NEW_NULL_DEC_BASE_DESC_LEN (4 * CAAM_CMD_SZ + \
+ sizeof(struct ipsec_decap_pdb))
+
+/**
+ * cnstr_shdsc_ipsec_new_decap - IPSec new mode ESP decapsulation protocol-level
+ * shared descriptor.
+ * @descbuf: pointer to buffer used for descriptor construction
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @pdb: pointer to the PDB to be used with this descriptor
+ * This structure will be copied inline to the descriptor under
+ * construction. No error checking will be made. Refer to the
+ * block guide for details about the decapsulation PDB.
+ * @cipherdata: pointer to block cipher transform definitions
+ * Valid algorithm values - one of OP_PCL_IPSEC_*
+ * @authdata: pointer to authentication transform definitions.
+ * If an authentication key is required by the protocol, a "normal"
+ * key must be provided; DKP (Derived Key Protocol) will be used to
+ * compute MDHA on the fly in HW.
+ * Valid algorithm values - one of OP_PCL_IPSEC_*
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_ipsec_new_decap(uint32_t *descbuf, bool ps,
+ bool swap,
+ struct ipsec_decap_pdb *pdb,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata)
+{
+ struct program prg;
+ struct program *p = &prg;
+
+ LABEL(keyjmp);
+ REFERENCE(pkeyjmp);
+ LABEL(hdr);
+ REFERENCE(phdr);
+
+ if (rta_sec_era < RTA_SEC_ERA_8) {
+ pr_err("IPsec new mode decap: available only for Era %d or above\n",
+ USER_SEC_ERA(RTA_SEC_ERA_8));
+ return -ENOTSUP;
+ }
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+ phdr = SHR_HDR(p, SHR_SERIAL, hdr, 0);
+ __rta_copy_ipsec_decap_pdb(p, pdb, cipherdata->algtype);
+ SET_LABEL(p, hdr);
+ pkeyjmp = JUMP(p, keyjmp, LOCAL_JUMP, ALL_TRUE, SHRD);
+ if (authdata->keylen)
+ __gen_auth_key(p, authdata);
+ if (cipherdata->keylen)
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+ SET_LABEL(p, keyjmp);
+ PROTOCOL(p, OP_TYPE_DECAP_PROTOCOL,
+ OP_PCLID_IPSEC_NEW,
+ (uint16_t)(cipherdata->algtype | authdata->algtype));
+ PATCH_JUMP(p, pkeyjmp, keyjmp);
+ PATCH_HDR(p, phdr, hdr);
+ return PROGRAM_FINALIZE(p);
+}
+
+/**
+ * IPSEC_AUTH_VAR_BASE_DESC_LEN - IPsec encap/decap shared descriptor length
+ * for the case of variable-length authentication-only
+ * data.
+ * Note: Only for SoCs with SEC_ERA >= 3.
+ *
+ * Accounts only for the "base" commands and is intended to be used by upper
+ * layers to determine whether keys can be inlined or not. To be used as first
+ * parameter of rta_inline_query().
+ */
+#define IPSEC_AUTH_VAR_BASE_DESC_LEN (27 * CAAM_CMD_SZ)
+
+/**
+ * IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN - IPsec AES decap shared descriptor
+ * length for variable-length authentication-only
+ * data.
+ * Note: Only for SoCs with SEC_ERA >= 3.
+ *
+ * Accounts only for the "base" commands and is intended to be used by upper
+ * layers to determine whether key can be inlined or not. To be used as first
+ * parameter of rta_inline_query().
+ */
+#define IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN \
+ (IPSEC_AUTH_VAR_BASE_DESC_LEN + CAAM_CMD_SZ)
+
+/**
+ * IPSEC_AUTH_BASE_DESC_LEN - IPsec encap/decap shared descriptor length
+ *
+ * Accounts only for the "base" commands and is intended to be used by upper
+ * layers to determine whether key can be inlined or not. To be used as first
+ * parameter of rta_inline_query().
+ */
+#define IPSEC_AUTH_BASE_DESC_LEN (19 * CAAM_CMD_SZ)
+
+/**
+ * IPSEC_AUTH_AES_DEC_BASE_DESC_LEN - IPsec AES decap shared descriptor length
+ *
+ * Accounts only for the "base" commands and is intended to be used by upper
+ * layers to determine whether key can be inlined or not. To be used as first
+ * parameter of rta_inline_query().
+ */
+#define IPSEC_AUTH_AES_DEC_BASE_DESC_LEN (IPSEC_AUTH_BASE_DESC_LEN + \
+ CAAM_CMD_SZ)
+
+/**
+ * cnstr_shdsc_authenc - authenc-like descriptor
+ * @descbuf: pointer to buffer used for descriptor construction
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: if true, perform descriptor byte swapping on a 4-byte boundary
+ * @cipherdata: pointer to block cipher transform definitions.
+ * Valid algorithm values - one of OP_ALG_ALGSEL_* {DES, 3DES, AES}
+ * Valid modes for:
+ * AES: OP_ALG_AAI_* {CBC, CTR}
+ * DES, 3DES: OP_ALG_AAI_CBC
+ * @authdata: pointer to authentication transform definitions.
+ * Valid algorithm values - one of OP_ALG_ALGSEL_* {MD5, SHA1,
+ * SHA224, SHA256, SHA384, SHA512}
+ * Note: The key used for authentication must be provided as plain text.
+ * Note: Keys longer than the block size of the underlying hash function
+ * (per the selected algorithm) are not supported.
+ *
+ * @ivlen: length of the IV to be read from the input frame, before any data
+ * to be processed
+ * @auth_only_len: length of the data to be authenticated-only (commonly IP
+ * header, IV, Sequence number and SPI)
+ * Note: Extended Sequence Number processing is NOT supported
+ *
+ * @trunc_len: the length of the ICV to be written to the output frame. If 0,
+ * then the corresponding length of the digest, according to the
+ * selected algorithm shall be used.
+ * @dir: Protocol direction, encapsulation or decapsulation (DIR_ENC/DIR_DEC)
+ *
+ * Note: Here's how the input frame needs to be formatted so that the processing
+ * will be done correctly:
+ * For encapsulation:
+ * Input:
+ * +----+----------------+---------------------------------------------+
+ * | IV | Auth-only data | Padded data to be authenticated & Encrypted |
+ * +----+----------------+---------------------------------------------+
+ * Output:
+ * +--------------------------------+-----+
+ * | Authenticated & Encrypted data | ICV |
+ * +--------------------------------+-----+
+ *
+ * For decapsulation:
+ * Input:
+ * +----+----------------+--------------------------------+-----+
+ * | IV | Auth-only data | Authenticated & Encrypted data | ICV |
+ * +----+----------------+--------------------------------+-----+
+ * Output:
+ * +--------------------------------+
+ * | Decrypted & authenticated data |
+ * +--------------------------------+
+ *
+ * Note: This descriptor can use per-packet commands, encoded as below in the
+ * DPOVRD register:
+ *        32     24     16              0
+ *        +------+------+---------------+
+ *        | 0x80 | 0x00 | auth_only_len |
+ *        +------+------+---------------+
+ *
+ * This mechanism is available only for SoCs having SEC ERA >= 3. In other
+ * words, it will not work on P4080TO2.
+ *
+ * Note: The descriptor does not add any kind of padding to the input data,
+ * so the upper layer needs to ensure that the data is padded properly,
+ * according to the selected cipher. Failure to do so will result in
+ * the descriptor failing with a data-size error.
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_authenc(uint32_t *descbuf, bool ps, bool swap,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata,
+ uint16_t ivlen, uint16_t auth_only_len,
+ uint8_t trunc_len, uint8_t dir)
+{
+ struct program prg;
+ struct program *p = &prg;
+ const bool need_dk = (dir == DIR_DEC) &&
+ (cipherdata->algtype == OP_ALG_ALGSEL_AES) &&
+ (cipherdata->algmode == OP_ALG_AAI_CBC);
+
+ LABEL(skip_patch_len);
+ LABEL(keyjmp);
+ LABEL(skipkeys);
+ LABEL(aonly_len_offset);
+ REFERENCE(pskip_patch_len);
+ REFERENCE(pkeyjmp);
+ REFERENCE(pskipkeys);
+ REFERENCE(read_len);
+ REFERENCE(write_len);
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+
+ /*
+ * Since we currently assume that key length is equal to hash digest
+ * size, it's ok to truncate keylen value.
+ */
+ trunc_len = trunc_len && (trunc_len < authdata->keylen) ?
+ trunc_len : (uint8_t)authdata->keylen;
+
+ SHR_HDR(p, SHR_SERIAL, 1, SC);
+
+ /*
+ * M0 will contain the value provided by the user when creating
+ * the shared descriptor. If the user provided an override in
+ * DPOVRD, then M0 will contain that value
+ */
+ MATHB(p, MATH0, ADD, auth_only_len, MATH0, 4, IMMED2);
+
+ if (rta_sec_era >= RTA_SEC_ERA_3) {
+ /*
+ * Check if the user wants to override the auth-only len
+ */
+ MATHB(p, DPOVRD, ADD, 0x80000000, MATH2, 4, IMMED2);
+
+ /*
+ * No need to patch the length of the auth-only data read if
+ * the user did not override it
+ */
+ pskip_patch_len = JUMP(p, skip_patch_len, LOCAL_JUMP, ALL_TRUE,
+ MATH_N);
+
+ /* Get auth-only len in M0 */
+ MATHB(p, MATH2, AND, 0xFFFF, MATH0, 4, IMMED2);
+
+ /*
+ * Since M0 is used in calculations, don't mangle it, copy
+ * its content to M1 and use this for patching.
+ */
+ MATHB(p, MATH0, ADD, MATH1, MATH1, 4, 0);
+
+ read_len = MOVE(p, DESCBUF, 0, MATH1, 0, 6, WAITCOMP | IMMED);
+ write_len = MOVE(p, MATH1, 0, DESCBUF, 0, 8, WAITCOMP | IMMED);
+
+ SET_LABEL(p, skip_patch_len);
+ }
+ /*
+ * MATH0 contains the value in DPOVRD w/o the MSB, or the initial
+ * value, as provided by the user at descriptor creation time
+ */
+ if (dir == DIR_ENC)
+ MATHB(p, MATH0, ADD, ivlen, MATH0, 4, IMMED2);
+ else
+ MATHB(p, MATH0, ADD, ivlen + trunc_len, MATH0, 4, IMMED2);
+
+ pkeyjmp = JUMP(p, keyjmp, LOCAL_JUMP, ALL_TRUE, SHRD);
+
+ KEY(p, KEY2, authdata->key_enc_flags, authdata->key, authdata->keylen,
+ INLINE_KEY(authdata));
+
+ /* Insert Key */
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+
+ /* Do operation */
+ ALG_OPERATION(p, authdata->algtype, OP_ALG_AAI_HMAC,
+ OP_ALG_AS_INITFINAL,
+ dir == DIR_ENC ? ICV_CHECK_DISABLE : ICV_CHECK_ENABLE,
+ dir);
+
+ if (need_dk)
+ ALG_OPERATION(p, OP_ALG_ALGSEL_AES, cipherdata->algmode,
+ OP_ALG_AS_INITFINAL, ICV_CHECK_DISABLE, dir);
+ pskipkeys = JUMP(p, skipkeys, LOCAL_JUMP, ALL_TRUE, 0);
+
+ SET_LABEL(p, keyjmp);
+
+ ALG_OPERATION(p, authdata->algtype, OP_ALG_AAI_HMAC_PRECOMP,
+ OP_ALG_AS_INITFINAL,
+ dir == DIR_ENC ? ICV_CHECK_DISABLE : ICV_CHECK_ENABLE,
+ dir);
+
+ if (need_dk) {
+ ALG_OPERATION(p, OP_ALG_ALGSEL_AES, cipherdata->algmode |
+ OP_ALG_AAI_DK, OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE, dir);
+ SET_LABEL(p, skipkeys);
+ } else {
+ SET_LABEL(p, skipkeys);
+ ALG_OPERATION(p, cipherdata->algtype, cipherdata->algmode,
+ OP_ALG_AS_INITFINAL, ICV_CHECK_DISABLE, dir);
+ }
+
+ /*
+ * Prepare the length of the data to be both encrypted/decrypted
+ * and authenticated/checked
+ */
+ MATHB(p, SEQINSZ, SUB, MATH0, VSEQINSZ, 4, 0);
+
+ MATHB(p, VSEQINSZ, SUB, MATH3, VSEQOUTSZ, 4, 0);
+
+ /* Prepare for writing the output frame */
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+
+ SET_LABEL(p, aonly_len_offset);
+
+ /* Read IV */
+ if (cipherdata->algmode == OP_ALG_AAI_CTR)
+ SEQLOAD(p, CONTEXT1, 16, ivlen, 0);
+ else
+ SEQLOAD(p, CONTEXT1, 0, ivlen, 0);
+
+ /*
+ * Read the data needed only for authentication. The length of this read
+ * is patched above when the user overrides it via DPOVRD.
+ */
+ SEQFIFOLOAD(p, MSG2, auth_only_len, 0);
+
+ if (dir == DIR_ENC) {
+ /*
+ * Read input plaintext, encrypt and authenticate & write to
+ * output
+ */
+ SEQFIFOLOAD(p, MSGOUTSNOOP, 0, VLF | LAST1 | LAST2 | FLUSH1);
+
+ /* Finally, write the ICV */
+ SEQSTORE(p, CONTEXT2, 0, trunc_len, 0);
+ } else {
+ /*
+ * Read input ciphertext, decrypt and authenticate & write to
+ * output
+ */
+ SEQFIFOLOAD(p, MSGINSNOOP, 0, VLF | LAST1 | LAST2 | FLUSH1);
+
+ /* Read the ICV to check */
+ SEQFIFOLOAD(p, ICV2, trunc_len, LAST2);
+ }
+
+ PATCH_JUMP(p, pkeyjmp, keyjmp);
+ PATCH_JUMP(p, pskipkeys, skipkeys);
+
+ if (rta_sec_era >= RTA_SEC_ERA_3) {
+ PATCH_JUMP(p, pskip_patch_len, skip_patch_len);
+ PATCH_MOVE(p, read_len, aonly_len_offset);
+ PATCH_MOVE(p, write_len, aonly_len_offset);
+ }
+
+ return PROGRAM_FINALIZE(p);
+}
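+
+/*
+ * Editor's note - illustrative sketch, not part of the upstream header. It
+ * only restates the DPOVRD per-packet override format documented above for
+ * cnstr_shdsc_authenc() (SEC ERA >= 3): bit 31 set, 0x00 in the next byte and
+ * the new auth-only length in the lower 16 bits. The helper name is
+ * hypothetical.
+ */
+static inline uint32_t
+authenc_dpovrd_override(uint16_t new_auth_only_len)
+{
+ /* 0x80 in the MSB marks a valid override; low 16 bits carry the length */
+ return 0x80000000 | new_auth_only_len;
+}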
+
+#endif /* __DESC_IPSEC_H__ */
diff --git a/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta.h b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta.h
new file mode 100644
index 00000000..c4bbad0b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta.h
@@ -0,0 +1,921 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ */
+
+#ifndef __RTA_RTA_H__
+#define __RTA_RTA_H__
+
+#include "rta/sec_run_time_asm.h"
+#include "rta/fifo_load_store_cmd.h"
+#include "rta/header_cmd.h"
+#include "rta/jump_cmd.h"
+#include "rta/key_cmd.h"
+#include "rta/load_cmd.h"
+#include "rta/math_cmd.h"
+#include "rta/move_cmd.h"
+#include "rta/nfifo_cmd.h"
+#include "rta/operation_cmd.h"
+#include "rta/protocol_cmd.h"
+#include "rta/seq_in_out_ptr_cmd.h"
+#include "rta/signature_cmd.h"
+#include "rta/store_cmd.h"
+
+/**
+ * DOC: About
+ *
+ * RTA (Runtime Assembler) Library is an easy and flexible runtime method for
+ * writing SEC descriptors. It implements a thin abstraction layer above
+ * SEC commands set; the resulting code is compact and similar to a
+ * descriptor sequence.
+ *
+ * RTA library improves comprehension of the SEC code, adds flexibility for
+ * writing complex descriptors and keeps the code lightweight. It should be
+ * used by anyone who needs to encode descriptors at runtime, with
+ * comprehensible flow control in the descriptor.
+ */
+
+/**
+ * DOC: Usage
+ *
+ * RTA is used in kernel space by the SEC / CAAM (Cryptographic Acceleration and
+ * Assurance Module) kernel module (drivers/crypto/caam) and SEC / CAAM QI
+ * kernel module (Freescale QorIQ SDK).
+ *
+ * RTA is used in user space by USDPAA - User Space DataPath Acceleration
+ * Architecture (Freescale QorIQ SDK).
+ */
+
+/**
+ * DOC: Descriptor Buffer Management Routines
+ *
+ * Contains details of RTA descriptor buffer management and SEC Era
+ * management routines.
+ */
+
+/**
+ * PROGRAM_CNTXT_INIT - must be called before any descriptor run-time assembly
+ * call; the type field carries the info on whether the descriptor is a shared
+ * or a job descriptor.
+ * @program: pointer to struct program
+ * @buffer: input buffer where the descriptor will be placed (uint32_t *)
+ * @offset: offset in input buffer from where the data will be written
+ * (unsigned int)
+ */
+#define PROGRAM_CNTXT_INIT(program, buffer, offset) \
+ rta_program_cntxt_init(program, buffer, offset)
+
+/**
+ * PROGRAM_FINALIZE - must be called to mark completion of RTA call.
+ * @program: pointer to struct program
+ *
+ * Return: total size of the descriptor in words or negative number on error.
+ */
+#define PROGRAM_FINALIZE(program) rta_program_finalize(program)
+
+/**
+ * PROGRAM_SET_36BIT_ADDR - must be called to set pointer size to 36 bits
+ * @program: pointer to struct program
+ *
+ * Return: current size of the descriptor in words (unsigned int).
+ */
+#define PROGRAM_SET_36BIT_ADDR(program) rta_program_set_36bit_addr(program)
+
+/**
+ * PROGRAM_SET_BSWAP - must be called to enable byte swapping
+ * @program: pointer to struct program
+ *
+ * Byte swapping on a 4-byte boundary will be performed at the end - when
+ * calling PROGRAM_FINALIZE().
+ *
+ * Return: current size of the descriptor in words (unsigned int).
+ */
+#define PROGRAM_SET_BSWAP(program) rta_program_set_bswap(program)
+
+/**
+ * WORD - must be called to insert a 32-bit value into the descriptor buffer
+ * @program: pointer to struct program
+ * @val: input value to be written in descriptor buffer (uint32_t)
+ *
+ * Return: the descriptor buffer offset where this command is inserted
+ * (unsigned int).
+ */
+#define WORD(program, val) rta_word(program, val)
+
+/**
+ * DWORD - must be called to insert a 64-bit value into the descriptor buffer
+ * @program: pointer to struct program
+ * @val: input value to be written in descriptor buffer (uint64_t)
+ *
+ * Return: the descriptor buffer offset where this command is inserted
+ * (unsigned int).
+ */
+#define DWORD(program, val) rta_dword(program, val)
+
+/**
+ * COPY_DATA - must be called to insert data larger than 64 bits into the
+ * descriptor buffer.
+ * @program: pointer to struct program
+ * @data: input data to be written in descriptor buffer (uint8_t *)
+ * @len: length of input data (unsigned int)
+ *
+ * Return: the descriptor buffer offset where this command is inserted
+ * (unsigned int).
+ */
+#define COPY_DATA(program, data, len) rta_copy_data(program, (data), (len))
+
+/**
+ * DESC_LEN - determines job / shared descriptor buffer length (in words)
+ * @buffer: descriptor buffer (uint32_t *)
+ *
+ * Return: descriptor buffer length in words (unsigned int).
+ */
+#define DESC_LEN(buffer) rta_desc_len(buffer)
+
+/**
+ * DESC_BYTES - determines job / shared descriptor buffer length (in bytes)
+ * @buffer: descriptor buffer (uint32_t *)
+ *
+ * Return: descriptor buffer length in bytes (unsigned int).
+ */
+#define DESC_BYTES(buffer) rta_desc_bytes(buffer)
+
+/*
+ * SEC HW block revision.
+ *
+ * This *must not be confused with SEC version*:
+ * - SEC HW block revision format is "v"
+ * - SEC revision format is "x.y"
+ */
+extern enum rta_sec_era rta_sec_era;
+
+/**
+ * rta_set_sec_era - Set SEC Era HW block revision for which the RTA library
+ * will generate the descriptors.
+ * @era: SEC Era (enum rta_sec_era)
+ *
+ * Return: 0 if the ERA was set successfully, -1 otherwise (int)
+ *
+ * Warning 1: Must be called *only once*, *before* using any other RTA API
+ * routine.
+ *
+ * Warning 2: *Not thread safe*.
+ */
+static inline int
+rta_set_sec_era(enum rta_sec_era era)
+{
+ if (era > MAX_SEC_ERA) {
+ rta_sec_era = DEFAULT_SEC_ERA;
+ pr_err("Unsupported SEC ERA. Defaulting to ERA %d\n",
+ DEFAULT_SEC_ERA + 1);
+ return -1;
+ }
+
+ rta_sec_era = era;
+ return 0;
+}
+
+/**
+ * rta_get_sec_era - Get SEC Era HW block revision for which the RTA library
+ * will generate the descriptors.
+ *
+ * Return: SEC Era (unsigned int).
+ */
+static inline unsigned int
+rta_get_sec_era(void)
+{
+ return rta_sec_era;
+}
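+
+/*
+ * Editor's note - illustrative sketch, not part of the upstream header. The
+ * era must be configured once, before any descriptor is assembled; for
+ * instance, the IPsec new mode constructors in desc/ipsec.h refuse to build
+ * below ERA 8. The helper name is hypothetical.
+ */
+static inline int
+rta_sec_era_setup_example(void)
+{
+ if (rta_set_sec_era(RTA_SEC_ERA_8))
+  return -1;
+ /* descriptors built from here on target rta_get_sec_era() */
+ return 0;
+}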
+
+/**
+ * DOC: SEC Commands Routines
+ *
+ * Contains details of RTA wrapper routines over SEC engine commands.
+ */
+
+/**
+ * SHR_HDR - Configures Shared Descriptor HEADER command
+ * @program: pointer to struct program
+ * @share: descriptor share state (enum rta_share_type)
+ * @start_idx: index in descriptor buffer where the execution of the shared
+ * descriptor should start (unsigned int).
+ * @flags: operational flags: RIF, DNR, CIF, SC, PD
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define SHR_HDR(program, share, start_idx, flags) \
+ rta_shr_header(program, share, start_idx, flags)
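+
+/*
+ * Editor's note - illustrative sketch, not part of the upstream header. It
+ * condenses the build flow followed by the cnstr_shdsc_* constructors in
+ * desc/: initialize the program context over a caller-supplied buffer, set
+ * the addressing/endianness options, emit the shared header plus commands,
+ * then finalize to obtain the length in words.
+ */
+static inline int
+rta_build_flow_example(uint32_t *descbuf, bool ps, bool swap)
+{
+ struct program prg;
+ struct program *p = &prg;
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap)
+  PROGRAM_SET_BSWAP(p);
+ if (ps)
+  PROGRAM_SET_36BIT_ADDR(p);
+ SHR_HDR(p, SHR_SERIAL, 1, SC);
+ /* ... KEY / MATHB / SEQ FIFO commands would be emitted here ... */
+ return PROGRAM_FINALIZE(p); /* words; DESC_BYTES(descbuf) gives bytes */
+}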
+
+/**
+ * JOB_HDR - Configures JOB Descriptor HEADER command
+ * @program: pointer to struct program
+ * @share: descriptor share state (enum rta_share_type)
+ * @start_idx: index in descriptor buffer where the execution of the job
+ * descriptor should start (unsigned int). In case SHR bit is present
+ * in flags, this will be the shared descriptor length.
+ * @share_desc: pointer to shared descriptor, in case SHR bit is set (uint64_t)
+ * @flags: operational flags: RSMS, DNR, TD, MTD, REO, SHR
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define JOB_HDR(program, share, start_idx, share_desc, flags) \
+ rta_job_header(program, share, start_idx, share_desc, flags, 0)
+
+/**
+ * JOB_HDR_EXT - Configures JOB Descriptor HEADER command
+ * @program: pointer to struct program
+ * @share: descriptor share state (enum rta_share_type)
+ * @start_idx: index in descriptor buffer where the execution of the job
+ * descriptor should start (unsigned int). In case SHR bit is present
+ * in flags, this will be the shared descriptor length.
+ * @share_desc: pointer to shared descriptor, in case SHR bit is set (uint64_t)
+ * @flags: operational flags: RSMS, DNR, TD, MTD, REO, SHR
+ * @ext_flags: extended header flags: DSV (DECO Select Valid), DECO Id (limited
+ * by DSEL_MASK).
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define JOB_HDR_EXT(program, share, start_idx, share_desc, flags, ext_flags) \
+ rta_job_header(program, share, start_idx, share_desc, flags | EXT, \
+ ext_flags)
+
+/**
+ * MOVE - Configures MOVE and MOVE_LEN commands
+ * @program: pointer to struct program
+ * @src: internal source of data that will be moved: CONTEXT1, CONTEXT2, OFIFO,
+ * DESCBUF, MATH0-MATH3, IFIFOABD, IFIFOAB1, IFIFOAB2, AB1, AB2, ABD.
+ * @src_offset: offset in source data (uint16_t)
+ * @dst: internal destination of data that will be moved: CONTEXT1, CONTEXT2,
+ * OFIFO, DESCBUF, MATH0-MATH3, IFIFOAB1, IFIFOAB2, IFIFO, PKA, KEY1,
+ * KEY2, ALTSOURCE.
+ * @dst_offset: offset in destination data (uint16_t)
+ * @length: size of data to be moved: for MOVE must be specified as immediate
+ * value and IMMED flag must be set; for MOVE_LEN must be specified
+ * using MATH0-MATH3.
+ * @opt: operational flags: WAITCOMP, FLUSH1, FLUSH2, LAST1, LAST2, SIZE_WORD,
+ * SIZE_BYTE, SIZE_DWORD, IMMED (not valid for MOVE_LEN).
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define MOVE(program, src, src_offset, dst, dst_offset, length, opt) \
+ rta_move(program, __MOVE, src, src_offset, dst, dst_offset, length, opt)
+
+/**
+ * MOVEB - Configures MOVEB command
+ * @program: pointer to struct program
+ * @src: internal source of data that will be moved: CONTEXT1, CONTEXT2, OFIFO,
+ * DESCBUF, MATH0-MATH3, IFIFOABD, IFIFOAB1, IFIFOAB2, AB1, AB2, ABD.
+ * @src_offset: offset in source data (uint16_t)
+ * @dst: internal destination of data that will be moved: CONTEXT1, CONTEXT2,
+ * OFIFO, DESCBUF, MATH0-MATH3, IFIFOAB1, IFIFOAB2, IFIFO, PKA, KEY1,
+ * KEY2, ALTSOURCE.
+ * @dst_offset: offset in destination data (uint16_t)
+ * @length: size of data to be moved: for MOVE must be specified as immediate
+ * value and IMMED flag must be set; for MOVE_LEN must be specified
+ * using MATH0-MATH3.
+ * @opt: operational flags: WAITCOMP, FLUSH1, FLUSH2, LAST1, LAST2, SIZE_WORD,
+ * SIZE_BYTE, SIZE_DWORD, IMMED (not valid for MOVE_LEN).
+ *
+ * Identical to the MOVE command if byte swapping is not enabled; otherwise,
+ * when src/dst is the descriptor buffer or the MATH registers, the data is
+ * treated as a byte array where MOVE treats it as a 4-byte array, and vice
+ * versa.
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define MOVEB(program, src, src_offset, dst, dst_offset, length, opt) \
+ rta_move(program, __MOVEB, src, src_offset, dst, dst_offset, length, \
+ opt)
+
+/**
+ * MOVEDW - Configures MOVEDW command
+ * @program: pointer to struct program
+ * @src: internal source of data that will be moved: CONTEXT1, CONTEXT2, OFIFO,
+ * DESCBUF, MATH0-MATH3, IFIFOABD, IFIFOAB1, IFIFOAB2, AB1, AB2, ABD.
+ * @src_offset: offset in source data (uint16_t)
+ * @dst: internal destination of data that will be moved: CONTEXT1, CONTEXT2,
+ * OFIFO, DESCBUF, MATH0-MATH3, IFIFOAB1, IFIFOAB2, IFIFO, PKA, KEY1,
+ * KEY2, ALTSOURCE.
+ * @dst_offset: offset in destination data (uint16_t)
+ * @length: size of data to be moved: for MOVE must be specified as immediate
+ * value and IMMED flag must be set; for MOVE_LEN must be specified
+ * using MATH0-MATH3.
+ * @opt: operational flags: WAITCOMP, FLUSH1, FLUSH2, LAST1, LAST2, SIZE_WORD,
+ * SIZE_BYTE, SIZE_DWORD, IMMED (not valid for MOVE_LEN).
+ *
+ * Identical to the MOVE command, with the following differences: the data
+ * type is an 8-byte array, and word swapping is performed when the SEC is
+ * programmed in little endian mode.
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define MOVEDW(program, src, src_offset, dst, dst_offset, length, opt) \
+ rta_move(program, __MOVEDW, src, src_offset, dst, dst_offset, length, \
+ opt)
+
+/**
+ * FIFOLOAD - Configures FIFOLOAD command to load message data, PKHA data, IV,
+ * ICV, AAD and bit length message data into Input Data FIFO.
+ * @program: pointer to struct program
+ * @data: input data type to store: PKHA registers, IFIFO, MSG1, MSG2,
+ * MSGOUTSNOOP, MSGINSNOOP, IV1, IV2, AAD1, ICV1, ICV2, BIT_DATA, SKIP.
+ * @src: pointer or actual data in case of immediate load; IMMED, COPY and DCOPY
+ * flags indicate action taken (inline imm data, inline ptr, inline from
+ * ptr).
+ * @length: number of bytes to load (uint32_t)
+ * @flags: operational flags: SGF, IMMED, EXT, CLASS1, CLASS2, BOTH, FLUSH1,
+ * LAST1, LAST2, COPY, DCOPY.
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define FIFOLOAD(program, data, src, length, flags) \
+ rta_fifo_load(program, data, src, length, flags)
+
+/**
+ * SEQFIFOLOAD - Configures SEQ FIFOLOAD command to load message data, PKHA
+ * data, IV, ICV, AAD and bit length message data into Input Data
+ * FIFO.
+ * @program: pointer to struct program
+ * @data: input data type to store: PKHA registers, IFIFO, MSG1, MSG2,
+ * MSGOUTSNOOP, MSGINSNOOP, IV1, IV2, AAD1, ICV1, ICV2, BIT_DATA, SKIP.
+ * @length: number of bytes to load; can be set to 0 for SEQ command w/ VLF set
+ * (uint32_t).
+ * @flags: operational flags: VLF, CLASS1, CLASS2, BOTH, FLUSH1, LAST1, LAST2,
+ * AIDF.
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define SEQFIFOLOAD(program, data, length, flags) \
+ rta_fifo_load(program, data, NONE, length, flags|SEQ)
+
+/**
+ * FIFOSTORE - Configures FIFOSTORE command, to move data from Output Data FIFO
+ * to external memory via DMA.
+ * @program: pointer to struct program
+ * @data: output data type to store: PKHA registers, IFIFO, OFIFO, RNG,
+ * RNGOFIFO, AFHA_SBOX, MDHA_SPLIT_KEY, MSG, KEY1, KEY2, SKIP.
+ * @encrypt_flags: store data encryption mode: EKT, TK
+ * @dst: pointer to store location (uint64_t)
+ * @length: number of bytes to load (uint32_t)
+ * @flags: operational flags: SGF, CONT, EXT, CLASS1, CLASS2, BOTH
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define FIFOSTORE(program, data, encrypt_flags, dst, length, flags) \
+ rta_fifo_store(program, data, encrypt_flags, dst, length, flags)
+
+/**
+ * SEQFIFOSTORE - Configures SEQ FIFOSTORE command, to move data from Output
+ * Data FIFO to external memory via DMA.
+ * @program: pointer to struct program
+ * @data: output data type to store: PKHA registers, IFIFO, OFIFO, RNG,
+ * RNGOFIFO, AFHA_SBOX, MDHA_SPLIT_KEY, MSG, KEY1, KEY2, METADATA, SKIP.
+ * @encrypt_flags: store data encryption mode: EKT, TK
+ * @length: number of bytes to load; can be set to 0 for SEQ command w/ VLF set
+ * (uint32_t).
+ * @flags: operational flags: VLF, CONT, EXT, CLASS1, CLASS2, BOTH
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define SEQFIFOSTORE(program, data, encrypt_flags, length, flags) \
+ rta_fifo_store(program, data, encrypt_flags, 0, length, flags|SEQ)
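+
+/*
+ * Editor's note - illustrative sketch, not part of the upstream header. It
+ * shows the variable-length "snoop" pairing used by cnstr_shdsc_authenc()
+ * for encapsulation: open a VLF output store, then feed the payload through
+ * both classes so it is encrypted and authenticated in a single pass.
+ */
+static inline void
+rta_snoop_pattern_example(struct program *p)
+{
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+ SEQFIFOLOAD(p, MSGOUTSNOOP, 0, VLF | LAST1 | LAST2 | FLUSH1);
+}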
+
+/**
+ * KEY - Configures KEY and SEQ KEY commands
+ * @program: pointer to struct program
+ * @key_dst: key store location: KEY1, KEY2, PKE, AFHA_SBOX, MDHA_SPLIT_KEY
+ * @encrypt_flags: key encryption mode: ENC, EKT, TK, NWB, PTS
+ * @src: pointer or actual data in case of immediate load (uint64_t); IMMED,
+ * COPY and DCOPY flags indicate action taken (inline imm data,
+ * inline ptr, inline from ptr).
+ * @length: number of bytes to load; can be set to 0 for SEQ command w/ VLF set
+ * (uint32_t).
+ * @flags: operational flags: for KEY: SGF, IMMED, COPY, DCOPY; for SEQKEY: SEQ,
+ * VLF, AIDF.
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define KEY(program, key_dst, encrypt_flags, src, length, flags) \
+ rta_key(program, key_dst, encrypt_flags, src, length, flags)
+
+/**
+ * SEQINPTR - Configures SEQ IN PTR command
+ * @program: pointer to struct program
+ * @src: starting address for Input Sequence (uint64_t)
+ * @length: number of bytes in (or to be added to) Input Sequence (uint32_t)
+ * @flags: operational flags: RBS, INL, SGF, PRE, EXT, RTO, RJD, SOP (when PRE,
+ * RTO or SOP are set, @src parameter must be 0).
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define SEQINPTR(program, src, length, flags) \
+ rta_seq_in_ptr(program, src, length, flags)
+
+/**
+ * SEQOUTPTR - Configures SEQ OUT PTR command
+ * @program: pointer to struct program
+ * @dst: starting address for Output Sequence (uint64_t)
+ * @length: number of bytes in (or to be added to) Output Sequence (uint32_t)
+ * @flags: operational flags: SGF, PRE, EXT, RTO, RST, EWS (when PRE or RTO are
+ * set, @dst parameter must be 0).
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define SEQOUTPTR(program, dst, length, flags) \
+ rta_seq_out_ptr(program, dst, length, flags)
+
+/**
+ * ALG_OPERATION - Configures ALGORITHM OPERATION command
+ * @program: pointer to struct program
+ * @cipher_alg: algorithm to be used
+ * @aai: Additional Algorithm Information; contains mode information that is
+ * associated with the algorithm (check desc.h for specific values).
+ * @algo_state: algorithm state; defines the state of the algorithm that is
+ * being executed (check desc.h file for specific values).
+ * @icv_check: ICV checking; selects whether the algorithm should check
+ * calculated ICV with known ICV: ICV_CHECK_ENABLE,
+ * ICV_CHECK_DISABLE.
+ * @enc: selects between encryption and decryption: DIR_ENC, DIR_DEC
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define ALG_OPERATION(program, cipher_alg, aai, algo_state, icv_check, enc) \
+ rta_operation(program, cipher_alg, aai, algo_state, icv_check, enc)
+
+/**
+ * PROTOCOL - Configures PROTOCOL OPERATION command
+ * @program: pointer to struct program
+ * @optype: operation type: OP_TYPE_UNI_PROTOCOL / OP_TYPE_DECAP_PROTOCOL /
+ * OP_TYPE_ENCAP_PROTOCOL.
+ * @protid: protocol identifier value (check desc.h file for specific values)
+ * @protoinfo: protocol dependent value (check desc.h file for specific values)
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define PROTOCOL(program, optype, protid, protoinfo) \
+ rta_proto_operation(program, optype, protid, protoinfo)
+
+/**
+ * DKP_PROTOCOL - Configures DKP (Derived Key Protocol) PROTOCOL command
+ * @program: pointer to struct program
+ * @protid: protocol identifier value - one of the following:
+ * OP_PCLID_DKP_{MD5 | SHA1 | SHA224 | SHA256 | SHA384 | SHA512}
+ * @key_src: How the initial ("negotiated") key is provided to the DKP protocol.
+ * Valid values - one of OP_PCL_DKP_SRC_{IMM, SEQ, PTR, SGF}. Not all
+ * (key_src,key_dst) combinations are allowed.
+ * @key_dst: How the derived ("split") key is returned by the DKP protocol.
+ * Valid values - one of OP_PCL_DKP_DST_{IMM, SEQ, PTR, SGF}. Not all
+ * (key_src,key_dst) combinations are allowed.
+ * @keylen: length of the initial key, in bytes (uint16_t)
+ * @key: address where algorithm key resides; virtual address if key_type is
+ * RTA_DATA_IMM, physical (bus) address if key_type is RTA_DATA_PTR or
+ * RTA_DATA_IMM_DMA.
+ * @key_type: enum rta_data_type
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define DKP_PROTOCOL(program, protid, key_src, key_dst, keylen, key, key_type) \
+ rta_dkp_proto(program, protid, key_src, key_dst, keylen, key, key_type)
+
+/**
+ * PKHA_OPERATION - Configures PKHA OPERATION command
+ * @program: pointer to struct program
+ * @op_pkha: PKHA operation; indicates the modular arithmetic function to
+ * execute (check desc.h file for specific values).
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define PKHA_OPERATION(program, op_pkha) rta_pkha_operation(program, op_pkha)
+
+/**
+ * JUMP - Configures JUMP command
+ * @program: pointer to struct program
+ * @addr: local offset for local jumps or address pointer for non-local jumps;
+ * IMM or PTR macros must be used to indicate type.
+ * @jump_type: type of action taken by jump (enum rta_jump_type)
+ * @test_type: defines how jump conditions are evaluated (enum rta_jump_cond)
+ * @cond: jump conditions: operational flags - DONE1, DONE2, BOTH; various
+ * sharing and wait conditions (JSL = 1) - NIFP, NIP, NOP, NCP, CALM,
+ * SELF, SHARED, JQP; Math and PKHA status conditions (JSL = 0) - Z, N,
+ * NV, C, PK0, PK1, PKP.
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define JUMP(program, addr, jump_type, test_type, cond) \
+ rta_jump(program, addr, jump_type, test_type, cond, NONE)
+
+/**
+ * JUMP_INC - Configures JUMP_INC command
+ * @program: pointer to struct program
+ * @addr: local offset; IMM or PTR macros must be used to indicate type
+ * @test_type: defines how jump conditions are evaluated (enum rta_jump_cond)
+ * @cond: jump conditions: Math status conditions (JSL = 0): Z, N, NV, C
+ * @src_dst: register to increment / decrement: MATH0-MATH3, DPOVRD, SEQINSZ,
+ * SEQOUTSZ, VSEQINSZ, VSEQOUTSZ.
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define JUMP_INC(program, addr, test_type, cond, src_dst) \
+ rta_jump(program, addr, LOCAL_JUMP_INC, test_type, cond, src_dst)
+
+/**
+ * JUMP_DEC - Configures JUMP_DEC command
+ * @program: pointer to struct program
+ * @addr: local offset; IMM or PTR macros must be used to indicate type
+ * @test_type: defines how jump conditions are evaluated (enum rta_jump_cond)
+ * @cond: jump conditions: Math status conditions (JSL = 0): Z, N, NV, C
+ * @src_dst: register to increment / decrement: MATH0-MATH3, DPOVRD, SEQINSZ,
+ * SEQOUTSZ, VSEQINSZ, VSEQOUTSZ.
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define JUMP_DEC(program, addr, test_type, cond, src_dst) \
+ rta_jump(program, addr, LOCAL_JUMP_DEC, test_type, cond, src_dst)
+
+/**
+ * LOAD - Configures LOAD command to load data registers from descriptor or from
+ * a memory location.
+ * @program: pointer to struct program
+ * @addr: immediate value or pointer to the data to be loaded; IMMED, COPY and
+ * DCOPY flags indicate action taken (inline imm data, inline ptr, inline
+ * from ptr).
+ * @dst: destination register (uint64_t)
+ * @offset: start point to write data in destination register (uint32_t)
+ * @length: number of bytes to load (uint32_t)
+ * @flags: operational flags: VLF, IMMED, COPY, DCOPY
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define LOAD(program, addr, dst, offset, length, flags) \
+ rta_load(program, addr, dst, offset, length, flags)
+
+/**
+ * SEQLOAD - Configures SEQ LOAD command to load data registers from descriptor
+ * or from a memory location.
+ * @program: pointer to struct program
+ * @dst: destination register (uint64_t)
+ * @offset: start point to write data in destination register (uint32_t)
+ * @length: number of bytes to load (uint32_t)
+ * @flags: operational flags: SGF
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define SEQLOAD(program, dst, offset, length, flags) \
+ rta_load(program, NONE, dst, offset, length, flags|SEQ)
+
+/**
+ * STORE - Configures STORE command to read data from registers and write them
+ * to a memory location.
+ * @program: pointer to struct program
+ * @src: immediate value or source register for data to be stored: KEY1SZ,
+ * KEY2SZ, DJQDA, MODE1, MODE2, DJQCTRL, DATA1SZ, DATA2SZ, DSTAT, ICV1SZ,
+ * ICV2SZ, DPID, CCTRL, ICTRL, CLRW, CSTAT, MATH0-MATH3, PKHA registers,
+ * CONTEXT1, CONTEXT2, DESCBUF, JOBDESCBUF, SHAREDESCBUF. In case of
+ * immediate value, IMMED, COPY and DCOPY flags indicate action taken
+ * (inline imm data, inline ptr, inline from ptr).
+ * @offset: start point for reading from source register (uint16_t)
+ * @dst: pointer to store location (uint64_t)
+ * @length: number of bytes to store (uint32_t)
+ * @flags: operational flags: VLF, IMMED, COPY, DCOPY
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define STORE(program, src, offset, dst, length, flags) \
+ rta_store(program, src, offset, dst, length, flags)
+
+/**
+ * SEQSTORE - Configures SEQ STORE command to read data from registers and write
+ * them to a memory location.
+ * @program: pointer to struct program
+ * @src: immediate value or source register for data to be stored: KEY1SZ,
+ * KEY2SZ, DJQDA, MODE1, MODE2, DJQCTRL, DATA1SZ, DATA2SZ, DSTAT, ICV1SZ,
+ * ICV2SZ, DPID, CCTRL, ICTRL, CLRW, CSTAT, MATH0-MATH3, PKHA registers,
+ * CONTEXT1, CONTEXT2, DESCBUF, JOBDESCBUF, SHAREDESCBUF. In case of
+ * immediate value, IMMED, COPY and DCOPY flags indicate action taken
+ * (inline imm data, inline ptr, inline from ptr).
+ * @offset: start point for reading from source register (uint16_t)
+ * @length: number of bytes to store (uint32_t)
+ * @flags: operational flags: SGF, IMMED, COPY, DCOPY
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define SEQSTORE(program, src, offset, length, flags) \
+ rta_store(program, src, offset, NONE, length, flags|SEQ)
+
+/**
+ * MATHB - Configures MATHB command to perform binary operations
+ * @program: pointer to struct program
+ * @operand1: first operand: MATH0-MATH3, DPOVRD, SEQINSZ, SEQOUTSZ, VSEQINSZ,
+ * VSEQOUTSZ, ZERO, ONE, NONE, Immediate value. IMMED must be used to
+ * indicate immediate value.
+ * @operator: function to be performed: ADD, ADDC, SUB, SUBB, OR, AND, XOR,
+ * LSHIFT, RSHIFT, SHLD.
+ * @operand2: second operand: MATH0-MATH3, DPOVRD, VSEQINSZ, VSEQOUTSZ, ABD,
+ * OFIFO, JOBSRC, ZERO, ONE, Immediate value. IMMED2 must be used to
+ * indicate immediate value.
+ * @result: destination for the result: MATH0-MATH3, DPOVRD, SEQINSZ, SEQOUTSZ,
+ * NONE, VSEQINSZ, VSEQOUTSZ.
+ * @length: length in bytes of the operation and the immediate value, if there
+ * is one (int).
+ * @opt: operational flags: IFB, NFU, STL, SWP, IMMED, IMMED2
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define MATHB(program, operand1, operator, operand2, result, length, opt) \
+ rta_math(program, operand1, MATH_FUN_##operator, operand2, result, \
+ length, opt)
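+
+/*
+ * Editor's note - illustrative sketch, not part of the upstream header. It
+ * shows the size arithmetic used by the constructors in desc/: the variable
+ * input sequence size is the total input minus the header material tracked
+ * in MATH0, and for a cipher-only flow the output size matches it.
+ */
+static inline void
+rta_varlen_size_example(struct program *p)
+{
+ /* payload length = total input - (IV + auth-only data [+ ICV]) in MATH0 */
+ MATHB(p, SEQINSZ, SUB, MATH0, VSEQINSZ, 4, 0);
+ MATHB(p, VSEQINSZ, ADD, ZERO, VSEQOUTSZ, 4, 0);
+}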
+
+/**
+ * MATHI - Configures MATHI command to perform binary operations
+ * @program: pointer to struct program
+ * @operand: if !SSEL: MATH0-MATH3, DPOVRD, SEQINSZ, SEQOUTSZ, VSEQINSZ,
+ * VSEQOUTSZ, ZERO, ONE.
+ * if SSEL: MATH0-MATH3, DPOVRD, VSEQINSZ, VSEQOUTSZ, ABD, OFIFO,
+ * JOBSRC, ZERO, ONE.
+ * @operator: function to be performed: ADD, ADDC, SUB, SUBB, OR, AND, XOR,
+ * LSHIFT, RSHIFT, FBYT (for !SSEL only).
+ * @imm: Immediate value (uint8_t). IMMED must be used to indicate immediate
+ * value.
+ * @result: destination for the result: MATH0-MATH3, DPOVRD, SEQINSZ, SEQOUTSZ,
+ * NONE, VSEQINSZ, VSEQOUTSZ.
+ * @length: length in bytes of the operation and the immediate value, if there
+ * is one (int). @imm is left-extended with zeros if needed.
+ * @opt: operational flags: NFU, SSEL, SWP, IMMED
+ *
+ * If !SSEL, @operand <@operator> @imm -> @result
+ * If SSEL, @imm <@operator> @operand -> @result
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define MATHI(program, operand, operator, imm, result, length, opt) \
+ rta_mathi(program, operand, MATH_FUN_##operator, imm, result, length, \
+ opt)
+
+/**
+ * MATHU - Configures MATHU command to perform unary operations
+ * @program: pointer to struct program
+ * @operand1: operand: MATH0-MATH3, DPOVRD, SEQINSZ, SEQOUTSZ, VSEQINSZ,
+ * VSEQOUTSZ, ZERO, ONE, NONE, Immediate value. IMMED must be used to
+ * indicate immediate value.
+ * @operator: function to be performed: ZBYT, BSWAP
+ * @result: destination for the result: MATH0-MATH3, DPOVRD, SEQINSZ, SEQOUTSZ,
+ * NONE, VSEQINSZ, VSEQOUTSZ.
+ * @length: length in bytes of the operation and the immediate value, if there
+ * is one (int).
+ * @opt: operational flags: NFU, STL, SWP, IMMED
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define MATHU(program, operand1, operator, result, length, opt) \
+ rta_math(program, operand1, MATH_FUN_##operator, NONE, result, length, \
+ opt)
+
+/**
+ * SIGNATURE - Configures SIGNATURE command
+ * @program: pointer to struct program
+ * @sign_type: signature type: SIGN_TYPE_FINAL, SIGN_TYPE_FINAL_RESTORE,
+ * SIGN_TYPE_FINAL_NONZERO, SIGN_TYPE_IMM_2, SIGN_TYPE_IMM_3,
+ * SIGN_TYPE_IMM_4.
+ *
+ * After SIGNATURE command, DWORD or WORD must be used to insert signature in
+ * descriptor buffer.
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define SIGNATURE(program, sign_type) rta_signature(program, sign_type)
+
+/**
+ * NFIFOADD - Configures NFIFO command, a shortcut of RTA Load command to write
+ * to iNfo FIFO.
+ * @program: pointer to struct program
+ * @src: source for the input data in Alignment Block: IFIFO, OFIFO, PAD,
+ * MSGOUTSNOOP, ALTSOURCE, OFIFO_SYNC, MSGOUTSNOOP_ALT.
+ * @data: type of data that is going through the Input Data FIFO: MSG, MSG1,
+ * MSG2, IV1, IV2, ICV1, ICV2, SAD1, AAD1, AAD2, AFHA_SBOX, SKIP,
+ * PKHA registers, AB1, AB2, ABD.
+ * @length: length of the data copied in FIFO registers (uint32_t)
+ * @flags: select options between:
+ * -operational flags: LAST1, LAST2, FLUSH1, FLUSH2, OC, BP
+ * -when PAD is selected as source: BM, PR, PS
+ * -padding type: PAD_ZERO, PAD_NONZERO, PAD_INCREMENT, PAD_RANDOM,
+ * PAD_ZERO_N1, PAD_NONZERO_0, PAD_N1, PAD_NONZERO_N
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define NFIFOADD(program, src, data, length, flags) \
+ rta_nfifo_load(program, src, data, length, flags)
+
+/**
+ * DOC: Self Referential Code Management Routines
+ *
+ * Contains details of RTA self referential code routines.
+ */
+
+/**
+ * REFERENCE - initialize a variable used for storing an index inside a
+ * descriptor buffer.
+ * @ref: reference to a descriptor buffer's index where an update is required
+ * with a value that will be known later in the program flow.
+ */
+#define REFERENCE(ref) int ref = -1
+
+/**
+ * LABEL - initialize a variable used for storing an index inside a descriptor
+ * buffer.
+ * @label: stores the value with which the REFERENCE line in the descriptor
+ *         buffer should be updated.
+ */
+#define LABEL(label) unsigned int label = 0
+
+/**
+ * SET_LABEL - set a LABEL value
+ * @program: pointer to struct program
+ * @label: value that will be inserted in a line previously written in the
+ * descriptor buffer.
+ */
+#define SET_LABEL(program, label) (label = rta_set_label(program))
+
+/**
+ * PATCH_JUMP - Auxiliary command to resolve self referential code
+ * @program: buffer to be updated (struct program *)
+ * @line: position in descriptor buffer where the update will be done; this
+ * value is previously retained in program flow using a reference near
+ * the sequence to be modified.
+ * @new_ref: updated value that will be inserted in descriptor buffer at the
+ * specified line; this value is previously obtained using SET_LABEL
+ * macro near the line that will be used as reference (unsigned int).
+ * For JUMP command, the value represents the offset field (in words).
+ *
+ * Return: 0 in case of success, a negative error code if it fails
+ */
+#define PATCH_JUMP(program, line, new_ref) rta_patch_jmp(program, line, new_ref)
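+
+/*
+ * Editor's note - illustrative sketch, not part of the upstream header. It
+ * condenses the self-referential pattern used by the shared descriptor
+ * constructors in desc/: declare a LABEL and a REFERENCE, emit the forward
+ * JUMP, mark the target with SET_LABEL, then PATCH_JUMP once the target
+ * offset is known.
+ */
+static inline void
+rta_patch_flow_example(struct program *p)
+{
+ LABEL(keyjmp);
+ REFERENCE(pkeyjmp);
+
+ pkeyjmp = JUMP(p, keyjmp, LOCAL_JUMP, ALL_TRUE, SHRD);
+ /* ... commands skipped when the shared descriptor state is still valid ... */
+ SET_LABEL(p, keyjmp);
+ PATCH_JUMP(p, pkeyjmp, keyjmp);
+}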
+
+/**
+ * PATCH_MOVE - Auxiliary command to resolve self referential code
+ * @program: buffer to be updated (struct program *)
+ * @line: position in descriptor buffer where the update will be done; this
+ * value is previously retained in program flow using a reference near
+ * the sequence to be modified.
+ * @new_ref: updated value that will be inserted in descriptor buffer at the
+ * specified line; this value is previously obtained using SET_LABEL
+ * macro near the line that will be used as reference (unsigned int).
+ * For MOVE command, the value represents the offset field (in words).
+ *
+ * Return: 0 in case of success, a negative error code if it fails
+ */
+#define PATCH_MOVE(program, line, new_ref) \
+ rta_patch_move(program, line, new_ref)
+
+/**
+ * PATCH_LOAD - Auxiliary command to resolve self referential code
+ * @program: buffer to be updated (struct program *)
+ * @line: position in descriptor buffer where the update will be done; this
+ * value is previously retained in program flow using a reference near
+ * the sequence to be modified.
+ * @new_ref: updated value that will be inserted in descriptor buffer at the
+ * specified line; this value is previously obtained using SET_LABEL
+ * macro near the line that will be used as reference (unsigned int).
+ * For LOAD command, the value represents the offset field (in words).
+ *
+ * Return: 0 in case of success, a negative error code if it fails
+ */
+#define PATCH_LOAD(program, line, new_ref) \
+ rta_patch_load(program, line, new_ref)
+
+/**
+ * PATCH_STORE - Auxiliary command to resolve self referential code
+ * @program: buffer to be updated (struct program *)
+ * @line: position in descriptor buffer where the update will be done; this
+ * value is previously retained in program flow using a reference near
+ * the sequence to be modified.
+ * @new_ref: updated value that will be inserted in descriptor buffer at the
+ * specified line; this value is previously obtained using SET_LABEL
+ * macro near the line that will be used as reference (unsigned int).
+ * For STORE command, the value represents the offset field (in words).
+ *
+ * Return: 0 in case of success, a negative error code if it fails
+ */
+#define PATCH_STORE(program, line, new_ref) \
+ rta_patch_store(program, line, new_ref)
+
+/**
+ * PATCH_HDR - Auxiliary command to resolve self referential code
+ * @program: buffer to be updated (struct program *)
+ * @line: position in descriptor buffer where the update will be done; this
+ * value is previously retained in program flow using a reference near
+ * the sequence to be modified.
+ * @new_ref: updated value that will be inserted in descriptor buffer at the
+ * specified line; this value is previously obtained using SET_LABEL
+ * macro near the line that will be used as reference (unsigned int).
+ * For HEADER command, the value represents the start index field.
+ *
+ * Return: 0 in case of success, a negative error code if it fails
+ */
+#define PATCH_HDR(program, line, new_ref) \
+ rta_patch_header(program, line, new_ref)
+
+/**
+ * PATCH_RAW - Auxiliary command to resolve self referential code
+ * @program: buffer to be updated (struct program *)
+ * @line: position in descriptor buffer where the update will be done; this
+ * value is previously retained in program flow using a reference near
+ * the sequence to be modified.
+ * @mask: mask to be used for applying the new value (unsigned int). The mask
+ * selects which bits from the provided @new_val are taken into
+ * consideration when overwriting the existing value.
+ * @new_val: updated value that will be masked using the provided mask value
+ * and inserted in descriptor buffer at the specified line.
+ *
+ * Return: 0 in case of success, a negative error code if it fails
+ */
+#define PATCH_RAW(program, line, mask, new_val) \
+ rta_patch_raw(program, line, mask, new_val)
+
+#endif /* __RTA_RTA_H__ */
diff --git a/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/fifo_load_store_cmd.h b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/fifo_load_store_cmd.h
new file mode 100644
index 00000000..8c807aaa
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/fifo_load_store_cmd.h
@@ -0,0 +1,313 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ */
+
+#ifndef __RTA_FIFO_LOAD_STORE_CMD_H__
+#define __RTA_FIFO_LOAD_STORE_CMD_H__
+
+extern enum rta_sec_era rta_sec_era;
+
+static const uint32_t fifo_load_table[][2] = {
+/*1*/ { PKA0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_PK_A0 },
+ { PKA1, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_PK_A1 },
+ { PKA2, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_PK_A2 },
+ { PKA3, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_PK_A3 },
+ { PKB0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_PK_B0 },
+ { PKB1, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_PK_B1 },
+ { PKB2, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_PK_B2 },
+ { PKB3, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_PK_B3 },
+ { PKA, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_PK_A },
+ { PKB, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_PK_B },
+ { PKN, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_PK_N },
+ { SKIP, FIFOLD_CLASS_SKIP },
+ { MSG1, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG },
+ { MSG2, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG },
+ { MSGOUTSNOOP, FIFOLD_CLASS_BOTH | FIFOLD_TYPE_MSG1OUT2 },
+ { MSGINSNOOP, FIFOLD_CLASS_BOTH | FIFOLD_TYPE_MSG },
+ { IV1, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV },
+ { IV2, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_IV },
+ { AAD1, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_AAD },
+ { ICV1, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_ICV },
+ { ICV2, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_ICV },
+ { BIT_DATA, FIFOLD_TYPE_BITDATA },
+/*23*/ { IFIFO, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_NOINFOFIFO }
+};
+
+/*
+ * Allowed FIFO_LOAD input data types for each SEC Era.
+ * Values represent the number of entries from fifo_load_table[] that are
+ * supported.
+ */
+static const unsigned int fifo_load_table_sz[] = {22, 22, 23, 23,
+ 23, 23, 23, 23};
+
+static inline int
+rta_fifo_load(struct program *program, uint32_t src,
+ uint64_t loc, uint32_t length, uint32_t flags)
+{
+ uint32_t opcode = 0;
+ uint32_t ext_length = 0, val = 0;
+ int ret = -EINVAL;
+ bool is_seq_cmd = false;
+ unsigned int start_pc = program->current_pc;
+
+ /* write command type field */
+ if (flags & SEQ) {
+ opcode = CMD_SEQ_FIFO_LOAD;
+ is_seq_cmd = true;
+ } else {
+ opcode = CMD_FIFO_LOAD;
+ }
+
+ /* Parameter checking */
+ if (is_seq_cmd) {
+ if ((flags & IMMED) || (flags & SGF)) {
+ pr_err("SEQ FIFO LOAD: Invalid command\n");
+ goto err;
+ }
+ if ((rta_sec_era <= RTA_SEC_ERA_5) && (flags & AIDF)) {
+ pr_err("SEQ FIFO LOAD: Flag(s) not supported by SEC Era %d\n",
+ USER_SEC_ERA(rta_sec_era));
+ goto err;
+ }
+ if ((flags & VLF) && ((flags & EXT) || (length >> 16))) {
+ pr_err("SEQ FIFO LOAD: Invalid usage of VLF\n");
+ goto err;
+ }
+ } else {
+ if (src == SKIP) {
+ pr_err("FIFO LOAD: Invalid src\n");
+ goto err;
+ }
+ if ((flags & AIDF) || (flags & VLF)) {
+ pr_err("FIFO LOAD: Invalid command\n");
+ goto err;
+ }
+ if ((flags & IMMED) && (flags & SGF)) {
+ pr_err("FIFO LOAD: Invalid usage of SGF and IMM\n");
+ goto err;
+ }
+ if ((flags & IMMED) && ((flags & EXT) || (length >> 16))) {
+ pr_err("FIFO LOAD: Invalid usage of EXT and IMM\n");
+ goto err;
+ }
+ }
+
+ /* write input data type field */
+ ret = __rta_map_opcode(src, fifo_load_table,
+ fifo_load_table_sz[rta_sec_era], &val);
+ if (ret < 0) {
+ pr_err("FIFO LOAD: Source value is not supported. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+ opcode |= val;
+
+ if (flags & CLASS1)
+ opcode |= FIFOLD_CLASS_CLASS1;
+ if (flags & CLASS2)
+ opcode |= FIFOLD_CLASS_CLASS2;
+ if (flags & BOTH)
+ opcode |= FIFOLD_CLASS_BOTH;
+
+ /* write fields: SGF|VLF, IMM, [LC1, LC2, F1] */
+ if (flags & FLUSH1)
+ opcode |= FIFOLD_TYPE_FLUSH1;
+ if (flags & LAST1)
+ opcode |= FIFOLD_TYPE_LAST1;
+ if (flags & LAST2)
+ opcode |= FIFOLD_TYPE_LAST2;
+ if (!is_seq_cmd) {
+ if (flags & SGF)
+ opcode |= FIFOLDST_SGF;
+ if (flags & IMMED)
+ opcode |= FIFOLD_IMM;
+ } else {
+ if (flags & VLF)
+ opcode |= FIFOLDST_VLF;
+ if (flags & AIDF)
+ opcode |= FIFOLD_AIDF;
+ }
+
+ /*
+ * Verify if extended length is required. In case of BITDATA, calculate
+ * number of full bytes and additional valid bits.
+ */
+ if ((flags & EXT) || (length >> 16)) {
+ opcode |= FIFOLDST_EXT;
+ if (src == BIT_DATA) {
+ ext_length = (length / 8);
+ length = (length % 8);
+ } else {
+ ext_length = length;
+ length = 0;
+ }
+ }
+ opcode |= (uint16_t) length;
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+
+ /* write pointer or immediate data field */
+ if (flags & IMMED)
+ __rta_inline_data(program, loc, flags & __COPY_MASK, length);
+ else if (!is_seq_cmd)
+ __rta_out64(program, program->ps, loc);
+
+ /* write extended length field */
+ if (opcode & FIFOLDST_EXT)
+ __rta_out32(program, ext_length);
+
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return ret;
+}
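+
+/*
+ * Editor's note - illustrative sketch, not part of the upstream header. It
+ * only restates the BIT_DATA extended-length split implemented above: the
+ * bit count is broken into full bytes (carried in the extended length word)
+ * plus the remaining valid bits (carried in the opcode length field).
+ */
+static inline void
+rta_bit_data_split_example(uint32_t length_bits,
+      uint32_t *ext_bytes, uint32_t *rem_bits)
+{
+ *ext_bytes = length_bits / 8; /* e.g. 1000 bits -> 125 full bytes */
+ *rem_bits = length_bits % 8; /* e.g. 1000 bits -> 0 leftover bits */
+}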
+
+static const uint32_t fifo_store_table[][2] = {
+/*1*/ { PKA0, FIFOST_TYPE_PKHA_A0 },
+ { PKA1, FIFOST_TYPE_PKHA_A1 },
+ { PKA2, FIFOST_TYPE_PKHA_A2 },
+ { PKA3, FIFOST_TYPE_PKHA_A3 },
+ { PKB0, FIFOST_TYPE_PKHA_B0 },
+ { PKB1, FIFOST_TYPE_PKHA_B1 },
+ { PKB2, FIFOST_TYPE_PKHA_B2 },
+ { PKB3, FIFOST_TYPE_PKHA_B3 },
+ { PKA, FIFOST_TYPE_PKHA_A },
+ { PKB, FIFOST_TYPE_PKHA_B },
+ { PKN, FIFOST_TYPE_PKHA_N },
+ { PKE, FIFOST_TYPE_PKHA_E_JKEK },
+ { RNG, FIFOST_TYPE_RNGSTORE },
+ { RNGOFIFO, FIFOST_TYPE_RNGFIFO },
+ { AFHA_SBOX, FIFOST_TYPE_AF_SBOX_JKEK },
+ { MDHA_SPLIT_KEY, FIFOST_CLASS_CLASS2KEY | FIFOST_TYPE_SPLIT_KEK },
+ { MSG, FIFOST_TYPE_MESSAGE_DATA },
+ { KEY1, FIFOST_CLASS_CLASS1KEY | FIFOST_TYPE_KEY_KEK },
+ { KEY2, FIFOST_CLASS_CLASS2KEY | FIFOST_TYPE_KEY_KEK },
+ { OFIFO, FIFOST_TYPE_OUTFIFO_KEK},
+ { SKIP, FIFOST_TYPE_SKIP },
+/*22*/ { METADATA, FIFOST_TYPE_METADATA},
+ { MSG_CKSUM, FIFOST_TYPE_MESSAGE_DATA2 }
+};
+
+/*
+ * Allowed FIFO_STORE output data types for each SEC Era.
+ * Values represent the number of entries from fifo_store_table[] that are
+ * supported.
+ */
+static const unsigned int fifo_store_table_sz[] = {21, 21, 21, 21,
+ 22, 22, 22, 23};
+
+static inline int
+rta_fifo_store(struct program *program, uint32_t src,
+ uint32_t encrypt_flags, uint64_t dst,
+ uint32_t length, uint32_t flags)
+{
+ uint32_t opcode = 0;
+ uint32_t val = 0;
+ int ret = -EINVAL;
+ bool is_seq_cmd = false;
+ unsigned int start_pc = program->current_pc;
+
+ /* write command type field */
+ if (flags & SEQ) {
+ opcode = CMD_SEQ_FIFO_STORE;
+ is_seq_cmd = true;
+ } else {
+ opcode = CMD_FIFO_STORE;
+ }
+
+ /* Parameter checking */
+ if (is_seq_cmd) {
+ if ((flags & VLF) && ((length >> 16) || (flags & EXT))) {
+ pr_err("SEQ FIFO STORE: Invalid usage of VLF\n");
+ goto err;
+ }
+ if (dst) {
+ pr_err("SEQ FIFO STORE: Invalid command\n");
+ goto err;
+ }
+ if ((src == METADATA) && (flags & (CONT | EXT))) {
+ pr_err("SEQ FIFO STORE: Invalid flags\n");
+ goto err;
+ }
+ } else {
+ if (((src == RNGOFIFO) && ((dst) || (flags & EXT))) ||
+ (src == METADATA)) {
+ pr_err("FIFO STORE: Invalid destination\n");
+ goto err;
+ }
+ }
+ if ((rta_sec_era == RTA_SEC_ERA_7) && (src == AFHA_SBOX)) {
+ pr_err("FIFO STORE: AFHA S-box not supported by SEC Era %d\n",
+ USER_SEC_ERA(rta_sec_era));
+ goto err;
+ }
+
+ /* write output data type field */
+ ret = __rta_map_opcode(src, fifo_store_table,
+ fifo_store_table_sz[rta_sec_era], &val);
+ if (ret < 0) {
+ pr_err("FIFO STORE: Source type not supported. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+ opcode |= val;
+
+ if (encrypt_flags & TK)
+ opcode |= (0x1 << FIFOST_TYPE_SHIFT);
+ if (encrypt_flags & EKT) {
+ if (rta_sec_era == RTA_SEC_ERA_1) {
+ pr_err("FIFO STORE: AES-CCM source types not supported\n");
+ ret = -EINVAL;
+ goto err;
+ }
+ opcode |= (0x10 << FIFOST_TYPE_SHIFT);
+ opcode &= (uint32_t)~(0x20 << FIFOST_TYPE_SHIFT);
+ }
+
+ /* write flags fields */
+ if (flags & CONT)
+ opcode |= FIFOST_CONT;
+ if ((flags & VLF) && (is_seq_cmd))
+ opcode |= FIFOLDST_VLF;
+ if ((flags & SGF) && (!is_seq_cmd))
+ opcode |= FIFOLDST_SGF;
+ if (flags & CLASS1)
+ opcode |= FIFOST_CLASS_CLASS1KEY;
+ if (flags & CLASS2)
+ opcode |= FIFOST_CLASS_CLASS2KEY;
+ if (flags & BOTH)
+ opcode |= FIFOST_CLASS_BOTH;
+
+ /* Verify if extended length is required */
+ if ((length >> 16) || (flags & EXT))
+ opcode |= FIFOLDST_EXT;
+ else
+ opcode |= (uint16_t) length;
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+
+ /* write pointer field */
+ if ((!is_seq_cmd) && (dst))
+ __rta_out64(program, program->ps, dst);
+
+ /* write extended length field */
+ if (opcode & FIFOLDST_EXT)
+ __rta_out32(program, length);
+
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return ret;
+}
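+
+/*
+ * Illustrative use (not from the upstream sources): assuming a struct
+ * program *p that has been initialized elsewhere (not shown in this file)
+ * and a hypothetical DMA-able output address out_addr, a plain FIFO STORE
+ * of 16 bytes of message data could be encoded as:
+ *
+ *	rta_fifo_store(p, MSG, 0, out_addr, 16, 0);
+ *
+ * The return value is the program counter of the command, or a negative
+ * error code if parameter checking fails.
+ */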
+
+#endif /* __RTA_FIFO_LOAD_STORE_CMD_H__ */
diff --git a/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/header_cmd.h b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/header_cmd.h
new file mode 100644
index 00000000..0c7ea938
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/header_cmd.h
@@ -0,0 +1,218 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ */
+
+#ifndef __RTA_HEADER_CMD_H__
+#define __RTA_HEADER_CMD_H__
+
+extern enum rta_sec_era rta_sec_era;
+
+/* Allowed job header flags for each SEC Era. */
+static const uint32_t job_header_flags[] = {
+ DNR | TD | MTD | SHR | REO,
+ DNR | TD | MTD | SHR | REO | RSMS,
+ DNR | TD | MTD | SHR | REO | RSMS,
+ DNR | TD | MTD | SHR | REO | RSMS,
+ DNR | TD | MTD | SHR | REO | RSMS | EXT,
+ DNR | TD | MTD | SHR | REO | RSMS | EXT,
+ DNR | TD | MTD | SHR | REO | RSMS | EXT,
+ DNR | TD | MTD | SHR | REO | EXT
+};
+
+/* Allowed shared header flags for each SEC Era. */
+static const uint32_t shr_header_flags[] = {
+ DNR | SC | PD,
+ DNR | SC | PD | CIF,
+ DNR | SC | PD | CIF,
+ DNR | SC | PD | CIF | RIF,
+ DNR | SC | PD | CIF | RIF,
+ DNR | SC | PD | CIF | RIF,
+ DNR | SC | PD | CIF | RIF,
+ DNR | SC | PD | CIF | RIF
+};
+
+static inline int
+rta_shr_header(struct program *program,
+ enum rta_share_type share,
+ unsigned int start_idx,
+ uint32_t flags)
+{
+ uint32_t opcode = CMD_SHARED_DESC_HDR;
+ unsigned int start_pc = program->current_pc;
+
+ if (flags & ~shr_header_flags[rta_sec_era]) {
+ pr_err("SHR_DESC: Flag(s) not supported by SEC Era %d\n",
+ USER_SEC_ERA(rta_sec_era));
+ goto err;
+ }
+
+ switch (share) {
+ case SHR_ALWAYS:
+ opcode |= HDR_SHARE_ALWAYS;
+ break;
+ case SHR_SERIAL:
+ opcode |= HDR_SHARE_SERIAL;
+ break;
+ case SHR_NEVER:
+ /*
+ * opcode |= HDR_SHARE_NEVER;
+ * HDR_SHARE_NEVER is 0
+ */
+ break;
+ case SHR_WAIT:
+ opcode |= HDR_SHARE_WAIT;
+ break;
+ default:
+ pr_err("SHR_DESC: SHARE VALUE is not supported. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+
+ opcode |= HDR_ONE;
+ opcode |= (start_idx << HDR_START_IDX_SHIFT) & HDR_START_IDX_MASK;
+
+ if (flags & DNR)
+ opcode |= HDR_DNR;
+ if (flags & CIF)
+ opcode |= HDR_CLEAR_IFIFO;
+ if (flags & SC)
+ opcode |= HDR_SAVECTX;
+ if (flags & PD)
+ opcode |= HDR_PROP_DNR;
+ if (flags & RIF)
+ opcode |= HDR_RIF;
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+
+ if (program->current_instruction == 1)
+ program->shrhdr = program->buffer;
+
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return -EINVAL;
+}
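+
+/*
+ * Illustrative use (not from the upstream sources): with an already
+ * initialized struct program *p, a shared descriptor header that allows
+ * serial sharing, starts execution at word 1 and saves the running
+ * context could be encoded as:
+ *
+ *	rta_shr_header(p, SHR_SERIAL, 1, SC);
+ */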
+
+static inline int
+rta_job_header(struct program *program,
+ enum rta_share_type share,
+ unsigned int start_idx,
+ uint64_t shr_desc, uint32_t flags,
+ uint32_t ext_flags)
+{
+ uint32_t opcode = CMD_DESC_HDR;
+ uint32_t hdr_ext = 0;
+ unsigned int start_pc = program->current_pc;
+
+ if (flags & ~job_header_flags[rta_sec_era]) {
+ pr_err("JOB_DESC: Flag(s) not supported by SEC Era %d\n",
+ USER_SEC_ERA(rta_sec_era));
+ goto err;
+ }
+
+ switch (share) {
+ case SHR_ALWAYS:
+ opcode |= HDR_SHARE_ALWAYS;
+ break;
+ case SHR_SERIAL:
+ opcode |= HDR_SHARE_SERIAL;
+ break;
+ case SHR_NEVER:
+ /*
+ * opcode |= HDR_SHARE_NEVER;
+ * HDR_SHARE_NEVER is 0
+ */
+ break;
+ case SHR_WAIT:
+ opcode |= HDR_SHARE_WAIT;
+ break;
+ case SHR_DEFER:
+ opcode |= HDR_SHARE_DEFER;
+ break;
+ default:
+ pr_err("JOB_DESC: SHARE VALUE is not supported. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+
+ if ((flags & TD) && (flags & REO)) {
+ pr_err("JOB_DESC: REO flag not supported for trusted descriptors. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+
+ if ((rta_sec_era < RTA_SEC_ERA_7) && (flags & MTD) && !(flags & TD)) {
+ pr_err("JOB_DESC: Trying to MTD a descriptor that is not a TD. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+
+ if ((flags & EXT) && !(flags & SHR) && (start_idx < 2)) {
+ pr_err("JOB_DESC: Start index must be >= 2 in case of no SHR and EXT. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+
+ opcode |= HDR_ONE;
+ opcode |= ((start_idx << HDR_START_IDX_SHIFT) & HDR_START_IDX_MASK);
+
+ if (flags & EXT) {
+ opcode |= HDR_EXT;
+
+ if (ext_flags & DSV) {
+ hdr_ext |= HDR_EXT_DSEL_VALID;
+ hdr_ext |= ext_flags & DSEL_MASK;
+ }
+
+ if (ext_flags & FTD) {
+ if (rta_sec_era <= RTA_SEC_ERA_5) {
+ pr_err("JOB_DESC: Fake trusted descriptor not supported by SEC Era %d\n",
+ USER_SEC_ERA(rta_sec_era));
+ goto err;
+ }
+
+ hdr_ext |= HDR_EXT_FTD;
+ }
+ }
+ if (flags & RSMS)
+ opcode |= HDR_RSLS;
+ if (flags & DNR)
+ opcode |= HDR_DNR;
+ if (flags & TD)
+ opcode |= HDR_TRUSTED;
+ if (flags & MTD)
+ opcode |= HDR_MAKE_TRUSTED;
+ if (flags & REO)
+ opcode |= HDR_REVERSE;
+ if (flags & SHR)
+ opcode |= HDR_SHARED;
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+
+ if (program->current_instruction == 1) {
+ program->jobhdr = program->buffer;
+
+ if (opcode & HDR_SHARED)
+ __rta_out64(program, program->ps, shr_desc);
+ }
+
+ if (flags & EXT)
+ __rta_out32(program, hdr_ext);
+
+ /* Note: descriptor length is set in program_finalize routine */
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return -EINVAL;
+}
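+
+/*
+ * Illustrative use (not from the upstream sources): with an already
+ * initialized struct program *p and a hypothetical shared descriptor
+ * address shr_desc_addr, a job descriptor header that references that
+ * shared descriptor could be encoded as:
+ *
+ *	rta_job_header(p, SHR_SERIAL, 1, shr_desc_addr, SHR, 0);
+ *
+ * Because SHR is set (and the header is the first command written into
+ * the buffer), the shared descriptor pointer is emitted right after the
+ * header word.
+ */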
+
+#endif /* __RTA_HEADER_CMD_H__ */
diff --git a/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/jump_cmd.h b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/jump_cmd.h
new file mode 100644
index 00000000..546d22e9
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/jump_cmd.h
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ */
+
+#ifndef __RTA_JUMP_CMD_H__
+#define __RTA_JUMP_CMD_H__
+
+extern enum rta_sec_era rta_sec_era;
+
+static const uint32_t jump_test_cond[][2] = {
+ { NIFP, JUMP_COND_NIFP },
+ { NIP, JUMP_COND_NIP },
+ { NOP, JUMP_COND_NOP },
+ { NCP, JUMP_COND_NCP },
+ { CALM, JUMP_COND_CALM },
+ { SELF, JUMP_COND_SELF },
+ { SHRD, JUMP_COND_SHRD },
+ { JQP, JUMP_COND_JQP },
+ { MATH_Z, JUMP_COND_MATH_Z },
+ { MATH_N, JUMP_COND_MATH_N },
+ { MATH_NV, JUMP_COND_MATH_NV },
+ { MATH_C, JUMP_COND_MATH_C },
+ { PK_0, JUMP_COND_PK_0 },
+ { PK_GCD_1, JUMP_COND_PK_GCD_1 },
+ { PK_PRIME, JUMP_COND_PK_PRIME },
+ { CLASS1, JUMP_CLASS_CLASS1 },
+ { CLASS2, JUMP_CLASS_CLASS2 },
+ { BOTH, JUMP_CLASS_BOTH }
+};
+
+static const uint32_t jump_test_math_cond[][2] = {
+ { MATH_Z, JUMP_COND_MATH_Z },
+ { MATH_N, JUMP_COND_MATH_N },
+ { MATH_NV, JUMP_COND_MATH_NV },
+ { MATH_C, JUMP_COND_MATH_C }
+};
+
+static const uint32_t jump_src_dst[][2] = {
+ { MATH0, JUMP_SRC_DST_MATH0 },
+ { MATH1, JUMP_SRC_DST_MATH1 },
+ { MATH2, JUMP_SRC_DST_MATH2 },
+ { MATH3, JUMP_SRC_DST_MATH3 },
+ { DPOVRD, JUMP_SRC_DST_DPOVRD },
+ { SEQINSZ, JUMP_SRC_DST_SEQINLEN },
+ { SEQOUTSZ, JUMP_SRC_DST_SEQOUTLEN },
+ { VSEQINSZ, JUMP_SRC_DST_VARSEQINLEN },
+ { VSEQOUTSZ, JUMP_SRC_DST_VARSEQOUTLEN }
+};
+
+static inline int
+rta_jump(struct program *program, uint64_t address,
+ enum rta_jump_type jump_type,
+ enum rta_jump_cond test_type,
+ uint32_t test_condition, uint32_t src_dst)
+{
+ uint32_t opcode = CMD_JUMP;
+ unsigned int start_pc = program->current_pc;
+ int ret = -EINVAL;
+
+ if (((jump_type == GOSUB) || (jump_type == RETURN)) &&
+ (rta_sec_era < RTA_SEC_ERA_4)) {
+ pr_err("JUMP: Jump type not supported by SEC Era %d\n",
+ USER_SEC_ERA(rta_sec_era));
+ goto err;
+ }
+
+ if (((jump_type == LOCAL_JUMP_INC) || (jump_type == LOCAL_JUMP_DEC)) &&
+ (rta_sec_era <= RTA_SEC_ERA_5)) {
+ pr_err("JUMP_INCDEC: Jump type not supported by SEC Era %d\n",
+ USER_SEC_ERA(rta_sec_era));
+ goto err;
+ }
+
+ switch (jump_type) {
+ case (LOCAL_JUMP):
+ /*
+ * opcode |= JUMP_TYPE_LOCAL;
+ * JUMP_TYPE_LOCAL is 0
+ */
+ break;
+ case (HALT):
+ opcode |= JUMP_TYPE_HALT;
+ break;
+ case (HALT_STATUS):
+ opcode |= JUMP_TYPE_HALT_USER;
+ break;
+ case (FAR_JUMP):
+ opcode |= JUMP_TYPE_NONLOCAL;
+ break;
+ case (GOSUB):
+ opcode |= JUMP_TYPE_GOSUB;
+ break;
+ case (RETURN):
+ opcode |= JUMP_TYPE_RETURN;
+ break;
+ case (LOCAL_JUMP_INC):
+ opcode |= JUMP_TYPE_LOCAL_INC;
+ break;
+ case (LOCAL_JUMP_DEC):
+ opcode |= JUMP_TYPE_LOCAL_DEC;
+ break;
+ default:
+ pr_err("JUMP: Invalid jump type. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+
+ switch (test_type) {
+ case (ALL_TRUE):
+ /*
+ * opcode |= JUMP_TEST_ALL;
+ * JUMP_TEST_ALL is 0
+ */
+ break;
+ case (ALL_FALSE):
+ opcode |= JUMP_TEST_INVALL;
+ break;
+ case (ANY_TRUE):
+ opcode |= JUMP_TEST_ANY;
+ break;
+ case (ANY_FALSE):
+ opcode |= JUMP_TEST_INVANY;
+ break;
+ default:
+ pr_err("JUMP: test type not supported. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+
+ /* write test condition field */
+ if ((jump_type != LOCAL_JUMP_INC) && (jump_type != LOCAL_JUMP_DEC)) {
+ __rta_map_flags(test_condition, jump_test_cond,
+ ARRAY_SIZE(jump_test_cond), &opcode);
+ } else {
+ uint32_t val = 0;
+
+ ret = __rta_map_opcode(src_dst, jump_src_dst,
+ ARRAY_SIZE(jump_src_dst), &val);
+ if (ret < 0) {
+ pr_err("JUMP_INCDEC: SRC_DST not supported. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+ opcode |= val;
+
+ __rta_map_flags(test_condition, jump_test_math_cond,
+ ARRAY_SIZE(jump_test_math_cond), &opcode);
+ }
+
+ /* write local offset field for local jumps and user-defined halt */
+ if ((jump_type == LOCAL_JUMP) || (jump_type == LOCAL_JUMP_INC) ||
+ (jump_type == LOCAL_JUMP_DEC) || (jump_type == GOSUB) ||
+ (jump_type == HALT_STATUS))
+ opcode |= (uint32_t)(address & JUMP_OFFSET_MASK);
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+
+ if (jump_type == FAR_JUMP)
+ __rta_out64(program, program->ps, address);
+
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return ret;
+}
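+
+/*
+ * Illustrative use (not from the upstream sources): with an already
+ * initialized struct program *p, a local jump to the next command that
+ * is taken once the CALM test condition is true could be encoded as:
+ *
+ *	rta_jump(p, 1, LOCAL_JUMP, ALL_TRUE, CALM, 0);
+ *
+ * For local jumps the address argument is written into the local offset
+ * field; src_dst is only consulted for LOCAL_JUMP_INC / LOCAL_JUMP_DEC.
+ */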
+
+#endif /* __RTA_JUMP_CMD_H__ */
diff --git a/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/key_cmd.h b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/key_cmd.h
new file mode 100644
index 00000000..1ec21234
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/key_cmd.h
@@ -0,0 +1,189 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ */
+
+#ifndef __RTA_KEY_CMD_H__
+#define __RTA_KEY_CMD_H__
+
+extern enum rta_sec_era rta_sec_era;
+
+/* Allowed encryption flags for each SEC Era */
+static const uint32_t key_enc_flags[] = {
+ ENC,
+ ENC | NWB | EKT | TK,
+ ENC | NWB | EKT | TK,
+ ENC | NWB | EKT | TK,
+ ENC | NWB | EKT | TK,
+ ENC | NWB | EKT | TK,
+ ENC | NWB | EKT | TK | PTS,
+ ENC | NWB | EKT | TK | PTS
+};
+
+static inline int
+rta_key(struct program *program, uint32_t key_dst,
+ uint32_t encrypt_flags, uint64_t src, uint32_t length,
+ uint32_t flags)
+{
+ uint32_t opcode = 0;
+ bool is_seq_cmd = false;
+ unsigned int start_pc = program->current_pc;
+
+ if (encrypt_flags & ~key_enc_flags[rta_sec_era]) {
+ pr_err("KEY: Flag(s) not supported by SEC Era %d\n",
+ USER_SEC_ERA(rta_sec_era));
+ goto err;
+ }
+
+ /* write cmd type */
+ if (flags & SEQ) {
+ opcode = CMD_SEQ_KEY;
+ is_seq_cmd = true;
+ } else {
+ opcode = CMD_KEY;
+ }
+
+ /* check parameters */
+ if (is_seq_cmd) {
+ if ((flags & IMMED) || (flags & SGF)) {
+ pr_err("SEQKEY: Invalid flag. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+ if ((rta_sec_era <= RTA_SEC_ERA_5) &&
+ ((flags & VLF) || (flags & AIDF))) {
+ pr_err("SEQKEY: Flag(s) not supported by SEC Era %d\n",
+ USER_SEC_ERA(rta_sec_era));
+ goto err;
+ }
+ } else {
+ if ((flags & AIDF) || (flags & VLF)) {
+ pr_err("KEY: Invalid flag. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+ if ((flags & SGF) && (flags & IMMED)) {
+ pr_err("KEY: Invalid flag. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+ }
+
+ if ((encrypt_flags & PTS) &&
+ ((encrypt_flags & ENC) || (encrypt_flags & NWB) ||
+ (key_dst == PKE))) {
+ pr_err("KEY: Invalid flag / destination. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ goto err;
+ }
+
+ if (key_dst == AFHA_SBOX) {
+ if (rta_sec_era == RTA_SEC_ERA_7) {
+ pr_err("KEY: AFHA S-box not supported by SEC Era %d\n",
+ USER_SEC_ERA(rta_sec_era));
+ goto err;
+ }
+
+ if (flags & IMMED) {
+ pr_err("KEY: Invalid flag. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+
+ /*
+ * Sbox data loaded into the ARC-4 processor must be exactly
+ * 258 bytes long, or else a data sequence error is generated.
+ */
+ if (length != 258) {
+ pr_err("KEY: Invalid length. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+ }
+
+ /* write key destination and class fields */
+ switch (key_dst) {
+ case (KEY1):
+ opcode |= KEY_DEST_CLASS1;
+ break;
+ case (KEY2):
+ opcode |= KEY_DEST_CLASS2;
+ break;
+ case (PKE):
+ opcode |= KEY_DEST_CLASS1 | KEY_DEST_PKHA_E;
+ break;
+ case (AFHA_SBOX):
+ opcode |= KEY_DEST_CLASS1 | KEY_DEST_AFHA_SBOX;
+ break;
+ case (MDHA_SPLIT_KEY):
+ opcode |= KEY_DEST_CLASS2 | KEY_DEST_MDHA_SPLIT;
+ break;
+ default:
+ pr_err("KEY: Invalid destination. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ goto err;
+ }
+
+ /* write key length */
+ length &= KEY_LENGTH_MASK;
+ opcode |= length;
+
+ /* write key command specific flags */
+ if (encrypt_flags & ENC) {
+ /* Encrypted (black) keys must be padded to 8 bytes (CCM) or
+ * 16 bytes (ECB) depending on EKT bit. AES-CCM encrypted keys
+ * (EKT = 1) have 6-byte nonce and 6-byte MAC after padding.
+ */
+ opcode |= KEY_ENC;
+ if (encrypt_flags & EKT) {
+ opcode |= KEY_EKT;
+ length = ALIGN(length, 8);
+ length += 12;
+ } else {
+ length = ALIGN(length, 16);
+ }
+ if (encrypt_flags & TK)
+ opcode |= KEY_TK;
+ }
+ if (encrypt_flags & NWB)
+ opcode |= KEY_NWB;
+ if (encrypt_flags & PTS)
+ opcode |= KEY_PTS;
+
+ /* write general command flags */
+ if (!is_seq_cmd) {
+ if (flags & IMMED)
+ opcode |= KEY_IMM;
+ if (flags & SGF)
+ opcode |= KEY_SGF;
+ } else {
+ if (flags & AIDF)
+ opcode |= KEY_AIDF;
+ if (flags & VLF)
+ opcode |= KEY_VLF;
+ }
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+
+ if (flags & IMMED)
+ __rta_inline_data(program, src, flags & __COPY_MASK, length);
+ else
+ __rta_out64(program, program->ps, src);
+
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return -EINVAL;
+}
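+
+/*
+ * Illustrative use (not from the upstream sources): with an already
+ * initialized struct program *p and a hypothetical key buffer address
+ * key_addr, loading a 16-byte plaintext key into the class 1 key
+ * register by pointer could be encoded as:
+ *
+ *	rta_key(p, KEY1, 0, key_addr, 16, 0);
+ *
+ * With no IMMED flag the command carries a pointer to the key rather
+ * than the key bytes themselves.
+ */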
+
+#endif /* __RTA_KEY_CMD_H__ */
diff --git a/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/load_cmd.h b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/load_cmd.h
new file mode 100644
index 00000000..f3b0dcfc
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/load_cmd.h
@@ -0,0 +1,302 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ */
+
+#ifndef __RTA_LOAD_CMD_H__
+#define __RTA_LOAD_CMD_H__
+
+extern enum rta_sec_era rta_sec_era;
+
+/* Allowed length and offset masks for each SEC Era in case DST = DCTRL */
+static const uint32_t load_len_mask_allowed[] = {
+ 0x000000ee,
+ 0x000000fe,
+ 0x000000fe,
+ 0x000000fe,
+ 0x000000fe,
+ 0x000000fe,
+ 0x000000fe,
+ 0x000000fe
+};
+
+static const uint32_t load_off_mask_allowed[] = {
+ 0x0000000f,
+ 0x000000ff,
+ 0x000000ff,
+ 0x000000ff,
+ 0x000000ff,
+ 0x000000ff,
+ 0x000000ff,
+ 0x000000ff
+};
+
+#define IMM_MUST 0
+#define IMM_CAN 1
+#define IMM_NO 2
+#define IMM_DSNM 3 /* the src type doesn't matter */
+
+enum e_lenoff {
+ LENOF_03,
+ LENOF_4,
+ LENOF_48,
+ LENOF_448,
+ LENOF_18,
+ LENOF_32,
+ LENOF_24,
+ LENOF_16,
+ LENOF_8,
+ LENOF_128,
+ LENOF_256,
+ DSNM /* the length/offset values don't matter */
+};
+
+struct load_map {
+ uint32_t dst;
+ uint32_t dst_opcode;
+ enum e_lenoff len_off;
+ uint8_t imm_src;
+
+};
+
+static const struct load_map load_dst[] = {
+/*1*/ { KEY1SZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_KEYSZ_REG,
+ LENOF_4, IMM_MUST },
+ { KEY2SZ, LDST_CLASS_2_CCB | LDST_SRCDST_WORD_KEYSZ_REG,
+ LENOF_4, IMM_MUST },
+ { DATA1SZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_DATASZ_REG,
+ LENOF_448, IMM_MUST },
+ { DATA2SZ, LDST_CLASS_2_CCB | LDST_SRCDST_WORD_DATASZ_REG,
+ LENOF_448, IMM_MUST },
+ { ICV1SZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_ICVSZ_REG,
+ LENOF_4, IMM_MUST },
+ { ICV2SZ, LDST_CLASS_2_CCB | LDST_SRCDST_WORD_ICVSZ_REG,
+ LENOF_4, IMM_MUST },
+ { CCTRL, LDST_CLASS_IND_CCB | LDST_SRCDST_WORD_CHACTRL,
+ LENOF_4, IMM_MUST },
+ { DCTRL, LDST_CLASS_DECO | LDST_IMM | LDST_SRCDST_WORD_DECOCTRL,
+ DSNM, IMM_DSNM },
+ { ICTRL, LDST_CLASS_IND_CCB | LDST_SRCDST_WORD_IRQCTRL,
+ LENOF_4, IMM_MUST },
+ { DPOVRD, LDST_CLASS_DECO | LDST_SRCDST_WORD_DECO_PCLOVRD,
+ LENOF_4, IMM_MUST },
+ { CLRW, LDST_CLASS_IND_CCB | LDST_SRCDST_WORD_CLRW,
+ LENOF_4, IMM_MUST },
+ { AAD1SZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_DECO_AAD_SZ,
+ LENOF_4, IMM_MUST },
+ { IV1SZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_CLASS1_IV_SZ,
+ LENOF_4, IMM_MUST },
+ { ALTDS1, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_ALTDS_CLASS1,
+ LENOF_448, IMM_MUST },
+ { PKASZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_PKHA_A_SZ,
+ LENOF_4, IMM_MUST, },
+ { PKBSZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_PKHA_B_SZ,
+ LENOF_4, IMM_MUST },
+ { PKNSZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_PKHA_N_SZ,
+ LENOF_4, IMM_MUST },
+ { PKESZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_PKHA_E_SZ,
+ LENOF_4, IMM_MUST },
+ { NFIFO, LDST_CLASS_IND_CCB | LDST_SRCDST_WORD_INFO_FIFO,
+ LENOF_48, IMM_MUST },
+ { IFIFO, LDST_SRCDST_BYTE_INFIFO, LENOF_18, IMM_MUST },
+ { OFIFO, LDST_SRCDST_BYTE_OUTFIFO, LENOF_18, IMM_MUST },
+ { MATH0, LDST_CLASS_DECO | LDST_SRCDST_WORD_DECO_MATH0,
+ LENOF_32, IMM_CAN },
+ { MATH1, LDST_CLASS_DECO | LDST_SRCDST_WORD_DECO_MATH1,
+ LENOF_24, IMM_CAN },
+ { MATH2, LDST_CLASS_DECO | LDST_SRCDST_WORD_DECO_MATH2,
+ LENOF_16, IMM_CAN },
+ { MATH3, LDST_CLASS_DECO | LDST_SRCDST_WORD_DECO_MATH3,
+ LENOF_8, IMM_CAN },
+ { CONTEXT1, LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT,
+ LENOF_128, IMM_CAN },
+ { CONTEXT2, LDST_CLASS_2_CCB | LDST_SRCDST_BYTE_CONTEXT,
+ LENOF_128, IMM_CAN },
+ { KEY1, LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_KEY,
+ LENOF_32, IMM_CAN },
+ { KEY2, LDST_CLASS_2_CCB | LDST_SRCDST_BYTE_KEY,
+ LENOF_32, IMM_CAN },
+ { DESCBUF, LDST_CLASS_DECO | LDST_SRCDST_WORD_DESCBUF,
+ LENOF_256, IMM_NO },
+ { DPID, LDST_CLASS_DECO | LDST_SRCDST_WORD_PID,
+ LENOF_448, IMM_MUST },
+/*32*/ { IDFNS, LDST_SRCDST_WORD_IFNSR, LENOF_18, IMM_MUST },
+ { ODFNS, LDST_SRCDST_WORD_OFNSR, LENOF_18, IMM_MUST },
+ { ALTSOURCE, LDST_SRCDST_BYTE_ALTSOURCE, LENOF_18, IMM_MUST },
+/*35*/ { NFIFO_SZL, LDST_SRCDST_WORD_INFO_FIFO_SZL, LENOF_48, IMM_MUST },
+ { NFIFO_SZM, LDST_SRCDST_WORD_INFO_FIFO_SZM, LENOF_03, IMM_MUST },
+ { NFIFO_L, LDST_SRCDST_WORD_INFO_FIFO_L, LENOF_48, IMM_MUST },
+ { NFIFO_M, LDST_SRCDST_WORD_INFO_FIFO_M, LENOF_03, IMM_MUST },
+ { SZL, LDST_SRCDST_WORD_SZL, LENOF_48, IMM_MUST },
+/*40*/ { SZM, LDST_SRCDST_WORD_SZM, LENOF_03, IMM_MUST }
+};
+
+/*
+ * Allowed LOAD destinations for each SEC Era.
+ * Values represent the number of entries from load_dst[] that are supported.
+ */
+static const unsigned int load_dst_sz[] = { 31, 34, 34, 40, 40, 40, 40, 40 };
+
+static inline int
+load_check_len_offset(int pos, uint32_t length, uint32_t offset)
+{
+ if ((load_dst[pos].dst == DCTRL) &&
+ ((length & ~load_len_mask_allowed[rta_sec_era]) ||
+ (offset & ~load_off_mask_allowed[rta_sec_era])))
+ goto err;
+
+ switch (load_dst[pos].len_off) {
+ case (LENOF_03):
+ if ((length > 3) || (offset))
+ goto err;
+ break;
+ case (LENOF_4):
+ if ((length != 4) || (offset != 0))
+ goto err;
+ break;
+ case (LENOF_48):
+ if (!(((length == 4) && (offset == 0)) ||
+ ((length == 8) && (offset == 0))))
+ goto err;
+ break;
+ case (LENOF_448):
+ if (!(((length == 4) && (offset == 0)) ||
+ ((length == 4) && (offset == 4)) ||
+ ((length == 8) && (offset == 0))))
+ goto err;
+ break;
+ case (LENOF_18):
+ if ((length < 1) || (length > 8) || (offset != 0))
+ goto err;
+ break;
+ case (LENOF_32):
+ if ((length > 32) || (offset > 32) || ((offset + length) > 32))
+ goto err;
+ break;
+ case (LENOF_24):
+ if ((length > 24) || (offset > 24) || ((offset + length) > 24))
+ goto err;
+ break;
+ case (LENOF_16):
+ if ((length > 16) || (offset > 16) || ((offset + length) > 16))
+ goto err;
+ break;
+ case (LENOF_8):
+ if ((length > 8) || (offset > 8) || ((offset + length) > 8))
+ goto err;
+ break;
+ case (LENOF_128):
+ if ((length > 128) || (offset > 128) ||
+ ((offset + length) > 128))
+ goto err;
+ break;
+ case (LENOF_256):
+ if ((length < 1) || (length > 256) || ((length + offset) > 256))
+ goto err;
+ break;
+ case (DSNM):
+ break;
+ default:
+ goto err;
+ }
+
+ return 0;
+err:
+ return -EINVAL;
+}
+
+static inline int
+rta_load(struct program *program, uint64_t src, uint64_t dst,
+ uint32_t offset, uint32_t length, uint32_t flags)
+{
+ uint32_t opcode = 0;
+ int pos = -1, ret = -EINVAL;
+ unsigned int start_pc = program->current_pc, i;
+
+ if (flags & SEQ)
+ opcode = CMD_SEQ_LOAD;
+ else
+ opcode = CMD_LOAD;
+
+ if ((length & 0xffffff00) || (offset & 0xffffff00)) {
+ pr_err("LOAD: Bad length/offset passed. Should be 8 bits\n");
+ goto err;
+ }
+
+ if (flags & SGF)
+ opcode |= LDST_SGF;
+ if (flags & VLF)
+ opcode |= LDST_VLF;
+
+ /* check load destination, length and offset and source type */
+ for (i = 0; i < load_dst_sz[rta_sec_era]; i++)
+ if (dst == load_dst[i].dst) {
+ pos = (int)i;
+ break;
+ }
+ if (-1 == pos) {
+ pr_err("LOAD: Invalid dst. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+
+ if (flags & IMMED) {
+ if (load_dst[pos].imm_src == IMM_NO) {
+ pr_err("LOAD: Invalid source type. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+ opcode |= LDST_IMM;
+ } else if (load_dst[pos].imm_src == IMM_MUST) {
+ pr_err("LOAD IMM: Invalid source type. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+
+ ret = load_check_len_offset(pos, length, offset);
+ if (ret < 0) {
+ pr_err("LOAD: Invalid length/offset. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+
+ opcode |= load_dst[pos].dst_opcode;
+
+ /* DESC BUFFER: length / offset values are specified in 4-byte words */
+ if (dst == DESCBUF) {
+ opcode |= (length >> 2);
+ opcode |= ((offset >> 2) << LDST_OFFSET_SHIFT);
+ } else {
+ opcode |= length;
+ opcode |= (offset << LDST_OFFSET_SHIFT);
+ }
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+
+ /* DECO CONTROL: skip writing the pointer or immediate data */
+ if (dst == DCTRL)
+ return (int)start_pc;
+
+ /*
+ * For data copy, there are 3 possible ways to specify the source:
+ * - IMMED & !COPY: copy data directly from src (max 8 bytes)
+ * - IMMED & COPY: copy immediate data from the location specified
+ *   by the user
+ * - !IMMED and not a SEQ cmd: copy the address
+ */
+ if (flags & IMMED)
+ __rta_inline_data(program, src, flags & __COPY_MASK, length);
+ else if (!(flags & SEQ))
+ __rta_out64(program, program->ps, src);
+
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return ret;
+}
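+
+/*
+ * Illustrative use (not from the upstream sources): with an already
+ * initialized struct program *p, loading an 8-byte immediate value into
+ * the MATH0 register could be encoded as:
+ *
+ *	rta_load(p, 0x0123456789abcdefULL, MATH0, 0, 8, IMMED);
+ *
+ * MATH0 accepts immediate data (IMM_CAN) and an 8-byte load at offset 0
+ * satisfies its LENOF_32 length/offset rule.
+ */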
+
+#endif /* __RTA_LOAD_CMD_H__*/
diff --git a/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/math_cmd.h b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/math_cmd.h
new file mode 100644
index 00000000..5b28cbab
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/math_cmd.h
@@ -0,0 +1,369 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ */
+
+#ifndef __RTA_MATH_CMD_H__
+#define __RTA_MATH_CMD_H__
+
+extern enum rta_sec_era rta_sec_era;
+
+static const uint32_t math_op1[][2] = {
+/*1*/ { MATH0, MATH_SRC0_REG0 },
+ { MATH1, MATH_SRC0_REG1 },
+ { MATH2, MATH_SRC0_REG2 },
+ { MATH3, MATH_SRC0_REG3 },
+ { SEQINSZ, MATH_SRC0_SEQINLEN },
+ { SEQOUTSZ, MATH_SRC0_SEQOUTLEN },
+ { VSEQINSZ, MATH_SRC0_VARSEQINLEN },
+ { VSEQOUTSZ, MATH_SRC0_VARSEQOUTLEN },
+ { ZERO, MATH_SRC0_ZERO },
+/*10*/ { NONE, 0 }, /* dummy value */
+ { DPOVRD, MATH_SRC0_DPOVRD },
+ { ONE, MATH_SRC0_ONE }
+};
+
+/*
+ * Allowed MATH op1 sources for each SEC Era.
+ * Values represent the number of entries from math_op1[] that are supported.
+ */
+static const unsigned int math_op1_sz[] = {10, 10, 12, 12, 12, 12, 12, 12};
+
+static const uint32_t math_op2[][2] = {
+/*1*/ { MATH0, MATH_SRC1_REG0 },
+ { MATH1, MATH_SRC1_REG1 },
+ { MATH2, MATH_SRC1_REG2 },
+ { MATH3, MATH_SRC1_REG3 },
+ { ABD, MATH_SRC1_INFIFO },
+ { OFIFO, MATH_SRC1_OUTFIFO },
+ { ONE, MATH_SRC1_ONE },
+/*8*/ { NONE, 0 }, /* dummy value */
+ { JOBSRC, MATH_SRC1_JOBSOURCE },
+ { DPOVRD, MATH_SRC1_DPOVRD },
+ { VSEQINSZ, MATH_SRC1_VARSEQINLEN },
+ { VSEQOUTSZ, MATH_SRC1_VARSEQOUTLEN },
+/*13*/ { ZERO, MATH_SRC1_ZERO }
+};
+
+/*
+ * Allowed MATH op2 sources for each SEC Era.
+ * Values represent the number of entries from math_op2[] that are supported.
+ */
+static const unsigned int math_op2_sz[] = {8, 9, 13, 13, 13, 13, 13, 13};
+
+static const uint32_t math_result[][2] = {
+/*1*/ { MATH0, MATH_DEST_REG0 },
+ { MATH1, MATH_DEST_REG1 },
+ { MATH2, MATH_DEST_REG2 },
+ { MATH3, MATH_DEST_REG3 },
+ { SEQINSZ, MATH_DEST_SEQINLEN },
+ { SEQOUTSZ, MATH_DEST_SEQOUTLEN },
+ { VSEQINSZ, MATH_DEST_VARSEQINLEN },
+ { VSEQOUTSZ, MATH_DEST_VARSEQOUTLEN },
+/*9*/ { NONE, MATH_DEST_NONE },
+ { DPOVRD, MATH_DEST_DPOVRD }
+};
+
+/*
+ * Allowed MATH result destinations for each SEC Era.
+ * Values represent the number of entries from math_result[] that are
+ * supported.
+ */
+static const unsigned int math_result_sz[] = {9, 9, 10, 10, 10, 10, 10, 10};
+
+static inline int
+rta_math(struct program *program, uint64_t operand1,
+ uint32_t op, uint64_t operand2, uint32_t result,
+ int length, uint32_t options)
+{
+ uint32_t opcode = CMD_MATH;
+ uint32_t val = 0;
+ int ret = -EINVAL;
+ unsigned int start_pc = program->current_pc;
+
+ if (((op == MATH_FUN_BSWAP) && (rta_sec_era < RTA_SEC_ERA_4)) ||
+ ((op == MATH_FUN_ZBYT) && (rta_sec_era < RTA_SEC_ERA_2))) {
+ pr_err("MATH: operation not supported by SEC Era %d. SEC PC: %d; Instr: %d\n",
+ USER_SEC_ERA(rta_sec_era), program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+
+ if (options & SWP) {
+ if (rta_sec_era < RTA_SEC_ERA_7) {
+ pr_err("MATH: operation not supported by SEC Era %d. SEC PC: %d; Instr: %d\n",
+ USER_SEC_ERA(rta_sec_era), program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+
+ if ((options & IFB) ||
+ (!(options & IMMED) && !(options & IMMED2)) ||
+ ((options & IMMED) && (options & IMMED2))) {
+ pr_err("MATH: SWP - invalid configuration. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+ }
+
+ /*
+ * The SHLD operation is special: it is the only one that may take
+ * _NONE as its first operand or _SEQINSZ as its second operand.
+ */
+ if ((op != MATH_FUN_SHLD) && ((operand1 == NONE) ||
+ (operand2 == SEQINSZ))) {
+ pr_err("MATH: Invalid operand. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ goto err;
+ }
+
+ /*
+ * Check first whether this is a unary operation; in that case the
+ * second operand must be _NONE.
+ */
+ if (((op == MATH_FUN_ZBYT) || (op == MATH_FUN_BSWAP)) &&
+ (operand2 != NONE)) {
+ pr_err("MATH: Invalid operand2. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ goto err;
+ }
+
+ /* Write first operand field */
+ if (options & IMMED) {
+ opcode |= MATH_SRC0_IMM;
+ } else {
+ ret = __rta_map_opcode((uint32_t)operand1, math_op1,
+ math_op1_sz[rta_sec_era], &val);
+ if (ret < 0) {
+ pr_err("MATH: operand1 not supported. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+ opcode |= val;
+ }
+
+ /* Write second operand field */
+ if (options & IMMED2) {
+ opcode |= MATH_SRC1_IMM;
+ } else {
+ ret = __rta_map_opcode((uint32_t)operand2, math_op2,
+ math_op2_sz[rta_sec_era], &val);
+ if (ret < 0) {
+ pr_err("MATH: operand2 not supported. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+ opcode |= val;
+ }
+
+ /* Write result field */
+ ret = __rta_map_opcode(result, math_result, math_result_sz[rta_sec_era],
+ &val);
+ if (ret < 0) {
+ pr_err("MATH: result not supported. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ goto err;
+ }
+ opcode |= val;
+
+ /*
+ * as we encode operations with their "real" values, we do not have
+ * to translate but we do need to validate the value
+ */
+ switch (op) {
+ /*Binary operators */
+ case (MATH_FUN_ADD):
+ case (MATH_FUN_ADDC):
+ case (MATH_FUN_SUB):
+ case (MATH_FUN_SUBB):
+ case (MATH_FUN_OR):
+ case (MATH_FUN_AND):
+ case (MATH_FUN_XOR):
+ case (MATH_FUN_LSHIFT):
+ case (MATH_FUN_RSHIFT):
+ case (MATH_FUN_SHLD):
+ /* Unary operators */
+ case (MATH_FUN_ZBYT):
+ case (MATH_FUN_BSWAP):
+ opcode |= op;
+ break;
+ default:
+ pr_err("MATH: operator is not supported. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ opcode |= (options & ~(IMMED | IMMED2));
+
+ /* Verify length */
+ switch (length) {
+ case (1):
+ opcode |= MATH_LEN_1BYTE;
+ break;
+ case (2):
+ opcode |= MATH_LEN_2BYTE;
+ break;
+ case (4):
+ opcode |= MATH_LEN_4BYTE;
+ break;
+ case (8):
+ opcode |= MATH_LEN_8BYTE;
+ break;
+ default:
+ pr_err("MATH: length is not supported. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+
+ /* Write immediate value */
+ if ((options & IMMED) && !(options & IMMED2)) {
+ __rta_out64(program, (length > 4) && !(options & IFB),
+ operand1);
+ } else if ((options & IMMED2) && !(options & IMMED)) {
+ __rta_out64(program, (length > 4) && !(options & IFB),
+ operand2);
+ } else if ((options & IMMED) && (options & IMMED2)) {
+ __rta_out32(program, lower_32_bits(operand1));
+ __rta_out32(program, lower_32_bits(operand2));
+ }
+
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return ret;
+}
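+
+/*
+ * Illustrative use (not from the upstream sources): with an already
+ * initialized struct program *p, subtracting the value in MATH0 from the
+ * sequence input length and placing the 4-byte result in the variable
+ * sequence input length register could be encoded as:
+ *
+ *	rta_math(p, SEQINSZ, MATH_FUN_SUB, MATH0, VSEQINSZ, 4, 0);
+ */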
+
+static inline int
+rta_mathi(struct program *program, uint64_t operand,
+ uint32_t op, uint8_t imm, uint32_t result,
+ int length, uint32_t options)
+{
+ uint32_t opcode = CMD_MATHI;
+ uint32_t val = 0;
+ int ret = -EINVAL;
+ unsigned int start_pc = program->current_pc;
+
+ if (rta_sec_era < RTA_SEC_ERA_6) {
+ pr_err("MATHI: Command not supported by SEC Era %d. SEC PC: %d; Instr: %d\n",
+ USER_SEC_ERA(rta_sec_era), program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+
+ if (((op == MATH_FUN_FBYT) && (options & SSEL))) {
+ pr_err("MATHI: Illegal combination - FBYT and SSEL. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ goto err;
+ }
+
+ if ((options & SWP) && (rta_sec_era < RTA_SEC_ERA_7)) {
+ pr_err("MATHI: SWP not supported by SEC Era %d. SEC PC: %d; Instr: %d\n",
+ USER_SEC_ERA(rta_sec_era), program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+
+ /* Write first operand field */
+ if (!(options & SSEL))
+ ret = __rta_map_opcode((uint32_t)operand, math_op1,
+ math_op1_sz[rta_sec_era], &val);
+ else
+ ret = __rta_map_opcode((uint32_t)operand, math_op2,
+ math_op2_sz[rta_sec_era], &val);
+ if (ret < 0) {
+ pr_err("MATHI: operand not supported. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ goto err;
+ }
+
+ if (!(options & SSEL))
+ opcode |= val;
+ else
+ opcode |= (val << (MATHI_SRC1_SHIFT - MATH_SRC1_SHIFT));
+
+ /* Write second operand field */
+ opcode |= (imm << MATHI_IMM_SHIFT);
+
+ /* Write result field */
+ ret = __rta_map_opcode(result, math_result, math_result_sz[rta_sec_era],
+ &val);
+ if (ret < 0) {
+ pr_err("MATHI: result not supported. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ goto err;
+ }
+ opcode |= (val << (MATHI_DEST_SHIFT - MATH_DEST_SHIFT));
+
+ /*
+ * as we encode operations with their "real" values, we do not have to
+ * translate but we do need to validate the value
+ */
+ switch (op) {
+ case (MATH_FUN_ADD):
+ case (MATH_FUN_ADDC):
+ case (MATH_FUN_SUB):
+ case (MATH_FUN_SUBB):
+ case (MATH_FUN_OR):
+ case (MATH_FUN_AND):
+ case (MATH_FUN_XOR):
+ case (MATH_FUN_LSHIFT):
+ case (MATH_FUN_RSHIFT):
+ case (MATH_FUN_FBYT):
+ opcode |= op;
+ break;
+ default:
+ pr_err("MATHI: operator not supported. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ opcode |= options;
+
+ /* Verify length */
+ switch (length) {
+ case (1):
+ opcode |= MATH_LEN_1BYTE;
+ break;
+ case (2):
+ opcode |= MATH_LEN_2BYTE;
+ break;
+ case (4):
+ opcode |= MATH_LEN_4BYTE;
+ break;
+ case (8):
+ opcode |= MATH_LEN_8BYTE;
+ break;
+ default:
+ pr_err("MATHI: length %d not supported. SEC PC: %d; Instr: %d\n",
+ length, program->current_pc,
+ program->current_instruction);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return ret;
+}
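+
+/*
+ * Illustrative use (not from the upstream sources, SEC Era 6 or later
+ * only): with an already initialized struct program *p, adding the
+ * immediate value 16 to MATH0 in place could be encoded as:
+ *
+ *	rta_mathi(p, MATH0, MATH_FUN_ADD, 16, MATH0, 4, 0);
+ */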
+
+#endif /* __RTA_MATH_CMD_H__ */
diff --git a/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/move_cmd.h b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/move_cmd.h
new file mode 100644
index 00000000..a7ff7c67
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/move_cmd.h
@@ -0,0 +1,412 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ */
+
+#ifndef __RTA_MOVE_CMD_H__
+#define __RTA_MOVE_CMD_H__
+
+#define MOVE_SET_AUX_SRC 0x01
+#define MOVE_SET_AUX_DST 0x02
+#define MOVE_SET_AUX_LS 0x03
+#define MOVE_SET_LEN_16b 0x04
+
+#define MOVE_SET_AUX_MATH 0x10
+#define MOVE_SET_AUX_MATH_SRC (MOVE_SET_AUX_SRC | MOVE_SET_AUX_MATH)
+#define MOVE_SET_AUX_MATH_DST (MOVE_SET_AUX_DST | MOVE_SET_AUX_MATH)
+
+#define MASK_16b 0xFF
+
+/* MOVE command type */
+#define __MOVE 1
+#define __MOVEB 2
+#define __MOVEDW 3
+
+extern enum rta_sec_era rta_sec_era;
+
+static const uint32_t move_src_table[][2] = {
+/*1*/ { CONTEXT1, MOVE_SRC_CLASS1CTX },
+ { CONTEXT2, MOVE_SRC_CLASS2CTX },
+ { OFIFO, MOVE_SRC_OUTFIFO },
+ { DESCBUF, MOVE_SRC_DESCBUF },
+ { MATH0, MOVE_SRC_MATH0 },
+ { MATH1, MOVE_SRC_MATH1 },
+ { MATH2, MOVE_SRC_MATH2 },
+ { MATH3, MOVE_SRC_MATH3 },
+/*9*/ { IFIFOABD, MOVE_SRC_INFIFO },
+ { IFIFOAB1, MOVE_SRC_INFIFO_CL | MOVE_AUX_LS },
+ { IFIFOAB2, MOVE_SRC_INFIFO_CL },
+/*12*/ { ABD, MOVE_SRC_INFIFO_NO_NFIFO },
+ { AB1, MOVE_SRC_INFIFO_NO_NFIFO | MOVE_AUX_LS },
+ { AB2, MOVE_SRC_INFIFO_NO_NFIFO | MOVE_AUX_MS }
+};
+
+/* Allowed MOVE / MOVE_LEN sources for each SEC Era.
+ * Values represent the number of entries from move_src_table[] that are
+ * supported.
+ */
+static const unsigned int move_src_table_sz[] = {9, 11, 14, 14, 14, 14, 14, 14};
+
+static const uint32_t move_dst_table[][2] = {
+/*1*/ { CONTEXT1, MOVE_DEST_CLASS1CTX },
+ { CONTEXT2, MOVE_DEST_CLASS2CTX },
+ { OFIFO, MOVE_DEST_OUTFIFO },
+ { DESCBUF, MOVE_DEST_DESCBUF },
+ { MATH0, MOVE_DEST_MATH0 },
+ { MATH1, MOVE_DEST_MATH1 },
+ { MATH2, MOVE_DEST_MATH2 },
+ { MATH3, MOVE_DEST_MATH3 },
+ { IFIFOAB1, MOVE_DEST_CLASS1INFIFO },
+ { IFIFOAB2, MOVE_DEST_CLASS2INFIFO },
+ { PKA, MOVE_DEST_PK_A },
+ { KEY1, MOVE_DEST_CLASS1KEY },
+ { KEY2, MOVE_DEST_CLASS2KEY },
+/*14*/ { IFIFO, MOVE_DEST_INFIFO },
+/*15*/ { ALTSOURCE, MOVE_DEST_ALTSOURCE}
+};
+
+/* Allowed MOVE / MOVE_LEN destinations for each SEC Era.
+ * Values represent the number of entries from move_dst_table[] that are
+ * supported.
+ */
+static const
+unsigned int move_dst_table_sz[] = {13, 14, 14, 15, 15, 15, 15, 15};
+
+static inline int
+set_move_offset(struct program *program __maybe_unused,
+ uint64_t src, uint16_t src_offset,
+ uint64_t dst, uint16_t dst_offset,
+ uint16_t *offset, uint16_t *opt);
+
+static inline int
+math_offset(uint16_t offset);
+
+static inline int
+rta_move(struct program *program, int cmd_type, uint64_t src,
+ uint16_t src_offset, uint64_t dst,
+ uint16_t dst_offset, uint32_t length, uint32_t flags)
+{
+ uint32_t opcode = 0;
+ uint16_t offset = 0, opt = 0;
+ uint32_t val = 0;
+ int ret = -EINVAL;
+ bool is_move_len_cmd = false;
+ unsigned int start_pc = program->current_pc;
+
+ if ((rta_sec_era < RTA_SEC_ERA_7) && (cmd_type != __MOVE)) {
+ pr_err("MOVE: MOVEB / MOVEDW not supported by SEC Era %d. SEC PC: %d; Instr: %d\n",
+ USER_SEC_ERA(rta_sec_era), program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+
+ /* write command type */
+ if (cmd_type == __MOVEB) {
+ opcode = CMD_MOVEB;
+ } else if (cmd_type == __MOVEDW) {
+ opcode = CMD_MOVEDW;
+ } else if (!(flags & IMMED)) {
+ if (rta_sec_era < RTA_SEC_ERA_3) {
+ pr_err("MOVE: MOVE_LEN not supported by SEC Era %d. SEC PC: %d; Instr: %d\n",
+ USER_SEC_ERA(rta_sec_era), program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+
+ if ((length != MATH0) && (length != MATH1) &&
+ (length != MATH2) && (length != MATH3)) {
+ pr_err("MOVE: MOVE_LEN length must be MATH[0-3]. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+
+ opcode = CMD_MOVE_LEN;
+ is_move_len_cmd = true;
+ } else {
+ opcode = CMD_MOVE;
+ }
+
+ /* write the offset first, so that invalid combinations or incorrect
+ * offset values are detected sooner; set_move_offset() decides which
+ * offset (src or dst) goes here
+ */
+ ret = set_move_offset(program, src, src_offset, dst, dst_offset,
+ &offset, &opt);
+ if (ret < 0)
+ goto err;
+
+ opcode |= (offset << MOVE_OFFSET_SHIFT) & MOVE_OFFSET_MASK;
+
+ /* set AUX field if required */
+ if (opt == MOVE_SET_AUX_SRC) {
+ opcode |= ((src_offset / 16) << MOVE_AUX_SHIFT) & MOVE_AUX_MASK;
+ } else if (opt == MOVE_SET_AUX_DST) {
+ opcode |= ((dst_offset / 16) << MOVE_AUX_SHIFT) & MOVE_AUX_MASK;
+ } else if (opt == MOVE_SET_AUX_LS) {
+ opcode |= MOVE_AUX_LS;
+ } else if (opt & MOVE_SET_AUX_MATH) {
+ if (opt & MOVE_SET_AUX_SRC)
+ offset = src_offset;
+ else
+ offset = dst_offset;
+
+ if (rta_sec_era < RTA_SEC_ERA_6) {
+ if (offset)
+ pr_debug("MOVE: Offset not supported by SEC Era %d. SEC PC: %d; Instr: %d\n",
+ USER_SEC_ERA(rta_sec_era),
+ program->current_pc,
+ program->current_instruction);
+ /* nothing to do for offset = 0 */
+ } else {
+ ret = math_offset(offset);
+ if (ret < 0) {
+ pr_err("MOVE: Invalid offset in MATH register. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+
+ opcode |= (uint32_t)ret;
+ }
+ }
+
+ /* write source field */
+ ret = __rta_map_opcode((uint32_t)src, move_src_table,
+ move_src_table_sz[rta_sec_era], &val);
+ if (ret < 0) {
+ pr_err("MOVE: Invalid SRC. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ goto err;
+ }
+ opcode |= val;
+
+ /* write destination field */
+ ret = __rta_map_opcode((uint32_t)dst, move_dst_table,
+ move_dst_table_sz[rta_sec_era], &val);
+ if (ret < 0) {
+ pr_err("MOVE: Invalid DST. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ goto err;
+ }
+ opcode |= val;
+
+ /* write flags */
+ if (flags & (FLUSH1 | FLUSH2))
+ opcode |= MOVE_AUX_MS;
+ if (flags & (LAST2 | LAST1))
+ opcode |= MOVE_AUX_LS;
+ if (flags & WAITCOMP)
+ opcode |= MOVE_WAITCOMP;
+
+ if (!is_move_len_cmd) {
+ /* write length */
+ if (opt == MOVE_SET_LEN_16b)
+ opcode |= (length & (MOVE_OFFSET_MASK | MOVE_LEN_MASK));
+ else
+ opcode |= (length & MOVE_LEN_MASK);
+ } else {
+ /* write mrsel */
+ switch (length) {
+ case (MATH0):
+ /*
+ * opcode |= MOVELEN_MRSEL_MATH0;
+ * MOVELEN_MRSEL_MATH0 is 0
+ */
+ break;
+ case (MATH1):
+ opcode |= MOVELEN_MRSEL_MATH1;
+ break;
+ case (MATH2):
+ opcode |= MOVELEN_MRSEL_MATH2;
+ break;
+ case (MATH3):
+ opcode |= MOVELEN_MRSEL_MATH3;
+ break;
+ }
+
+ /* write size */
+ if (rta_sec_era >= RTA_SEC_ERA_7) {
+ if (flags & SIZE_WORD)
+ opcode |= MOVELEN_SIZE_WORD;
+ else if (flags & SIZE_BYTE)
+ opcode |= MOVELEN_SIZE_BYTE;
+ else if (flags & SIZE_DWORD)
+ opcode |= MOVELEN_SIZE_DWORD;
+ }
+ }
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return ret;
+}
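+
+/*
+ * Illustrative use (not from the upstream sources): with an already
+ * initialized struct program *p, copying the first 8 bytes of the class 2
+ * context register into MATH0 with a plain MOVE could be encoded as:
+ *
+ *	rta_move(p, __MOVE, CONTEXT2, 0, MATH0, 0, 8, IMMED);
+ *
+ * __MOVE selects the plain MOVE encoding (see the command type defines
+ * above). Without the IMMED flag the command is encoded as MOVE_LEN, in
+ * which case the length argument must name one of the MATH[0-3]
+ * registers instead of a byte count.
+ */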
+
+static inline int
+set_move_offset(struct program *program __maybe_unused,
+ uint64_t src, uint16_t src_offset,
+ uint64_t dst, uint16_t dst_offset,
+ uint16_t *offset, uint16_t *opt)
+{
+ switch (src) {
+ case (CONTEXT1):
+ case (CONTEXT2):
+ if (dst == DESCBUF) {
+ *opt = MOVE_SET_AUX_SRC;
+ *offset = dst_offset;
+ } else if ((dst == KEY1) || (dst == KEY2)) {
+ if ((src_offset) && (dst_offset)) {
+ pr_err("MOVE: Bad offset. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+ if (dst_offset) {
+ *opt = MOVE_SET_AUX_LS;
+ *offset = dst_offset;
+ } else {
+ *offset = src_offset;
+ }
+ } else {
+ if ((dst == MATH0) || (dst == MATH1) ||
+ (dst == MATH2) || (dst == MATH3)) {
+ *opt = MOVE_SET_AUX_MATH_DST;
+ } else if (((dst == OFIFO) || (dst == ALTSOURCE)) &&
+ (src_offset % 4)) {
+ pr_err("MOVE: Bad offset alignment. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+
+ *offset = src_offset;
+ }
+ break;
+
+ case (OFIFO):
+ if (dst == OFIFO) {
+ pr_err("MOVE: Invalid DST. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+ if (((dst == IFIFOAB1) || (dst == IFIFOAB2) ||
+ (dst == IFIFO) || (dst == PKA)) &&
+ (src_offset || dst_offset)) {
+ pr_err("MOVE: Offset should be zero. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+ *offset = dst_offset;
+ break;
+
+ case (DESCBUF):
+ if ((dst == CONTEXT1) || (dst == CONTEXT2)) {
+ *opt = MOVE_SET_AUX_DST;
+ } else if ((dst == MATH0) || (dst == MATH1) ||
+ (dst == MATH2) || (dst == MATH3)) {
+ *opt = MOVE_SET_AUX_MATH_DST;
+ } else if (dst == DESCBUF) {
+ pr_err("MOVE: Invalid DST. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ } else if (((dst == OFIFO) || (dst == ALTSOURCE)) &&
+ (src_offset % 4)) {
+ pr_err("MOVE: Invalid offset alignment. SEC PC: %d; Instr %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+
+ *offset = src_offset;
+ break;
+
+ case (MATH0):
+ case (MATH1):
+ case (MATH2):
+ case (MATH3):
+ if ((dst == OFIFO) || (dst == ALTSOURCE)) {
+ if (src_offset % 4) {
+ pr_err("MOVE: Bad offset alignment. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+ *offset = src_offset;
+ } else if ((dst == IFIFOAB1) || (dst == IFIFOAB2) ||
+ (dst == IFIFO) || (dst == PKA)) {
+ *offset = src_offset;
+ } else {
+ *offset = dst_offset;
+
+ /*
+ * This condition is basically the negation of:
+ * dst in { CONTEXT[1-2], MATH[0-3] }
+ */
+ if ((dst != KEY1) && (dst != KEY2))
+ *opt = MOVE_SET_AUX_MATH_SRC;
+ }
+ break;
+
+ case (IFIFOABD):
+ case (IFIFOAB1):
+ case (IFIFOAB2):
+ case (ABD):
+ case (AB1):
+ case (AB2):
+ if ((dst == IFIFOAB1) || (dst == IFIFOAB2) ||
+ (dst == IFIFO) || (dst == PKA) || (dst == ALTSOURCE)) {
+ pr_err("MOVE: Bad DST. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ } else {
+ if (dst == OFIFO) {
+ *opt = MOVE_SET_LEN_16b;
+ } else {
+ if (dst_offset % 4) {
+ pr_err("MOVE: Bad offset alignment. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+ *offset = dst_offset;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+ err:
+ return -EINVAL;
+}
+
+static inline int
+math_offset(uint16_t offset)
+{
+ switch (offset) {
+ case 0:
+ return 0;
+ case 4:
+ return MOVE_AUX_LS;
+ case 6:
+ return MOVE_AUX_MS;
+ case 7:
+ return MOVE_AUX_LS | MOVE_AUX_MS;
+ }
+
+ return -EINVAL;
+}
+
+#endif /* __RTA_MOVE_CMD_H__ */
diff --git a/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/nfifo_cmd.h b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/nfifo_cmd.h
new file mode 100644
index 00000000..94f775e2
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/nfifo_cmd.h
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ */
+
+#ifndef __RTA_NFIFO_CMD_H__
+#define __RTA_NFIFO_CMD_H__
+
+extern enum rta_sec_era rta_sec_era;
+
+static const uint32_t nfifo_src[][2] = {
+/*1*/ { IFIFO, NFIFOENTRY_STYPE_DFIFO },
+ { OFIFO, NFIFOENTRY_STYPE_OFIFO },
+ { PAD, NFIFOENTRY_STYPE_PAD },
+/*4*/ { MSGOUTSNOOP, NFIFOENTRY_STYPE_SNOOP | NFIFOENTRY_DEST_BOTH },
+/*5*/ { ALTSOURCE, NFIFOENTRY_STYPE_ALTSOURCE },
+ { OFIFO_SYNC, NFIFOENTRY_STYPE_OFIFO_SYNC },
+/*7*/ { MSGOUTSNOOP_ALT, NFIFOENTRY_STYPE_SNOOP_ALT | NFIFOENTRY_DEST_BOTH }
+};
+
+/*
+ * Allowed NFIFO LOAD sources for each SEC Era.
+ * Values represent the number of entries from nfifo_src[] that are supported.
+ */
+static const unsigned int nfifo_src_sz[] = {4, 5, 5, 5, 5, 5, 5, 7};
+
+static const uint32_t nfifo_data[][2] = {
+ { MSG, NFIFOENTRY_DTYPE_MSG },
+ { MSG1, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_MSG },
+ { MSG2, NFIFOENTRY_DEST_CLASS2 | NFIFOENTRY_DTYPE_MSG },
+ { IV1, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_IV },
+ { IV2, NFIFOENTRY_DEST_CLASS2 | NFIFOENTRY_DTYPE_IV },
+ { ICV1, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_ICV },
+ { ICV2, NFIFOENTRY_DEST_CLASS2 | NFIFOENTRY_DTYPE_ICV },
+ { SAD1, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_SAD },
+ { AAD1, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_AAD },
+ { AAD2, NFIFOENTRY_DEST_CLASS2 | NFIFOENTRY_DTYPE_AAD },
+ { AFHA_SBOX, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_SBOX },
+ { SKIP, NFIFOENTRY_DTYPE_SKIP },
+ { PKE, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_PK_E },
+ { PKN, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_PK_N },
+ { PKA, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_PK_A },
+ { PKA0, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_PK_A0 },
+ { PKA1, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_PK_A1 },
+ { PKA2, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_PK_A2 },
+ { PKA3, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_PK_A3 },
+ { PKB, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_PK_B },
+ { PKB0, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_PK_B0 },
+ { PKB1, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_PK_B1 },
+ { PKB2, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_PK_B2 },
+ { PKB3, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_PK_B3 },
+ { AB1, NFIFOENTRY_DEST_CLASS1 },
+ { AB2, NFIFOENTRY_DEST_CLASS2 },
+ { ABD, NFIFOENTRY_DEST_DECO }
+};
+
+static const uint32_t nfifo_flags[][2] = {
+/*1*/ { LAST1, NFIFOENTRY_LC1 },
+ { LAST2, NFIFOENTRY_LC2 },
+ { FLUSH1, NFIFOENTRY_FC1 },
+ { BP, NFIFOENTRY_BND },
+ { PAD_ZERO, NFIFOENTRY_PTYPE_ZEROS },
+ { PAD_NONZERO, NFIFOENTRY_PTYPE_RND_NOZEROS },
+ { PAD_INCREMENT, NFIFOENTRY_PTYPE_INCREMENT },
+ { PAD_RANDOM, NFIFOENTRY_PTYPE_RND },
+ { PAD_ZERO_N1, NFIFOENTRY_PTYPE_ZEROS_NZ },
+ { PAD_NONZERO_0, NFIFOENTRY_PTYPE_RND_NZ_LZ },
+ { PAD_N1, NFIFOENTRY_PTYPE_N },
+/*12*/ { PAD_NONZERO_N, NFIFOENTRY_PTYPE_RND_NZ_N },
+ { FLUSH2, NFIFOENTRY_FC2 },
+ { OC, NFIFOENTRY_OC }
+};
+
+/*
+ * Allowed NFIFO LOAD flags for each SEC Era.
+ * Values represent the number of entries from nfifo_flags[] that are supported.
+ */
+static const unsigned int nfifo_flags_sz[] = {12, 14, 14, 14, 14, 14, 14, 14};
+
+static const uint32_t nfifo_pad_flags[][2] = {
+ { BM, NFIFOENTRY_BM },
+ { PS, NFIFOENTRY_PS },
+ { PR, NFIFOENTRY_PR }
+};
+
+/*
+ * Allowed NFIFO LOAD pad flags for each SEC Era.
+ * Values represent the number of entries from nfifo_pad_flags[] that are
+ * supported.
+ */
+static const unsigned int nfifo_pad_flags_sz[] = {2, 2, 2, 2, 3, 3, 3, 3};
+
+static inline int
+rta_nfifo_load(struct program *program, uint32_t src,
+ uint32_t data, uint32_t length, uint32_t flags)
+{
+ uint32_t opcode = 0, val;
+ int ret = -EINVAL;
+ uint32_t load_cmd = CMD_LOAD | LDST_IMM | LDST_CLASS_IND_CCB |
+ LDST_SRCDST_WORD_INFO_FIFO;
+ unsigned int start_pc = program->current_pc;
+
+ if ((data == AFHA_SBOX) && (rta_sec_era == RTA_SEC_ERA_7)) {
+ pr_err("NFIFO: AFHA S-box not supported by SEC Era %d\n",
+ USER_SEC_ERA(rta_sec_era));
+ goto err;
+ }
+
+ /* write source field */
+ ret = __rta_map_opcode(src, nfifo_src, nfifo_src_sz[rta_sec_era], &val);
+ if (ret < 0) {
+ pr_err("NFIFO: Invalid SRC. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ goto err;
+ }
+ opcode |= val;
+
+ /* write type field */
+ ret = __rta_map_opcode(data, nfifo_data, ARRAY_SIZE(nfifo_data), &val);
+ if (ret < 0) {
+ pr_err("NFIFO: Invalid data. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ goto err;
+ }
+ opcode |= val;
+
+ /* write DL field */
+ if (!(flags & EXT)) {
+ opcode |= length & NFIFOENTRY_DLEN_MASK;
+ load_cmd |= 4;
+ } else {
+ load_cmd |= 8;
+ }
+
+ /* write flags */
+ __rta_map_flags(flags, nfifo_flags, nfifo_flags_sz[rta_sec_era],
+ &opcode);
+
+ /* in case of padding, also write the pad-specific flags */
+ if (src == PAD)
+ __rta_map_flags(flags, nfifo_pad_flags,
+ nfifo_pad_flags_sz[rta_sec_era], &opcode);
+
+ /* write LOAD command first */
+ __rta_out32(program, load_cmd);
+ __rta_out32(program, opcode);
+
+ if (flags & EXT)
+ __rta_out32(program, length & NFIFOENTRY_DLEN_MASK);
+
+ program->current_instruction++;
+
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return ret;
+}
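+
+/*
+ * Illustrative use (not from the upstream sources): with an already
+ * initialized struct program *p, instructing the info FIFO to pad the
+ * class 1 message input with 16 zero bytes and mark it as the last
+ * class 1 data could be encoded as:
+ *
+ *	rta_nfifo_load(p, PAD, MSG1, 16, PAD_ZERO | LAST1);
+ */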
+
+#endif /* __RTA_NFIFO_CMD_H__ */
diff --git a/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/operation_cmd.h b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/operation_cmd.h
new file mode 100644
index 00000000..b85760e5
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/operation_cmd.h
@@ -0,0 +1,570 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ */
+
+#ifndef __RTA_OPERATION_CMD_H__
+#define __RTA_OPERATION_CMD_H__
+
+#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION >= 70000)
+#pragma GCC diagnostic ignored "-Wimplicit-fallthrough"
+#endif
+
+extern enum rta_sec_era rta_sec_era;
+
+static inline int
+__rta_alg_aai_aes(uint16_t aai)
+{
+ uint16_t aes_mode = aai & OP_ALG_AESA_MODE_MASK;
+
+ if (aai & OP_ALG_AAI_C2K) {
+ if (rta_sec_era < RTA_SEC_ERA_5)
+ return -EINVAL;
+ if ((aes_mode != OP_ALG_AAI_CCM) &&
+ (aes_mode != OP_ALG_AAI_GCM))
+ return -EINVAL;
+ }
+
+ switch (aes_mode) {
+ case OP_ALG_AAI_CBC_CMAC:
+ case OP_ALG_AAI_CTR_CMAC_LTE:
+ case OP_ALG_AAI_CTR_CMAC:
+ if (rta_sec_era < RTA_SEC_ERA_2)
+ return -EINVAL;
+ /* no break */
+ case OP_ALG_AAI_CTR:
+ case OP_ALG_AAI_CBC:
+ case OP_ALG_AAI_ECB:
+ case OP_ALG_AAI_OFB:
+ case OP_ALG_AAI_CFB:
+ case OP_ALG_AAI_XTS:
+ case OP_ALG_AAI_CMAC:
+ case OP_ALG_AAI_XCBC_MAC:
+ case OP_ALG_AAI_CCM:
+ case OP_ALG_AAI_GCM:
+ case OP_ALG_AAI_CBC_XCBCMAC:
+ case OP_ALG_AAI_CTR_XCBCMAC:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_alg_aai_des(uint16_t aai)
+{
+ uint16_t aai_code = (uint16_t)(aai & ~OP_ALG_AAI_CHECKODD);
+
+ switch (aai_code) {
+ case OP_ALG_AAI_CBC:
+ case OP_ALG_AAI_ECB:
+ case OP_ALG_AAI_CFB:
+ case OP_ALG_AAI_OFB:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_alg_aai_md5(uint16_t aai)
+{
+ switch (aai) {
+ case OP_ALG_AAI_HMAC:
+ if (rta_sec_era < RTA_SEC_ERA_2)
+ return -EINVAL;
+ /* no break */
+ case OP_ALG_AAI_SMAC:
+ case OP_ALG_AAI_HASH:
+ case OP_ALG_AAI_HMAC_PRECOMP:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_alg_aai_sha(uint16_t aai)
+{
+ switch (aai) {
+ case OP_ALG_AAI_HMAC:
+ if (rta_sec_era < RTA_SEC_ERA_2)
+ return -EINVAL;
+ /* no break */
+ case OP_ALG_AAI_HASH:
+ case OP_ALG_AAI_HMAC_PRECOMP:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_alg_aai_rng(uint16_t aai)
+{
+ uint16_t rng_mode = aai & OP_ALG_RNG_MODE_MASK;
+ uint16_t rng_sh = aai & OP_ALG_AAI_RNG4_SH_MASK;
+
+ switch (rng_mode) {
+ case OP_ALG_AAI_RNG:
+ case OP_ALG_AAI_RNG_NZB:
+ case OP_ALG_AAI_RNG_OBP:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* State Handle bits are valid only for SEC Era >= 5 */
+ if ((rta_sec_era < RTA_SEC_ERA_5) && rng_sh)
+ return -EINVAL;
+
+ /* PS, AI, SK bits are also valid only for SEC Era >= 5 */
+ if ((rta_sec_era < RTA_SEC_ERA_5) && (aai &
+ (OP_ALG_AAI_RNG4_PS | OP_ALG_AAI_RNG4_AI | OP_ALG_AAI_RNG4_SK)))
+ return -EINVAL;
+
+ switch (rng_sh) {
+ case OP_ALG_AAI_RNG4_SH_0:
+ case OP_ALG_AAI_RNG4_SH_1:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_alg_aai_crc(uint16_t aai)
+{
+ uint16_t aai_code = aai & OP_ALG_CRC_POLY_MASK;
+
+ switch (aai_code) {
+ case OP_ALG_AAI_802:
+ case OP_ALG_AAI_3385:
+ case OP_ALG_AAI_CUST_POLY:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_alg_aai_kasumi(uint16_t aai)
+{
+ switch (aai) {
+ case OP_ALG_AAI_GSM:
+ case OP_ALG_AAI_EDGE:
+ case OP_ALG_AAI_F8:
+ case OP_ALG_AAI_F9:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_alg_aai_snow_f9(uint16_t aai)
+{
+ if (aai == OP_ALG_AAI_F9)
+ return 0;
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_alg_aai_snow_f8(uint16_t aai)
+{
+ if (aai == OP_ALG_AAI_F8)
+ return 0;
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_alg_aai_zuce(uint16_t aai)
+{
+ if (aai == OP_ALG_AAI_F8)
+ return 0;
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_alg_aai_zuca(uint16_t aai)
+{
+ if (aai == OP_ALG_AAI_F9)
+ return 0;
+
+ return -EINVAL;
+}
+
+struct alg_aai_map {
+ uint32_t chipher_algo;
+ int (*aai_func)(uint16_t);
+ uint32_t class;
+};
+
+static const struct alg_aai_map alg_table[] = {
+/*1*/ { OP_ALG_ALGSEL_AES, __rta_alg_aai_aes, OP_TYPE_CLASS1_ALG },
+ { OP_ALG_ALGSEL_DES, __rta_alg_aai_des, OP_TYPE_CLASS1_ALG },
+ { OP_ALG_ALGSEL_3DES, __rta_alg_aai_des, OP_TYPE_CLASS1_ALG },
+ { OP_ALG_ALGSEL_MD5, __rta_alg_aai_md5, OP_TYPE_CLASS2_ALG },
+ { OP_ALG_ALGSEL_SHA1, __rta_alg_aai_md5, OP_TYPE_CLASS2_ALG },
+ { OP_ALG_ALGSEL_SHA224, __rta_alg_aai_sha, OP_TYPE_CLASS2_ALG },
+ { OP_ALG_ALGSEL_SHA256, __rta_alg_aai_sha, OP_TYPE_CLASS2_ALG },
+ { OP_ALG_ALGSEL_SHA384, __rta_alg_aai_sha, OP_TYPE_CLASS2_ALG },
+ { OP_ALG_ALGSEL_SHA512, __rta_alg_aai_sha, OP_TYPE_CLASS2_ALG },
+ { OP_ALG_ALGSEL_RNG, __rta_alg_aai_rng, OP_TYPE_CLASS1_ALG },
+/*11*/ { OP_ALG_ALGSEL_CRC, __rta_alg_aai_crc, OP_TYPE_CLASS2_ALG },
+ { OP_ALG_ALGSEL_ARC4, NULL, OP_TYPE_CLASS1_ALG },
+ { OP_ALG_ALGSEL_SNOW_F8, __rta_alg_aai_snow_f8, OP_TYPE_CLASS1_ALG },
+/*14*/ { OP_ALG_ALGSEL_KASUMI, __rta_alg_aai_kasumi, OP_TYPE_CLASS1_ALG },
+ { OP_ALG_ALGSEL_SNOW_F9, __rta_alg_aai_snow_f9, OP_TYPE_CLASS2_ALG },
+ { OP_ALG_ALGSEL_ZUCE, __rta_alg_aai_zuce, OP_TYPE_CLASS1_ALG },
+/*17*/ { OP_ALG_ALGSEL_ZUCA, __rta_alg_aai_zuca, OP_TYPE_CLASS2_ALG }
+};
+
+/*
+ * Allowed OPERATION algorithms for each SEC Era.
+ * Values represent the number of entries from alg_table[] that are supported.
+ */
+static const unsigned int alg_table_sz[] = {14, 15, 15, 15, 17, 17, 11, 17};
+
+static inline int
+rta_operation(struct program *program, uint32_t cipher_algo,
+ uint16_t aai, uint8_t algo_state,
+ int icv_checking, int enc)
+{
+ uint32_t opcode = CMD_OPERATION;
+ unsigned int i, found = 0;
+ unsigned int start_pc = program->current_pc;
+ int ret;
+
+ for (i = 0; i < alg_table_sz[rta_sec_era]; i++) {
+		if (alg_table[i].cipher_algo == cipher_algo) {
+ opcode |= cipher_algo | alg_table[i].class;
+ /* nothing else to verify */
+ if (alg_table[i].aai_func == NULL) {
+ found = 1;
+ break;
+ }
+
+ aai &= OP_ALG_AAI_MASK;
+
+ ret = (*alg_table[i].aai_func)(aai);
+ if (ret < 0) {
+ pr_err("OPERATION: Bad AAI Type. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+ opcode |= aai;
+ found = 1;
+ break;
+ }
+ }
+ if (!found) {
+ pr_err("OPERATION: Invalid Command. SEC Program Line: %d\n",
+ program->current_pc);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ switch (algo_state) {
+ case OP_ALG_AS_UPDATE:
+ case OP_ALG_AS_INIT:
+ case OP_ALG_AS_FINALIZE:
+ case OP_ALG_AS_INITFINAL:
+ opcode |= algo_state;
+ break;
+ default:
+ pr_err("Invalid Operation Command\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ switch (icv_checking) {
+ case ICV_CHECK_DISABLE:
+ /*
+ * opcode |= OP_ALG_ICV_OFF;
+ * OP_ALG_ICV_OFF is 0
+ */
+ break;
+ case ICV_CHECK_ENABLE:
+ opcode |= OP_ALG_ICV_ON;
+ break;
+ default:
+ pr_err("Invalid Operation Command\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ switch (enc) {
+ case DIR_DEC:
+ /*
+ * opcode |= OP_ALG_DECRYPT;
+ * OP_ALG_DECRYPT is 0
+ */
+ break;
+ case DIR_ENC:
+ opcode |= OP_ALG_ENCRYPT;
+ break;
+ default:
+ pr_err("Invalid Operation Command\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ return ret;
+}
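+
+/*
+ * Usage sketch (illustrative only; the example_* helper is not part of this
+ * header): emit an OPERATION command for AES-GCM encryption, processing the
+ * whole message in a single INITFINAL pass. The program context is assumed
+ * to have been set up by the caller.
+ */
+static inline int
+example_aes_gcm_operation(struct program *p)
+{
+	return rta_operation(p, OP_ALG_ALGSEL_AES, OP_ALG_AAI_GCM,
+			     OP_ALG_AS_INITFINAL, ICV_CHECK_DISABLE, DIR_ENC);
+}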
+
+/*
+ * OPERATION PKHA routines
+ */
+static inline int
+__rta_pkha_clearmem(uint32_t pkha_op)
+{
+ switch (pkha_op) {
+ case (OP_ALG_PKMODE_CLEARMEM_ALL):
+ case (OP_ALG_PKMODE_CLEARMEM_ABE):
+ case (OP_ALG_PKMODE_CLEARMEM_ABN):
+ case (OP_ALG_PKMODE_CLEARMEM_AB):
+ case (OP_ALG_PKMODE_CLEARMEM_AEN):
+ case (OP_ALG_PKMODE_CLEARMEM_AE):
+ case (OP_ALG_PKMODE_CLEARMEM_AN):
+ case (OP_ALG_PKMODE_CLEARMEM_A):
+ case (OP_ALG_PKMODE_CLEARMEM_BEN):
+ case (OP_ALG_PKMODE_CLEARMEM_BE):
+ case (OP_ALG_PKMODE_CLEARMEM_BN):
+ case (OP_ALG_PKMODE_CLEARMEM_B):
+ case (OP_ALG_PKMODE_CLEARMEM_EN):
+ case (OP_ALG_PKMODE_CLEARMEM_N):
+ case (OP_ALG_PKMODE_CLEARMEM_E):
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_pkha_mod_arithmetic(uint32_t pkha_op)
+{
+ pkha_op &= (uint32_t)~OP_ALG_PKMODE_OUT_A;
+
+ switch (pkha_op) {
+ case (OP_ALG_PKMODE_MOD_ADD):
+ case (OP_ALG_PKMODE_MOD_SUB_AB):
+ case (OP_ALG_PKMODE_MOD_SUB_BA):
+ case (OP_ALG_PKMODE_MOD_MULT):
+ case (OP_ALG_PKMODE_MOD_MULT_IM):
+ case (OP_ALG_PKMODE_MOD_MULT_IM_OM):
+ case (OP_ALG_PKMODE_MOD_EXPO):
+ case (OP_ALG_PKMODE_MOD_EXPO_TEQ):
+ case (OP_ALG_PKMODE_MOD_EXPO_IM):
+ case (OP_ALG_PKMODE_MOD_EXPO_IM_TEQ):
+ case (OP_ALG_PKMODE_MOD_REDUCT):
+ case (OP_ALG_PKMODE_MOD_INV):
+ case (OP_ALG_PKMODE_MOD_MONT_CNST):
+ case (OP_ALG_PKMODE_MOD_CRT_CNST):
+ case (OP_ALG_PKMODE_MOD_GCD):
+ case (OP_ALG_PKMODE_MOD_PRIMALITY):
+ case (OP_ALG_PKMODE_MOD_SML_EXP):
+ case (OP_ALG_PKMODE_F2M_ADD):
+ case (OP_ALG_PKMODE_F2M_MUL):
+ case (OP_ALG_PKMODE_F2M_MUL_IM):
+ case (OP_ALG_PKMODE_F2M_MUL_IM_OM):
+ case (OP_ALG_PKMODE_F2M_EXP):
+ case (OP_ALG_PKMODE_F2M_EXP_TEQ):
+ case (OP_ALG_PKMODE_F2M_AMODN):
+ case (OP_ALG_PKMODE_F2M_INV):
+ case (OP_ALG_PKMODE_F2M_R2):
+ case (OP_ALG_PKMODE_F2M_GCD):
+ case (OP_ALG_PKMODE_F2M_SML_EXP):
+ case (OP_ALG_PKMODE_ECC_F2M_ADD):
+ case (OP_ALG_PKMODE_ECC_F2M_ADD_IM_OM_PROJ):
+ case (OP_ALG_PKMODE_ECC_F2M_DBL):
+ case (OP_ALG_PKMODE_ECC_F2M_DBL_IM_OM_PROJ):
+ case (OP_ALG_PKMODE_ECC_F2M_MUL):
+ case (OP_ALG_PKMODE_ECC_F2M_MUL_TEQ):
+ case (OP_ALG_PKMODE_ECC_F2M_MUL_R2):
+ case (OP_ALG_PKMODE_ECC_F2M_MUL_R2_TEQ):
+ case (OP_ALG_PKMODE_ECC_F2M_MUL_R2_PROJ):
+ case (OP_ALG_PKMODE_ECC_F2M_MUL_R2_PROJ_TEQ):
+ case (OP_ALG_PKMODE_ECC_MOD_ADD):
+ case (OP_ALG_PKMODE_ECC_MOD_ADD_IM_OM_PROJ):
+ case (OP_ALG_PKMODE_ECC_MOD_DBL):
+ case (OP_ALG_PKMODE_ECC_MOD_DBL_IM_OM_PROJ):
+ case (OP_ALG_PKMODE_ECC_MOD_MUL):
+ case (OP_ALG_PKMODE_ECC_MOD_MUL_TEQ):
+ case (OP_ALG_PKMODE_ECC_MOD_MUL_R2):
+ case (OP_ALG_PKMODE_ECC_MOD_MUL_R2_TEQ):
+ case (OP_ALG_PKMODE_ECC_MOD_MUL_R2_PROJ):
+ case (OP_ALG_PKMODE_ECC_MOD_MUL_R2_PROJ_TEQ):
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_pkha_copymem(uint32_t pkha_op)
+{
+ switch (pkha_op) {
+ case (OP_ALG_PKMODE_COPY_NSZ_A0_B0):
+ case (OP_ALG_PKMODE_COPY_NSZ_A0_B1):
+ case (OP_ALG_PKMODE_COPY_NSZ_A0_B2):
+ case (OP_ALG_PKMODE_COPY_NSZ_A0_B3):
+ case (OP_ALG_PKMODE_COPY_NSZ_A1_B0):
+ case (OP_ALG_PKMODE_COPY_NSZ_A1_B1):
+ case (OP_ALG_PKMODE_COPY_NSZ_A1_B2):
+ case (OP_ALG_PKMODE_COPY_NSZ_A1_B3):
+ case (OP_ALG_PKMODE_COPY_NSZ_A2_B0):
+ case (OP_ALG_PKMODE_COPY_NSZ_A2_B1):
+ case (OP_ALG_PKMODE_COPY_NSZ_A2_B2):
+ case (OP_ALG_PKMODE_COPY_NSZ_A2_B3):
+ case (OP_ALG_PKMODE_COPY_NSZ_A3_B0):
+ case (OP_ALG_PKMODE_COPY_NSZ_A3_B1):
+ case (OP_ALG_PKMODE_COPY_NSZ_A3_B2):
+ case (OP_ALG_PKMODE_COPY_NSZ_A3_B3):
+ case (OP_ALG_PKMODE_COPY_NSZ_B0_A0):
+ case (OP_ALG_PKMODE_COPY_NSZ_B0_A1):
+ case (OP_ALG_PKMODE_COPY_NSZ_B0_A2):
+ case (OP_ALG_PKMODE_COPY_NSZ_B0_A3):
+ case (OP_ALG_PKMODE_COPY_NSZ_B1_A0):
+ case (OP_ALG_PKMODE_COPY_NSZ_B1_A1):
+ case (OP_ALG_PKMODE_COPY_NSZ_B1_A2):
+ case (OP_ALG_PKMODE_COPY_NSZ_B1_A3):
+ case (OP_ALG_PKMODE_COPY_NSZ_B2_A0):
+ case (OP_ALG_PKMODE_COPY_NSZ_B2_A1):
+ case (OP_ALG_PKMODE_COPY_NSZ_B2_A2):
+ case (OP_ALG_PKMODE_COPY_NSZ_B2_A3):
+ case (OP_ALG_PKMODE_COPY_NSZ_B3_A0):
+ case (OP_ALG_PKMODE_COPY_NSZ_B3_A1):
+ case (OP_ALG_PKMODE_COPY_NSZ_B3_A2):
+ case (OP_ALG_PKMODE_COPY_NSZ_B3_A3):
+ case (OP_ALG_PKMODE_COPY_NSZ_A_E):
+ case (OP_ALG_PKMODE_COPY_NSZ_A_N):
+ case (OP_ALG_PKMODE_COPY_NSZ_B_E):
+ case (OP_ALG_PKMODE_COPY_NSZ_B_N):
+ case (OP_ALG_PKMODE_COPY_NSZ_N_A):
+ case (OP_ALG_PKMODE_COPY_NSZ_N_B):
+ case (OP_ALG_PKMODE_COPY_NSZ_N_E):
+ case (OP_ALG_PKMODE_COPY_SSZ_A0_B0):
+ case (OP_ALG_PKMODE_COPY_SSZ_A0_B1):
+ case (OP_ALG_PKMODE_COPY_SSZ_A0_B2):
+ case (OP_ALG_PKMODE_COPY_SSZ_A0_B3):
+ case (OP_ALG_PKMODE_COPY_SSZ_A1_B0):
+ case (OP_ALG_PKMODE_COPY_SSZ_A1_B1):
+ case (OP_ALG_PKMODE_COPY_SSZ_A1_B2):
+ case (OP_ALG_PKMODE_COPY_SSZ_A1_B3):
+ case (OP_ALG_PKMODE_COPY_SSZ_A2_B0):
+ case (OP_ALG_PKMODE_COPY_SSZ_A2_B1):
+ case (OP_ALG_PKMODE_COPY_SSZ_A2_B2):
+ case (OP_ALG_PKMODE_COPY_SSZ_A2_B3):
+ case (OP_ALG_PKMODE_COPY_SSZ_A3_B0):
+ case (OP_ALG_PKMODE_COPY_SSZ_A3_B1):
+ case (OP_ALG_PKMODE_COPY_SSZ_A3_B2):
+ case (OP_ALG_PKMODE_COPY_SSZ_A3_B3):
+ case (OP_ALG_PKMODE_COPY_SSZ_B0_A0):
+ case (OP_ALG_PKMODE_COPY_SSZ_B0_A1):
+ case (OP_ALG_PKMODE_COPY_SSZ_B0_A2):
+ case (OP_ALG_PKMODE_COPY_SSZ_B0_A3):
+ case (OP_ALG_PKMODE_COPY_SSZ_B1_A0):
+ case (OP_ALG_PKMODE_COPY_SSZ_B1_A1):
+ case (OP_ALG_PKMODE_COPY_SSZ_B1_A2):
+ case (OP_ALG_PKMODE_COPY_SSZ_B1_A3):
+ case (OP_ALG_PKMODE_COPY_SSZ_B2_A0):
+ case (OP_ALG_PKMODE_COPY_SSZ_B2_A1):
+ case (OP_ALG_PKMODE_COPY_SSZ_B2_A2):
+ case (OP_ALG_PKMODE_COPY_SSZ_B2_A3):
+ case (OP_ALG_PKMODE_COPY_SSZ_B3_A0):
+ case (OP_ALG_PKMODE_COPY_SSZ_B3_A1):
+ case (OP_ALG_PKMODE_COPY_SSZ_B3_A2):
+ case (OP_ALG_PKMODE_COPY_SSZ_B3_A3):
+ case (OP_ALG_PKMODE_COPY_SSZ_A_E):
+ case (OP_ALG_PKMODE_COPY_SSZ_A_N):
+ case (OP_ALG_PKMODE_COPY_SSZ_B_E):
+ case (OP_ALG_PKMODE_COPY_SSZ_B_N):
+ case (OP_ALG_PKMODE_COPY_SSZ_N_A):
+ case (OP_ALG_PKMODE_COPY_SSZ_N_B):
+ case (OP_ALG_PKMODE_COPY_SSZ_N_E):
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+rta_pkha_operation(struct program *program, uint32_t op_pkha)
+{
+ uint32_t opcode = CMD_OPERATION | OP_TYPE_PK | OP_ALG_PK;
+ uint32_t pkha_func;
+ unsigned int start_pc = program->current_pc;
+ int ret = -EINVAL;
+
+ pkha_func = op_pkha & OP_ALG_PK_FUN_MASK;
+
+ switch (pkha_func) {
+ case (OP_ALG_PKMODE_CLEARMEM):
+ ret = __rta_pkha_clearmem(op_pkha);
+ if (ret < 0) {
+ pr_err("OPERATION PKHA: Type not supported. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+ break;
+ case (OP_ALG_PKMODE_MOD_ADD):
+ case (OP_ALG_PKMODE_MOD_SUB_AB):
+ case (OP_ALG_PKMODE_MOD_SUB_BA):
+ case (OP_ALG_PKMODE_MOD_MULT):
+ case (OP_ALG_PKMODE_MOD_EXPO):
+ case (OP_ALG_PKMODE_MOD_REDUCT):
+ case (OP_ALG_PKMODE_MOD_INV):
+ case (OP_ALG_PKMODE_MOD_MONT_CNST):
+ case (OP_ALG_PKMODE_MOD_CRT_CNST):
+ case (OP_ALG_PKMODE_MOD_GCD):
+ case (OP_ALG_PKMODE_MOD_PRIMALITY):
+ case (OP_ALG_PKMODE_MOD_SML_EXP):
+ case (OP_ALG_PKMODE_ECC_MOD_ADD):
+ case (OP_ALG_PKMODE_ECC_MOD_DBL):
+ case (OP_ALG_PKMODE_ECC_MOD_MUL):
+ ret = __rta_pkha_mod_arithmetic(op_pkha);
+ if (ret < 0) {
+ pr_err("OPERATION PKHA: Type not supported. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+ break;
+ case (OP_ALG_PKMODE_COPY_NSZ):
+ case (OP_ALG_PKMODE_COPY_SSZ):
+ ret = __rta_pkha_copymem(op_pkha);
+ if (ret < 0) {
+ pr_err("OPERATION PKHA: Type not supported. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+ break;
+ default:
+ pr_err("Invalid Operation Command\n");
+ goto err;
+ }
+
+ opcode |= op_pkha;
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return ret;
+}
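+
+/*
+ * Usage sketch (illustrative only; the example_* helper is not part of this
+ * header): emit a PKHA OPERATION that performs modular exponentiation on
+ * operands previously loaded into the PKHA registers.
+ */
+static inline int
+example_pkha_mod_expo(struct program *p)
+{
+	return rta_pkha_operation(p, OP_ALG_PKMODE_MOD_EXPO);
+}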
+
+#endif /* __RTA_OPERATION_CMD_H__ */
diff --git a/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/protocol_cmd.h b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/protocol_cmd.h
new file mode 100644
index 00000000..d9a5b0e5
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/protocol_cmd.h
@@ -0,0 +1,699 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ */
+
+#ifndef __RTA_PROTOCOL_CMD_H__
+#define __RTA_PROTOCOL_CMD_H__
+
+extern enum rta_sec_era rta_sec_era;
+
+static inline int
+__rta_ssl_proto(uint16_t protoinfo)
+{
+ switch (protoinfo) {
+ case OP_PCL_SSL30_RC4_40_MD5_2:
+ case OP_PCL_SSL30_RC4_128_MD5_2:
+ case OP_PCL_SSL30_RC4_128_SHA_5:
+ case OP_PCL_SSL30_RC4_40_MD5_3:
+ case OP_PCL_SSL30_RC4_128_MD5_3:
+ case OP_PCL_SSL30_RC4_128_SHA:
+ case OP_PCL_SSL30_RC4_128_MD5:
+ case OP_PCL_SSL30_RC4_40_SHA:
+ case OP_PCL_SSL30_RC4_40_MD5:
+ case OP_PCL_SSL30_RC4_128_SHA_2:
+ case OP_PCL_SSL30_RC4_128_SHA_3:
+ case OP_PCL_SSL30_RC4_128_SHA_4:
+ case OP_PCL_SSL30_RC4_128_SHA_6:
+ case OP_PCL_SSL30_RC4_128_SHA_7:
+ case OP_PCL_SSL30_RC4_128_SHA_8:
+ case OP_PCL_SSL30_RC4_128_SHA_9:
+ case OP_PCL_SSL30_RC4_128_SHA_10:
+ case OP_PCL_TLS_ECDHE_PSK_RC4_128_SHA:
+ if (rta_sec_era == RTA_SEC_ERA_7)
+ return -EINVAL;
+ /* fall through if not Era 7 */
+ case OP_PCL_SSL30_DES40_CBC_SHA:
+ case OP_PCL_SSL30_DES_CBC_SHA_2:
+ case OP_PCL_SSL30_3DES_EDE_CBC_SHA_5:
+ case OP_PCL_SSL30_DES40_CBC_SHA_2:
+ case OP_PCL_SSL30_DES_CBC_SHA_3:
+ case OP_PCL_SSL30_3DES_EDE_CBC_SHA_6:
+ case OP_PCL_SSL30_DES40_CBC_SHA_3:
+ case OP_PCL_SSL30_DES_CBC_SHA_4:
+ case OP_PCL_SSL30_3DES_EDE_CBC_SHA_7:
+ case OP_PCL_SSL30_DES40_CBC_SHA_4:
+ case OP_PCL_SSL30_DES_CBC_SHA_5:
+ case OP_PCL_SSL30_3DES_EDE_CBC_SHA_8:
+ case OP_PCL_SSL30_DES40_CBC_SHA_5:
+ case OP_PCL_SSL30_DES_CBC_SHA_6:
+ case OP_PCL_SSL30_3DES_EDE_CBC_SHA_9:
+ case OP_PCL_SSL30_DES40_CBC_SHA_6:
+ case OP_PCL_SSL30_DES_CBC_SHA_7:
+ case OP_PCL_SSL30_3DES_EDE_CBC_SHA_10:
+ case OP_PCL_SSL30_DES_CBC_SHA:
+ case OP_PCL_SSL30_3DES_EDE_CBC_SHA:
+ case OP_PCL_SSL30_DES_CBC_MD5:
+ case OP_PCL_SSL30_3DES_EDE_CBC_MD5:
+ case OP_PCL_SSL30_DES40_CBC_SHA_7:
+ case OP_PCL_SSL30_DES40_CBC_MD5:
+ case OP_PCL_SSL30_AES_128_CBC_SHA:
+ case OP_PCL_SSL30_AES_128_CBC_SHA_2:
+ case OP_PCL_SSL30_AES_128_CBC_SHA_3:
+ case OP_PCL_SSL30_AES_128_CBC_SHA_4:
+ case OP_PCL_SSL30_AES_128_CBC_SHA_5:
+ case OP_PCL_SSL30_AES_128_CBC_SHA_6:
+ case OP_PCL_SSL30_AES_256_CBC_SHA:
+ case OP_PCL_SSL30_AES_256_CBC_SHA_2:
+ case OP_PCL_SSL30_AES_256_CBC_SHA_3:
+ case OP_PCL_SSL30_AES_256_CBC_SHA_4:
+ case OP_PCL_SSL30_AES_256_CBC_SHA_5:
+ case OP_PCL_SSL30_AES_256_CBC_SHA_6:
+ case OP_PCL_TLS12_AES_128_CBC_SHA256_2:
+ case OP_PCL_TLS12_AES_128_CBC_SHA256_3:
+ case OP_PCL_TLS12_AES_128_CBC_SHA256_4:
+ case OP_PCL_TLS12_AES_128_CBC_SHA256_5:
+ case OP_PCL_TLS12_AES_256_CBC_SHA256_2:
+ case OP_PCL_TLS12_AES_256_CBC_SHA256_3:
+ case OP_PCL_TLS12_AES_256_CBC_SHA256_4:
+ case OP_PCL_TLS12_AES_256_CBC_SHA256_5:
+ case OP_PCL_TLS12_AES_128_CBC_SHA256_6:
+ case OP_PCL_TLS12_AES_256_CBC_SHA256_6:
+ case OP_PCL_SSL30_3DES_EDE_CBC_SHA_2:
+ case OP_PCL_SSL30_AES_128_CBC_SHA_7:
+ case OP_PCL_SSL30_AES_256_CBC_SHA_7:
+ case OP_PCL_SSL30_3DES_EDE_CBC_SHA_3:
+ case OP_PCL_SSL30_AES_128_CBC_SHA_8:
+ case OP_PCL_SSL30_AES_256_CBC_SHA_8:
+ case OP_PCL_SSL30_3DES_EDE_CBC_SHA_4:
+ case OP_PCL_SSL30_AES_128_CBC_SHA_9:
+ case OP_PCL_SSL30_AES_256_CBC_SHA_9:
+ case OP_PCL_SSL30_AES_128_GCM_SHA256_1:
+ case OP_PCL_SSL30_AES_256_GCM_SHA384_1:
+ case OP_PCL_SSL30_AES_128_GCM_SHA256_2:
+ case OP_PCL_SSL30_AES_256_GCM_SHA384_2:
+ case OP_PCL_SSL30_AES_128_GCM_SHA256_3:
+ case OP_PCL_SSL30_AES_256_GCM_SHA384_3:
+ case OP_PCL_SSL30_AES_128_GCM_SHA256_4:
+ case OP_PCL_SSL30_AES_256_GCM_SHA384_4:
+ case OP_PCL_SSL30_AES_128_GCM_SHA256_5:
+ case OP_PCL_SSL30_AES_256_GCM_SHA384_5:
+ case OP_PCL_SSL30_AES_128_GCM_SHA256_6:
+ case OP_PCL_TLS_DH_ANON_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_PSK_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_PSK_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_DHE_PSK_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_DHE_PSK_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_RSA_PSK_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_RSA_PSK_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_PSK_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_PSK_AES_256_CBC_SHA384:
+ case OP_PCL_TLS_DHE_PSK_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_DHE_PSK_AES_256_CBC_SHA384:
+ case OP_PCL_TLS_RSA_PSK_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_RSA_PSK_AES_256_CBC_SHA384:
+ case OP_PCL_SSL30_3DES_EDE_CBC_SHA_11:
+ case OP_PCL_SSL30_AES_128_CBC_SHA_10:
+ case OP_PCL_SSL30_AES_256_CBC_SHA_10:
+ case OP_PCL_SSL30_3DES_EDE_CBC_SHA_12:
+ case OP_PCL_SSL30_AES_128_CBC_SHA_11:
+ case OP_PCL_SSL30_AES_256_CBC_SHA_11:
+ case OP_PCL_SSL30_AES_128_CBC_SHA_12:
+ case OP_PCL_SSL30_3DES_EDE_CBC_SHA_13:
+ case OP_PCL_SSL30_AES_256_CBC_SHA_12:
+ case OP_PCL_SSL30_3DES_EDE_CBC_SHA_14:
+ case OP_PCL_SSL30_AES_128_CBC_SHA_13:
+ case OP_PCL_SSL30_AES_256_CBC_SHA_13:
+ case OP_PCL_SSL30_3DES_EDE_CBC_SHA_15:
+ case OP_PCL_SSL30_AES_128_CBC_SHA_14:
+ case OP_PCL_SSL30_AES_256_CBC_SHA_14:
+ case OP_PCL_SSL30_3DES_EDE_CBC_SHA_16:
+ case OP_PCL_SSL30_3DES_EDE_CBC_SHA_17:
+ case OP_PCL_SSL30_3DES_EDE_CBC_SHA_18:
+ case OP_PCL_SSL30_AES_128_CBC_SHA_15:
+ case OP_PCL_SSL30_AES_128_CBC_SHA_16:
+ case OP_PCL_SSL30_AES_128_CBC_SHA_17:
+ case OP_PCL_SSL30_AES_256_CBC_SHA_15:
+ case OP_PCL_SSL30_AES_256_CBC_SHA_16:
+ case OP_PCL_SSL30_AES_256_CBC_SHA_17:
+ case OP_PCL_TLS_ECDHE_ECDSA_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_ECDHE_ECDSA_AES_256_CBC_SHA384:
+ case OP_PCL_TLS_ECDH_ECDSA_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_ECDH_ECDSA_AES_256_CBC_SHA384:
+ case OP_PCL_TLS_ECDHE_RSA_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_ECDHE_RSA_AES_256_CBC_SHA384:
+ case OP_PCL_TLS_ECDH_RSA_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_ECDH_RSA_AES_256_CBC_SHA384:
+ case OP_PCL_TLS_ECDHE_ECDSA_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_ECDHE_ECDSA_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_ECDH_ECDSA_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_ECDH_ECDSA_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_ECDHE_RSA_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_ECDHE_RSA_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_ECDH_RSA_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_ECDH_RSA_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_ECDHE_PSK_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_ECDHE_PSK_AES_128_CBC_SHA:
+ case OP_PCL_TLS_ECDHE_PSK_AES_256_CBC_SHA:
+ case OP_PCL_TLS_ECDHE_PSK_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_ECDHE_PSK_AES_256_CBC_SHA384:
+ case OP_PCL_TLS12_3DES_EDE_CBC_MD5:
+ case OP_PCL_TLS12_3DES_EDE_CBC_SHA160:
+ case OP_PCL_TLS12_3DES_EDE_CBC_SHA224:
+ case OP_PCL_TLS12_3DES_EDE_CBC_SHA256:
+ case OP_PCL_TLS12_3DES_EDE_CBC_SHA384:
+ case OP_PCL_TLS12_3DES_EDE_CBC_SHA512:
+ case OP_PCL_TLS12_AES_128_CBC_SHA160:
+ case OP_PCL_TLS12_AES_128_CBC_SHA224:
+ case OP_PCL_TLS12_AES_128_CBC_SHA256:
+ case OP_PCL_TLS12_AES_128_CBC_SHA384:
+ case OP_PCL_TLS12_AES_128_CBC_SHA512:
+ case OP_PCL_TLS12_AES_192_CBC_SHA160:
+ case OP_PCL_TLS12_AES_192_CBC_SHA224:
+ case OP_PCL_TLS12_AES_192_CBC_SHA256:
+ case OP_PCL_TLS12_AES_192_CBC_SHA512:
+ case OP_PCL_TLS12_AES_256_CBC_SHA160:
+ case OP_PCL_TLS12_AES_256_CBC_SHA224:
+ case OP_PCL_TLS12_AES_256_CBC_SHA256:
+ case OP_PCL_TLS12_AES_256_CBC_SHA384:
+ case OP_PCL_TLS12_AES_256_CBC_SHA512:
+ case OP_PCL_TLS_PVT_AES_192_CBC_SHA160:
+ case OP_PCL_TLS_PVT_AES_192_CBC_SHA384:
+ case OP_PCL_TLS_PVT_AES_192_CBC_SHA224:
+ case OP_PCL_TLS_PVT_AES_192_CBC_SHA512:
+ case OP_PCL_TLS_PVT_AES_192_CBC_SHA256:
+ case OP_PCL_TLS_PVT_MASTER_SECRET_PRF_FE:
+ case OP_PCL_TLS_PVT_MASTER_SECRET_PRF_FF:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_ike_proto(uint16_t protoinfo)
+{
+ switch (protoinfo) {
+ case OP_PCL_IKE_HMAC_MD5:
+ case OP_PCL_IKE_HMAC_SHA1:
+ case OP_PCL_IKE_HMAC_AES128_CBC:
+ case OP_PCL_IKE_HMAC_SHA256:
+ case OP_PCL_IKE_HMAC_SHA384:
+ case OP_PCL_IKE_HMAC_SHA512:
+ case OP_PCL_IKE_HMAC_AES128_CMAC:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_ipsec_proto(uint16_t protoinfo)
+{
+ uint16_t proto_cls1 = protoinfo & OP_PCL_IPSEC_CIPHER_MASK;
+ uint16_t proto_cls2 = protoinfo & OP_PCL_IPSEC_AUTH_MASK;
+
+ switch (proto_cls1) {
+ case OP_PCL_IPSEC_AES_NULL_WITH_GMAC:
+ if (rta_sec_era < RTA_SEC_ERA_2)
+ return -EINVAL;
+ /* no break */
+ case OP_PCL_IPSEC_AES_CCM8:
+ case OP_PCL_IPSEC_AES_CCM12:
+ case OP_PCL_IPSEC_AES_CCM16:
+ case OP_PCL_IPSEC_AES_GCM8:
+ case OP_PCL_IPSEC_AES_GCM12:
+ case OP_PCL_IPSEC_AES_GCM16:
+ /* CCM, GCM, GMAC require PROTINFO[7:0] = 0 */
+ if (proto_cls2 == OP_PCL_IPSEC_HMAC_NULL)
+ return 0;
+ return -EINVAL;
+ case OP_PCL_IPSEC_NULL:
+ if (rta_sec_era < RTA_SEC_ERA_2)
+ return -EINVAL;
+ /* no break */
+ case OP_PCL_IPSEC_DES_IV64:
+ case OP_PCL_IPSEC_DES:
+ case OP_PCL_IPSEC_3DES:
+ case OP_PCL_IPSEC_AES_CBC:
+ case OP_PCL_IPSEC_AES_CTR:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (proto_cls2) {
+ case OP_PCL_IPSEC_HMAC_NULL:
+ case OP_PCL_IPSEC_HMAC_MD5_96:
+ case OP_PCL_IPSEC_HMAC_SHA1_96:
+ case OP_PCL_IPSEC_AES_XCBC_MAC_96:
+ case OP_PCL_IPSEC_HMAC_MD5_128:
+ case OP_PCL_IPSEC_HMAC_SHA1_160:
+ case OP_PCL_IPSEC_AES_CMAC_96:
+ case OP_PCL_IPSEC_HMAC_SHA2_256_128:
+ case OP_PCL_IPSEC_HMAC_SHA2_384_192:
+ case OP_PCL_IPSEC_HMAC_SHA2_512_256:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_srtp_proto(uint16_t protoinfo)
+{
+ uint16_t proto_cls1 = protoinfo & OP_PCL_SRTP_CIPHER_MASK;
+ uint16_t proto_cls2 = protoinfo & OP_PCL_SRTP_AUTH_MASK;
+
+ switch (proto_cls1) {
+ case OP_PCL_SRTP_AES_CTR:
+ switch (proto_cls2) {
+ case OP_PCL_SRTP_HMAC_SHA1_160:
+ return 0;
+ }
+ /* no break */
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_macsec_proto(uint16_t protoinfo)
+{
+ switch (protoinfo) {
+ case OP_PCL_MACSEC:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_wifi_proto(uint16_t protoinfo)
+{
+ switch (protoinfo) {
+ case OP_PCL_WIFI:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_wimax_proto(uint16_t protoinfo)
+{
+ switch (protoinfo) {
+ case OP_PCL_WIMAX_OFDM:
+ case OP_PCL_WIMAX_OFDMA:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+/* Allowed blob proto flags for each SEC Era */
+static const uint32_t proto_blob_flags[] = {
+	OP_PCL_BLOB_FORMAT_MASK | OP_PCL_BLOB_BLACK,
+	OP_PCL_BLOB_FORMAT_MASK | OP_PCL_BLOB_BLACK | OP_PCL_BLOB_TKEK |
+		OP_PCL_BLOB_EKT | OP_PCL_BLOB_REG_MASK,
+	OP_PCL_BLOB_FORMAT_MASK | OP_PCL_BLOB_BLACK | OP_PCL_BLOB_TKEK |
+		OP_PCL_BLOB_EKT | OP_PCL_BLOB_REG_MASK,
+	OP_PCL_BLOB_FORMAT_MASK | OP_PCL_BLOB_BLACK | OP_PCL_BLOB_TKEK |
+		OP_PCL_BLOB_EKT | OP_PCL_BLOB_REG_MASK | OP_PCL_BLOB_SEC_MEM,
+	OP_PCL_BLOB_FORMAT_MASK | OP_PCL_BLOB_BLACK | OP_PCL_BLOB_TKEK |
+		OP_PCL_BLOB_EKT | OP_PCL_BLOB_REG_MASK | OP_PCL_BLOB_SEC_MEM,
+	/* one entry per SEC Era; Eras 6-8 assumed to allow the Era 5 flags */
+	OP_PCL_BLOB_FORMAT_MASK | OP_PCL_BLOB_BLACK | OP_PCL_BLOB_TKEK |
+		OP_PCL_BLOB_EKT | OP_PCL_BLOB_REG_MASK | OP_PCL_BLOB_SEC_MEM,
+	OP_PCL_BLOB_FORMAT_MASK | OP_PCL_BLOB_BLACK | OP_PCL_BLOB_TKEK |
+		OP_PCL_BLOB_EKT | OP_PCL_BLOB_REG_MASK | OP_PCL_BLOB_SEC_MEM,
+	OP_PCL_BLOB_FORMAT_MASK | OP_PCL_BLOB_BLACK | OP_PCL_BLOB_TKEK |
+		OP_PCL_BLOB_EKT | OP_PCL_BLOB_REG_MASK | OP_PCL_BLOB_SEC_MEM
+};
+
+static inline int
+__rta_blob_proto(uint16_t protoinfo)
+{
+ if (protoinfo & ~proto_blob_flags[rta_sec_era])
+ return -EINVAL;
+
+ switch (protoinfo & OP_PCL_BLOB_FORMAT_MASK) {
+ case OP_PCL_BLOB_FORMAT_NORMAL:
+ case OP_PCL_BLOB_FORMAT_MASTER_VER:
+ case OP_PCL_BLOB_FORMAT_TEST:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (protoinfo & OP_PCL_BLOB_REG_MASK) {
+ case OP_PCL_BLOB_AFHA_SBOX:
+ if (rta_sec_era < RTA_SEC_ERA_3)
+ return -EINVAL;
+ /* no break */
+ case OP_PCL_BLOB_REG_MEMORY:
+ case OP_PCL_BLOB_REG_KEY1:
+ case OP_PCL_BLOB_REG_KEY2:
+ case OP_PCL_BLOB_REG_SPLIT:
+ case OP_PCL_BLOB_REG_PKE:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_dlc_proto(uint16_t protoinfo)
+{
+ if ((rta_sec_era < RTA_SEC_ERA_2) &&
+ (protoinfo & (OP_PCL_PKPROT_DSA_MSG | OP_PCL_PKPROT_HASH_MASK |
+ OP_PCL_PKPROT_EKT_Z | OP_PCL_PKPROT_DECRYPT_Z |
+ OP_PCL_PKPROT_DECRYPT_PRI)))
+ return -EINVAL;
+
+ switch (protoinfo & OP_PCL_PKPROT_HASH_MASK) {
+ case OP_PCL_PKPROT_HASH_MD5:
+ case OP_PCL_PKPROT_HASH_SHA1:
+ case OP_PCL_PKPROT_HASH_SHA224:
+ case OP_PCL_PKPROT_HASH_SHA256:
+ case OP_PCL_PKPROT_HASH_SHA384:
+ case OP_PCL_PKPROT_HASH_SHA512:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static inline int
+__rta_rsa_enc_proto(uint16_t protoinfo)
+{
+ switch (protoinfo & OP_PCL_RSAPROT_OP_MASK) {
+ case OP_PCL_RSAPROT_OP_ENC_F_IN:
+ if ((protoinfo & OP_PCL_RSAPROT_FFF_MASK) !=
+ OP_PCL_RSAPROT_FFF_RED)
+ return -EINVAL;
+ break;
+ case OP_PCL_RSAPROT_OP_ENC_F_OUT:
+ switch (protoinfo & OP_PCL_RSAPROT_FFF_MASK) {
+ case OP_PCL_RSAPROT_FFF_RED:
+ case OP_PCL_RSAPROT_FFF_ENC:
+ case OP_PCL_RSAPROT_FFF_EKT:
+ case OP_PCL_RSAPROT_FFF_TK_ENC:
+ case OP_PCL_RSAPROT_FFF_TK_EKT:
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static inline int
+__rta_rsa_dec_proto(uint16_t protoinfo)
+{
+ switch (protoinfo & OP_PCL_RSAPROT_OP_MASK) {
+ case OP_PCL_RSAPROT_OP_DEC_ND:
+ case OP_PCL_RSAPROT_OP_DEC_PQD:
+ case OP_PCL_RSAPROT_OP_DEC_PQDPDQC:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (protoinfo & OP_PCL_RSAPROT_PPP_MASK) {
+ case OP_PCL_RSAPROT_PPP_RED:
+ case OP_PCL_RSAPROT_PPP_ENC:
+ case OP_PCL_RSAPROT_PPP_EKT:
+ case OP_PCL_RSAPROT_PPP_TK_ENC:
+ case OP_PCL_RSAPROT_PPP_TK_EKT:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (protoinfo & OP_PCL_RSAPROT_FMT_PKCSV15)
+ switch (protoinfo & OP_PCL_RSAPROT_FFF_MASK) {
+ case OP_PCL_RSAPROT_FFF_RED:
+ case OP_PCL_RSAPROT_FFF_ENC:
+ case OP_PCL_RSAPROT_FFF_EKT:
+ case OP_PCL_RSAPROT_FFF_TK_ENC:
+ case OP_PCL_RSAPROT_FFF_TK_EKT:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * DKP Protocol - Restrictions on key (SRC,DST) combinations
+ * For example, key_in_out[0][0] = 1 means the (SRC=IMM, DST=IMM) combination
+ * is allowed
+ */
+static const uint8_t key_in_out[4][4] = { {1, 0, 0, 0},
+ {1, 1, 1, 1},
+ {1, 0, 1, 0},
+ {1, 0, 0, 1} };
+
+static inline int
+__rta_dkp_proto(uint16_t protoinfo)
+{
+ int key_src = (protoinfo & OP_PCL_DKP_SRC_MASK) >> OP_PCL_DKP_SRC_SHIFT;
+ int key_dst = (protoinfo & OP_PCL_DKP_DST_MASK) >> OP_PCL_DKP_DST_SHIFT;
+
+ if (!key_in_out[key_src][key_dst]) {
+ pr_err("PROTO_DESC: Invalid DKP key (SRC,DST)\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static inline int
+__rta_3g_dcrc_proto(uint16_t protoinfo)
+{
+ if (rta_sec_era == RTA_SEC_ERA_7)
+ return -EINVAL;
+
+ switch (protoinfo) {
+ case OP_PCL_3G_DCRC_CRC7:
+ case OP_PCL_3G_DCRC_CRC11:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_3g_rlc_proto(uint16_t protoinfo)
+{
+ if (rta_sec_era == RTA_SEC_ERA_7)
+ return -EINVAL;
+
+ switch (protoinfo) {
+ case OP_PCL_3G_RLC_NULL:
+ case OP_PCL_3G_RLC_KASUMI:
+ case OP_PCL_3G_RLC_SNOW:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_lte_pdcp_proto(uint16_t protoinfo)
+{
+ if (rta_sec_era == RTA_SEC_ERA_7)
+ return -EINVAL;
+
+ switch (protoinfo) {
+ case OP_PCL_LTE_ZUC:
+ if (rta_sec_era < RTA_SEC_ERA_5)
+ break;
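+		/* no break - ZUC falls through as valid for SEC Era >= 5 */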
+ case OP_PCL_LTE_NULL:
+ case OP_PCL_LTE_SNOW:
+ case OP_PCL_LTE_AES:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_lte_pdcp_mixed_proto(uint16_t protoinfo)
+{
+ switch (protoinfo & OP_PCL_LTE_MIXED_AUTH_MASK) {
+ case OP_PCL_LTE_MIXED_AUTH_NULL:
+ case OP_PCL_LTE_MIXED_AUTH_SNOW:
+ case OP_PCL_LTE_MIXED_AUTH_AES:
+ case OP_PCL_LTE_MIXED_AUTH_ZUC:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (protoinfo & OP_PCL_LTE_MIXED_ENC_MASK) {
+ case OP_PCL_LTE_MIXED_ENC_NULL:
+ case OP_PCL_LTE_MIXED_ENC_SNOW:
+ case OP_PCL_LTE_MIXED_ENC_AES:
+ case OP_PCL_LTE_MIXED_ENC_ZUC:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+struct proto_map {
+ uint32_t optype;
+ uint32_t protid;
+ int (*protoinfo_func)(uint16_t);
+};
+
+static const struct proto_map proto_table[] = {
+/*1*/ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_SSL30_PRF, __rta_ssl_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_TLS10_PRF, __rta_ssl_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_TLS11_PRF, __rta_ssl_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_TLS12_PRF, __rta_ssl_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_DTLS10_PRF, __rta_ssl_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_IKEV1_PRF, __rta_ike_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_IKEV2_PRF, __rta_ike_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_PUBLICKEYPAIR, __rta_dlc_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_DSASIGN, __rta_dlc_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_DSAVERIFY, __rta_dlc_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_IPSEC, __rta_ipsec_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_SRTP, __rta_srtp_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_SSL30, __rta_ssl_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_TLS10, __rta_ssl_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_TLS11, __rta_ssl_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_TLS12, __rta_ssl_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_DTLS10, __rta_ssl_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_MACSEC, __rta_macsec_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_WIFI, __rta_wifi_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_WIMAX, __rta_wimax_proto},
+/*21*/ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_BLOB, __rta_blob_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_DIFFIEHELLMAN, __rta_dlc_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_RSAENCRYPT, __rta_rsa_enc_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_RSADECRYPT, __rta_rsa_dec_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_3G_DCRC, __rta_3g_dcrc_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_3G_RLC_PDU, __rta_3g_rlc_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_3G_RLC_SDU, __rta_3g_rlc_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_LTE_PDCP_USER, __rta_lte_pdcp_proto},
+/*29*/ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_LTE_PDCP_CTRL, __rta_lte_pdcp_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_DKP_MD5, __rta_dkp_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_DKP_SHA1, __rta_dkp_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_DKP_SHA224, __rta_dkp_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_DKP_SHA256, __rta_dkp_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_DKP_SHA384, __rta_dkp_proto},
+/*35*/ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_DKP_SHA512, __rta_dkp_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_PUBLICKEYPAIR, __rta_dlc_proto},
+/*37*/ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_DSASIGN, __rta_dlc_proto},
+/*38*/ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_LTE_PDCP_CTRL_MIXED,
+ __rta_lte_pdcp_mixed_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_IPSEC_NEW, __rta_ipsec_proto},
+};
+
+/*
+ * Allowed OPERATION protocols for each SEC Era.
+ * Values represent the number of entries from proto_table[] that are supported.
+ */
+static const unsigned int proto_table_sz[] = {21, 29, 29, 29, 29, 35, 37, 39};
+
+static inline int
+rta_proto_operation(struct program *program, uint32_t optype,
+ uint32_t protid, uint16_t protoinfo)
+{
+ uint32_t opcode = CMD_OPERATION;
+ unsigned int i, found = 0;
+ uint32_t optype_tmp = optype;
+ unsigned int start_pc = program->current_pc;
+ int ret = -EINVAL;
+
+ for (i = 0; i < proto_table_sz[rta_sec_era]; i++) {
+		/* clear last bit so encap optype also matches decap entries */
+ optype_tmp &= (uint32_t)~(1 << OP_TYPE_SHIFT);
+ if (optype_tmp == proto_table[i].optype) {
+ if (proto_table[i].protid == protid) {
+ /* nothing else to verify */
+ if (proto_table[i].protoinfo_func == NULL) {
+ found = 1;
+ break;
+ }
+ /* check protoinfo */
+ ret = (*proto_table[i].protoinfo_func)
+ (protoinfo);
+ if (ret < 0) {
+ pr_err("PROTO_DESC: Bad PROTO Type. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+ found = 1;
+ break;
+ }
+ }
+ }
+ if (!found) {
+ pr_err("PROTO_DESC: Operation Type Mismatch. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+
+ __rta_out32(program, opcode | optype | protid | protoinfo);
+ program->current_instruction++;
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return ret;
+}
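+
+/*
+ * Usage sketch (illustrative only; the example_* helper is not part of this
+ * header): emit a PROTOCOL OPERATION for IPsec decapsulation with AES-CBC
+ * encryption and HMAC-SHA1-96 authentication.
+ */
+static inline int
+example_ipsec_decap(struct program *p)
+{
+	return rta_proto_operation(p, OP_TYPE_DECAP_PROTOCOL, OP_PCLID_IPSEC,
+				   OP_PCL_IPSEC_AES_CBC |
+				   OP_PCL_IPSEC_HMAC_SHA1_96);
+}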
+
+static inline int
+rta_dkp_proto(struct program *program, uint32_t protid,
+ uint16_t key_src, uint16_t key_dst,
+ uint16_t keylen, uint64_t key,
+ enum rta_data_type key_type)
+{
+ unsigned int start_pc = program->current_pc;
+ unsigned int in_words = 0, out_words = 0;
+ int ret;
+
+ key_src &= OP_PCL_DKP_SRC_MASK;
+ key_dst &= OP_PCL_DKP_DST_MASK;
+ keylen &= OP_PCL_DKP_KEY_MASK;
+
+ ret = rta_proto_operation(program, OP_TYPE_UNI_PROTOCOL, protid,
+ key_src | key_dst | keylen);
+ if (ret < 0)
+ return ret;
+
+ if ((key_src == OP_PCL_DKP_SRC_PTR) ||
+ (key_src == OP_PCL_DKP_SRC_SGF)) {
+ __rta_out64(program, program->ps, key);
+ in_words = program->ps ? 2 : 1;
+ } else if (key_src == OP_PCL_DKP_SRC_IMM) {
+ __rta_inline_data(program, key, inline_flags(key_type), keylen);
+ in_words = (unsigned int)((keylen + 3) / 4);
+ }
+
+ if ((key_dst == OP_PCL_DKP_DST_PTR) ||
+ (key_dst == OP_PCL_DKP_DST_SGF)) {
+ out_words = in_words;
+ } else if (key_dst == OP_PCL_DKP_DST_IMM) {
+ out_words = split_key_len(protid) / 4;
+ }
+
+ if (out_words < in_words) {
+ pr_err("PROTO_DESC: DKP doesn't currently support a smaller descriptor\n");
+ program->first_error_pc = start_pc;
+ return -EINVAL;
+ }
+
+ /* If needed, reserve space in resulting descriptor for derived key */
+ program->current_pc += (out_words - in_words);
+
+ return (int)start_pc;
+}
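+
+/*
+ * Usage sketch (illustrative only; the example_* helper and its parameters
+ * are hypothetical): derive an HMAC-SHA256 split key. The raw key is passed
+ * as immediate data (a virtual address, per RTA_DATA_IMM) and the derived
+ * key is written back into the descriptor; keylen is assumed not to exceed
+ * the SHA-256 split key length, so the descriptor never shrinks.
+ */
+static inline int
+example_dkp_sha256(struct program *p, uint64_t raw_key, uint16_t keylen)
+{
+	return rta_dkp_proto(p, OP_PCLID_DKP_SHA256, OP_PCL_DKP_SRC_IMM,
+			     OP_PCL_DKP_DST_IMM, keylen, raw_key,
+			     RTA_DATA_IMM);
+}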
+
+#endif /* __RTA_PROTOCOL_CMD_H__ */
diff --git a/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/sec_run_time_asm.h b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/sec_run_time_asm.h
new file mode 100644
index 00000000..6e666108
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/sec_run_time_asm.h
@@ -0,0 +1,790 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ */
+
+#ifndef __RTA_SEC_RUN_TIME_ASM_H__
+#define __RTA_SEC_RUN_TIME_ASM_H__
+
+#include "hw/desc.h"
+
+/* hw/compat.h is not delivered in kernel */
+#ifndef __KERNEL__
+#include "hw/compat.h"
+#endif
+
+/**
+ * enum rta_sec_era - SEC HW block revisions supported by the RTA library
+ * @RTA_SEC_ERA_1: SEC Era 1
+ * @RTA_SEC_ERA_2: SEC Era 2
+ * @RTA_SEC_ERA_3: SEC Era 3
+ * @RTA_SEC_ERA_4: SEC Era 4
+ * @RTA_SEC_ERA_5: SEC Era 5
+ * @RTA_SEC_ERA_6: SEC Era 6
+ * @RTA_SEC_ERA_7: SEC Era 7
+ * @RTA_SEC_ERA_8: SEC Era 8
+ * @MAX_SEC_ERA: maximum SEC HW block revision supported by RTA library
+ */
+enum rta_sec_era {
+ RTA_SEC_ERA_1,
+ RTA_SEC_ERA_2,
+ RTA_SEC_ERA_3,
+ RTA_SEC_ERA_4,
+ RTA_SEC_ERA_5,
+ RTA_SEC_ERA_6,
+ RTA_SEC_ERA_7,
+ RTA_SEC_ERA_8,
+ MAX_SEC_ERA = RTA_SEC_ERA_8
+};
+
+/**
+ * DEFAULT_SEC_ERA - the default value for the SEC era in case the user provides
+ * an unsupported value.
+ */
+#define DEFAULT_SEC_ERA MAX_SEC_ERA
+
+/**
+ * USER_SEC_ERA - translates the SEC Era from internal to user representation.
+ * @sec_era: SEC Era in internal (library) representation
+ */
+#define USER_SEC_ERA(sec_era) (sec_era + 1)
+
+/**
+ * INTL_SEC_ERA - translates the SEC Era from user representation to internal.
+ * @sec_era: SEC Era in user representation
+ */
+#define INTL_SEC_ERA(sec_era) (sec_era - 1)
+
+/**
+ * enum rta_jump_type - Types of action taken by JUMP command
+ * @LOCAL_JUMP: conditional jump to an offset within the descriptor buffer
+ * @FAR_JUMP: conditional jump to a location outside the descriptor buffer,
+ * indicated by the POINTER field after the JUMP command.
+ * @HALT: conditional halt - stops the execution of the current descriptor and
+ * writes the PKHA / Math condition bits as the status / error code.
+ * @HALT_STATUS: conditional halt with user-specified status - stops the
+ * execution of the current descriptor and writes the value of the
+ * "LOCAL OFFSET" JUMP field as the status / error code.
+ * @GOSUB: conditional subroutine call - similar to @LOCAL_JUMP, but also saves
+ * return address in the Return Address register; subroutine calls
+ * cannot be nested.
+ * @RETURN: conditional subroutine return - similar to @LOCAL_JUMP, but the
+ * offset is taken from the Return Address register.
+ * @LOCAL_JUMP_INC: similar to @LOCAL_JUMP, but increment the register specified
+ * in "SRC_DST" JUMP field before evaluating the jump
+ * condition.
+ * @LOCAL_JUMP_DEC: similar to @LOCAL_JUMP, but decrement the register specified
+ * in "SRC_DST" JUMP field before evaluating the jump
+ * condition.
+ */
+enum rta_jump_type {
+ LOCAL_JUMP,
+ FAR_JUMP,
+ HALT,
+ HALT_STATUS,
+ GOSUB,
+ RETURN,
+ LOCAL_JUMP_INC,
+ LOCAL_JUMP_DEC
+};
+
+/**
+ * enum rta_jump_cond - How test conditions are evaluated by JUMP command
+ * @ALL_TRUE: perform action if ALL selected conditions are true
+ * @ALL_FALSE: perform action if ALL selected conditions are false
+ * @ANY_TRUE: perform action if ANY of the selected conditions is true
+ * @ANY_FALSE: perform action if ANY of the selected conditions is false
+ */
+enum rta_jump_cond {
+ ALL_TRUE,
+ ALL_FALSE,
+ ANY_TRUE,
+ ANY_FALSE
+};
+
+/**
+ * enum rta_share_type - Types of sharing for JOB_HDR and SHR_HDR commands
+ * @SHR_NEVER: nothing is shared; descriptors can execute in parallel (i.e. no
+ * dependencies are allowed between them).
+ * @SHR_WAIT: shared descriptor and keys are shared once the descriptor sets
+ * "OK to share" in DECO Control Register (DCTRL).
+ * @SHR_SERIAL: shared descriptor and keys are shared once the descriptor has
+ * completed.
+ * @SHR_ALWAYS: shared descriptor is shared anytime after the descriptor is
+ * loaded.
+ * @SHR_DEFER: valid only for JOB_HDR; sharing type is the one specified
+ * in the shared descriptor associated with the job descriptor.
+ */
+enum rta_share_type {
+ SHR_NEVER,
+ SHR_WAIT,
+ SHR_SERIAL,
+ SHR_ALWAYS,
+ SHR_DEFER
+};
+
+/**
+ * enum rta_data_type - Indicates how the data is provided and how to include it
+ * in the descriptor.
+ * @RTA_DATA_PTR: Data is in memory and accessed by reference; data address is a
+ * physical (bus) address.
+ * @RTA_DATA_IMM: Data is inlined in descriptor and accessed as immediate data;
+ * data address is a virtual address.
+ * @RTA_DATA_IMM_DMA: (AIOP only) Data is inlined in descriptor and accessed as
+ * immediate data; data address is a physical (bus) address
+ * in external memory and CDMA is programmed to transfer the
+ * data into descriptor buffer being built in Workspace Area.
+ */
+enum rta_data_type {
+ RTA_DATA_PTR = 1,
+ RTA_DATA_IMM,
+ RTA_DATA_IMM_DMA
+};
+
+/* Registers definitions */
+enum rta_regs {
+ /* CCB Registers */
+ CONTEXT1 = 1,
+ CONTEXT2,
+ KEY1,
+ KEY2,
+ KEY1SZ,
+ KEY2SZ,
+ ICV1SZ,
+ ICV2SZ,
+ DATA1SZ,
+ DATA2SZ,
+ ALTDS1,
+ IV1SZ,
+ AAD1SZ,
+ MODE1,
+ MODE2,
+ CCTRL,
+ DCTRL,
+ ICTRL,
+ CLRW,
+ CSTAT,
+ IFIFO,
+ NFIFO,
+ OFIFO,
+ PKASZ,
+ PKBSZ,
+ PKNSZ,
+ PKESZ,
+ /* DECO Registers */
+ MATH0,
+ MATH1,
+ MATH2,
+ MATH3,
+ DESCBUF,
+ JOBDESCBUF,
+ SHAREDESCBUF,
+ DPOVRD,
+ DJQDA,
+ DSTAT,
+ DPID,
+ DJQCTRL,
+ ALTSOURCE,
+ SEQINSZ,
+ SEQOUTSZ,
+ VSEQINSZ,
+ VSEQOUTSZ,
+ /* PKHA Registers */
+ PKA,
+ PKN,
+ PKA0,
+ PKA1,
+ PKA2,
+ PKA3,
+ PKB,
+ PKB0,
+ PKB1,
+ PKB2,
+ PKB3,
+ PKE,
+ /* Pseudo registers */
+ AB1,
+ AB2,
+ ABD,
+ IFIFOABD,
+ IFIFOAB1,
+ IFIFOAB2,
+ AFHA_SBOX,
+ MDHA_SPLIT_KEY,
+ JOBSRC,
+ ZERO,
+ ONE,
+ AAD1,
+ IV1,
+ IV2,
+ MSG1,
+ MSG2,
+ MSG,
+ MSG_CKSUM,
+ MSGOUTSNOOP,
+ MSGINSNOOP,
+ ICV1,
+ ICV2,
+ SKIP,
+ NONE,
+ RNGOFIFO,
+ RNG,
+ IDFNS,
+ ODFNS,
+ NFIFOSZ,
+ SZ,
+ PAD,
+ SAD1,
+ AAD2,
+ BIT_DATA,
+ NFIFO_SZL,
+ NFIFO_SZM,
+ NFIFO_L,
+ NFIFO_M,
+ SZL,
+ SZM,
+ JOBDESCBUF_EFF,
+ SHAREDESCBUF_EFF,
+ METADATA,
+ GTR,
+ STR,
+ OFIFO_SYNC,
+ MSGOUTSNOOP_ALT
+};
+
+/* Command flags */
+#define FLUSH1 BIT(0)
+#define LAST1 BIT(1)
+#define LAST2 BIT(2)
+#define IMMED BIT(3)
+#define SGF BIT(4)
+#define VLF BIT(5)
+#define EXT BIT(6)
+#define CONT BIT(7)
+#define SEQ BIT(8)
+#define AIDF BIT(9)
+#define FLUSH2 BIT(10)
+#define CLASS1 BIT(11)
+#define CLASS2 BIT(12)
+#define BOTH BIT(13)
+
+/**
+ * DCOPY - (AIOP only) command param is pointer to external memory
+ *
+ * CDMA must be used to transfer the key via DMA into Workspace Area.
+ * Valid only in combination with IMMED flag.
+ */
+#define DCOPY BIT(30)
+
+#define COPY BIT(31) /* command param is a pointer (not immediate);
+		      * valid only in combination with the IMMED flag
+		      */
+
+#define __COPY_MASK (COPY | DCOPY)
+
+/* SEQ IN/OUT PTR Command specific flags */
+#define RBS BIT(16)
+#define INL BIT(17)
+#define PRE BIT(18)
+#define RTO BIT(19)
+#define RJD BIT(20)
+#define SOP BIT(21)
+#define RST BIT(22)
+#define EWS BIT(23)
+
+#define ENC BIT(14) /* Encrypted Key */
+#define EKT BIT(15) /* AES CCM Encryption (default is
+ * AES ECB Encryption)
+ */
+#define TK BIT(16) /* Trusted Descriptor Key (default is
+ * Job Descriptor Key)
+ */
+#define NWB BIT(17) /* No Write Back Key */
+#define PTS BIT(18) /* Plaintext Store */
+
+/* HEADER Command specific flags */
+#define RIF BIT(16)
+#define DNR BIT(17)
+#define CIF BIT(18)
+#define PD BIT(19)
+#define RSMS BIT(20)
+#define TD BIT(21)
+#define MTD BIT(22)
+#define REO BIT(23)
+#define SHR BIT(24)
+#define SC BIT(25)
+/* Extended HEADER specific flags */
+#define DSV BIT(7)
+#define DSEL_MASK 0x00000007 /* DECO Select */
+#define FTD BIT(8)
+
+/* JUMP Command specific flags */
+#define NIFP BIT(20)
+#define NIP BIT(21)
+#define NOP BIT(22)
+#define NCP BIT(23)
+#define CALM BIT(24)
+
+#define MATH_Z BIT(25)
+#define MATH_N BIT(26)
+#define MATH_NV BIT(27)
+#define MATH_C BIT(28)
+#define PK_0 BIT(29)
+#define PK_GCD_1 BIT(30)
+#define PK_PRIME BIT(31)
+#define SELF BIT(0)
+#define SHRD BIT(1)
+#define JQP BIT(2)
+
+/* NFIFOADD specific flags */
+#define PAD_ZERO BIT(16)
+#define PAD_NONZERO BIT(17)
+#define PAD_INCREMENT BIT(18)
+#define PAD_RANDOM BIT(19)
+#define PAD_ZERO_N1 BIT(20)
+#define PAD_NONZERO_0 BIT(21)
+#define PAD_N1 BIT(23)
+#define PAD_NONZERO_N BIT(24)
+#define OC BIT(25)
+#define BM BIT(26)
+#define PR BIT(27)
+#define PS BIT(28)
+#define BP BIT(29)
+
+/* MOVE Command specific flags */
+#define WAITCOMP BIT(16)
+#define SIZE_WORD BIT(17)
+#define SIZE_BYTE BIT(18)
+#define SIZE_DWORD BIT(19)
+
+/* MATH command specific flags */
+#define IFB MATH_IFB
+#define NFU MATH_NFU
+#define STL MATH_STL
+#define SSEL MATH_SSEL
+#define SWP MATH_SWP
+#define IMMED2 BIT(31)
+
+/**
+ * struct program - descriptor buffer management structure
+ * @current_pc: current offset in descriptor
+ * @current_instruction: current instruction in descriptor
+ * @first_error_pc: offset of the first error in descriptor
+ * @start_pc: start offset in descriptor buffer
+ * @buffer: buffer carrying descriptor
+ * @shrhdr: shared descriptor header
+ * @jobhdr: job descriptor header
+ * @ps: pointer field size; if ps is true, pointers are 36 bits in
+ * length; if ps is false, pointers are 32 bits in length
+ * @bswap: if true, perform byte swap on a 4-byte boundary
+ */
+struct program {
+ unsigned int current_pc;
+ unsigned int current_instruction;
+ unsigned int first_error_pc;
+ unsigned int start_pc;
+ uint32_t *buffer;
+ uint32_t *shrhdr;
+ uint32_t *jobhdr;
+ bool ps;
+ bool bswap;
+};
+
+static inline void
+rta_program_cntxt_init(struct program *program,
+ uint32_t *buffer, unsigned int offset)
+{
+ program->current_pc = 0;
+ program->current_instruction = 0;
+ program->first_error_pc = 0;
+ program->start_pc = offset;
+ program->buffer = buffer;
+ program->shrhdr = NULL;
+ program->jobhdr = NULL;
+ program->ps = false;
+ program->bswap = false;
+}
+
+static inline int
+rta_program_finalize(struct program *program)
+{
+	/* A descriptor is usually not allowed to exceed 64 words */
+ if (program->current_pc > MAX_CAAM_DESCSIZE)
+ pr_warn("Descriptor Size exceeded max limit of 64 words\n");
+
+ /* Descriptor is erroneous */
+ if (program->first_error_pc) {
+ pr_err("Descriptor creation error\n");
+ return -EINVAL;
+ }
+
+ /* Update descriptor length in shared and job descriptor headers */
+ if (program->shrhdr != NULL)
+ *program->shrhdr |= program->bswap ?
+ swab32(program->current_pc) :
+ program->current_pc;
+ else if (program->jobhdr != NULL)
+ *program->jobhdr |= program->bswap ?
+ swab32(program->current_pc) :
+ program->current_pc;
+
+ return (int)program->current_pc;
+}
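+
+/*
+ * Usage sketch (illustrative only; the example_* helper is not part of this
+ * header): the typical build cycle - initialize the program context over a
+ * caller-provided word buffer, emit commands, then finalize to obtain the
+ * descriptor length in words (or a negative error code).
+ */
+static inline int
+example_build_descriptor(uint32_t *descbuf)
+{
+	struct program prg;
+	struct program *p = &prg;
+
+	rta_program_cntxt_init(p, descbuf, 0);
+	/* ... emit commands here, e.g. rta_word() or rta_operation() ... */
+	return rta_program_finalize(p);
+}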
+
+static inline unsigned int
+rta_program_set_36bit_addr(struct program *program)
+{
+ program->ps = true;
+ return program->current_pc;
+}
+
+static inline unsigned int
+rta_program_set_bswap(struct program *program)
+{
+ program->bswap = true;
+ return program->current_pc;
+}
+
+static inline void
+__rta_out32(struct program *program, uint32_t val)
+{
+ program->buffer[program->current_pc] = program->bswap ?
+ swab32(val) : val;
+ program->current_pc++;
+}
+
+static inline void
+__rta_out_be32(struct program *program, uint32_t val)
+{
+ program->buffer[program->current_pc] = cpu_to_be32(val);
+ program->current_pc++;
+}
+
+static inline void
+__rta_out_le32(struct program *program, uint32_t val)
+{
+ program->buffer[program->current_pc] = cpu_to_le32(val);
+ program->current_pc++;
+}
+
+static inline void
+__rta_out64(struct program *program, bool is_ext, uint64_t val)
+{
+ if (is_ext) {
+ /*
+ * Since we are guaranteed only a 4-byte alignment in the
+ * descriptor buffer, we have to do 2 x 32-bit (word) writes.
+ * For the order of the 2 words to be correct, we need to
+ * take into account the endianness of the CPU.
+ */
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ __rta_out32(program, program->bswap ? lower_32_bits(val) :
+ upper_32_bits(val));
+
+ __rta_out32(program, program->bswap ? upper_32_bits(val) :
+ lower_32_bits(val));
+#else
+ __rta_out32(program, program->bswap ? upper_32_bits(val) :
+ lower_32_bits(val));
+
+ __rta_out32(program, program->bswap ? lower_32_bits(val) :
+ upper_32_bits(val));
+#endif
+ } else {
+ __rta_out32(program, lower_32_bits(val));
+ }
+}
+
+static inline unsigned int
+rta_word(struct program *program, uint32_t val)
+{
+ unsigned int start_pc = program->current_pc;
+
+ __rta_out32(program, val);
+
+ return start_pc;
+}
+
+static inline unsigned int
+rta_dword(struct program *program, uint64_t val)
+{
+ unsigned int start_pc = program->current_pc;
+
+ __rta_out64(program, true, val);
+
+ return start_pc;
+}
+
+static inline uint32_t
+inline_flags(enum rta_data_type data_type)
+{
+ switch (data_type) {
+ case RTA_DATA_PTR:
+ return 0;
+ case RTA_DATA_IMM:
+ return IMMED | COPY;
+ case RTA_DATA_IMM_DMA:
+ return IMMED | DCOPY;
+ default:
+ /* warn and default to RTA_DATA_PTR */
+ pr_warn("RTA: defaulting to RTA_DATA_PTR parameter type\n");
+ return 0;
+ }
+}
+
+static inline unsigned int
+rta_copy_data(struct program *program, uint8_t *data, unsigned int length)
+{
+ unsigned int i;
+ unsigned int start_pc = program->current_pc;
+ uint8_t *tmp = (uint8_t *)&program->buffer[program->current_pc];
+
+ for (i = 0; i < length; i++)
+ *tmp++ = data[i];
+ program->current_pc += (length + 3) / 4;
+
+ return start_pc;
+}
+
+#if defined(__EWL__) && defined(AIOP)
+static inline void
+__rta_dma_data(void *ws_dst, uint64_t ext_address, uint16_t size)
+{ cdma_read(ws_dst, ext_address, size); }
+#else
+static inline void
+__rta_dma_data(void *ws_dst __maybe_unused,
+ uint64_t ext_address __maybe_unused,
+ uint16_t size __maybe_unused)
+{ pr_warn("RTA: DCOPY not supported, DMA will be skipped\n"); }
+#endif /* defined(__EWL__) && defined(AIOP) */
+
+static inline void
+__rta_inline_data(struct program *program, uint64_t data,
+ uint32_t copy_data, uint32_t length)
+{
+ if (!copy_data) {
+ __rta_out64(program, length > 4, data);
+ } else if (copy_data & COPY) {
+ uint8_t *tmp = (uint8_t *)&program->buffer[program->current_pc];
+ uint32_t i;
+
+ for (i = 0; i < length; i++)
+ *tmp++ = ((uint8_t *)(uintptr_t)data)[i];
+ program->current_pc += ((length + 3) / 4);
+ } else if (copy_data & DCOPY) {
+ __rta_dma_data(&program->buffer[program->current_pc], data,
+ (uint16_t)length);
+ program->current_pc += ((length + 3) / 4);
+ }
+}
+
+static inline unsigned int
+rta_desc_len(uint32_t *buffer)
+{
+ if ((*buffer & CMD_MASK) == CMD_DESC_HDR)
+ return *buffer & HDR_DESCLEN_MASK;
+ else
+ return *buffer & HDR_DESCLEN_SHR_MASK;
+}
+
+static inline unsigned int
+rta_desc_bytes(uint32_t *buffer)
+{
+ return (unsigned int)(rta_desc_len(buffer) * CAAM_CMD_SZ);
+}
+
+/**
+ * split_key_len - Compute MDHA split key length for a given algorithm
+ * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* or
+ * OP_PCLID_DKP_* - MD5, SHA1, SHA224, SHA256, SHA384, SHA512.
+ *
+ * Return: MDHA split key length
+ */
+static inline uint32_t
+split_key_len(uint32_t hash)
+{
+ /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
+ static const uint8_t mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
+ uint32_t idx;
+
+ idx = (hash & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT;
+
+ return (uint32_t)(mdpadlen[idx] * 2);
+}
+
+/**
+ * split_key_pad_len - Compute MDHA split key pad length for a given algorithm
+ * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
+ * SHA224, SHA256, SHA384, SHA512.
+ *
+ * Return: MDHA split key pad length
+ */
+static inline uint32_t
+split_key_pad_len(uint32_t hash)
+{
+ return ALIGN(split_key_len(hash), 16);
+}
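+
+/*
+ * Worked example (illustrative only): for SHA-256 the MDHA pad is 32 bytes,
+ * so split_key_len(OP_ALG_ALGSEL_SHA256) yields 64 and split_key_pad_len()
+ * also yields 64, since 64 is already a multiple of 16.
+ */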
+
+static inline unsigned int
+rta_set_label(struct program *program)
+{
+ return program->current_pc + program->start_pc;
+}
+
+static inline int
+rta_patch_move(struct program *program, int line, unsigned int new_ref)
+{
+ uint32_t opcode;
+ bool bswap = program->bswap;
+
+ if (line < 0)
+ return -EINVAL;
+
+ opcode = bswap ? swab32(program->buffer[line]) : program->buffer[line];
+
+ opcode &= (uint32_t)~MOVE_OFFSET_MASK;
+ opcode |= (new_ref << (MOVE_OFFSET_SHIFT + 2)) & MOVE_OFFSET_MASK;
+ program->buffer[line] = bswap ? swab32(opcode) : opcode;
+
+ return 0;
+}
+
+static inline int
+rta_patch_jmp(struct program *program, int line, unsigned int new_ref)
+{
+ uint32_t opcode;
+ bool bswap = program->bswap;
+
+ if (line < 0)
+ return -EINVAL;
+
+ opcode = bswap ? swab32(program->buffer[line]) : program->buffer[line];
+
+ opcode &= (uint32_t)~JUMP_OFFSET_MASK;
+ opcode |= (new_ref - (line + program->start_pc)) & JUMP_OFFSET_MASK;
+ program->buffer[line] = bswap ? swab32(opcode) : opcode;
+
+ return 0;
+}
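+
+/*
+ * Usage sketch (illustrative only; the example_* helper is not part of this
+ * header and jump_line is assumed to be the buffer line recorded when the
+ * forward-referencing JUMP command was emitted): take a label at the current
+ * position and patch the earlier JUMP to target it.
+ */
+static inline int
+example_patch_forward_jump(struct program *p, int jump_line)
+{
+	unsigned int target = rta_set_label(p);
+
+	return rta_patch_jmp(p, jump_line, target);
+}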
+
+static inline int
+rta_patch_header(struct program *program, int line, unsigned int new_ref)
+{
+ uint32_t opcode;
+ bool bswap = program->bswap;
+
+ if (line < 0)
+ return -EINVAL;
+
+ opcode = bswap ? swab32(program->buffer[line]) : program->buffer[line];
+
+ opcode &= (uint32_t)~HDR_START_IDX_MASK;
+ opcode |= (new_ref << HDR_START_IDX_SHIFT) & HDR_START_IDX_MASK;
+ program->buffer[line] = bswap ? swab32(opcode) : opcode;
+
+ return 0;
+}
+
+static inline int
+rta_patch_load(struct program *program, int line, unsigned int new_ref)
+{
+ uint32_t opcode;
+ bool bswap = program->bswap;
+
+ if (line < 0)
+ return -EINVAL;
+
+ opcode = (bswap ? swab32(program->buffer[line]) :
+ program->buffer[line]) & (uint32_t)~LDST_OFFSET_MASK;
+
+ if (opcode & (LDST_SRCDST_WORD_DESCBUF | LDST_CLASS_DECO))
+ opcode |= (new_ref << LDST_OFFSET_SHIFT) & LDST_OFFSET_MASK;
+ else
+ opcode |= (new_ref << (LDST_OFFSET_SHIFT + 2)) &
+ LDST_OFFSET_MASK;
+
+ program->buffer[line] = bswap ? swab32(opcode) : opcode;
+
+ return 0;
+}
+
+static inline int
+rta_patch_store(struct program *program, int line, unsigned int new_ref)
+{
+ uint32_t opcode;
+ bool bswap = program->bswap;
+
+ if (line < 0)
+ return -EINVAL;
+
+ opcode = bswap ? swab32(program->buffer[line]) : program->buffer[line];
+
+ opcode &= (uint32_t)~LDST_OFFSET_MASK;
+
+ switch (opcode & LDST_SRCDST_MASK) {
+ case LDST_SRCDST_WORD_DESCBUF:
+ case LDST_SRCDST_WORD_DESCBUF_JOB:
+ case LDST_SRCDST_WORD_DESCBUF_SHARED:
+ case LDST_SRCDST_WORD_DESCBUF_JOB_WE:
+ case LDST_SRCDST_WORD_DESCBUF_SHARED_WE:
+ opcode |= ((new_ref) << LDST_OFFSET_SHIFT) & LDST_OFFSET_MASK;
+ break;
+ default:
+ opcode |= (new_ref << (LDST_OFFSET_SHIFT + 2)) &
+ LDST_OFFSET_MASK;
+ }
+
+ program->buffer[line] = bswap ? swab32(opcode) : opcode;
+
+ return 0;
+}
+
+static inline int
+rta_patch_raw(struct program *program, int line, unsigned int mask,
+ unsigned int new_val)
+{
+ uint32_t opcode;
+ bool bswap = program->bswap;
+
+ if (line < 0)
+ return -EINVAL;
+
+ opcode = bswap ? swab32(program->buffer[line]) : program->buffer[line];
+
+ opcode &= (uint32_t)~mask;
+ opcode |= new_val & mask;
+ program->buffer[line] = bswap ? swab32(opcode) : opcode;
+
+ return 0;
+}
+
+static inline int
+__rta_map_opcode(uint32_t name, const uint32_t (*map_table)[2],
+ unsigned int num_of_entries, uint32_t *val)
+{
+ unsigned int i;
+
+ for (i = 0; i < num_of_entries; i++)
+ if (map_table[i][0] == name) {
+ *val = map_table[i][1];
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline void
+__rta_map_flags(uint32_t flags, const uint32_t (*flags_table)[2],
+ unsigned int num_of_entries, uint32_t *opcode)
+{
+ unsigned int i;
+
+ for (i = 0; i < num_of_entries; i++) {
+ if (flags_table[i][0] & flags)
+ *opcode |= flags_table[i][1];
+ }
+}
+
+#endif /* __RTA_SEC_RUN_TIME_ASM_H__ */
diff --git a/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/seq_in_out_ptr_cmd.h b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/seq_in_out_ptr_cmd.h
new file mode 100644
index 00000000..ceb6a871
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/seq_in_out_ptr_cmd.h
@@ -0,0 +1,175 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ */
+
+#ifndef __RTA_SEQ_IN_OUT_PTR_CMD_H__
+#define __RTA_SEQ_IN_OUT_PTR_CMD_H__
+
+extern enum rta_sec_era rta_sec_era;
+
+/* Allowed SEQ IN PTR flags for each SEC Era. */
+static const uint32_t seq_in_ptr_flags[] = {
+ RBS | INL | SGF | PRE | EXT | RTO,
+ RBS | INL | SGF | PRE | EXT | RTO | RJD,
+ RBS | INL | SGF | PRE | EXT | RTO | RJD,
+ RBS | INL | SGF | PRE | EXT | RTO | RJD,
+ RBS | INL | SGF | PRE | EXT | RTO | RJD | SOP,
+ RBS | INL | SGF | PRE | EXT | RTO | RJD | SOP,
+ RBS | INL | SGF | PRE | EXT | RTO | RJD | SOP,
+ RBS | INL | SGF | PRE | EXT | RTO | RJD | SOP
+};
+
+/* Allowed SEQ OUT PTR flags for each SEC Era. */
+static const uint32_t seq_out_ptr_flags[] = {
+ SGF | PRE | EXT,
+ SGF | PRE | EXT | RTO,
+ SGF | PRE | EXT | RTO,
+ SGF | PRE | EXT | RTO,
+ SGF | PRE | EXT | RTO | RST | EWS,
+ SGF | PRE | EXT | RTO | RST | EWS,
+ SGF | PRE | EXT | RTO | RST | EWS,
+ SGF | PRE | EXT | RTO | RST | EWS
+};
+
+static inline int
+rta_seq_in_ptr(struct program *program, uint64_t src,
+ uint32_t length, uint32_t flags)
+{
+ uint32_t opcode = CMD_SEQ_IN_PTR;
+ unsigned int start_pc = program->current_pc;
+ int ret = -EINVAL;
+
+ /* Parameters checking */
+ if ((flags & RTO) && (flags & PRE)) {
+ pr_err("SEQ IN PTR: Invalid usage of RTO and PRE flags\n");
+ goto err;
+ }
+ if (flags & ~seq_in_ptr_flags[rta_sec_era]) {
+ pr_err("SEQ IN PTR: Flag(s) not supported by SEC Era %d\n",
+ USER_SEC_ERA(rta_sec_era));
+ goto err;
+ }
+ if ((flags & INL) && (flags & RJD)) {
+ pr_err("SEQ IN PTR: Invalid usage of INL and RJD flags\n");
+ goto err;
+ }
+ if ((src) && (flags & (SOP | RTO | PRE))) {
+ pr_err("SEQ IN PTR: Invalid usage of RTO or PRE flag\n");
+ goto err;
+ }
+ if ((flags & SOP) && (flags & (RBS | PRE | RTO | EXT))) {
+ pr_err("SEQ IN PTR: Invalid usage of SOP and (RBS or PRE or RTO or EXT) flags\n");
+ goto err;
+ }
+
+ /* write flag fields */
+ if (flags & RBS)
+ opcode |= SQIN_RBS;
+ if (flags & INL)
+ opcode |= SQIN_INL;
+ if (flags & SGF)
+ opcode |= SQIN_SGF;
+ if (flags & PRE)
+ opcode |= SQIN_PRE;
+ if (flags & RTO)
+ opcode |= SQIN_RTO;
+ if (flags & RJD)
+ opcode |= SQIN_RJD;
+ if (flags & SOP)
+ opcode |= SQIN_SOP;
+ if ((length >> 16) || (flags & EXT)) {
+ if (flags & SOP) {
+ pr_err("SEQ IN PTR: Invalid usage of SOP and EXT flags\n");
+ goto err;
+ }
+
+ opcode |= SQIN_EXT;
+ } else {
+ opcode |= length & SQIN_LEN_MASK;
+ }
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+
+ /* write pointer or immediate data field */
+ if (!(opcode & (SQIN_PRE | SQIN_RTO | SQIN_SOP)))
+ __rta_out64(program, program->ps, src);
+
+ /* write extended length field */
+ if (opcode & SQIN_EXT)
+ __rta_out32(program, length);
+
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return ret;
+}
+
+static inline int
+rta_seq_out_ptr(struct program *program, uint64_t dst,
+ uint32_t length, uint32_t flags)
+{
+ uint32_t opcode = CMD_SEQ_OUT_PTR;
+ unsigned int start_pc = program->current_pc;
+ int ret = -EINVAL;
+
+ /* Parameters checking */
+ if (flags & ~seq_out_ptr_flags[rta_sec_era]) {
+ pr_err("SEQ OUT PTR: Flag(s) not supported by SEC Era %d\n",
+ USER_SEC_ERA(rta_sec_era));
+ goto err;
+ }
+ if ((flags & RTO) && (flags & PRE)) {
+ pr_err("SEQ OUT PTR: Invalid usage of RTO and PRE flags\n");
+ goto err;
+ }
+ if ((dst) && (flags & (RTO | PRE))) {
+ pr_err("SEQ OUT PTR: Invalid usage of RTO or PRE flag\n");
+ goto err;
+ }
+ if ((flags & RST) && !(flags & RTO)) {
+ pr_err("SEQ OUT PTR: RST flag must be used with RTO flag\n");
+ goto err;
+ }
+
+ /* write flag fields */
+ if (flags & SGF)
+ opcode |= SQOUT_SGF;
+ if (flags & PRE)
+ opcode |= SQOUT_PRE;
+ if (flags & RTO)
+ opcode |= SQOUT_RTO;
+ if (flags & RST)
+ opcode |= SQOUT_RST;
+ if (flags & EWS)
+ opcode |= SQOUT_EWS;
+ if ((length >> 16) || (flags & EXT))
+ opcode |= SQOUT_EXT;
+ else
+ opcode |= length & SQOUT_LEN_MASK;
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+
+ /* write pointer or immediate data field */
+ if (!(opcode & (SQOUT_PRE | SQOUT_RTO)))
+ __rta_out64(program, program->ps, dst);
+
+ /* write extended length field */
+ if (opcode & SQOUT_EXT)
+ __rta_out32(program, length);
+
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return ret;
+}
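+
+/*
+ * Usage sketch (illustrative only; the example_* helper and its addresses
+ * are hypothetical): point the input and output sequences at two flat
+ * buffers of "len" bytes each, with no extra flags.
+ */
+static inline void
+example_seq_ptrs(struct program *p, uint64_t in_addr, uint64_t out_addr,
+		 uint32_t len)
+{
+	rta_seq_in_ptr(p, in_addr, len, 0);
+	rta_seq_out_ptr(p, out_addr, len, 0);
+}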
+
+#endif /* __RTA_SEQ_IN_OUT_PTR_CMD_H__ */
diff --git a/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/signature_cmd.h b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/signature_cmd.h
new file mode 100644
index 00000000..4f694ac2
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/signature_cmd.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ */
+
+#ifndef __RTA_SIGNATURE_CMD_H__
+#define __RTA_SIGNATURE_CMD_H__
+
+static inline int
+rta_signature(struct program *program, uint32_t sign_type)
+{
+ uint32_t opcode = CMD_SIGNATURE;
+ unsigned int start_pc = program->current_pc;
+
+ switch (sign_type) {
+ case (SIGN_TYPE_FINAL):
+ case (SIGN_TYPE_FINAL_RESTORE):
+ case (SIGN_TYPE_FINAL_NONZERO):
+ case (SIGN_TYPE_IMM_2):
+ case (SIGN_TYPE_IMM_3):
+ case (SIGN_TYPE_IMM_4):
+ opcode |= sign_type;
+ break;
+ default:
+ pr_err("SIGNATURE Command: Invalid type selection\n");
+ goto err;
+ }
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return -EINVAL;
+}
+
+#endif /* __RTA_SIGNATURE_CMD_H__ */
diff --git a/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/store_cmd.h b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/store_cmd.h
new file mode 100644
index 00000000..8b58e544
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/hw/rta/store_cmd.h
@@ -0,0 +1,152 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ */
+
+#ifndef __RTA_STORE_CMD_H__
+#define __RTA_STORE_CMD_H__
+
+extern enum rta_sec_era rta_sec_era;
+
+static const uint32_t store_src_table[][2] = {
+/*1*/ { KEY1SZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_KEYSZ_REG },
+ { KEY2SZ, LDST_CLASS_2_CCB | LDST_SRCDST_WORD_KEYSZ_REG },
+ { DJQDA, LDST_CLASS_DECO | LDST_SRCDST_WORD_DECO_JQDAR },
+ { MODE1, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_MODE_REG },
+ { MODE2, LDST_CLASS_2_CCB | LDST_SRCDST_WORD_MODE_REG },
+ { DJQCTRL, LDST_CLASS_DECO | LDST_SRCDST_WORD_DECO_JQCTRL },
+ { DATA1SZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_DATASZ_REG },
+ { DATA2SZ, LDST_CLASS_2_CCB | LDST_SRCDST_WORD_DATASZ_REG },
+ { DSTAT, LDST_CLASS_DECO | LDST_SRCDST_WORD_DECO_STAT },
+ { ICV1SZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_ICVSZ_REG },
+ { ICV2SZ, LDST_CLASS_2_CCB | LDST_SRCDST_WORD_ICVSZ_REG },
+ { DPID, LDST_CLASS_DECO | LDST_SRCDST_WORD_PID },
+ { CCTRL, LDST_SRCDST_WORD_CHACTRL },
+ { ICTRL, LDST_SRCDST_WORD_IRQCTRL },
+ { CLRW, LDST_SRCDST_WORD_CLRW },
+ { MATH0, LDST_CLASS_DECO | LDST_SRCDST_WORD_DECO_MATH0 },
+ { CSTAT, LDST_SRCDST_WORD_STAT },
+ { MATH1, LDST_CLASS_DECO | LDST_SRCDST_WORD_DECO_MATH1 },
+ { MATH2, LDST_CLASS_DECO | LDST_SRCDST_WORD_DECO_MATH2 },
+ { AAD1SZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_DECO_AAD_SZ },
+ { MATH3, LDST_CLASS_DECO | LDST_SRCDST_WORD_DECO_MATH3 },
+ { IV1SZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_CLASS1_IV_SZ },
+ { PKASZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_PKHA_A_SZ },
+ { PKBSZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_PKHA_B_SZ },
+ { PKESZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_PKHA_E_SZ },
+ { PKNSZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_PKHA_N_SZ },
+ { CONTEXT1, LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT },
+ { CONTEXT2, LDST_CLASS_2_CCB | LDST_SRCDST_BYTE_CONTEXT },
+ { DESCBUF, LDST_CLASS_DECO | LDST_SRCDST_WORD_DESCBUF },
+/*30*/ { JOBDESCBUF, LDST_CLASS_DECO | LDST_SRCDST_WORD_DESCBUF_JOB },
+ { SHAREDESCBUF, LDST_CLASS_DECO | LDST_SRCDST_WORD_DESCBUF_SHARED },
+/*32*/ { JOBDESCBUF_EFF, LDST_CLASS_DECO |
+ LDST_SRCDST_WORD_DESCBUF_JOB_WE },
+ { SHAREDESCBUF_EFF, LDST_CLASS_DECO |
+ LDST_SRCDST_WORD_DESCBUF_SHARED_WE },
+/*34*/ { GTR, LDST_CLASS_DECO | LDST_SRCDST_WORD_GTR },
+ { STR, LDST_CLASS_DECO | LDST_SRCDST_WORD_STR }
+};
+
+/*
+ * Allowed STORE sources for each SEC ERA.
+ * Values represent the number of entries from store_src_table[] that are
+ * supported.
+ */
+static const unsigned int store_src_table_sz[] = {29, 31, 33, 33,
+ 33, 33, 35, 35};
+
+static inline int
+rta_store(struct program *program, uint64_t src,
+ uint16_t offset, uint64_t dst, uint32_t length,
+ uint32_t flags)
+{
+ uint32_t opcode = 0, val;
+ int ret = -EINVAL;
+ unsigned int start_pc = program->current_pc;
+
+ if (flags & SEQ)
+ opcode = CMD_SEQ_STORE;
+ else
+ opcode = CMD_STORE;
+
+ /* parameters check */
+ if ((flags & IMMED) && (flags & SGF)) {
+ pr_err("STORE: Invalid flag. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ goto err;
+ }
+ if ((flags & IMMED) && (offset != 0)) {
+ pr_err("STORE: Invalid flag. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ goto err;
+ }
+
+ if ((flags & SEQ) && ((src == JOBDESCBUF) || (src == SHAREDESCBUF) ||
+ (src == JOBDESCBUF_EFF) ||
+ (src == SHAREDESCBUF_EFF))) {
+ pr_err("STORE: Invalid SRC type. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ goto err;
+ }
+
+ if (flags & IMMED)
+ opcode |= LDST_IMM;
+
+ if ((flags & SGF) || (flags & VLF))
+ opcode |= LDST_VLF;
+
+	/*
+	 * The source of the data to be stored can be specified as:
+	 * - a register location, set in the src field [9-15];
+	 * - if the IMMED flag is set, data set in the value field [0-31];
+	 *   the user can give this value either as an actual value or as a
+	 *   pointer to the data
+	 */
+ if (!(flags & IMMED)) {
+ ret = __rta_map_opcode((uint32_t)src, store_src_table,
+ store_src_table_sz[rta_sec_era], &val);
+ if (ret < 0) {
+ pr_err("STORE: Invalid source. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+ opcode |= val;
+ }
+
+ /* DESC BUFFER: length / offset values are specified in 4-byte words */
+ if ((src == DESCBUF) || (src == JOBDESCBUF) || (src == SHAREDESCBUF) ||
+ (src == JOBDESCBUF_EFF) || (src == SHAREDESCBUF_EFF)) {
+ opcode |= (length >> 2);
+ opcode |= (uint32_t)((offset >> 2) << LDST_OFFSET_SHIFT);
+ } else {
+ opcode |= length;
+ opcode |= (uint32_t)(offset << LDST_OFFSET_SHIFT);
+ }
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+
+ if ((src == JOBDESCBUF) || (src == SHAREDESCBUF) ||
+ (src == JOBDESCBUF_EFF) || (src == SHAREDESCBUF_EFF))
+ return (int)start_pc;
+
+	/* for STORE, write a pointer to where the data will be stored, if needed */
+ if (!(flags & SEQ))
+ __rta_out64(program, program->ps, dst);
+
+ /* for IMMED data, place the data here */
+ if (flags & IMMED)
+ __rta_inline_data(program, src, flags & __COPY_MASK, length);
+
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return ret;
+}
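+
+/*
+ * Illustrative sketch (not part of RTA, names are hypothetical): store 32
+ * bytes of the Class 1 Context register to an external buffer. 'p' is
+ * assumed to be a descriptor program under construction and 'ctx_dma' a
+ * DMA-able address; no SEQ/IMMED flags are used, so a destination pointer
+ * is emitted after the command word.
+ */
+static inline int
+example_store_context1(struct program *p, uint64_t ctx_dma)
+{
+	return rta_store(p, CONTEXT1, 0, ctx_dma, 32, 0);
+}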
+
+#endif /* __RTA_STORE_CMD_H__ */
diff --git a/src/spdk/dpdk/drivers/crypto/dpaa2_sec/mc/dpseci.c b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/mc/dpseci.c
new file mode 100644
index 00000000..de8ca970
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/mc/dpseci.c
@@ -0,0 +1,643 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ */
+#include <fsl_mc_sys.h>
+#include <fsl_mc_cmd.h>
+#include <fsl_dpseci.h>
+#include <fsl_dpseci_cmd.h>
+
+/**
+ * dpseci_open() - Open a control session for the specified object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dpseci_id: DPSECI unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dpseci_create() function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent commands for
+ * this specific object.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpseci_open(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ int dpseci_id,
+ uint16_t *token)
+{
+ struct dpseci_cmd_open *cmd_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_OPEN,
+ cmd_flags,
+ 0);
+ cmd_params = (struct dpseci_cmd_open *)cmd.params;
+ cmd_params->dpseci_id = cpu_to_le32(dpseci_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = mc_cmd_hdr_read_token(&cmd);
+
+ return 0;
+}
+
+/**
+ * dpseci_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ *
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpseci_close(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CLOSE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
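+
+/*
+ * Illustrative usage sketch (not part of the MC driver, name is
+ * hypothetical): open a control session on an existing DPSECI object and
+ * close it again. 'mc_io' is assumed to be an initialized MC portal and
+ * 'dpseci_id' an object ID from the DPL; cmd_flags of 0 selects the
+ * default command behaviour.
+ */
+static inline int example_dpseci_session(struct fsl_mc_io *mc_io,
+					 int dpseci_id)
+{
+	uint16_t token;
+	int err;
+
+	err = dpseci_open(mc_io, 0, dpseci_id, &token);
+	if (err)
+		return err;
+
+	/* ... issue further dpseci_*() commands using 'token' here ... */
+
+	return dpseci_close(mc_io, 0, token);
+}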
+
+/**
+ * dpseci_create() - Create the DPSECI object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @cfg: Configuration structure
+ * @obj_id: Returned object id
+ *
+ * Create the DPSECI object, allocate required resources and
+ * perform required initialization.
+ *
+ * The object can be created either by declaring it in the
+ * DPL file, or by calling this function.
+ *
+ * The function accepts an authentication token of a parent
+ * container that this object should be assigned to. The token
+ * can be '0' so the object will be assigned to the default container.
+ * The newly created object can be opened with the returned
+ * object id and using the container's associated tokens and MC portals.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpseci_create(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ const struct dpseci_cfg *cfg,
+ uint32_t *obj_id)
+{
+ struct dpseci_cmd_create *cmd_params;
+ struct mc_command cmd = { 0 };
+ int err, i;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CREATE,
+ cmd_flags,
+ dprc_token);
+ cmd_params = (struct dpseci_cmd_create *)cmd.params;
+ for (i = 0; i < DPSECI_PRIO_NUM; i++)
+ cmd_params->priorities[i] = cfg->priorities[i];
+ cmd_params->num_tx_queues = cfg->num_tx_queues;
+ cmd_params->num_rx_queues = cfg->num_rx_queues;
+ cmd_params->options = cfg->options;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *obj_id = mc_cmd_read_object_id(&cmd);
+
+ return 0;
+}
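+
+/*
+ * Illustrative sketch (not part of the MC driver, name is hypothetical):
+ * create a DPSECI object with two Tx/Rx queue pairs at priorities 1 and 2
+ * in the default container. 'mc_io' is assumed to be an initialized MC
+ * portal.
+ */
+static inline int example_dpseci_create(struct fsl_mc_io *mc_io,
+					uint32_t *obj_id)
+{
+	struct dpseci_cfg cfg = {
+		.options = 0,
+		.num_tx_queues = 2,
+		.num_rx_queues = 2,
+		.priorities = {1, 2},
+	};
+
+	return dpseci_create(mc_io, 0, 0, &cfg, obj_id);
+}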
+
+/**
+ * dpseci_destroy() - Destroy the DPSECI object and release all its resources.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @object_id: The object id; it must be a valid id within the container that
+ * created this object;
+ *
+ * The function accepts the authentication token of the parent container that
+ * created the object (not the one that currently owns the object). The object
+ * is searched within parent using the provided 'object_id'.
+ * All tokens to the object must be closed before calling destroy.
+ *
+ * Return: '0' on Success; error code otherwise.
+ */
+int dpseci_destroy(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ uint32_t object_id)
+{
+ struct dpseci_cmd_destroy *cmd_params;
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DESTROY,
+ cmd_flags,
+ dprc_token);
+ cmd_params = (struct dpseci_cmd_destroy *)cmd.params;
+ cmd_params->dpseci_id = cpu_to_le32(object_id);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpseci_enable() - Enable the DPSECI, allow sending and receiving frames.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpseci_enable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_ENABLE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpseci_disable() - Disable the DPSECI, stop sending and receiving frames.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpseci_disable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DISABLE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpseci_is_enabled() - Check if the DPSECI is enabled.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @en: Returns '1' if object is enabled; '0' otherwise
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpseci_is_enabled(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int *en)
+{
+ struct dpseci_rsp_is_enabled *rsp_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_IS_ENABLED,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpseci_rsp_is_enabled *)cmd.params;
+ *en = dpseci_get_field(rsp_params->en, ENABLE);
+
+ return 0;
+}
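+
+/*
+ * Illustrative sketch (not part of the MC driver, name is hypothetical):
+ * enable the object, confirm the state with dpseci_is_enabled() and
+ * disable it again. 'token' is assumed to come from a prior dpseci_open().
+ */
+static inline int example_dpseci_toggle(struct fsl_mc_io *mc_io,
+					uint16_t token)
+{
+	int en = 0;
+	int err;
+
+	err = dpseci_enable(mc_io, 0, token);
+	if (err)
+		return err;
+
+	err = dpseci_is_enabled(mc_io, 0, token, &en);
+	if (err)
+		return err;
+	if (!en)
+		return -1;
+
+	return dpseci_disable(mc_io, 0, token);
+}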
+
+/**
+ * dpseci_reset() - Reset the DPSECI, returns the object to initial state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpseci_reset(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_RESET,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpseci_get_attributes() - Retrieve DPSECI attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @attr: Returned object's attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpseci_get_attributes(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpseci_attr *attr)
+{
+ struct dpseci_rsp_get_attr *rsp_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_ATTR,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpseci_rsp_get_attr *)cmd.params;
+ attr->id = le32_to_cpu(rsp_params->id);
+ attr->options = rsp_params->options;
+ attr->num_tx_queues = rsp_params->num_tx_queues;
+ attr->num_rx_queues = rsp_params->num_rx_queues;
+
+ return 0;
+}
+
+/**
+ * dpseci_set_rx_queue() - Set Rx queue configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @queue: Select the queue relative to number of
+ * priorities configured at DPSECI creation; use
+ * DPSECI_ALL_QUEUES to configure all Rx queues identically.
+ * @cfg: Rx queue configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpseci_set_rx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t queue,
+ const struct dpseci_rx_queue_cfg *cfg)
+{
+ struct dpseci_cmd_set_rx_queue *cmd_params;
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_RX_QUEUE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpseci_cmd_set_rx_queue *)cmd.params;
+ cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
+ cmd_params->dest_priority = cfg->dest_cfg.priority;
+ cmd_params->queue = queue;
+ cmd_params->user_ctx = cpu_to_le64(cfg->user_ctx);
+ cmd_params->options = cpu_to_le32(cfg->options);
+ dpseci_set_field(cmd_params->dest_type,
+ DEST_TYPE,
+ cfg->dest_cfg.dest_type);
+ dpseci_set_field(cmd_params->order_preservation_en,
+ ORDER_PRESERVATION,
+ cfg->order_preservation_en);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
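+
+/*
+ * Illustrative sketch (not part of the MC driver, name is hypothetical):
+ * attach a user context to every Rx queue while leaving the destination
+ * untouched. 'token' is assumed to come from dpseci_open();
+ * DPSECI_ALL_QUEUES and DPSECI_QUEUE_OPT_USER_CTX come from fsl_dpseci.h.
+ */
+static inline int example_dpseci_set_rx_ctx(struct fsl_mc_io *mc_io,
+					    uint16_t token,
+					    uint64_t user_ctx)
+{
+	struct dpseci_rx_queue_cfg cfg = {
+		.options = DPSECI_QUEUE_OPT_USER_CTX,
+		.user_ctx = user_ctx,
+	};
+
+	return dpseci_set_rx_queue(mc_io, 0, token, DPSECI_ALL_QUEUES, &cfg);
+}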
+
+/**
+ * dpseci_get_rx_queue() - Retrieve Rx queue attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @queue: Select the queue relative to number of
+ * priorities configured at DPSECI creation
+ * @attr: Returned Rx queue attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpseci_get_rx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t queue,
+ struct dpseci_rx_queue_attr *attr)
+{
+ struct dpseci_rsp_get_rx_queue *rsp_params;
+ struct dpseci_cmd_get_queue *cmd_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_RX_QUEUE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpseci_cmd_get_queue *)cmd.params;
+ cmd_params->queue = queue;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpseci_rsp_get_rx_queue *)cmd.params;
+ attr->user_ctx = le64_to_cpu(rsp_params->user_ctx);
+ attr->fqid = le32_to_cpu(rsp_params->fqid);
+ attr->dest_cfg.dest_id = le32_to_cpu(rsp_params->dest_id);
+ attr->dest_cfg.priority = rsp_params->dest_priority;
+ attr->dest_cfg.dest_type =
+ dpseci_get_field(rsp_params->dest_type,
+ DEST_TYPE);
+ attr->order_preservation_en =
+ dpseci_get_field(rsp_params->order_preservation_en,
+ ORDER_PRESERVATION);
+
+ return 0;
+}
+
+/**
+ * dpseci_get_tx_queue() - Retrieve Tx queue attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @queue: Select the queue relative to number of
+ * priorities configured at DPSECI creation
+ * @attr: Returned Tx queue attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpseci_get_tx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t queue,
+ struct dpseci_tx_queue_attr *attr)
+{
+ struct dpseci_rsp_get_tx_queue *rsp_params;
+ struct dpseci_cmd_get_queue *cmd_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_TX_QUEUE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpseci_cmd_get_queue *)cmd.params;
+ cmd_params->queue = queue;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpseci_rsp_get_tx_queue *)cmd.params;
+ attr->fqid = le32_to_cpu(rsp_params->fqid);
+ attr->priority = rsp_params->priority;
+
+ return 0;
+}
+
+/**
+ * dpseci_get_sec_attr() - Retrieve SEC accelerator attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @attr: Returned SEC attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpseci_get_sec_attr(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpseci_sec_attr *attr)
+{
+ struct dpseci_rsp_get_sec_attr *rsp_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_ATTR,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpseci_rsp_get_sec_attr *)cmd.params;
+ attr->ip_id = le16_to_cpu(rsp_params->ip_id);
+ attr->major_rev = rsp_params->major_rev;
+ attr->minor_rev = rsp_params->minor_rev;
+ attr->era = rsp_params->era;
+ attr->deco_num = rsp_params->deco_num;
+ attr->zuc_auth_acc_num = rsp_params->zuc_auth_acc_num;
+ attr->zuc_enc_acc_num = rsp_params->zuc_enc_acc_num;
+ attr->snow_f8_acc_num = rsp_params->snow_f8_acc_num;
+ attr->snow_f9_acc_num = rsp_params->snow_f9_acc_num;
+ attr->crc_acc_num = rsp_params->crc_acc_num;
+ attr->pk_acc_num = rsp_params->pk_acc_num;
+ attr->kasumi_acc_num = rsp_params->kasumi_acc_num;
+ attr->rng_acc_num = rsp_params->rng_acc_num;
+ attr->md_acc_num = rsp_params->md_acc_num;
+ attr->arc4_acc_num = rsp_params->arc4_acc_num;
+ attr->des_acc_num = rsp_params->des_acc_num;
+ attr->aes_acc_num = rsp_params->aes_acc_num;
+
+ return 0;
+}
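+
+/*
+ * Illustrative sketch (not part of the MC driver, name is hypothetical):
+ * query the SEC block and report whether an AES accelerator is present.
+ * 'token' is assumed to come from a prior dpseci_open().
+ */
+static inline int example_dpseci_has_aes(struct fsl_mc_io *mc_io,
+					 uint16_t token)
+{
+	struct dpseci_sec_attr attr;
+	int err;
+
+	err = dpseci_get_sec_attr(mc_io, 0, token, &attr);
+	if (err)
+		return err;
+
+	return attr.aes_acc_num > 0;
+}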
+
+/**
+ * dpseci_get_sec_counters() - Retrieve SEC accelerator counters.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @counters: Returned SEC counters
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpseci_get_sec_counters(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpseci_sec_counters *counters)
+{
+ struct dpseci_rsp_get_sec_counters *rsp_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_COUNTERS,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpseci_rsp_get_sec_counters *)cmd.params;
+ counters->dequeued_requests =
+ le64_to_cpu(rsp_params->dequeued_requests);
+ counters->ob_enc_requests = le64_to_cpu(rsp_params->ob_enc_requests);
+ counters->ib_dec_requests = le64_to_cpu(rsp_params->ib_dec_requests);
+ counters->ob_enc_bytes = le64_to_cpu(rsp_params->ob_enc_bytes);
+ counters->ob_prot_bytes = le64_to_cpu(rsp_params->ob_prot_bytes);
+ counters->ib_dec_bytes = le64_to_cpu(rsp_params->ib_dec_bytes);
+ counters->ib_valid_bytes = le64_to_cpu(rsp_params->ib_valid_bytes);
+
+ return 0;
+}
+
+/**
+ * dpseci_get_api_version() - Get Data Path SEC Interface API version
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @major_ver: Major version of data path sec API
+ * @minor_ver: Minor version of data path sec API
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpseci_get_api_version(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t *major_ver,
+ uint16_t *minor_ver)
+{
+ struct dpseci_rsp_get_api_version *rsp_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_API_VERSION,
+ cmd_flags,
+ 0);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpseci_rsp_get_api_version *)cmd.params;
+ *major_ver = le16_to_cpu(rsp_params->major);
+ *minor_ver = le16_to_cpu(rsp_params->minor);
+
+ return 0;
+}
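+
+/*
+ * Illustrative sketch (not part of the MC driver, name is hypothetical):
+ * check that the MC firmware exposes at least the DPSECI API version this
+ * code was built against (DPSECI_VER_MAJOR/MINOR from fsl_dpseci_cmd.h).
+ */
+static inline int example_dpseci_check_api(struct fsl_mc_io *mc_io)
+{
+	uint16_t major = 0, minor = 0;
+	int err;
+
+	err = dpseci_get_api_version(mc_io, 0, &major, &minor);
+	if (err)
+		return err;
+
+	if (major < DPSECI_VER_MAJOR ||
+	    (major == DPSECI_VER_MAJOR && minor < DPSECI_VER_MINOR))
+		return -1;
+
+	return 0;
+}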
+
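+/**
+ * dpseci_set_congestion_notification() - Set congestion group
+ *	notification configuration
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPSECI object
+ * @cfg:	Congestion notification configuration
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */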
+int dpseci_set_congestion_notification(
+ struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ const struct dpseci_congestion_notification_cfg *cfg)
+{
+ struct dpseci_cmd_set_congestion_notification *cmd_params;
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(
+ DPSECI_CMDID_SET_CONGESTION_NOTIFICATION,
+ cmd_flags,
+ token);
+
+ cmd_params =
+ (struct dpseci_cmd_set_congestion_notification *)cmd.params;
+ cmd_params->dest_id = cfg->dest_cfg.dest_id;
+ cmd_params->dest_priority = cfg->dest_cfg.priority;
+ cmd_params->message_ctx = cfg->message_ctx;
+ cmd_params->message_iova = cfg->message_iova;
+ cmd_params->notification_mode = cfg->notification_mode;
+ cmd_params->threshold_entry = cfg->threshold_entry;
+ cmd_params->threshold_exit = cfg->threshold_exit;
+ dpseci_set_field(cmd_params->type_units,
+ DEST_TYPE,
+ cfg->dest_cfg.dest_type);
+ dpseci_set_field(cmd_params->type_units,
+ CG_UNITS,
+ cfg->units);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
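+
+/*
+ * Illustrative sketch (not part of the MC driver, name is hypothetical):
+ * arm frame-based congestion notification so that a CSCN message is
+ * written to 'iova' (a 16B-aligned, DMA-able address) when the backlog
+ * crosses 'thr_entry'/'thr_exit' frames; no DPIO/DPCON destination is
+ * used. 'token' is assumed to come from a prior dpseci_open().
+ */
+static inline int example_dpseci_set_cgn(struct fsl_mc_io *mc_io,
+					 uint16_t token, uint64_t iova,
+					 uint32_t thr_entry,
+					 uint32_t thr_exit)
+{
+	struct dpseci_congestion_notification_cfg cfg = {
+		.units = DPSECI_CONGESTION_UNIT_FRAMES,
+		.threshold_entry = thr_entry,
+		.threshold_exit = thr_exit,
+		.message_ctx = 0,
+		.message_iova = iova,
+		.dest_cfg = { .dest_type = DPSECI_DEST_NONE },
+		.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
+				     DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT,
+	};
+
+	return dpseci_set_congestion_notification(mc_io, 0, token, &cfg);
+}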
+
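+/**
+ * dpseci_get_congestion_notification() - Get congestion group
+ *	notification configuration
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPSECI object
+ * @cfg:	Congestion notification configuration retrieved from the MC
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */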
+int dpseci_get_congestion_notification(
+ struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpseci_congestion_notification_cfg *cfg)
+{
+ struct dpseci_cmd_set_congestion_notification *rsp_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(
+ DPSECI_CMDID_GET_CONGESTION_NOTIFICATION,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params =
+ (struct dpseci_cmd_set_congestion_notification *)cmd.params;
+
+ cfg->dest_cfg.dest_id = le32_to_cpu(rsp_params->dest_id);
+ cfg->dest_cfg.priority = rsp_params->dest_priority;
+ cfg->notification_mode = le16_to_cpu(rsp_params->notification_mode);
+ cfg->message_ctx = le64_to_cpu(rsp_params->message_ctx);
+ cfg->message_iova = le64_to_cpu(rsp_params->message_iova);
+ cfg->threshold_entry = le32_to_cpu(rsp_params->threshold_entry);
+ cfg->threshold_exit = le32_to_cpu(rsp_params->threshold_exit);
+ cfg->units = dpseci_get_field(rsp_params->type_units, CG_UNITS);
+ cfg->dest_cfg.dest_type = dpseci_get_field(rsp_params->type_units,
+ DEST_TYPE);
+
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h
new file mode 100644
index 00000000..12ac005a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h
@@ -0,0 +1,399 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016-2017 NXP
+ *
+ */
+#ifndef __FSL_DPSECI_H
+#define __FSL_DPSECI_H
+
+/* Data Path SEC Interface API
+ * Contains initialization APIs and runtime control APIs for DPSECI
+ */
+
+struct fsl_mc_io;
+
+/**
+ * General DPSECI macros
+ */
+
+/**
+ * Maximum number of Tx/Rx priorities per DPSECI object
+ */
+#define DPSECI_PRIO_NUM 8
+
+/**
+ * All queues considered; see dpseci_set_rx_queue()
+ */
+#define DPSECI_ALL_QUEUES (uint8_t)(-1)
+
+int dpseci_open(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ int dpseci_id,
+ uint16_t *token);
+
+int dpseci_close(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+/**
+ * Enable the Congestion Group support
+ */
+#define DPSECI_OPT_HAS_CG 0x000020
+
+/**
+ * struct dpseci_cfg - Structure representing DPSECI configuration
+ * @options: Any combination of the following options:
+ * DPSECI_OPT_HAS_CG
+ * DPSECI_OPT_HAS_OPR
+ * DPSECI_OPT_OPR_SHARED
+ * @num_tx_queues: num of queues towards the SEC
+ * @num_rx_queues: num of queues back from the SEC
+ * @priorities: Priorities for the SEC hardware processing;
+ *		each entry in the array is the priority of the corresponding
+ *		Tx queue towards the SEC;
+ *		valid priorities are values 1-8
+ */
+struct dpseci_cfg {
+ uint32_t options;
+ uint8_t num_tx_queues;
+ uint8_t num_rx_queues;
+ uint8_t priorities[DPSECI_PRIO_NUM];
+};
+
+int dpseci_create(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ const struct dpseci_cfg *cfg,
+ uint32_t *obj_id);
+
+int dpseci_destroy(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ uint32_t object_id);
+
+int dpseci_enable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+int dpseci_disable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+int dpseci_is_enabled(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int *en);
+
+int dpseci_reset(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+/**
+ * struct dpseci_attr - Structure representing DPSECI attributes
+ * @id: DPSECI object ID
+ * @num_tx_queues: number of queues towards the SEC
+ * @num_rx_queues: number of queues back from the SEC
+ * @options: Any combination of the following options:
+ * DPSECI_OPT_HAS_CG
+ * DPSECI_OPT_HAS_OPR
+ * DPSECI_OPT_OPR_SHARED
+ */
+struct dpseci_attr {
+ int id;
+ uint8_t num_tx_queues;
+ uint8_t num_rx_queues;
+ uint32_t options;
+};
+
+int dpseci_get_attributes(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpseci_attr *attr);
+
+/**
+ * enum dpseci_dest - DPSECI destination types
+ * @DPSECI_DEST_NONE: Unassigned destination; The queue is set in parked mode
+ * and does not generate FQDAN notifications; user is expected to
+ * dequeue from the queue based on polling or other user-defined
+ * method
+ * @DPSECI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
+ * notifications to the specified DPIO; user is expected to dequeue
+ * from the queue only after notification is received
+ * @DPSECI_DEST_DPCON: The queue is set in schedule mode and does not generate
+ * FQDAN notifications, but is connected to the specified DPCON
+ * object; user is expected to dequeue from the DPCON channel
+ */
+enum dpseci_dest {
+ DPSECI_DEST_NONE = 0,
+ DPSECI_DEST_DPIO = 1,
+ DPSECI_DEST_DPCON = 2
+};
+
+/**
+ * struct dpseci_dest_cfg - Structure representing DPSECI destination parameters
+ * @dest_type: Destination type
+ * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
+ * @priority: Priority selection within the DPIO or DPCON channel; valid values
+ * are 0-1 or 0-7, depending on the number of priorities in that
+ * channel; not relevant for 'DPSECI_DEST_NONE' option
+ */
+struct dpseci_dest_cfg {
+ enum dpseci_dest dest_type;
+ int dest_id;
+ uint8_t priority;
+};
+
+/**
+ * DPSECI queue modification options
+ */
+
+/**
+ * Select to modify the user's context associated with the queue
+ */
+#define DPSECI_QUEUE_OPT_USER_CTX 0x00000001
+
+/**
+ * Select to modify the queue's destination
+ */
+#define DPSECI_QUEUE_OPT_DEST 0x00000002
+
+/**
+ * Select to modify the queue's order preservation
+ */
+#define DPSECI_QUEUE_OPT_ORDER_PRESERVATION 0x00000004
+
+/**
+ * struct dpseci_rx_queue_cfg - DPSECI RX queue configuration
+ * @options: Flags representing the suggested modifications to the queue;
+ * Use any combination of 'DPSECI_QUEUE_OPT_<X>' flags
+ * @order_preservation_en: order preservation configuration for the rx queue
+ * valid only if 'DPSECI_QUEUE_OPT_ORDER_PRESERVATION' is contained in 'options'
+ * @user_ctx: User context value provided in the frame descriptor of each
+ * dequeued frame;
+ * valid only if 'DPSECI_QUEUE_OPT_USER_CTX' is contained in 'options'
+ * @dest_cfg: Queue destination parameters;
+ * valid only if 'DPSECI_QUEUE_OPT_DEST' is contained in 'options'
+ */
+struct dpseci_rx_queue_cfg {
+ uint32_t options;
+ int order_preservation_en;
+ uint64_t user_ctx;
+ struct dpseci_dest_cfg dest_cfg;
+};
+
+int dpseci_set_rx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t queue,
+ const struct dpseci_rx_queue_cfg *cfg);
+
+/**
+ * struct dpseci_rx_queue_attr - Structure representing attributes of Rx queues
+ * @user_ctx: User context value provided in the frame descriptor of each
+ * dequeued frame
+ * @order_preservation_en: Status of the order preservation configuration
+ * on the queue
+ * @dest_cfg: Queue destination configuration
+ * @fqid: Virtual FQID value to be used for dequeue operations
+ */
+struct dpseci_rx_queue_attr {
+ uint64_t user_ctx;
+ int order_preservation_en;
+ struct dpseci_dest_cfg dest_cfg;
+ uint32_t fqid;
+};
+
+int dpseci_get_rx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t queue,
+ struct dpseci_rx_queue_attr *attr);
+
+/**
+ * struct dpseci_tx_queue_attr - Structure representing attributes of Tx queues
+ * @fqid: Virtual FQID to be used for sending frames to SEC hardware
+ * @priority: SEC hardware processing priority for the queue
+ */
+struct dpseci_tx_queue_attr {
+ uint32_t fqid;
+ uint8_t priority;
+};
+
+int dpseci_get_tx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t queue,
+ struct dpseci_tx_queue_attr *attr);
+
+/**
+ * struct dpseci_sec_attr - Structure representing attributes of the SEC
+ * hardware accelerator
+ * @ip_id: ID for SEC.
+ * @major_rev: Major revision number for SEC.
+ * @minor_rev: Minor revision number for SEC.
+ * @era: SEC Era.
+ * @deco_num: The number of copies of the DECO that are implemented
+ * in this version of SEC.
+ * @zuc_auth_acc_num: The number of copies of ZUCA that are implemented
+ * in this version of SEC.
+ * @zuc_enc_acc_num: The number of copies of ZUCE that are implemented
+ * in this version of SEC.
+ * @snow_f8_acc_num: The number of copies of the SNOW-f8 module that are
+ * implemented in this version of SEC.
+ * @snow_f9_acc_num: The number of copies of the SNOW-f9 module that are
+ * implemented in this version of SEC.
+ * @crc_acc_num: The number of copies of the CRC module that are
+ * implemented in this version of SEC.
+ * @pk_acc_num: The number of copies of the Public Key module that are
+ * implemented in this version of SEC.
+ * @kasumi_acc_num: The number of copies of the Kasumi module that are
+ * implemented in this version of SEC.
+ * @rng_acc_num: The number of copies of the Random Number Generator that
+ * are implemented in this version of SEC.
+ * @md_acc_num: The number of copies of the MDHA (Hashing module) that
+ * are implemented in this version of SEC.
+ * @arc4_acc_num: The number of copies of the ARC4 module that are
+ * implemented in this version of SEC.
+ * @des_acc_num: The number of copies of the DES module that are
+ * implemented in this version of SEC.
+ * @aes_acc_num: The number of copies of the AES module that are
+ * implemented in this version of SEC.
+ */
+
+struct dpseci_sec_attr {
+ uint16_t ip_id;
+ uint8_t major_rev;
+ uint8_t minor_rev;
+ uint8_t era;
+ uint8_t deco_num;
+ uint8_t zuc_auth_acc_num;
+ uint8_t zuc_enc_acc_num;
+ uint8_t snow_f8_acc_num;
+ uint8_t snow_f9_acc_num;
+ uint8_t crc_acc_num;
+ uint8_t pk_acc_num;
+ uint8_t kasumi_acc_num;
+ uint8_t rng_acc_num;
+ uint8_t md_acc_num;
+ uint8_t arc4_acc_num;
+ uint8_t des_acc_num;
+ uint8_t aes_acc_num;
+};
+
+int dpseci_get_sec_attr(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpseci_sec_attr *attr);
+
+/**
+ * struct dpseci_sec_counters - Structure representing the global SEC counters
+ *				(not per-DPSECI counters)
+ * @dequeued_requests: Number of Requests Dequeued
+ * @ob_enc_requests: Number of Outbound Encrypt Requests
+ * @ib_dec_requests: Number of Inbound Decrypt Requests
+ * @ob_enc_bytes: Number of Outbound Bytes Encrypted
+ * @ob_prot_bytes: Number of Outbound Bytes Protected
+ * @ib_dec_bytes: Number of Inbound Bytes Decrypted
+ * @ib_valid_bytes: Number of Inbound Bytes Validated
+ */
+struct dpseci_sec_counters {
+ uint64_t dequeued_requests;
+ uint64_t ob_enc_requests;
+ uint64_t ib_dec_requests;
+ uint64_t ob_enc_bytes;
+ uint64_t ob_prot_bytes;
+ uint64_t ib_dec_bytes;
+ uint64_t ib_valid_bytes;
+};
+
+int dpseci_get_sec_counters(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpseci_sec_counters *counters);
+
+int dpseci_get_api_version(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t *major_ver,
+ uint16_t *minor_ver);
+/**
+ * enum dpseci_congestion_unit - DPSECI congestion units
+ * @DPSECI_CONGESTION_UNIT_BYTES: bytes units
+ * @DPSECI_CONGESTION_UNIT_FRAMES: frames units
+ */
+enum dpseci_congestion_unit {
+ DPSECI_CONGESTION_UNIT_BYTES = 0,
+ DPSECI_CONGESTION_UNIT_FRAMES
+};
+
+/**
+ * CSCN message is written to message_iova once entering a
+ * congestion state (see 'threshold_entry')
+ */
+#define DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER 0x00000001
+/**
+ * CSCN message is written to message_iova once exiting a
+ * congestion state (see 'threshold_exit')
+ */
+#define DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT 0x00000002
+/**
+ * CSCN write will attempt to allocate into a cache (coherent write);
+ * valid only if 'DPSECI_CGN_MODE_WRITE_MEM_<X>' is selected
+ */
+#define DPSECI_CGN_MODE_COHERENT_WRITE 0x00000004
+/**
+ * if 'dpseci_dest_cfg.dest_type != DPSECI_DEST_NONE' CSCN message is sent to
+ * DPIO/DPCON's WQ channel once entering a congestion state
+ * (see 'threshold_entry')
+ */
+#define DPSECI_CGN_MODE_NOTIFY_DEST_ON_ENTER 0x00000008
+/**
+ * if 'dpseci_dest_cfg.dest_type != DPSECI_DEST_NONE' CSCN message is sent to
+ * DPIO/DPCON's WQ channel once exiting a congestion state
+ * (see 'threshold_exit')
+ */
+#define DPSECI_CGN_MODE_NOTIFY_DEST_ON_EXIT 0x00000010
+/**
+ * if 'dpseci_dest_cfg.dest_type != DPSECI_DEST_NONE' when the CSCN is written
+ * to the sw-portal's DQRR, the DQRI interrupt is asserted immediately
+ * (if enabled)
+ */
+#define DPSECI_CGN_MODE_INTR_COALESCING_DISABLED 0x00000020
+
+/**
+ * struct dpseci_congestion_notification_cfg - congestion notification
+ * configuration
+ * @units: units type
+ * @threshold_entry: above this threshold the group enters a congestion state;
+ *	set it to '0' to disable congestion notifications
+ * @threshold_exit: below this threshold the group exits the congestion state
+ * @message_ctx: The context that will be part of the CSCN message
+ * @message_iova: I/O virtual address (must be in DMA-able memory),
+ * must be 16B aligned;
+ * @dest_cfg:	CSCN can be sent to either a DPIO or a DPCON WQ channel
+ * @notification_mode: Mask of available options; use 'DPSECI_CGN_MODE_<X>'
+ * values
+ */
+struct dpseci_congestion_notification_cfg {
+ enum dpseci_congestion_unit units;
+ uint32_t threshold_entry;
+ uint32_t threshold_exit;
+ uint64_t message_ctx;
+ uint64_t message_iova;
+ struct dpseci_dest_cfg dest_cfg;
+ uint16_t notification_mode;
+};
+
+int dpseci_set_congestion_notification(
+ struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ const struct dpseci_congestion_notification_cfg *cfg);
+
+int dpseci_get_congestion_notification(
+ struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpseci_congestion_notification_cfg *cfg);
+
+#endif /* __FSL_DPSECI_H */
diff --git a/src/spdk/dpdk/drivers/crypto/dpaa2_sec/mc/fsl_dpseci_cmd.h b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/mc/fsl_dpseci_cmd.h
new file mode 100644
index 00000000..26cef0f7
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/mc/fsl_dpseci_cmd.h
@@ -0,0 +1,190 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016-2017 NXP
+ *
+ */
+#ifndef _FSL_DPSECI_CMD_H
+#define _FSL_DPSECI_CMD_H
+
+/* DPSECI Version */
+#define DPSECI_VER_MAJOR 5
+#define DPSECI_VER_MINOR 1
+
+/* Command versioning */
+#define DPSECI_CMD_BASE_VERSION 1
+#define DPSECI_CMD_BASE_VERSION_V2 2
+#define DPSECI_CMD_ID_OFFSET 4
+
+#define DPSECI_CMD_V1(id) \
+ ((id << DPSECI_CMD_ID_OFFSET) | DPSECI_CMD_BASE_VERSION)
+#define DPSECI_CMD_V2(id) \
+ ((id << DPSECI_CMD_ID_OFFSET) | DPSECI_CMD_BASE_VERSION_V2)
+
+/* Command IDs */
+#define DPSECI_CMDID_CLOSE DPSECI_CMD_V1(0x800)
+#define DPSECI_CMDID_OPEN DPSECI_CMD_V1(0x809)
+#define DPSECI_CMDID_CREATE DPSECI_CMD_V2(0x909)
+#define DPSECI_CMDID_DESTROY DPSECI_CMD_V1(0x989)
+#define DPSECI_CMDID_GET_API_VERSION DPSECI_CMD_V1(0xa09)
+
+#define DPSECI_CMDID_ENABLE DPSECI_CMD_V1(0x002)
+#define DPSECI_CMDID_DISABLE DPSECI_CMD_V1(0x003)
+#define DPSECI_CMDID_GET_ATTR DPSECI_CMD_V1(0x004)
+#define DPSECI_CMDID_RESET DPSECI_CMD_V1(0x005)
+#define DPSECI_CMDID_IS_ENABLED DPSECI_CMD_V1(0x006)
+
+#define DPSECI_CMDID_SET_RX_QUEUE DPSECI_CMD_V1(0x194)
+#define DPSECI_CMDID_GET_RX_QUEUE DPSECI_CMD_V1(0x196)
+#define DPSECI_CMDID_GET_TX_QUEUE DPSECI_CMD_V1(0x197)
+#define DPSECI_CMDID_GET_SEC_ATTR DPSECI_CMD_V1(0x198)
+#define DPSECI_CMDID_GET_SEC_COUNTERS DPSECI_CMD_V1(0x199)
+
+#define DPSECI_CMDID_SET_CONGESTION_NOTIFICATION DPSECI_CMD_V1(0x170)
+#define DPSECI_CMDID_GET_CONGESTION_NOTIFICATION DPSECI_CMD_V1(0x171)
+
+/* Macros for accessing command fields smaller than 1 byte */
+#define DPSECI_MASK(field) \
+ GENMASK(DPSECI_##field##_SHIFT + DPSECI_##field##_SIZE - 1, \
+ DPSECI_##field##_SHIFT)
+#define dpseci_set_field(var, field, val) \
+ ((var) |= (((val) << DPSECI_##field##_SHIFT) & DPSECI_MASK(field)))
+#define dpseci_get_field(var, field) \
+ (((var) & DPSECI_MASK(field)) >> DPSECI_##field##_SHIFT)
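+
+/*
+ * Illustrative use (matching the pattern in dpseci.c): pack and read back
+ * the 4-bit destination type that shares a byte with other sub-fields:
+ *
+ *	dpseci_set_field(cmd_params->dest_type, DEST_TYPE, dest_type);
+ *	dest_type = dpseci_get_field(rsp_params->dest_type, DEST_TYPE);
+ */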
+
+#pragma pack(push, 1)
+struct dpseci_cmd_open {
+ uint32_t dpseci_id;
+};
+
+struct dpseci_cmd_create {
+ uint8_t priorities[8];
+ uint8_t num_tx_queues;
+ uint8_t num_rx_queues;
+ uint8_t pad[6];
+ uint32_t options;
+};
+
+struct dpseci_cmd_destroy {
+ uint32_t dpseci_id;
+};
+
+#define DPSECI_ENABLE_SHIFT 0
+#define DPSECI_ENABLE_SIZE 1
+
+struct dpseci_rsp_is_enabled {
+ /* only the first LSB */
+ uint8_t en;
+};
+
+struct dpseci_rsp_get_attr {
+ uint32_t id;
+ uint32_t pad;
+ uint8_t num_tx_queues;
+ uint8_t num_rx_queues;
+ uint8_t pad1[6];
+ uint32_t options;
+};
+
+#define DPSECI_DEST_TYPE_SHIFT 0
+#define DPSECI_DEST_TYPE_SIZE 4
+
+#define DPSECI_ORDER_PRESERVATION_SHIFT 0
+#define DPSECI_ORDER_PRESERVATION_SIZE 1
+
+struct dpseci_cmd_set_rx_queue {
+ uint32_t dest_id;
+ uint8_t dest_priority;
+ uint8_t queue;
+ /* from LSB: dest_type:4 */
+ uint8_t dest_type;
+ uint8_t pad;
+ uint64_t user_ctx;
+ uint32_t options;
+ /* only the LSB */
+ uint8_t order_preservation_en;
+};
+
+struct dpseci_cmd_get_queue {
+ uint8_t pad[5];
+ uint8_t queue;
+};
+
+struct dpseci_rsp_get_rx_queue {
+ uint32_t dest_id;
+ uint8_t dest_priority;
+ uint8_t pad1;
+ /* from LSB: dest_type:4 */
+ uint8_t dest_type;
+ uint8_t pad2;
+ uint64_t user_ctx;
+ uint32_t fqid;
+ /* only the LSB */
+ uint8_t order_preservation_en;
+};
+
+struct dpseci_rsp_get_tx_queue {
+ uint32_t pad;
+ uint32_t fqid;
+ uint8_t priority;
+};
+
+struct dpseci_rsp_get_sec_attr {
+ uint16_t ip_id;
+ uint8_t major_rev;
+ uint8_t minor_rev;
+ uint8_t era;
+ uint8_t pad1[3];
+ uint8_t deco_num;
+ uint8_t zuc_auth_acc_num;
+ uint8_t zuc_enc_acc_num;
+ uint8_t pad2;
+ uint8_t snow_f8_acc_num;
+ uint8_t snow_f9_acc_num;
+ uint8_t crc_acc_num;
+ uint8_t pad3;
+ uint8_t pk_acc_num;
+ uint8_t kasumi_acc_num;
+ uint8_t rng_acc_num;
+ uint8_t pad4;
+ uint8_t md_acc_num;
+ uint8_t arc4_acc_num;
+ uint8_t des_acc_num;
+ uint8_t aes_acc_num;
+};
+
+struct dpseci_rsp_get_sec_counters {
+ uint64_t dequeued_requests;
+ uint64_t ob_enc_requests;
+ uint64_t ib_dec_requests;
+ uint64_t ob_enc_bytes;
+ uint64_t ob_prot_bytes;
+ uint64_t ib_dec_bytes;
+ uint64_t ib_valid_bytes;
+};
+
+struct dpseci_rsp_get_api_version {
+ uint16_t major;
+ uint16_t minor;
+};
+
+#define DPSECI_DEST_TYPE_SHIFT 0
+#define DPSECI_DEST_TYPE_SIZE 4
+#define DPSECI_CG_UNITS_SHIFT 4
+#define DPSECI_CG_UNITS_SIZE 2
+
+struct dpseci_cmd_set_congestion_notification {
+ uint32_t dest_id;
+ uint16_t notification_mode;
+ uint8_t dest_priority;
+ /* from LSB: dest_type: 4 units:2 */
+ uint8_t type_units;
+ uint64_t message_iova;
+ uint64_t message_ctx;
+ uint32_t threshold_entry;
+ uint32_t threshold_exit;
+};
+
+#pragma pack(pop)
+#endif /* _FSL_DPSECI_CMD_H */
diff --git a/src/spdk/dpdk/drivers/crypto/dpaa2_sec/meson.build b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/meson.build
new file mode 100644
index 00000000..01afc587
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/meson.build
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+if host_machine.system() != 'linux'
+ build = false
+endif
+
+deps += ['security', 'mempool_dpaa2']
+sources = files('dpaa2_sec_dpseci.c',
+ 'mc/dpseci.c')
+
+allow_experimental_apis = true
+
+includes += include_directories('mc', 'hw')
diff --git a/src/spdk/dpdk/drivers/crypto/dpaa2_sec/rte_pmd_dpaa2_sec_version.map b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/rte_pmd_dpaa2_sec_version.map
new file mode 100644
index 00000000..8591cc0b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/dpaa2_sec/rte_pmd_dpaa2_sec_version.map
@@ -0,0 +1,4 @@
+DPDK_17.05 {
+
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/crypto/dpaa_sec/Makefile b/src/spdk/dpdk/drivers/crypto/dpaa_sec/Makefile
new file mode 100644
index 00000000..9be44704
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/dpaa_sec/Makefile
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+# Copyright 2017 NXP
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_dpaa_sec.a
+
+# build flags
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+CFLAGS += -D _GNU_SOURCE
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+CFLAGS += -I$(RTE_SDK)/drivers/bus/dpaa
+CFLAGS += -I$(RTE_SDK)/drivers/bus/dpaa/include
+CFLAGS += -I$(RTE_SDK)/drivers/crypto/dpaa_sec/
+# Share the HW flib headers from the dpaa2_sec PMD
+CFLAGS += -I$(RTE_SDK)/drivers/crypto/dpaa2_sec/
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common/include
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_cryptodev
+
+# versioning export map
+EXPORT_MAP := rte_pmd_dpaa_sec_version.map
+
+# library version
+LIBABIVER := 1
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_DPAA_SEC) += dpaa_sec.c
+
+# library dependencies
+
+LDLIBS += -lrte_bus_dpaa
+LDLIBS += -lrte_mempool_dpaa
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/crypto/dpaa_sec/dpaa_sec.c b/src/spdk/dpdk/drivers/crypto/dpaa_sec/dpaa_sec.c
new file mode 100644
index 00000000..f571050b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/dpaa_sec/dpaa_sec.c
@@ -0,0 +1,2419 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright 2017-2018 NXP
+ *
+ */
+
+#include <fcntl.h>
+#include <unistd.h>
+#include <sched.h>
+#include <net/if.h>
+
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_crypto.h>
+#include <rte_cryptodev.h>
+#include <rte_security_driver.h>
+#include <rte_cycles.h>
+#include <rte_dev.h>
+#include <rte_kvargs.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_memcpy.h>
+#include <rte_string_fns.h>
+
+#include <fsl_usd.h>
+#include <fsl_qman.h>
+#include <of.h>
+
+/* RTA header files */
+#include <hw/desc/common.h>
+#include <hw/desc/algo.h>
+#include <hw/desc/ipsec.h>
+
+#include <rte_dpaa_bus.h>
+#include <dpaa_sec.h>
+#include <dpaa_sec_log.h>
+
+enum rta_sec_era rta_sec_era;
+
+int dpaa_logtype_sec;
+
+static uint8_t cryptodev_driver_id;
+
+static __thread struct rte_crypto_op **dpaa_sec_ops;
+static __thread int dpaa_sec_op_nb;
+
+static int
+dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);
+
+static inline void
+dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
+{
+ if (!ctx->fd_status) {
+ ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ } else {
+ DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
+ ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ }
+
+	/* report op status to sym->op and then free the ctx memory */
+ rte_mempool_put(ctx->ctx_pool, (void *)ctx);
+}
+
+static inline struct dpaa_sec_op_ctx *
+dpaa_sec_alloc_ctx(dpaa_sec_session *ses)
+{
+ struct dpaa_sec_op_ctx *ctx;
+ int retval;
+
+ retval = rte_mempool_get(ses->ctx_pool, (void **)(&ctx));
+ if (!ctx || retval) {
+ DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
+ return NULL;
+ }
+ /*
+	 * Clear the SG memory. There are 16 SG entries of 16 bytes each.
+	 * One call to dcbz_64() clears 64 bytes, so it is called 4 times to
+	 * clear all the SG entries. dpaa_sec_alloc_ctx() is called for each
+	 * packet, and memset() is costlier than dcbz_64().
+ */
+ dcbz_64(&ctx->job.sg[SG_CACHELINE_0]);
+ dcbz_64(&ctx->job.sg[SG_CACHELINE_1]);
+ dcbz_64(&ctx->job.sg[SG_CACHELINE_2]);
+ dcbz_64(&ctx->job.sg[SG_CACHELINE_3]);
+
+ ctx->ctx_pool = ses->ctx_pool;
+ ctx->vtop_offset = (size_t) ctx
+ - rte_mempool_virt2iova(ctx);
+
+ return ctx;
+}
+
+static inline rte_iova_t
+dpaa_mem_vtop(void *vaddr)
+{
+ const struct rte_memseg *ms;
+
+ ms = rte_mem_virt2memseg(vaddr, NULL);
+ if (ms)
+ return ms->iova + RTE_PTR_DIFF(vaddr, ms->addr);
+ return (size_t)NULL;
+}
+
+static inline void *
+dpaa_mem_ptov(rte_iova_t paddr)
+{
+ return rte_mem_iova2virt(paddr);
+}
+
+static void
+ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
+ struct qman_fq *fq,
+ const struct qm_mr_entry *msg)
+{
+ DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n",
+ fq->fqid, msg->ern.rc, msg->ern.seqnum);
+}
+
+/* Initialize the queue with the CAAM channel as the destination channel so
+ * that all the packets in this queue can be dispatched to CAAM.
+ */
+static int
+dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
+ uint32_t fqid_out)
+{
+ struct qm_mcc_initfq fq_opts;
+ uint32_t flags;
+ int ret = -1;
+
+ /* Clear FQ options */
+ memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));
+
+ flags = QMAN_INITFQ_FLAG_SCHED;
+ fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
+ QM_INITFQ_WE_CONTEXTB;
+
+ qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
+ fq_opts.fqd.context_b = fqid_out;
+ fq_opts.fqd.dest.channel = qm_channel_caam;
+ fq_opts.fqd.dest.wq = 0;
+
+ fq_in->cb.ern = ern_sec_fq_handler;
+
+ DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out);
+
+ ret = qman_init_fq(fq_in, flags, &fq_opts);
+ if (unlikely(ret != 0))
+ DPAA_SEC_ERR("qman_init_fq failed %d", ret);
+
+ return ret;
+}
+
+/* Frames are enqueued on in_fq and CAAM puts the crypto result on out_fq */
+static enum qman_cb_dqrr_result
+dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
+ struct qman_fq *fq __always_unused,
+ const struct qm_dqrr_entry *dqrr)
+{
+ const struct qm_fd *fd;
+ struct dpaa_sec_job *job;
+ struct dpaa_sec_op_ctx *ctx;
+
+ if (dpaa_sec_op_nb >= DPAA_SEC_BURST)
+ return qman_cb_dqrr_defer;
+
+ if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
+ return qman_cb_dqrr_consume;
+
+ fd = &dqrr->fd;
+	/* The SG table is embedded in an op ctx:
+	 * sg[0] is for output,
+	 * sg[1] is for input.
+	 */
+ job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
+
+ ctx = container_of(job, struct dpaa_sec_op_ctx, job);
+ ctx->fd_status = fd->status;
+ if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+ struct qm_sg_entry *sg_out;
+ uint32_t len;
+
+ sg_out = &job->sg[0];
+ hw_sg_to_cpu(sg_out);
+ len = sg_out->length;
+ ctx->op->sym->m_src->pkt_len = len;
+ ctx->op->sym->m_src->data_len = len;
+ }
+ dpaa_sec_ops[dpaa_sec_op_nb++] = ctx->op;
+ dpaa_sec_op_ending(ctx);
+
+ return qman_cb_dqrr_consume;
+}
+
+/* caam result is put into this queue */
+static int
+dpaa_sec_init_tx(struct qman_fq *fq)
+{
+ int ret;
+ struct qm_mcc_initfq opts;
+ uint32_t flags;
+
+ flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
+ QMAN_FQ_FLAG_DYNAMIC_FQID;
+
+ ret = qman_create_fq(0, flags, fq);
+ if (unlikely(ret)) {
+ DPAA_SEC_ERR("qman_create_fq failed");
+ return ret;
+ }
+
+ memset(&opts, 0, sizeof(opts));
+ opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
+ QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
+
+ /* opts.fqd.dest.channel = dpaa_sec_pool_chan; */
+
+ fq->cb.dqrr = dqrr_out_fq_cb_rx;
+ fq->cb.ern = ern_sec_fq_handler;
+
+ ret = qman_init_fq(fq, 0, &opts);
+ if (unlikely(ret)) {
+ DPAA_SEC_ERR("unable to init caam source fq!");
+ return ret;
+ }
+
+ return ret;
+}
+
+static inline int is_cipher_only(dpaa_sec_session *ses)
+{
+ return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
+ (ses->auth_alg == RTE_CRYPTO_AUTH_NULL));
+}
+
+static inline int is_auth_only(dpaa_sec_session *ses)
+{
+ return ((ses->cipher_alg == RTE_CRYPTO_CIPHER_NULL) &&
+ (ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
+}
+
+static inline int is_aead(dpaa_sec_session *ses)
+{
+ return ((ses->cipher_alg == 0) &&
+ (ses->auth_alg == 0) &&
+ (ses->aead_alg != 0));
+}
+
+static inline int is_auth_cipher(dpaa_sec_session *ses)
+{
+ return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
+ (ses->auth_alg != RTE_CRYPTO_AUTH_NULL) &&
+ (ses->proto_alg != RTE_SECURITY_PROTOCOL_IPSEC));
+}
+
+static inline int is_proto_ipsec(dpaa_sec_session *ses)
+{
+ return (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC);
+}
+
+static inline int is_encode(dpaa_sec_session *ses)
+{
+ return ses->dir == DIR_ENC;
+}
+
+static inline int is_decode(dpaa_sec_session *ses)
+{
+ return ses->dir == DIR_DEC;
+}
+
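+/* Map the session's auth algorithm to the CAAM algorithm selector, or to
+ * the corresponding IPsec protocol selector when the session is an IPsec
+ * protocol session.
+ */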
+static inline void
+caam_auth_alg(dpaa_sec_session *ses, struct alginfo *alginfo_a)
+{
+ switch (ses->auth_alg) {
+ case RTE_CRYPTO_AUTH_NULL:
+ ses->digest_length = 0;
+ break;
+ case RTE_CRYPTO_AUTH_MD5_HMAC:
+ alginfo_a->algtype =
+ (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+ OP_PCL_IPSEC_HMAC_MD5_96 : OP_ALG_ALGSEL_MD5;
+ alginfo_a->algmode = OP_ALG_AAI_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ alginfo_a->algtype =
+ (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+ OP_PCL_IPSEC_HMAC_SHA1_96 : OP_ALG_ALGSEL_SHA1;
+ alginfo_a->algmode = OP_ALG_AAI_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA224_HMAC:
+ alginfo_a->algtype =
+ (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+ OP_PCL_IPSEC_HMAC_SHA1_160 : OP_ALG_ALGSEL_SHA224;
+ alginfo_a->algmode = OP_ALG_AAI_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ alginfo_a->algtype =
+ (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+ OP_PCL_IPSEC_HMAC_SHA2_256_128 : OP_ALG_ALGSEL_SHA256;
+ alginfo_a->algmode = OP_ALG_AAI_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA384_HMAC:
+ alginfo_a->algtype =
+ (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+ OP_PCL_IPSEC_HMAC_SHA2_384_192 : OP_ALG_ALGSEL_SHA384;
+ alginfo_a->algmode = OP_ALG_AAI_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA512_HMAC:
+ alginfo_a->algtype =
+ (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+ OP_PCL_IPSEC_HMAC_SHA2_512_256 : OP_ALG_ALGSEL_SHA512;
+ alginfo_a->algmode = OP_ALG_AAI_HMAC;
+ break;
+ default:
+ DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg);
+ }
+}
+
+static inline void
+caam_cipher_alg(dpaa_sec_session *ses, struct alginfo *alginfo_c)
+{
+ switch (ses->cipher_alg) {
+ case RTE_CRYPTO_CIPHER_NULL:
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ alginfo_c->algtype =
+ (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+ OP_PCL_IPSEC_AES_CBC : OP_ALG_ALGSEL_AES;
+ alginfo_c->algmode = OP_ALG_AAI_CBC;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ alginfo_c->algtype =
+ (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+ OP_PCL_IPSEC_3DES : OP_ALG_ALGSEL_3DES;
+ alginfo_c->algmode = OP_ALG_AAI_CBC;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ alginfo_c->algtype =
+ (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+ OP_PCL_IPSEC_AES_CTR : OP_ALG_ALGSEL_AES;
+ alginfo_c->algmode = OP_ALG_AAI_CTR;
+ break;
+ default:
+ DPAA_SEC_ERR("unsupported cipher alg %d", ses->cipher_alg);
+ }
+}
+
+static inline void
+caam_aead_alg(dpaa_sec_session *ses, struct alginfo *alginfo)
+{
+ switch (ses->aead_alg) {
+ case RTE_CRYPTO_AEAD_AES_GCM:
+ alginfo->algtype = OP_ALG_ALGSEL_AES;
+ alginfo->algmode = OP_ALG_AAI_GCM;
+ break;
+ default:
+ DPAA_SEC_ERR("unsupported AEAD alg %d", ses->aead_alg);
+ }
+}
+
+
+/* prepare command block of the session */
+static int
+dpaa_sec_prep_cdb(dpaa_sec_session *ses)
+{
+ struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
+ int32_t shared_desc_len = 0;
+ struct sec_cdb *cdb = &ses->cdb;
+ int err;
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ int swap = false;
+#else
+ int swap = true;
+#endif
+
+ memset(cdb, 0, sizeof(struct sec_cdb));
+
+ if (is_cipher_only(ses)) {
+ caam_cipher_alg(ses, &alginfo_c);
+ if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
+ DPAA_SEC_ERR("not supported cipher alg");
+ return -ENOTSUP;
+ }
+
+ alginfo_c.key = (size_t)ses->cipher_key.data;
+ alginfo_c.keylen = ses->cipher_key.length;
+ alginfo_c.key_enc_flags = 0;
+ alginfo_c.key_type = RTA_DATA_IMM;
+
+ shared_desc_len = cnstr_shdsc_blkcipher(
+ cdb->sh_desc, true,
+ swap, &alginfo_c,
+ NULL,
+ ses->iv.length,
+ ses->dir);
+ } else if (is_auth_only(ses)) {
+ caam_auth_alg(ses, &alginfo_a);
+ if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
+ DPAA_SEC_ERR("not supported auth alg");
+ return -ENOTSUP;
+ }
+
+ alginfo_a.key = (size_t)ses->auth_key.data;
+ alginfo_a.keylen = ses->auth_key.length;
+ alginfo_a.key_enc_flags = 0;
+ alginfo_a.key_type = RTA_DATA_IMM;
+
+ shared_desc_len = cnstr_shdsc_hmac(cdb->sh_desc, true,
+ swap, &alginfo_a,
+ !ses->dir,
+ ses->digest_length);
+ } else if (is_aead(ses)) {
+ caam_aead_alg(ses, &alginfo);
+ if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
+ DPAA_SEC_ERR("not supported aead alg");
+ return -ENOTSUP;
+ }
+ alginfo.key = (size_t)ses->aead_key.data;
+ alginfo.keylen = ses->aead_key.length;
+ alginfo.key_enc_flags = 0;
+ alginfo.key_type = RTA_DATA_IMM;
+
+ if (ses->dir == DIR_ENC)
+ shared_desc_len = cnstr_shdsc_gcm_encap(
+ cdb->sh_desc, true, swap,
+ &alginfo,
+ ses->iv.length,
+ ses->digest_length);
+ else
+ shared_desc_len = cnstr_shdsc_gcm_decap(
+ cdb->sh_desc, true, swap,
+ &alginfo,
+ ses->iv.length,
+ ses->digest_length);
+ } else {
+ caam_cipher_alg(ses, &alginfo_c);
+ if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
+ DPAA_SEC_ERR("not supported cipher alg");
+ return -ENOTSUP;
+ }
+
+ alginfo_c.key = (size_t)ses->cipher_key.data;
+ alginfo_c.keylen = ses->cipher_key.length;
+ alginfo_c.key_enc_flags = 0;
+ alginfo_c.key_type = RTA_DATA_IMM;
+
+ caam_auth_alg(ses, &alginfo_a);
+ if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
+ DPAA_SEC_ERR("not supported auth alg");
+ return -ENOTSUP;
+ }
+
+ alginfo_a.key = (size_t)ses->auth_key.data;
+ alginfo_a.keylen = ses->auth_key.length;
+ alginfo_a.key_enc_flags = 0;
+ alginfo_a.key_type = RTA_DATA_IMM;
+
+ cdb->sh_desc[0] = alginfo_c.keylen;
+ cdb->sh_desc[1] = alginfo_a.keylen;
+ err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
+ MIN_JOB_DESC_SIZE,
+ (unsigned int *)cdb->sh_desc,
+ &cdb->sh_desc[2], 2);
+
+ if (err < 0) {
+ DPAA_SEC_ERR("Crypto: Incorrect key lengths");
+ return err;
+ }
+ if (cdb->sh_desc[2] & 1)
+ alginfo_c.key_type = RTA_DATA_IMM;
+ else {
+ alginfo_c.key = (size_t)dpaa_mem_vtop(
+ (void *)(size_t)alginfo_c.key);
+ alginfo_c.key_type = RTA_DATA_PTR;
+ }
+ if (cdb->sh_desc[2] & (1<<1))
+ alginfo_a.key_type = RTA_DATA_IMM;
+ else {
+ alginfo_a.key = (size_t)dpaa_mem_vtop(
+ (void *)(size_t)alginfo_a.key);
+ alginfo_a.key_type = RTA_DATA_PTR;
+ }
+ cdb->sh_desc[0] = 0;
+ cdb->sh_desc[1] = 0;
+ cdb->sh_desc[2] = 0;
+ if (is_proto_ipsec(ses)) {
+ if (ses->dir == DIR_ENC) {
+ shared_desc_len = cnstr_shdsc_ipsec_new_encap(
+ cdb->sh_desc,
+ true, swap, &ses->encap_pdb,
+ (uint8_t *)&ses->ip4_hdr,
+ &alginfo_c, &alginfo_a);
+ } else if (ses->dir == DIR_DEC) {
+ shared_desc_len = cnstr_shdsc_ipsec_new_decap(
+ cdb->sh_desc,
+ true, swap, &ses->decap_pdb,
+ &alginfo_c, &alginfo_a);
+ }
+ } else {
+ /* auth_only_len is set to 0 here; it is overwritten
+ * in the fd for each packet.
+ */
+ shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
+ true, swap, &alginfo_c, &alginfo_a,
+ ses->iv.length, 0,
+ ses->digest_length, ses->dir);
+ }
+ }
+
+ if (shared_desc_len < 0) {
+ DPAA_SEC_ERR("error in preparing command block");
+ return shared_desc_len;
+ }
+
+ cdb->sh_hdr.hi.field.idlen = shared_desc_len;
+ cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
+ cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);
+
+ return 0;
+}
+
+/* qp is lockless, should be accessed by only one thread */
+static int
+dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
+{
+ struct qman_fq *fq;
+ unsigned int pkts = 0;
+ int num_rx_bufs, ret;
+ struct qm_dqrr_entry *dq;
+ uint32_t vdqcr_flags = 0;
+
+ fq = &qp->outq;
+ /*
+ * For requests of fewer than four buffers, set the QM_VDQCR_EXACT
+ * flag and ask for the exact number of buffers.
+ * Without QM_VDQCR_EXACT the portal can return up to two more
+ * buffers than requested, so request two fewer in that case.
+ */
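+ /*
+ * For example: nb_ops = 3 requests exactly 3 frames with
+ * QM_VDQCR_EXACT; nb_ops = 10 requests 8 frames without the flag
+ * (up to 10 may be returned); nb_ops = 100 requests
+ * DPAA_MAX_DEQUEUE_NUM_FRAMES - 2 = 61 (up to 63 may be returned).
+ */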
+ if (nb_ops < 4) {
+ vdqcr_flags = QM_VDQCR_EXACT;
+ num_rx_bufs = nb_ops;
+ } else {
+ num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
+ (DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
+ }
+ ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
+ if (ret)
+ return 0;
+
+ do {
+ const struct qm_fd *fd;
+ struct dpaa_sec_job *job;
+ struct dpaa_sec_op_ctx *ctx;
+ struct rte_crypto_op *op;
+
+ dq = qman_dequeue(fq);
+ if (!dq)
+ continue;
+
+ fd = &dq->fd;
+ /* sg is embedded in an op ctx,
+ * sg[0] is for output
+ * sg[1] for input
+ */
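+ /* When the scatter-gather build paths are used, sg[2] onwards hold
+ * the chained sub-frame entries; container_of() below recovers the
+ * owning dpaa_sec_op_ctx from the job.
+ */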
+ job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
+
+ ctx = container_of(job, struct dpaa_sec_op_ctx, job);
+ ctx->fd_status = fd->status;
+ op = ctx->op;
+ if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+ struct qm_sg_entry *sg_out;
+ uint32_t len;
+
+ sg_out = &job->sg[0];
+ hw_sg_to_cpu(sg_out);
+ len = sg_out->length;
+ op->sym->m_src->pkt_len = len;
+ op->sym->m_src->data_len = len;
+ }
+ if (!ctx->fd_status) {
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ } else {
+ DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ }
+ ops[pkts++] = op;
+
+ /* report op status to sym->op and then free the ctx memory */
+ rte_mempool_put(ctx->ctx_pool, (void *)ctx);
+
+ qman_dqrr_consume(fq, dq);
+ } while (fq->flags & QMAN_FQ_STATE_VDQCR);
+
+ return pkts;
+}
+
+static inline struct dpaa_sec_job *
+build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
+{
+ struct rte_crypto_sym_op *sym = op->sym;
+ struct rte_mbuf *mbuf = sym->m_src;
+ struct dpaa_sec_job *cf;
+ struct dpaa_sec_op_ctx *ctx;
+ struct qm_sg_entry *sg, *out_sg, *in_sg;
+ phys_addr_t start_addr;
+ uint8_t *old_digest, extra_segs;
+
+ if (is_decode(ses))
+ extra_segs = 3;
+ else
+ extra_segs = 2;
+
+ if ((mbuf->nb_segs + extra_segs) > MAX_SG_ENTRIES) {
+ DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
+ MAX_SG_ENTRIES);
+ return NULL;
+ }
+ ctx = dpaa_sec_alloc_ctx(ses);
+ if (!ctx)
+ return NULL;
+
+ cf = &ctx->job;
+ ctx->op = op;
+ old_digest = ctx->digest;
+
+ /* output */
+ out_sg = &cf->sg[0];
+ qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
+ out_sg->length = ses->digest_length;
+ cpu_to_hw_sg(out_sg);
+
+ /* input */
+ in_sg = &cf->sg[1];
+ /* need to extend the input to a compound frame */
+ in_sg->extension = 1;
+ in_sg->final = 1;
+ in_sg->length = sym->auth.data.length;
+ qm_sg_entry_set64(in_sg, dpaa_mem_vtop(&cf->sg[2]));
+
+ /* 1st seg */
+ sg = in_sg + 1;
+ qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ sg->length = mbuf->data_len - sym->auth.data.offset;
+ sg->offset = sym->auth.data.offset;
+
+ /* Successive segs */
+ mbuf = mbuf->next;
+ while (mbuf) {
+ cpu_to_hw_sg(sg);
+ sg++;
+ qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ sg->length = mbuf->data_len;
+ mbuf = mbuf->next;
+ }
+
+ if (is_decode(ses)) {
+ /* Digest verification case */
+ cpu_to_hw_sg(sg);
+ sg++;
+ rte_memcpy(old_digest, sym->auth.digest.data,
+ ses->digest_length);
+ start_addr = dpaa_mem_vtop(old_digest);
+ qm_sg_entry_set64(sg, start_addr);
+ sg->length = ses->digest_length;
+ in_sg->length += ses->digest_length;
+ } else {
+ /* Digest calculation case */
+ sg->length -= ses->digest_length;
+ }
+ sg->final = 1;
+ cpu_to_hw_sg(sg);
+ cpu_to_hw_sg(in_sg);
+
+ return cf;
+}
+
+/**
+ * packet looks like:
+ * |<----data_len------->|
+ * |ip_header|ah_header|icv|payload|
+ * ^
+ * |
+ * mbuf data start (rte_pktmbuf_mtod(mbuf))
+ */
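+/*
+ * Sketch of the compound frame built below: cf->sg[0] points at the digest
+ * buffer (output) and cf->sg[1] at the data to authenticate (input). In the
+ * decode (verify) path the expected ICV is first copied into ctx->digest and
+ * appended to the input so the hardware can check it.
+ */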
+static inline struct dpaa_sec_job *
+build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
+{
+ struct rte_crypto_sym_op *sym = op->sym;
+ struct rte_mbuf *mbuf = sym->m_src;
+ struct dpaa_sec_job *cf;
+ struct dpaa_sec_op_ctx *ctx;
+ struct qm_sg_entry *sg;
+ rte_iova_t start_addr;
+ uint8_t *old_digest;
+
+ ctx = dpaa_sec_alloc_ctx(ses);
+ if (!ctx)
+ return NULL;
+
+ cf = &ctx->job;
+ ctx->op = op;
+ old_digest = ctx->digest;
+
+ start_addr = rte_pktmbuf_iova(mbuf);
+ /* output */
+ sg = &cf->sg[0];
+ qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
+ sg->length = ses->digest_length;
+ cpu_to_hw_sg(sg);
+
+ /* input */
+ sg = &cf->sg[1];
+ if (is_decode(ses)) {
+ /* need to extend the input to a compound frame */
+ sg->extension = 1;
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
+ sg->length = sym->auth.data.length + ses->digest_length;
+ sg->final = 1;
+ cpu_to_hw_sg(sg);
+
+ sg = &cf->sg[2];
+ /* hash result or digest, save digest first */
+ rte_memcpy(old_digest, sym->auth.digest.data,
+ ses->digest_length);
+ qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
+ sg->length = sym->auth.data.length;
+ cpu_to_hw_sg(sg);
+
+ /* let's check digest by hw */
+ start_addr = dpaa_mem_vtop(old_digest);
+ sg++;
+ qm_sg_entry_set64(sg, start_addr);
+ sg->length = ses->digest_length;
+ sg->final = 1;
+ cpu_to_hw_sg(sg);
+ } else {
+ qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
+ sg->length = sym->auth.data.length;
+ sg->final = 1;
+ cpu_to_hw_sg(sg);
+ }
+
+ return cf;
+}
+
+static inline struct dpaa_sec_job *
+build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
+{
+ struct rte_crypto_sym_op *sym = op->sym;
+ struct dpaa_sec_job *cf;
+ struct dpaa_sec_op_ctx *ctx;
+ struct qm_sg_entry *sg, *out_sg, *in_sg;
+ struct rte_mbuf *mbuf;
+ uint8_t req_segs;
+ uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ ses->iv.offset);
+
+ if (sym->m_dst) {
+ mbuf = sym->m_dst;
+ req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
+ } else {
+ mbuf = sym->m_src;
+ req_segs = mbuf->nb_segs * 2 + 3;
+ }
+
+ if (req_segs > MAX_SG_ENTRIES) {
+ DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
+ MAX_SG_ENTRIES);
+ return NULL;
+ }
+
+ ctx = dpaa_sec_alloc_ctx(ses);
+ if (!ctx)
+ return NULL;
+
+ cf = &ctx->job;
+ ctx->op = op;
+
+ /* output */
+ out_sg = &cf->sg[0];
+ out_sg->extension = 1;
+ out_sg->length = sym->cipher.data.length;
+ qm_sg_entry_set64(out_sg, dpaa_mem_vtop(&cf->sg[2]));
+ cpu_to_hw_sg(out_sg);
+
+ /* 1st seg */
+ sg = &cf->sg[2];
+ qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ sg->length = mbuf->data_len - sym->cipher.data.offset;
+ sg->offset = sym->cipher.data.offset;
+
+ /* Successive segs */
+ mbuf = mbuf->next;
+ while (mbuf) {
+ cpu_to_hw_sg(sg);
+ sg++;
+ qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ sg->length = mbuf->data_len;
+ mbuf = mbuf->next;
+ }
+ sg->final = 1;
+ cpu_to_hw_sg(sg);
+
+ /* input */
+ mbuf = sym->m_src;
+ in_sg = &cf->sg[1];
+ in_sg->extension = 1;
+ in_sg->final = 1;
+ in_sg->length = sym->cipher.data.length + ses->iv.length;
+
+ sg++;
+ qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
+ cpu_to_hw_sg(in_sg);
+
+ /* IV */
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
+ sg->length = ses->iv.length;
+ cpu_to_hw_sg(sg);
+
+ /* 1st seg */
+ sg++;
+ qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ sg->length = mbuf->data_len - sym->cipher.data.offset;
+ sg->offset = sym->cipher.data.offset;
+
+ /* Successive segs */
+ mbuf = mbuf->next;
+ while (mbuf) {
+ cpu_to_hw_sg(sg);
+ sg++;
+ qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ sg->length = mbuf->data_len;
+ mbuf = mbuf->next;
+ }
+ sg->final = 1;
+ cpu_to_hw_sg(sg);
+
+ return cf;
+}
+
+static inline struct dpaa_sec_job *
+build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
+{
+ struct rte_crypto_sym_op *sym = op->sym;
+ struct dpaa_sec_job *cf;
+ struct dpaa_sec_op_ctx *ctx;
+ struct qm_sg_entry *sg;
+ rte_iova_t src_start_addr, dst_start_addr;
+ uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ ses->iv.offset);
+
+ ctx = dpaa_sec_alloc_ctx(ses);
+ if (!ctx)
+ return NULL;
+
+ cf = &ctx->job;
+ ctx->op = op;
+
+ src_start_addr = rte_pktmbuf_iova(sym->m_src);
+
+ if (sym->m_dst)
+ dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
+ else
+ dst_start_addr = src_start_addr;
+
+ /* output */
+ sg = &cf->sg[0];
+ qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
+ sg->length = sym->cipher.data.length + ses->iv.length;
+ cpu_to_hw_sg(sg);
+
+ /* input */
+ sg = &cf->sg[1];
+
+ /* need to extend the input to a compound frame */
+ sg->extension = 1;
+ sg->final = 1;
+ sg->length = sym->cipher.data.length + ses->iv.length;
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
+ cpu_to_hw_sg(sg);
+
+ sg = &cf->sg[2];
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
+ sg->length = ses->iv.length;
+ cpu_to_hw_sg(sg);
+
+ sg++;
+ qm_sg_entry_set64(sg, src_start_addr + sym->cipher.data.offset);
+ sg->length = sym->cipher.data.length;
+ sg->final = 1;
+ cpu_to_hw_sg(sg);
+
+ return cf;
+}
+
+static inline struct dpaa_sec_job *
+build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
+{
+ struct rte_crypto_sym_op *sym = op->sym;
+ struct dpaa_sec_job *cf;
+ struct dpaa_sec_op_ctx *ctx;
+ struct qm_sg_entry *sg, *out_sg, *in_sg;
+ struct rte_mbuf *mbuf;
+ uint8_t req_segs;
+ uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ ses->iv.offset);
+
+ if (sym->m_dst) {
+ mbuf = sym->m_dst;
+ req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
+ } else {
+ mbuf = sym->m_src;
+ req_segs = mbuf->nb_segs * 2 + 4;
+ }
+
+ if (ses->auth_only_len)
+ req_segs++;
+
+ if (req_segs > MAX_SG_ENTRIES) {
+ DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
+ MAX_SG_ENTRIES);
+ return NULL;
+ }
+
+ ctx = dpaa_sec_alloc_ctx(ses);
+ if (!ctx)
+ return NULL;
+
+ cf = &ctx->job;
+ ctx->op = op;
+
+ rte_prefetch0(cf->sg);
+
+ /* output */
+ out_sg = &cf->sg[0];
+ out_sg->extension = 1;
+ if (is_encode(ses))
+ out_sg->length = sym->aead.data.length + ses->auth_only_len
+ + ses->digest_length;
+ else
+ out_sg->length = sym->aead.data.length + ses->auth_only_len;
+
+ /* output sg entries */
+ sg = &cf->sg[2];
+ qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
+ cpu_to_hw_sg(out_sg);
+
+ /* 1st seg */
+ qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ sg->length = mbuf->data_len - sym->aead.data.offset +
+ ses->auth_only_len;
+ sg->offset = sym->aead.data.offset - ses->auth_only_len;
+
+ /* Successive segs */
+ mbuf = mbuf->next;
+ while (mbuf) {
+ cpu_to_hw_sg(sg);
+ sg++;
+ qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ sg->length = mbuf->data_len;
+ mbuf = mbuf->next;
+ }
+ sg->length -= ses->digest_length;
+
+ if (is_encode(ses)) {
+ cpu_to_hw_sg(sg);
+ /* set auth output */
+ sg++;
+ qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
+ sg->length = ses->digest_length;
+ }
+ sg->final = 1;
+ cpu_to_hw_sg(sg);
+
+ /* input */
+ mbuf = sym->m_src;
+ in_sg = &cf->sg[1];
+ in_sg->extension = 1;
+ in_sg->final = 1;
+ if (is_encode(ses))
+ in_sg->length = ses->iv.length + sym->aead.data.length
+ + ses->auth_only_len;
+ else
+ in_sg->length = ses->iv.length + sym->aead.data.length
+ + ses->auth_only_len + ses->digest_length;
+
+ /* input sg entries */
+ sg++;
+ qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
+ cpu_to_hw_sg(in_sg);
+
+ /* 1st seg IV */
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
+ sg->length = ses->iv.length;
+ cpu_to_hw_sg(sg);
+
+ /* 2nd seg auth only */
+ if (ses->auth_only_len) {
+ sg++;
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(sym->aead.aad.data));
+ sg->length = ses->auth_only_len;
+ cpu_to_hw_sg(sg);
+ }
+
+ /* 3rd seg */
+ sg++;
+ qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ sg->length = mbuf->data_len - sym->aead.data.offset;
+ sg->offset = sym->aead.data.offset;
+
+ /* Successive segs */
+ mbuf = mbuf->next;
+ while (mbuf) {
+ cpu_to_hw_sg(sg);
+ sg++;
+ qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ sg->length = mbuf->data_len;
+ mbuf = mbuf->next;
+ }
+
+ if (is_decode(ses)) {
+ cpu_to_hw_sg(sg);
+ sg++;
+ memcpy(ctx->digest, sym->aead.digest.data,
+ ses->digest_length);
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
+ sg->length = ses->digest_length;
+ }
+ sg->final = 1;
+ cpu_to_hw_sg(sg);
+
+ return cf;
+}
+
+static inline struct dpaa_sec_job *
+build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
+{
+ struct rte_crypto_sym_op *sym = op->sym;
+ struct dpaa_sec_job *cf;
+ struct dpaa_sec_op_ctx *ctx;
+ struct qm_sg_entry *sg;
+ uint32_t length = 0;
+ rte_iova_t src_start_addr, dst_start_addr;
+ uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ ses->iv.offset);
+
+ src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
+
+ if (sym->m_dst)
+ dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
+ else
+ dst_start_addr = src_start_addr;
+
+ ctx = dpaa_sec_alloc_ctx(ses);
+ if (!ctx)
+ return NULL;
+
+ cf = &ctx->job;
+ ctx->op = op;
+
+ /* input */
+ rte_prefetch0(cf->sg);
+ sg = &cf->sg[2];
+ qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
+ if (is_encode(ses)) {
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
+ sg->length = ses->iv.length;
+ length += sg->length;
+ cpu_to_hw_sg(sg);
+
+ sg++;
+ if (ses->auth_only_len) {
+ qm_sg_entry_set64(sg,
+ dpaa_mem_vtop(sym->aead.aad.data));
+ sg->length = ses->auth_only_len;
+ length += sg->length;
+ cpu_to_hw_sg(sg);
+ sg++;
+ }
+ qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
+ sg->length = sym->aead.data.length;
+ length += sg->length;
+ sg->final = 1;
+ cpu_to_hw_sg(sg);
+ } else {
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
+ sg->length = ses->iv.length;
+ length += sg->length;
+ cpu_to_hw_sg(sg);
+
+ sg++;
+ if (ses->auth_only_len) {
+ qm_sg_entry_set64(sg,
+ dpaa_mem_vtop(sym->aead.aad.data));
+ sg->length = ses->auth_only_len;
+ length += sg->length;
+ cpu_to_hw_sg(sg);
+ sg++;
+ }
+ qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
+ sg->length = sym->aead.data.length;
+ length += sg->length;
+ cpu_to_hw_sg(sg);
+
+ memcpy(ctx->digest, sym->aead.digest.data,
+ ses->digest_length);
+ sg++;
+
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
+ sg->length = ses->digest_length;
+ length += sg->length;
+ sg->final = 1;
+ cpu_to_hw_sg(sg);
+ }
+ /* input compound frame */
+ cf->sg[1].length = length;
+ cf->sg[1].extension = 1;
+ cf->sg[1].final = 1;
+ cpu_to_hw_sg(&cf->sg[1]);
+
+ /* output */
+ sg++;
+ qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
+ qm_sg_entry_set64(sg,
+ dst_start_addr + sym->aead.data.offset - ses->auth_only_len);
+ sg->length = sym->aead.data.length + ses->auth_only_len;
+ length = sg->length;
+ if (is_encode(ses)) {
+ cpu_to_hw_sg(sg);
+ /* set auth output */
+ sg++;
+ qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
+ sg->length = ses->digest_length;
+ length += sg->length;
+ }
+ sg->final = 1;
+ cpu_to_hw_sg(sg);
+
+ /* output compound frame */
+ cf->sg[0].length = length;
+ cf->sg[0].extension = 1;
+ cpu_to_hw_sg(&cf->sg[0]);
+
+ return cf;
+}
+
+static inline struct dpaa_sec_job *
+build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
+{
+ struct rte_crypto_sym_op *sym = op->sym;
+ struct dpaa_sec_job *cf;
+ struct dpaa_sec_op_ctx *ctx;
+ struct qm_sg_entry *sg, *out_sg, *in_sg;
+ struct rte_mbuf *mbuf;
+ uint8_t req_segs;
+ uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ ses->iv.offset);
+
+ if (sym->m_dst) {
+ mbuf = sym->m_dst;
+ req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
+ } else {
+ mbuf = sym->m_src;
+ req_segs = mbuf->nb_segs * 2 + 4;
+ }
+
+ if (req_segs > MAX_SG_ENTRIES) {
+ DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
+ MAX_SG_ENTRIES);
+ return NULL;
+ }
+
+ ctx = dpaa_sec_alloc_ctx(ses);
+ if (!ctx)
+ return NULL;
+
+ cf = &ctx->job;
+ ctx->op = op;
+
+ rte_prefetch0(cf->sg);
+
+ /* output */
+ out_sg = &cf->sg[0];
+ out_sg->extension = 1;
+ if (is_encode(ses))
+ out_sg->length = sym->auth.data.length + ses->digest_length;
+ else
+ out_sg->length = sym->auth.data.length;
+
+ /* output sg entries */
+ sg = &cf->sg[2];
+ qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
+ cpu_to_hw_sg(out_sg);
+
+ /* 1st seg */
+ qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ sg->length = mbuf->data_len - sym->auth.data.offset;
+ sg->offset = sym->auth.data.offset;
+
+ /* Successive segs */
+ mbuf = mbuf->next;
+ while (mbuf) {
+ cpu_to_hw_sg(sg);
+ sg++;
+ qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ sg->length = mbuf->data_len;
+ mbuf = mbuf->next;
+ }
+ sg->length -= ses->digest_length;
+
+ if (is_encode(ses)) {
+ cpu_to_hw_sg(sg);
+ /* set auth output */
+ sg++;
+ qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
+ sg->length = ses->digest_length;
+ }
+ sg->final = 1;
+ cpu_to_hw_sg(sg);
+
+ /* input */
+ mbuf = sym->m_src;
+ in_sg = &cf->sg[1];
+ in_sg->extension = 1;
+ in_sg->final = 1;
+ if (is_encode(ses))
+ in_sg->length = ses->iv.length + sym->auth.data.length;
+ else
+ in_sg->length = ses->iv.length + sym->auth.data.length
+ + ses->digest_length;
+
+ /* input sg entries */
+ sg++;
+ qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
+ cpu_to_hw_sg(in_sg);
+
+ /* 1st seg IV */
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
+ sg->length = ses->iv.length;
+ cpu_to_hw_sg(sg);
+
+ /* 2nd seg */
+ sg++;
+ qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ sg->length = mbuf->data_len - sym->auth.data.offset;
+ sg->offset = sym->auth.data.offset;
+
+ /* Successive segs */
+ mbuf = mbuf->next;
+ while (mbuf) {
+ cpu_to_hw_sg(sg);
+ sg++;
+ qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ sg->length = mbuf->data_len;
+ mbuf = mbuf->next;
+ }
+
+ sg->length -= ses->digest_length;
+ if (is_decode(ses)) {
+ cpu_to_hw_sg(sg);
+ sg++;
+ memcpy(ctx->digest, sym->auth.digest.data,
+ ses->digest_length);
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
+ sg->length = ses->digest_length;
+ }
+ sg->final = 1;
+ cpu_to_hw_sg(sg);
+
+ return cf;
+}
+
+static inline struct dpaa_sec_job *
+build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
+{
+ struct rte_crypto_sym_op *sym = op->sym;
+ struct dpaa_sec_job *cf;
+ struct dpaa_sec_op_ctx *ctx;
+ struct qm_sg_entry *sg;
+ rte_iova_t src_start_addr, dst_start_addr;
+ uint32_t length = 0;
+ uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ ses->iv.offset);
+
+ src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
+ if (sym->m_dst)
+ dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
+ else
+ dst_start_addr = src_start_addr;
+
+ ctx = dpaa_sec_alloc_ctx(ses);
+ if (!ctx)
+ return NULL;
+
+ cf = &ctx->job;
+ ctx->op = op;
+
+ /* input */
+ rte_prefetch0(cf->sg);
+ sg = &cf->sg[2];
+ qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
+ if (is_encode(ses)) {
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
+ sg->length = ses->iv.length;
+ length += sg->length;
+ cpu_to_hw_sg(sg);
+
+ sg++;
+ qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
+ sg->length = sym->auth.data.length;
+ length += sg->length;
+ sg->final = 1;
+ cpu_to_hw_sg(sg);
+ } else {
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
+ sg->length = ses->iv.length;
+ length += sg->length;
+ cpu_to_hw_sg(sg);
+
+ sg++;
+
+ qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
+ sg->length = sym->auth.data.length;
+ length += sg->length;
+ cpu_to_hw_sg(sg);
+
+ memcpy(ctx->digest, sym->auth.digest.data,
+ ses->digest_length);
+ sg++;
+
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
+ sg->length = ses->digest_length;
+ length += sg->length;
+ sg->final = 1;
+ cpu_to_hw_sg(sg);
+ }
+ /* input compound frame */
+ cf->sg[1].length = length;
+ cf->sg[1].extension = 1;
+ cf->sg[1].final = 1;
+ cpu_to_hw_sg(&cf->sg[1]);
+
+ /* output */
+ sg++;
+ qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
+ qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
+ sg->length = sym->cipher.data.length;
+ length = sg->length;
+ if (is_encode(ses)) {
+ cpu_to_hw_sg(sg);
+ /* set auth output */
+ sg++;
+ qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
+ sg->length = ses->digest_length;
+ length += sg->length;
+ }
+ sg->final = 1;
+ cpu_to_hw_sg(sg);
+
+ /* output compound frame */
+ cf->sg[0].length = length;
+ cf->sg[0].extension = 1;
+ cpu_to_hw_sg(&cf->sg[0]);
+
+ return cf;
+}
+
+static inline struct dpaa_sec_job *
+build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
+{
+ struct rte_crypto_sym_op *sym = op->sym;
+ struct dpaa_sec_job *cf;
+ struct dpaa_sec_op_ctx *ctx;
+ struct qm_sg_entry *sg;
+ phys_addr_t src_start_addr, dst_start_addr;
+
+ ctx = dpaa_sec_alloc_ctx(ses);
+ if (!ctx)
+ return NULL;
+ cf = &ctx->job;
+ ctx->op = op;
+
+ src_start_addr = rte_pktmbuf_mtophys(sym->m_src);
+
+ if (sym->m_dst)
+ dst_start_addr = rte_pktmbuf_mtophys(sym->m_dst);
+ else
+ dst_start_addr = src_start_addr;
+
+ /* input */
+ sg = &cf->sg[1];
+ qm_sg_entry_set64(sg, src_start_addr);
+ sg->length = sym->m_src->pkt_len;
+ sg->final = 1;
+ cpu_to_hw_sg(sg);
+
+ sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
+ /* output */
+ sg = &cf->sg[0];
+ qm_sg_entry_set64(sg, dst_start_addr);
+ sg->length = sym->m_src->buf_len - sym->m_src->data_off;
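+ /* The whole remaining buffer is offered as output, presumably because
+ * IPsec encapsulation can grow the packet (outer IP header plus ESP
+ * trailer and ICV).
+ */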
+ cpu_to_hw_sg(sg);
+
+ return cf;
+}
+
+static uint16_t
+dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ /* Function to transmit the frames to the given device and queue pair */
+ uint32_t loop;
+ struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
+ uint16_t num_tx = 0;
+ uint16_t nb_ops_orig = nb_ops;
+ struct qm_fd fds[DPAA_SEC_BURST], *fd;
+ uint32_t frames_to_send;
+ struct rte_crypto_op *op;
+ struct dpaa_sec_job *cf;
+ dpaa_sec_session *ses;
+ uint32_t auth_only_len;
+ struct qman_fq *inq[DPAA_SEC_BURST];
+
+ while (nb_ops) {
+ frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
+ DPAA_SEC_BURST : nb_ops;
+ for (loop = 0; loop < frames_to_send; loop++) {
+ op = *(ops++);
+ switch (op->sess_type) {
+ case RTE_CRYPTO_OP_WITH_SESSION:
+ ses = (dpaa_sec_session *)
+ get_sym_session_private_data(
+ op->sym->session,
+ cryptodev_driver_id);
+ break;
+ case RTE_CRYPTO_OP_SECURITY_SESSION:
+ ses = (dpaa_sec_session *)
+ get_sec_session_private_data(
+ op->sym->sec_session);
+ break;
+ default:
+ DPAA_SEC_DP_ERR(
+ "sessionless crypto op not supported");
+ frames_to_send = loop;
+ nb_ops = loop;
+ goto send_pkts;
+ }
+ if (unlikely(!ses->qp || ses->qp != qp)) {
+ DPAA_SEC_DP_ERR("sess->qp - %p qp %p",
+ ses->qp, qp);
+ if (dpaa_sec_attach_sess_q(qp, ses)) {
+ frames_to_send = loop;
+ nb_ops = loop;
+ goto send_pkts;
+ }
+ }
+
+ auth_only_len = op->sym->auth.data.length -
+ op->sym->cipher.data.length;
+ if (rte_pktmbuf_is_contiguous(op->sym->m_src)) {
+ if (is_auth_only(ses)) {
+ cf = build_auth_only(op, ses);
+ } else if (is_cipher_only(ses)) {
+ cf = build_cipher_only(op, ses);
+ } else if (is_aead(ses)) {
+ cf = build_cipher_auth_gcm(op, ses);
+ auth_only_len = ses->auth_only_len;
+ } else if (is_auth_cipher(ses)) {
+ cf = build_cipher_auth(op, ses);
+ } else if (is_proto_ipsec(ses)) {
+ cf = build_proto(op, ses);
+ } else {
+ DPAA_SEC_DP_ERR("operation not supported");
+ frames_to_send = loop;
+ nb_ops = loop;
+ goto send_pkts;
+ }
+ } else {
+ if (is_auth_only(ses)) {
+ cf = build_auth_only_sg(op, ses);
+ } else if (is_cipher_only(ses)) {
+ cf = build_cipher_only_sg(op, ses);
+ } else if (is_aead(ses)) {
+ cf = build_cipher_auth_gcm_sg(op, ses);
+ auth_only_len = ses->auth_only_len;
+ } else if (is_auth_cipher(ses)) {
+ cf = build_cipher_auth_sg(op, ses);
+ } else {
+ DPAA_SEC_DP_ERR("operation not supported");
+ frames_to_send = loop;
+ nb_ops = loop;
+ goto send_pkts;
+ }
+ }
+ if (unlikely(!cf)) {
+ frames_to_send = loop;
+ nb_ops = loop;
+ goto send_pkts;
+ }
+
+ fd = &fds[loop];
+ inq[loop] = ses->inq;
+ fd->opaque_addr = 0;
+ fd->cmd = 0;
+ qm_fd_addr_set64(fd, dpaa_mem_vtop(cf->sg));
+ fd->_format1 = qm_fd_compound;
+ fd->length29 = 2 * sizeof(struct qm_sg_entry);
+ /* Auth_only_len is set as 0 in descriptor and it is
+ * overwritten here in the fd.cmd which will update
+ * the DPOVRD reg.
+ */
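+ /* Illustrative encoding (assuming the note above): auth_only_len = 16
+ * gives fd->cmd = 0x80000000 | 16 = 0x80000010, where the low bits
+ * carry the AAD length passed to DPOVRD.
+ */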
+ if (auth_only_len)
+ fd->cmd = 0x80000000 | auth_only_len;
+
+ }
+send_pkts:
+ loop = 0;
+ while (loop < frames_to_send) {
+ loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
+ frames_to_send - loop);
+ }
+ nb_ops -= frames_to_send;
+ num_tx += frames_to_send;
+ }
+
+ dpaa_qp->tx_pkts += num_tx;
+ dpaa_qp->tx_errs += nb_ops_orig - num_tx;
+
+ return num_tx;
+}
+
+static uint16_t
+dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ uint16_t num_rx;
+ struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
+
+ num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);
+
+ dpaa_qp->rx_pkts += num_rx;
+ dpaa_qp->rx_errs += nb_ops - num_rx;
+
+ DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
+
+ return num_rx;
+}
+
+/** Release queue pair */
+static int
+dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
+ uint16_t qp_id)
+{
+ struct dpaa_sec_dev_private *internals;
+ struct dpaa_sec_qp *qp = NULL;
+
+ PMD_INIT_FUNC_TRACE();
+
+ DPAA_SEC_DEBUG("dev =%p, queue =%d", dev, qp_id);
+
+ internals = dev->data->dev_private;
+ if (qp_id >= internals->max_nb_queue_pairs) {
+ DPAA_SEC_ERR("Max supported qpid %d",
+ internals->max_nb_queue_pairs);
+ return -EINVAL;
+ }
+
+ qp = &internals->qps[qp_id];
+ qp->internals = NULL;
+ dev->data->queue_pairs[qp_id] = NULL;
+
+ return 0;
+}
+
+/** Setup a queue pair */
+static int
+dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+ __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
+ __rte_unused int socket_id,
+ __rte_unused struct rte_mempool *session_pool)
+{
+ struct dpaa_sec_dev_private *internals;
+ struct dpaa_sec_qp *qp = NULL;
+
+ DPAA_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);
+
+ internals = dev->data->dev_private;
+ if (qp_id >= internals->max_nb_queue_pairs) {
+ DPAA_SEC_ERR("Max supported qpid %d",
+ internals->max_nb_queue_pairs);
+ return -EINVAL;
+ }
+
+ qp = &internals->qps[qp_id];
+ qp->internals = internals;
+ dev->data->queue_pairs[qp_id] = qp;
+
+ return 0;
+}
+
+/** Return the number of allocated queue pairs */
+static uint32_t
+dpaa_sec_queue_pair_count(struct rte_cryptodev *dev)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ return dev->data->nb_queue_pairs;
+}
+
+/** Returns the size of session structure */
+static unsigned int
+dpaa_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ return sizeof(dpaa_sec_session);
+}
+
+static int
+dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
+ struct rte_crypto_sym_xform *xform,
+ dpaa_sec_session *session)
+{
+ session->cipher_alg = xform->cipher.algo;
+ session->iv.length = xform->cipher.iv.length;
+ session->iv.offset = xform->cipher.iv.offset;
+ session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
+ DPAA_SEC_ERR("No Memory for cipher key");
+ return -ENOMEM;
+ }
+ session->cipher_key.length = xform->cipher.key.length;
+
+ memcpy(session->cipher_key.data, xform->cipher.key.data,
+ xform->cipher.key.length);
+ session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
+ DIR_ENC : DIR_DEC;
+
+ return 0;
+}
+
+static int
+dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
+ struct rte_crypto_sym_xform *xform,
+ dpaa_sec_session *session)
+{
+ session->auth_alg = xform->auth.algo;
+ session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
+ DPAA_SEC_ERR("No Memory for auth key");
+ return -ENOMEM;
+ }
+ session->auth_key.length = xform->auth.key.length;
+ session->digest_length = xform->auth.digest_length;
+
+ memcpy(session->auth_key.data, xform->auth.key.data,
+ xform->auth.key.length);
+ session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
+ DIR_ENC : DIR_DEC;
+
+ return 0;
+}
+
+static int
+dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
+ struct rte_crypto_sym_xform *xform,
+ dpaa_sec_session *session)
+{
+ session->aead_alg = xform->aead.algo;
+ session->iv.length = xform->aead.iv.length;
+ session->iv.offset = xform->aead.iv.offset;
+ session->auth_only_len = xform->aead.aad_length;
+ session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
+ DPAA_SEC_ERR("No Memory for aead key\n");
+ return -ENOMEM;
+ }
+ session->aead_key.length = xform->aead.key.length;
+ session->digest_length = xform->aead.digest_length;
+
+ memcpy(session->aead_key.data, xform->aead.key.data,
+ xform->aead.key.length);
+ session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
+ DIR_ENC : DIR_DEC;
+
+ return 0;
+}
+
+static struct qman_fq *
+dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
+{
+ unsigned int i;
+
+ for (i = 0; i < qi->max_nb_sessions; i++) {
+ if (qi->inq_attach[i] == 0) {
+ qi->inq_attach[i] = 1;
+ return &qi->inq[i];
+ }
+ }
+ DPAA_SEC_WARN("All sessions in use %x", qi->max_nb_sessions);
+
+ return NULL;
+}
+
+static int
+dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
+{
+ unsigned int i;
+
+ for (i = 0; i < qi->max_nb_sessions; i++) {
+ if (&qi->inq[i] == fq) {
+ qman_retire_fq(fq, NULL);
+ qman_oos_fq(fq);
+ qi->inq_attach[i] = 0;
+ return 0;
+ }
+ }
+ return -1;
+}
+
+static int
+dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
+{
+ int ret;
+
+ sess->qp = qp;
+ ret = dpaa_sec_prep_cdb(sess);
+ if (ret) {
+ DPAA_SEC_ERR("Unable to prepare sec cdb");
+ return -1;
+ }
+ if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
+ ret = rte_dpaa_portal_init((void *)0);
+ if (ret) {
+ DPAA_SEC_ERR("Failure in affining portal");
+ return ret;
+ }
+ }
+ ret = dpaa_sec_init_rx(sess->inq, dpaa_mem_vtop(&sess->cdb),
+ qman_fq_fqid(&qp->outq));
+ if (ret)
+ DPAA_SEC_ERR("Unable to init sec queue");
+
+ return ret;
+}
+
+static int
+dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform, void *sess)
+{
+ struct dpaa_sec_dev_private *internals = dev->data->dev_private;
+ dpaa_sec_session *session = sess;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (unlikely(sess == NULL)) {
+ DPAA_SEC_ERR("invalid session struct");
+ return -EINVAL;
+ }
+
+ /* Default IV length = 0 */
+ session->iv.length = 0;
+
+ /* Cipher Only */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
+ session->auth_alg = RTE_CRYPTO_AUTH_NULL;
+ dpaa_sec_cipher_init(dev, xform, session);
+
+ /* Authentication Only */
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next == NULL) {
+ session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
+ dpaa_sec_auth_init(dev, xform, session);
+
+ /* Cipher then Authenticate */
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
+ dpaa_sec_cipher_init(dev, xform, session);
+ dpaa_sec_auth_init(dev, xform->next, session);
+ } else {
+ DPAA_SEC_ERR("Not supported: Cipher decrypt then Auth");
+ return -EINVAL;
+ }
+
+ /* Authenticate then Cipher */
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
+ dpaa_sec_auth_init(dev, xform, session);
+ dpaa_sec_cipher_init(dev, xform->next, session);
+ } else {
+ DPAA_SEC_ERR("Not supported: Auth then Cipher");
+ return -EINVAL;
+ }
+
+ /* AEAD operation for AES-GCM kind of Algorithms */
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
+ xform->next == NULL) {
+ dpaa_sec_aead_init(dev, xform, session);
+
+ } else {
+ DPAA_SEC_ERR("Invalid crypto type");
+ return -EINVAL;
+ }
+ session->ctx_pool = internals->ctx_pool;
+ session->inq = dpaa_sec_attach_rxq(internals);
+ if (session->inq == NULL) {
+ DPAA_SEC_ERR("unable to attach sec queue");
+ goto err1;
+ }
+
+ return 0;
+
+err1:
+ rte_free(session->cipher_key.data);
+ rte_free(session->auth_key.data);
+ memset(session, 0, sizeof(dpaa_sec_session));
+
+ return -EINVAL;
+}
+
+static int
+dpaa_sec_sym_session_configure(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mempool)
+{
+ void *sess_private_data;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (rte_mempool_get(mempool, &sess_private_data)) {
+ DPAA_SEC_ERR("Couldn't get object from session mempool");
+ return -ENOMEM;
+ }
+
+ ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
+ if (ret != 0) {
+ DPAA_SEC_ERR("failed to configure session parameters");
+
+ /* Return session to mempool */
+ rte_mempool_put(mempool, sess_private_data);
+ return ret;
+ }
+
+ set_sym_session_private_data(sess, dev->driver_id,
+ sess_private_data);
+
+
+ return 0;
+}
+
+/** Clear the memory of session so it doesn't leave key material behind */
+static void
+dpaa_sec_sym_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
+{
+ struct dpaa_sec_dev_private *qi = dev->data->dev_private;
+ uint8_t index = dev->driver_id;
+ void *sess_priv = get_sym_session_private_data(sess, index);
+
+ PMD_INIT_FUNC_TRACE();
+
+ dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
+
+ if (sess_priv) {
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+
+ if (s->inq)
+ dpaa_sec_detach_rxq(qi, s->inq);
+ rte_free(s->cipher_key.data);
+ rte_free(s->auth_key.data);
+ memset(s, 0, sizeof(dpaa_sec_session));
+ set_sym_session_private_data(sess, index, NULL);
+ rte_mempool_put(sess_mp, sess_priv);
+ }
+}
+
+static int
+dpaa_sec_set_ipsec_session(struct rte_cryptodev *dev,
+ struct rte_security_session_conf *conf,
+ void *sess)
+{
+ struct dpaa_sec_dev_private *internals = dev->data->dev_private;
+ struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
+ struct rte_crypto_auth_xform *auth_xform;
+ struct rte_crypto_cipher_xform *cipher_xform;
+ dpaa_sec_session *session = (dpaa_sec_session *)sess;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
+ cipher_xform = &conf->crypto_xform->cipher;
+ auth_xform = &conf->crypto_xform->next->auth;
+ } else {
+ auth_xform = &conf->crypto_xform->auth;
+ cipher_xform = &conf->crypto_xform->next->cipher;
+ }
+ session->proto_alg = conf->protocol;
+ session->cipher_key.data = rte_zmalloc(NULL,
+ cipher_xform->key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->cipher_key.data == NULL &&
+ cipher_xform->key.length > 0) {
+ DPAA_SEC_ERR("No Memory for cipher key");
+ return -ENOMEM;
+ }
+
+ session->cipher_key.length = cipher_xform->key.length;
+ session->auth_key.data = rte_zmalloc(NULL,
+ auth_xform->key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->auth_key.data == NULL &&
+ auth_xform->key.length > 0) {
+ DPAA_SEC_ERR("No Memory for auth key");
+ rte_free(session->cipher_key.data);
+ return -ENOMEM;
+ }
+ session->auth_key.length = auth_xform->key.length;
+ memcpy(session->cipher_key.data, cipher_xform->key.data,
+ cipher_xform->key.length);
+ memcpy(session->auth_key.data, auth_xform->key.data,
+ auth_xform->key.length);
+
+ switch (auth_xform->algo) {
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_MD5_HMAC:
+ session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA384_HMAC:
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA512_HMAC:
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_AES_CMAC:
+ session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
+ break;
+ case RTE_CRYPTO_AUTH_NULL:
+ session->auth_alg = RTE_CRYPTO_AUTH_NULL;
+ break;
+ case RTE_CRYPTO_AUTH_SHA224_HMAC:
+ case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+ case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
+ case RTE_CRYPTO_AUTH_SHA1:
+ case RTE_CRYPTO_AUTH_SHA256:
+ case RTE_CRYPTO_AUTH_SHA512:
+ case RTE_CRYPTO_AUTH_SHA224:
+ case RTE_CRYPTO_AUTH_SHA384:
+ case RTE_CRYPTO_AUTH_MD5:
+ case RTE_CRYPTO_AUTH_AES_GMAC:
+ case RTE_CRYPTO_AUTH_KASUMI_F9:
+ case RTE_CRYPTO_AUTH_AES_CBC_MAC:
+ case RTE_CRYPTO_AUTH_ZUC_EIA3:
+ DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
+ auth_xform->algo);
+ goto out;
+ default:
+ DPAA_SEC_ERR("Crypto: Undefined Auth specified %u",
+ auth_xform->algo);
+ goto out;
+ }
+
+ switch (cipher_xform->algo) {
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
+ break;
+ case RTE_CRYPTO_CIPHER_NULL:
+ case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
+ case RTE_CRYPTO_CIPHER_3DES_ECB:
+ case RTE_CRYPTO_CIPHER_AES_ECB:
+ case RTE_CRYPTO_CIPHER_KASUMI_F8:
+ DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
+ cipher_xform->algo);
+ goto out;
+ default:
+ DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
+ cipher_xform->algo);
+ goto out;
+ }
+
+ if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
+ memset(&session->encap_pdb, 0, sizeof(struct ipsec_encap_pdb) +
+ sizeof(session->ip4_hdr));
+ session->ip4_hdr.ip_v = IPVERSION;
+ session->ip4_hdr.ip_hl = 5;
+ session->ip4_hdr.ip_len = rte_cpu_to_be_16(
+ sizeof(session->ip4_hdr));
+ session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
+ session->ip4_hdr.ip_id = 0;
+ session->ip4_hdr.ip_off = 0;
+ session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
+ session->ip4_hdr.ip_p = (ipsec_xform->proto ==
+ RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? IPPROTO_ESP
+ : IPPROTO_AH;
+ session->ip4_hdr.ip_sum = 0;
+ session->ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
+ session->ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
+ session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
+ (void *)&session->ip4_hdr,
+ sizeof(struct ip));
+
+ session->encap_pdb.options =
+ (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
+ PDBOPTS_ESP_OIHI_PDB_INL |
+ PDBOPTS_ESP_IVSRC |
+ PDBHMO_ESP_ENCAP_DTTL;
+ session->encap_pdb.spi = ipsec_xform->spi;
+ session->encap_pdb.ip_hdr_len = sizeof(struct ip);
+
+ session->dir = DIR_ENC;
+ } else if (ipsec_xform->direction ==
+ RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
+ memset(&session->decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
+ session->decap_pdb.options = sizeof(struct ip) << 16;
+ session->dir = DIR_DEC;
+ } else
+ goto out;
+ session->ctx_pool = internals->ctx_pool;
+ session->inq = dpaa_sec_attach_rxq(internals);
+ if (session->inq == NULL) {
+ DPAA_SEC_ERR("unable to attach sec queue");
+ goto out;
+ }
+
+
+ return 0;
+out:
+ rte_free(session->auth_key.data);
+ rte_free(session->cipher_key.data);
+ memset(session, 0, sizeof(dpaa_sec_session));
+ return -1;
+}
+
+static int
+dpaa_sec_security_session_create(void *dev,
+ struct rte_security_session_conf *conf,
+ struct rte_security_session *sess,
+ struct rte_mempool *mempool)
+{
+ void *sess_private_data;
+ struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
+ int ret;
+
+ if (rte_mempool_get(mempool, &sess_private_data)) {
+ DPAA_SEC_ERR("Couldn't get object from session mempool");
+ return -ENOMEM;
+ }
+
+ switch (conf->protocol) {
+ case RTE_SECURITY_PROTOCOL_IPSEC:
+ ret = dpaa_sec_set_ipsec_session(cdev, conf,
+ sess_private_data);
+ break;
+ case RTE_SECURITY_PROTOCOL_MACSEC:
+ return -ENOTSUP;
+ default:
+ return -EINVAL;
+ }
+ if (ret != 0) {
+ DPAA_SEC_ERR("failed to configure session parameters");
+ /* Return session to mempool */
+ rte_mempool_put(mempool, sess_private_data);
+ return ret;
+ }
+
+ set_sec_session_private_data(sess, sess_private_data);
+
+ return ret;
+}
+
+/** Clear the memory of session so it doesn't leave key material behind */
+static int
+dpaa_sec_security_session_destroy(void *dev __rte_unused,
+ struct rte_security_session *sess)
+{
+ PMD_INIT_FUNC_TRACE();
+ void *sess_priv = get_sec_session_private_data(sess);
+
+ dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
+
+ if (sess_priv) {
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+
+ rte_free(s->cipher_key.data);
+ rte_free(s->auth_key.data);
+ memset(s, 0, sizeof(dpaa_sec_session));
+ set_sec_session_private_data(sess, NULL);
+ rte_mempool_put(sess_mp, sess_priv);
+ }
+ return 0;
+}
+
+
+static int
+dpaa_sec_dev_configure(struct rte_cryptodev *dev,
+ struct rte_cryptodev_config *config __rte_unused)
+{
+
+ char str[20];
+ struct dpaa_sec_dev_private *internals;
+
+ PMD_INIT_FUNC_TRACE();
+
+ internals = dev->data->dev_private;
+ snprintf(str, sizeof(str), "ctx_pool_%d", dev->data->dev_id);
+ if (!internals->ctx_pool) {
+ internals->ctx_pool = rte_mempool_create((const char *)str,
+ CTX_POOL_NUM_BUFS,
+ CTX_POOL_BUF_SIZE,
+ CTX_POOL_CACHE_SIZE, 0,
+ NULL, NULL, NULL, NULL,
+ SOCKET_ID_ANY, 0);
+ if (!internals->ctx_pool) {
+ DPAA_SEC_ERR("%s create failed\n", str);
+ return -ENOMEM;
+ }
+ } else
+ DPAA_SEC_INFO("mempool already created for dev_id : %d",
+ dev->data->dev_id);
+
+ return 0;
+}
+
+static int
+dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+ return 0;
+}
+
+static void
+dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+}
+
+static int
+dpaa_sec_dev_close(struct rte_cryptodev *dev)
+{
+ struct dpaa_sec_dev_private *internals;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (dev == NULL)
+ return -ENODEV;
+
+ internals = dev->data->dev_private;
+ rte_mempool_free(internals->ctx_pool);
+ internals->ctx_pool = NULL;
+
+ return 0;
+}
+
+static void
+dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *info)
+{
+ struct dpaa_sec_dev_private *internals = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+ if (info != NULL) {
+ info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
+ info->feature_flags = dev->feature_flags;
+ info->capabilities = dpaa_sec_capabilities;
+ info->sym.max_nb_sessions = internals->max_nb_sessions;
+ info->driver_id = cryptodev_driver_id;
+ }
+}
+
+static struct rte_cryptodev_ops crypto_ops = {
+ .dev_configure = dpaa_sec_dev_configure,
+ .dev_start = dpaa_sec_dev_start,
+ .dev_stop = dpaa_sec_dev_stop,
+ .dev_close = dpaa_sec_dev_close,
+ .dev_infos_get = dpaa_sec_dev_infos_get,
+ .queue_pair_setup = dpaa_sec_queue_pair_setup,
+ .queue_pair_release = dpaa_sec_queue_pair_release,
+ .queue_pair_count = dpaa_sec_queue_pair_count,
+ .sym_session_get_size = dpaa_sec_sym_session_get_size,
+ .sym_session_configure = dpaa_sec_sym_session_configure,
+ .sym_session_clear = dpaa_sec_sym_session_clear
+};
+
+static const struct rte_security_capability *
+dpaa_sec_capabilities_get(void *device __rte_unused)
+{
+ return dpaa_sec_security_cap;
+}
+
+struct rte_security_ops dpaa_sec_security_ops = {
+ .session_create = dpaa_sec_security_session_create,
+ .session_update = NULL,
+ .session_stats_get = NULL,
+ .session_destroy = dpaa_sec_security_session_destroy,
+ .set_pkt_metadata = NULL,
+ .capabilities_get = dpaa_sec_capabilities_get
+};
+
+static int
+dpaa_sec_uninit(struct rte_cryptodev *dev)
+{
+ struct dpaa_sec_dev_private *internals;
+
+ if (dev == NULL)
+ return -ENODEV;
+
+ internals = dev->data->dev_private;
+ rte_free(dev->security_ctx);
+
+ /* In case close has been called, internals->ctx_pool would be NULL */
+ rte_mempool_free(internals->ctx_pool);
+ rte_free(internals);
+
+ DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u",
+ dev->data->name, rte_socket_id());
+
+ return 0;
+}
+
+static int
+dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
+{
+ struct dpaa_sec_dev_private *internals;
+ struct rte_security_ctx *security_instance;
+ struct dpaa_sec_qp *qp;
+ uint32_t i, flags;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ cryptodev->driver_id = cryptodev_driver_id;
+ cryptodev->dev_ops = &crypto_ops;
+
+ cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
+ cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
+ cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_HW_ACCELERATED |
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+ RTE_CRYPTODEV_FF_SECURITY |
+ RTE_CRYPTODEV_FF_IN_PLACE_SGL |
+ RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
+ RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
+ RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
+ RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
+
+ internals = cryptodev->data->dev_private;
+ internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
+ internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;
+
+ /*
+ * For secondary processes, we don't initialise any further as the
+ * primary has already done this work.
+ */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ DPAA_SEC_WARN("Device already initialized by primary process");
+ return 0;
+ }
+
+ /* Initialize security_ctx only for primary process*/
+ security_instance = rte_malloc("rte_security_instances_ops",
+ sizeof(struct rte_security_ctx), 0);
+ if (security_instance == NULL)
+ return -ENOMEM;
+ security_instance->device = (void *)cryptodev;
+ security_instance->ops = &dpaa_sec_security_ops;
+ security_instance->sess_cnt = 0;
+ cryptodev->security_ctx = security_instance;
+
+ for (i = 0; i < internals->max_nb_queue_pairs; i++) {
+ /* init qman fq for queue pair */
+ qp = &internals->qps[i];
+ ret = dpaa_sec_init_tx(&qp->outq);
+ if (ret) {
+ DPAA_SEC_ERR("config tx of queue pair %d failed", i);
+ goto init_error;
+ }
+ }
+
+ flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
+ QMAN_FQ_FLAG_TO_DCPORTAL;
+ for (i = 0; i < internals->max_nb_sessions; i++) {
+ /* create rx qman fq for sessions*/
+ ret = qman_create_fq(0, flags, &internals->inq[i]);
+ if (unlikely(ret != 0)) {
+ DPAA_SEC_ERR("sec qman_create_fq failed");
+ goto init_error;
+ }
+ }
+
+ RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name);
+ return 0;
+
+init_error:
+ DPAA_SEC_ERR("driver %s: create failed\n", cryptodev->data->name);
+
+ dpaa_sec_uninit(cryptodev);
+ return -EFAULT;
+}
+
+static int
+cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv,
+ struct rte_dpaa_device *dpaa_dev)
+{
+ struct rte_cryptodev *cryptodev;
+ char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+
+ int retval;
+
+ snprintf(cryptodev_name, sizeof(cryptodev_name), "dpaa_sec-%d", dpaa_dev->id.dev_id);
+
+ cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
+ if (cryptodev == NULL)
+ return -ENOMEM;
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ cryptodev->data->dev_private = rte_zmalloc_socket(
+ "cryptodev private structure",
+ sizeof(struct dpaa_sec_dev_private),
+ RTE_CACHE_LINE_SIZE,
+ rte_socket_id());
+
+ if (cryptodev->data->dev_private == NULL)
+ rte_panic("Cannot allocate memzone for private "
+ "device data");
+ }
+
+ dpaa_dev->crypto_dev = cryptodev;
+ cryptodev->device = &dpaa_dev->device;
+ cryptodev->device->driver = &dpaa_drv->driver;
+
+ /* init user callbacks */
+ TAILQ_INIT(&(cryptodev->link_intr_cbs));
+
+ /* if sec device version is not configured */
+ if (!rta_get_sec_era()) {
+ const struct device_node *caam_node;
+
+ for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
+ const uint32_t *prop = of_get_property(caam_node,
+ "fsl,sec-era",
+ NULL);
+ if (prop) {
+ rta_set_sec_era(
+ INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
+ break;
+ }
+ }
+ }
+
+ /* Invoke PMD device initialization function */
+ retval = dpaa_sec_dev_init(cryptodev);
+ if (retval == 0)
+ return 0;
+
+ /* In case of error, cleanup is done */
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ rte_free(cryptodev->data->dev_private);
+
+ rte_cryptodev_pmd_release_device(cryptodev);
+
+ return -ENXIO;
+}
+
+static int
+cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
+{
+ struct rte_cryptodev *cryptodev;
+ int ret;
+
+ cryptodev = dpaa_dev->crypto_dev;
+ if (cryptodev == NULL)
+ return -ENODEV;
+
+ ret = dpaa_sec_uninit(cryptodev);
+ if (ret)
+ return ret;
+
+ return rte_cryptodev_pmd_destroy(cryptodev);
+}
+
+static struct rte_dpaa_driver rte_dpaa_sec_driver = {
+ .drv_type = FSL_DPAA_CRYPTO,
+ .driver = {
+ .name = "DPAA SEC PMD"
+ },
+ .probe = cryptodev_dpaa_sec_probe,
+ .remove = cryptodev_dpaa_sec_remove,
+};
+
+static struct cryptodev_driver dpaa_sec_crypto_drv;
+
+RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
+RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
+ cryptodev_driver_id);
+
+RTE_INIT(dpaa_sec_init_log)
+{
+ dpaa_logtype_sec = rte_log_register("pmd.crypto.dpaa");
+ if (dpaa_logtype_sec >= 0)
+ rte_log_set_level(dpaa_logtype_sec, RTE_LOG_NOTICE);
+}
diff --git a/src/spdk/dpdk/drivers/crypto/dpaa_sec/dpaa_sec.h b/src/spdk/dpdk/drivers/crypto/dpaa_sec/dpaa_sec.h
new file mode 100644
index 00000000..ac6c00a6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/dpaa_sec/dpaa_sec.h
@@ -0,0 +1,452 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright 2016 NXP
+ *
+ */
+
+#ifndef _DPAA_SEC_H_
+#define _DPAA_SEC_H_
+
+#define CRYPTODEV_NAME_DPAA_SEC_PMD crypto_dpaa_sec
+/**< NXP DPAA - SEC PMD device name */
+
+#define NUM_POOL_CHANNELS 4
+#define DPAA_SEC_BURST 7
+#define DPAA_SEC_ALG_UNSUPPORT (-1)
+#define TDES_CBC_IV_LEN 8
+#define AES_CBC_IV_LEN 16
+#define AES_CTR_IV_LEN 16
+#define AES_GCM_IV_LEN 12
+
+/* A minimum job descriptor consists of a one-word job descriptor HEADER and
+ * a pointer to the shared descriptor.
+ */
+#define MIN_JOB_DESC_SIZE (CAAM_CMD_SZ + CAAM_PTR_SZ)
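+/* Illustration only: with a 4-byte command word and an 8-byte descriptor
+ * pointer this comes to 12 bytes; the actual sizes come from the RTA headers.
+ */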
+/* CTX_POOL_NUM_BUFS is set as per the ipsec-secgw application */
+#define CTX_POOL_NUM_BUFS 32000
+#define CTX_POOL_BUF_SIZE sizeof(struct dpaa_sec_op_ctx)
+#define CTX_POOL_CACHE_SIZE 512
+#define RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS 2048
+
+#define DIR_ENC 1
+#define DIR_DEC 0
+
+enum dpaa_sec_op_type {
+ DPAA_SEC_NONE, /*!< No Cipher operations*/
+ DPAA_SEC_CIPHER,/*!< CIPHER operations */
+ DPAA_SEC_AUTH, /*!< Authentication Operations */
+ DPAA_SEC_AEAD, /*!< Authenticated Encryption with associated data */
+ DPAA_SEC_IPSEC, /*!< IPSEC protocol operations*/
+ DPAA_SEC_PDCP, /*!< PDCP protocol operations*/
+ DPAA_SEC_PKC, /*!< Public Key Cryptographic Operations */
+ DPAA_SEC_MAX
+};
+
+
+#define DPAA_SEC_MAX_DESC_SIZE 64
+/* code or cmd block to caam */
+struct sec_cdb {
+ struct {
+ union {
+ uint32_t word;
+ struct {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ uint16_t rsvd63_48;
+ unsigned int rsvd47_39:9;
+ unsigned int idlen:7;
+#else
+ unsigned int idlen:7;
+ unsigned int rsvd47_39:9;
+ uint16_t rsvd63_48;
+#endif
+ } field;
+ } __packed hi;
+
+ union {
+ uint32_t word;
+ struct {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ unsigned int rsvd31_30:2;
+ unsigned int fsgt:1;
+ unsigned int lng:1;
+ unsigned int offset:2;
+ unsigned int abs:1;
+ unsigned int add_buf:1;
+ uint8_t pool_id;
+ uint16_t pool_buffer_size;
+#else
+ uint16_t pool_buffer_size;
+ uint8_t pool_id;
+ unsigned int add_buf:1;
+ unsigned int abs:1;
+ unsigned int offset:2;
+ unsigned int lng:1;
+ unsigned int fsgt:1;
+ unsigned int rsvd31_30:2;
+#endif
+ } field;
+ } __packed lo;
+ } __packed sh_hdr;
+
+ uint32_t sh_desc[DPAA_SEC_MAX_DESC_SIZE];
+};
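+/* The sh_hdr words above are filled in by dpaa_sec_prep_cdb(): idlen carries
+ * the shared descriptor length and both words are converted to big endian
+ * before the CDB is handed to the SEC hardware.
+ */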
+
+typedef struct dpaa_sec_session_entry {
+ uint8_t dir; /*!< Operation Direction */
+ enum rte_crypto_cipher_algorithm cipher_alg; /*!< Cipher Algorithm*/
+ enum rte_crypto_auth_algorithm auth_alg; /*!< Authentication Algorithm*/
+ enum rte_crypto_aead_algorithm aead_alg; /*!< AEAD Algorithm*/
+ enum rte_security_session_protocol proto_alg; /*!< Security Algorithm*/
+ union {
+ struct {
+ uint8_t *data; /**< pointer to key data */
+ size_t length; /**< key length in bytes */
+ } aead_key;
+ struct {
+ struct {
+ uint8_t *data; /**< pointer to key data */
+ size_t length; /**< key length in bytes */
+ } cipher_key;
+ struct {
+ uint8_t *data; /**< pointer to key data */
+ size_t length; /**< key length in bytes */
+ } auth_key;
+ };
+ };
+ struct {
+ uint16_t length;
+ uint16_t offset;
+ } iv; /**< Initialisation vector parameters */
+ uint16_t auth_only_len; /*!< Length of data for Auth only */
+ uint32_t digest_length;
+ struct ipsec_encap_pdb encap_pdb;
+ struct ip ip4_hdr;
+ struct ipsec_decap_pdb decap_pdb;
+ struct dpaa_sec_qp *qp;
+ struct qman_fq *inq;
+ struct sec_cdb cdb; /**< cmd block associated with qp */
+ struct rte_mempool *ctx_pool; /* session mempool for dpaa_sec_op_ctx */
+} dpaa_sec_session;
+
+struct dpaa_sec_qp {
+ struct dpaa_sec_dev_private *internals;
+ struct qman_fq outq;
+ int rx_pkts;
+ int rx_errs;
+ int tx_pkts;
+ int tx_errs;
+};
+
+#define RTE_DPAA_MAX_NB_SEC_QPS 8
+#define RTE_DPAA_MAX_RX_QUEUE RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS
+#define DPAA_MAX_DEQUEUE_NUM_FRAMES 63
+
+/* internal sec queue interface */
+struct dpaa_sec_dev_private {
+ void *sec_hw;
+ struct rte_mempool *ctx_pool; /* per dev mempool for dpaa_sec_op_ctx */
+ struct dpaa_sec_qp qps[RTE_DPAA_MAX_NB_SEC_QPS]; /* i/o queue for sec */
+ struct qman_fq inq[RTE_DPAA_MAX_RX_QUEUE];
+ unsigned char inq_attach[RTE_DPAA_MAX_RX_QUEUE];
+ unsigned int max_nb_queue_pairs;
+ unsigned int max_nb_sessions;
+};
+
+#define MAX_SG_ENTRIES 16
+#define SG_CACHELINE_0 0
+#define SG_CACHELINE_1 4
+#define SG_CACHELINE_2 8
+#define SG_CACHELINE_3 12
+struct dpaa_sec_job {
+ /* sg[0] output, sg[1] input, others are possible sub frames */
+ struct qm_sg_entry sg[MAX_SG_ENTRIES];
+};
+
+#define DPAA_MAX_NB_MAX_DIGEST 32
+struct dpaa_sec_op_ctx {
+ struct dpaa_sec_job job;
+ struct rte_crypto_op *op;
+ struct rte_mempool *ctx_pool; /* mempool pointer for dpaa_sec_op_ctx */
+ uint32_t fd_status;
+ int64_t vtop_offset;
+ uint8_t digest[DPAA_MAX_NB_MAX_DIGEST];
+};
+
+static const struct rte_cryptodev_capabilities dpaa_sec_capabilities[] = {
+ { /* MD5 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_MD5_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 1,
+ .max = 16,
+ .increment = 1
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA1 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 1,
+ .max = 20,
+ .increment = 1
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA224 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA224_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 1,
+ .max = 28,
+ .increment = 1
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA256 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 1,
+ .max = 32,
+ .increment = 1
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA384 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
+ .block_size = 128,
+ .key_size = {
+ .min = 1,
+ .max = 128,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 1,
+ .max = 48,
+ .increment = 1
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA512 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
+ .block_size = 128,
+ .key_size = {
+ .min = 1,
+ .max = 128,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* AES GCM */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+ {.aead = {
+ .algo = RTE_CRYPTO_AEAD_AES_GCM,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .digest_size = {
+ .min = 8,
+ .max = 16,
+ .increment = 4
+ },
+ .aad_size = {
+ .min = 0,
+ .max = 240,
+ .increment = 1
+ },
+ .iv_size = {
+ .min = 12,
+ .max = 12,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* AES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* AES CTR */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_CTR,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* 3DES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_3DES_CBC,
+ .block_size = 8,
+ .key_size = {
+ .min = 16,
+ .max = 24,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+static const struct rte_security_capability dpaa_sec_security_cap[] = {
+	{ /* IPsec Lookaside Protocol offload ESP Tunnel Egress */
+ .action = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
+ .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+ .ipsec = {
+ .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+ .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+ .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
+ .options = { 0 }
+ },
+ .crypto_capabilities = dpaa_sec_capabilities
+ },
+ { /* IPsec Lookaside Protocol offload ESP Tunnel Ingress */
+ .action = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
+ .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+ .ipsec = {
+ .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+ .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+ .direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
+ .options = { 0 }
+ },
+ .crypto_capabilities = dpaa_sec_capabilities
+ },
+ {
+ .action = RTE_SECURITY_ACTION_TYPE_NONE
+ }
+};
+
+/**
+ * Calculate a 16-bit ones' complement checksum (IP-style).
+ *
+ * @param buffer buffer to calculate the checksum over
+ * @param len buffer length in bytes
+ *
+ * @return checksum value in host CPU byte order
+ */
+static inline uint16_t
+calc_chksum(void *buffer, int len)
+{
+ uint16_t *buf = (uint16_t *)buffer;
+ uint32_t sum = 0;
+ uint16_t result;
+
+ for (sum = 0; len > 1; len -= 2)
+ sum += *buf++;
+
+ if (len == 1)
+ sum += *(unsigned char *)buf;
+
+ sum = (sum >> 16) + (sum & 0xFFFF);
+ sum += (sum >> 16);
+ result = ~sum;
+
+ return result;
+}
+
+#endif /* _DPAA_SEC_H_ */
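
The calc_chksum() helper above is the classic 16-bit ones' complement (RFC 1071 style) checksum; dpaa_sec appears to use it when pre-building IP header templates for the IPsec protocol-offload sessions advertised by dpaa_sec_security_cap. A minimal, self-contained sketch of the same folding (duplicated here rather than pulling in dpaa_sec.h and its DPAA dependencies; the sample header bytes are made up for illustration) is:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Same ones' complement folding as calc_chksum() in dpaa_sec.h. */
static uint16_t
chksum_sketch(const void *buffer, int len)
{
	const uint16_t *buf = buffer;
	uint32_t sum = 0;

	for (; len > 1; len -= 2)
		sum += *buf++;
	if (len == 1)
		sum += *(const unsigned char *)buf;

	sum = (sum >> 16) + (sum & 0xFFFF);
	sum += (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	/* 20-byte IPv4 header with the checksum field (bytes 10-11) zeroed. */
	uint8_t iphdr[20] = {
		0x45, 0x00, 0x00, 0x3c, 0x1c, 0x46, 0x40, 0x00,
		0x40, 0x06, 0x00, 0x00, 0xac, 0x10, 0x0a, 0x63,
		0xac, 0x10, 0x0a, 0x0c
	};
	uint16_t csum = chksum_sketch(iphdr, sizeof(iphdr));

	/* Writing the result back and re-checksumming must yield 0. */
	memcpy(&iphdr[10], &csum, sizeof(csum));
	printf("checksum=0x%04x verify=0x%04x\n",
	       csum, chksum_sketch(iphdr, sizeof(iphdr)));
	return 0;
}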
diff --git a/src/spdk/dpdk/drivers/crypto/dpaa_sec/dpaa_sec_log.h b/src/spdk/dpdk/drivers/crypto/dpaa_sec/dpaa_sec_log.h
new file mode 100644
index 00000000..fb895a8b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/dpaa_sec/dpaa_sec_log.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright 2017-2018 NXP
+ *
+ */
+
+#ifndef _DPAA_SEC_LOG_H_
+#define _DPAA_SEC_LOG_H_
+
+extern int dpaa_logtype_sec;
+
+#define DPAA_SEC_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, dpaa_logtype_sec, "dpaa_sec: " \
+ fmt "\n", ##args)
+
+#define DPAA_SEC_DEBUG(fmt, args...) \
+ rte_log(RTE_LOG_DEBUG, dpaa_logtype_sec, "dpaa_sec: %s(): " \
+ fmt "\n", __func__, ##args)
+
+#define PMD_INIT_FUNC_TRACE() DPAA_SEC_DEBUG(" >>")
+
+#define DPAA_SEC_INFO(fmt, args...) \
+ DPAA_SEC_LOG(INFO, fmt, ## args)
+#define DPAA_SEC_ERR(fmt, args...) \
+ DPAA_SEC_LOG(ERR, fmt, ## args)
+#define DPAA_SEC_WARN(fmt, args...) \
+ DPAA_SEC_LOG(WARNING, fmt, ## args)
+
+/* Datapath logs, compiled out when the level is below the configured datapath log level */
+#define DPAA_SEC_DP_LOG(level, fmt, args...) \
+ RTE_LOG_DP(level, PMD, fmt, ## args)
+
+#define DPAA_SEC_DP_DEBUG(fmt, args...) \
+ DPAA_SEC_DP_LOG(DEBUG, fmt, ## args)
+#define DPAA_SEC_DP_INFO(fmt, args...) \
+ DPAA_SEC_DP_LOG(INFO, fmt, ## args)
+#define DPAA_SEC_DP_WARN(fmt, args...) \
+ DPAA_SEC_DP_LOG(WARNING, fmt, ## args)
+#define DPAA_SEC_DP_ERR(fmt, args...) \
+ DPAA_SEC_DP_LOG(ERR, fmt, ## args)
+
+#endif /* _DPAA_SEC_LOG_H_ */
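
Two macro families are defined above: the DPAA_SEC_* control-path macros always call rte_log() and are filtered at run time through the dynamic dpaa_logtype_sec level, while the DPAA_SEC_DP_* datapath macros go through RTE_LOG_DP and disappear at compile time when the build's datapath log level is lower. The following self-contained sketch mimics that split with plain printf stand-ins (the SKETCH_* names are invented for illustration and are not part of the driver):

#include <stdio.h>

/* Runtime-filtered "control path" log: the level check happens at run time. */
static int sketch_loglevel = 2; /* 0=ERR, 1=WARN, 2=INFO, 3=DEBUG */
#define SKETCH_LOG(level, fmt, ...) \
	do { \
		if ((level) <= sketch_loglevel) \
			printf("dpaa_sec: " fmt "\n", ##__VA_ARGS__); \
	} while (0)

/* Compile-time-filtered "datapath" log: below the threshold the branch is
 * a constant false and the call is optimized away entirely. */
#define SKETCH_DP_LEVEL 1
#define SKETCH_DP_LOG(level, fmt, ...) \
	do { \
		if ((level) <= SKETCH_DP_LEVEL) \
			printf(fmt "\n", ##__VA_ARGS__); \
	} while (0)

int main(void)
{
	SKETCH_LOG(2, "queue pair %d configured", 0);	/* printed */
	SKETCH_LOG(3, "verbose detail");		/* filtered at run time */
	SKETCH_DP_LOG(1, "enqueued %d frames", 8);	/* printed */
	SKETCH_DP_LOG(3, "per-packet trace");		/* dead code, removed */
	return 0;
}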
diff --git a/src/spdk/dpdk/drivers/crypto/dpaa_sec/meson.build b/src/spdk/dpdk/drivers/crypto/dpaa_sec/meson.build
new file mode 100644
index 00000000..8a570984
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/dpaa_sec/meson.build
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+if host_machine.system() != 'linux'
+ build = false
+endif
+
+deps += ['bus_dpaa', 'security']
+sources = files('dpaa_sec.c')
+
+allow_experimental_apis = true
+
+includes += include_directories('../dpaa2_sec/')
diff --git a/src/spdk/dpdk/drivers/crypto/dpaa_sec/rte_pmd_dpaa_sec_version.map b/src/spdk/dpdk/drivers/crypto/dpaa_sec/rte_pmd_dpaa_sec_version.map
new file mode 100644
index 00000000..a70bd197
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/dpaa_sec/rte_pmd_dpaa_sec_version.map
@@ -0,0 +1,4 @@
+DPDK_17.11 {
+
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/crypto/kasumi/Makefile b/src/spdk/dpdk/drivers/crypto/kasumi/Makefile
new file mode 100644
index 00000000..cafe9498
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/kasumi/Makefile
@@ -0,0 +1,38 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2016 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+ifneq ($(MAKECMDGOALS),clean)
+ifeq ($(LIBSSO_KASUMI_PATH),)
+$(error "Please define LIBSSO_KASUMI_PATH environment variable")
+endif
+endif
+
+# library name
+LIB = librte_pmd_kasumi.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_pmd_kasumi_version.map
+
+# external library dependencies
+CFLAGS += -I$(LIBSSO_KASUMI_PATH)
+CFLAGS += -I$(LIBSSO_KASUMI_PATH)/include
+CFLAGS += -I$(LIBSSO_KASUMI_PATH)/build
+LDLIBS += -L$(LIBSSO_KASUMI_PATH)/build -lsso_kasumi
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_cryptodev
+LDLIBS += -lrte_bus_vdev
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_KASUMI) += rte_kasumi_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_KASUMI) += rte_kasumi_pmd_ops.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/crypto/kasumi/rte_kasumi_pmd.c b/src/spdk/dpdk/drivers/crypto/kasumi/rte_kasumi_pmd.c
new file mode 100644
index 00000000..239a1cf4
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/kasumi/rte_kasumi_pmd.c
@@ -0,0 +1,629 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2018 Intel Corporation
+ */
+
+#include <rte_common.h>
+#include <rte_hexdump.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_bus_vdev.h>
+#include <rte_malloc.h>
+#include <rte_cpuflags.h>
+
+#include "rte_kasumi_pmd_private.h"
+
+#define KASUMI_KEY_LENGTH 16
+#define KASUMI_IV_LENGTH 8
+#define KASUMI_MAX_BURST 4
+#define BYTE_LEN 8
+
+static uint8_t cryptodev_driver_id;
+
+/** Get xform chain order. */
+static enum kasumi_operation
+kasumi_get_mode(const struct rte_crypto_sym_xform *xform)
+{
+ if (xform == NULL)
+ return KASUMI_OP_NOT_SUPPORTED;
+
+ if (xform->next)
+ if (xform->next->next != NULL)
+ return KASUMI_OP_NOT_SUPPORTED;
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ if (xform->next == NULL)
+ return KASUMI_OP_ONLY_AUTH;
+ else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+ return KASUMI_OP_AUTH_CIPHER;
+ else
+ return KASUMI_OP_NOT_SUPPORTED;
+ }
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ if (xform->next == NULL)
+ return KASUMI_OP_ONLY_CIPHER;
+ else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ return KASUMI_OP_CIPHER_AUTH;
+ else
+ return KASUMI_OP_NOT_SUPPORTED;
+ }
+
+ return KASUMI_OP_NOT_SUPPORTED;
+}
+
+
+/** Parse crypto xform chain and set private session parameters. */
+int
+kasumi_set_session_parameters(struct kasumi_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ const struct rte_crypto_sym_xform *auth_xform = NULL;
+ const struct rte_crypto_sym_xform *cipher_xform = NULL;
+ enum kasumi_operation mode;
+
+ /* Select Crypto operation - hash then cipher / cipher then hash */
+ mode = kasumi_get_mode(xform);
+
+ switch (mode) {
+ case KASUMI_OP_CIPHER_AUTH:
+ auth_xform = xform->next;
+ /* Fall-through */
+ case KASUMI_OP_ONLY_CIPHER:
+ cipher_xform = xform;
+ break;
+ case KASUMI_OP_AUTH_CIPHER:
+ cipher_xform = xform->next;
+ /* Fall-through */
+ case KASUMI_OP_ONLY_AUTH:
+ auth_xform = xform;
+ break;
+ case KASUMI_OP_NOT_SUPPORTED:
+ default:
+ KASUMI_LOG(ERR, "Unsupported operation chain order parameter");
+ return -ENOTSUP;
+ }
+
+ if (cipher_xform) {
+ /* Only KASUMI F8 supported */
+ if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_KASUMI_F8) {
+ KASUMI_LOG(ERR, "Unsupported cipher algorithm ");
+ return -ENOTSUP;
+ }
+
+ sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;
+ if (cipher_xform->cipher.iv.length != KASUMI_IV_LENGTH) {
+ KASUMI_LOG(ERR, "Wrong IV length");
+ return -EINVAL;
+ }
+
+ /* Initialize key */
+ sso_kasumi_init_f8_key_sched(cipher_xform->cipher.key.data,
+ &sess->pKeySched_cipher);
+ }
+
+ if (auth_xform) {
+ /* Only KASUMI F9 supported */
+ if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_KASUMI_F9) {
+ KASUMI_LOG(ERR, "Unsupported authentication");
+ return -ENOTSUP;
+ }
+
+ if (auth_xform->auth.digest_length != KASUMI_DIGEST_LENGTH) {
+ KASUMI_LOG(ERR, "Wrong digest length");
+ return -EINVAL;
+ }
+
+ sess->auth_op = auth_xform->auth.op;
+
+ /* Initialize key */
+ sso_kasumi_init_f9_key_sched(auth_xform->auth.key.data,
+ &sess->pKeySched_hash);
+ }
+
+
+ sess->op = mode;
+
+ return 0;
+}
+
+/** Get KASUMI session. */
+static struct kasumi_session *
+kasumi_get_session(struct kasumi_qp *qp, struct rte_crypto_op *op)
+{
+ struct kasumi_session *sess = NULL;
+
+ if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+ if (likely(op->sym->session != NULL))
+ sess = (struct kasumi_session *)
+ get_sym_session_private_data(
+ op->sym->session,
+ cryptodev_driver_id);
+ } else {
+ void *_sess = NULL;
+ void *_sess_private_data = NULL;
+
+ if (rte_mempool_get(qp->sess_mp, (void **)&_sess))
+ return NULL;
+
+		if (rte_mempool_get(qp->sess_mp, (void **)&_sess_private_data)) {
+			/* Put the session object back so it is not leaked. */
+			rte_mempool_put(qp->sess_mp, _sess);
+			return NULL;
+		}
+
+ sess = (struct kasumi_session *)_sess_private_data;
+
+ if (unlikely(kasumi_set_session_parameters(sess,
+ op->sym->xform) != 0)) {
+ rte_mempool_put(qp->sess_mp, _sess);
+ rte_mempool_put(qp->sess_mp, _sess_private_data);
+ sess = NULL;
+ }
+ op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
+ set_sym_session_private_data(op->sym->session,
+ cryptodev_driver_id, _sess_private_data);
+ }
+
+ if (unlikely(sess == NULL))
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+
+ return sess;
+}
+
+/** Encrypt/decrypt mbufs with same cipher key. */
+static uint8_t
+process_kasumi_cipher_op(struct rte_crypto_op **ops,
+ struct kasumi_session *session,
+ uint8_t num_ops)
+{
+ unsigned i;
+ uint8_t processed_ops = 0;
+ uint8_t *src[num_ops], *dst[num_ops];
+ uint8_t *iv_ptr;
+ uint64_t iv[num_ops];
+ uint32_t num_bytes[num_ops];
+
+ for (i = 0; i < num_ops; i++) {
+ src[i] = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
+ (ops[i]->sym->cipher.data.offset >> 3);
+ dst[i] = ops[i]->sym->m_dst ?
+ rte_pktmbuf_mtod(ops[i]->sym->m_dst, uint8_t *) +
+ (ops[i]->sym->cipher.data.offset >> 3) :
+ rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
+ (ops[i]->sym->cipher.data.offset >> 3);
+ iv_ptr = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
+ session->cipher_iv_offset);
+ iv[i] = *((uint64_t *)(iv_ptr));
+ num_bytes[i] = ops[i]->sym->cipher.data.length >> 3;
+
+ processed_ops++;
+ }
+
+ if (processed_ops != 0)
+ sso_kasumi_f8_n_buffer(&session->pKeySched_cipher, iv,
+ src, dst, num_bytes, processed_ops);
+
+ return processed_ops;
+}
+
+/** Encrypt/decrypt mbuf (bit level function). */
+static uint8_t
+process_kasumi_cipher_op_bit(struct rte_crypto_op *op,
+ struct kasumi_session *session)
+{
+ uint8_t *src, *dst;
+ uint8_t *iv_ptr;
+ uint64_t iv;
+ uint32_t length_in_bits, offset_in_bits;
+
+ offset_in_bits = op->sym->cipher.data.offset;
+ src = rte_pktmbuf_mtod(op->sym->m_src, uint8_t *);
+ if (op->sym->m_dst == NULL) {
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ KASUMI_LOG(ERR, "bit-level in-place not supported");
+ return 0;
+ }
+ dst = rte_pktmbuf_mtod(op->sym->m_dst, uint8_t *);
+ iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ session->cipher_iv_offset);
+ iv = *((uint64_t *)(iv_ptr));
+ length_in_bits = op->sym->cipher.data.length;
+
+ sso_kasumi_f8_1_buffer_bit(&session->pKeySched_cipher, iv,
+ src, dst, length_in_bits, offset_in_bits);
+
+ return 1;
+}
+
+/** Generate/verify hash from mbufs with same hash key. */
+static int
+process_kasumi_hash_op(struct kasumi_qp *qp, struct rte_crypto_op **ops,
+ struct kasumi_session *session,
+ uint8_t num_ops)
+{
+ unsigned i;
+ uint8_t processed_ops = 0;
+ uint8_t *src, *dst;
+ uint32_t length_in_bits;
+ uint32_t num_bytes;
+
+ for (i = 0; i < num_ops; i++) {
+ /* Data must be byte aligned */
+ if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ KASUMI_LOG(ERR, "Invalid Offset");
+ break;
+ }
+
+ length_in_bits = ops[i]->sym->auth.data.length;
+
+ src = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
+ (ops[i]->sym->auth.data.offset >> 3);
+		/* Round the bit length down to whole bytes */
+ num_bytes = length_in_bits >> 3;
+
+ if (session->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
+ dst = qp->temp_digest;
+ sso_kasumi_f9_1_buffer(&session->pKeySched_hash, src,
+ num_bytes, dst);
+
+ /* Verify digest. */
+ if (memcmp(dst, ops[i]->sym->auth.digest.data,
+ KASUMI_DIGEST_LENGTH) != 0)
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ } else {
+ dst = ops[i]->sym->auth.digest.data;
+
+ sso_kasumi_f9_1_buffer(&session->pKeySched_hash, src,
+ num_bytes, dst);
+ }
+ processed_ops++;
+ }
+
+ return processed_ops;
+}
+
+/** Process a batch of crypto ops which share the same session. */
+static int
+process_ops(struct rte_crypto_op **ops, struct kasumi_session *session,
+ struct kasumi_qp *qp, uint8_t num_ops,
+ uint16_t *accumulated_enqueued_ops)
+{
+ unsigned i;
+ unsigned enqueued_ops, processed_ops;
+
+ switch (session->op) {
+ case KASUMI_OP_ONLY_CIPHER:
+ processed_ops = process_kasumi_cipher_op(ops,
+ session, num_ops);
+ break;
+ case KASUMI_OP_ONLY_AUTH:
+ processed_ops = process_kasumi_hash_op(qp, ops, session,
+ num_ops);
+ break;
+ case KASUMI_OP_CIPHER_AUTH:
+ processed_ops = process_kasumi_cipher_op(ops, session,
+ num_ops);
+ process_kasumi_hash_op(qp, ops, session, processed_ops);
+ break;
+ case KASUMI_OP_AUTH_CIPHER:
+ processed_ops = process_kasumi_hash_op(qp, ops, session,
+ num_ops);
+ process_kasumi_cipher_op(ops, session, processed_ops);
+ break;
+ default:
+ /* Operation not supported. */
+ processed_ops = 0;
+ }
+
+ for (i = 0; i < num_ops; i++) {
+ /*
+ * If there was no error/authentication failure,
+ * change status to successful.
+ */
+ if (ops[i]->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ /* Free session if a session-less crypto op. */
+ if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+ memset(session, 0, sizeof(struct kasumi_session));
+ memset(ops[i]->sym->session, 0,
+ rte_cryptodev_sym_get_header_session_size());
+ rte_mempool_put(qp->sess_mp, session);
+ rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
+ ops[i]->sym->session = NULL;
+ }
+ }
+
+ enqueued_ops = rte_ring_enqueue_burst(qp->processed_ops,
+ (void **)ops, processed_ops, NULL);
+ qp->qp_stats.enqueued_count += enqueued_ops;
+ *accumulated_enqueued_ops += enqueued_ops;
+
+ return enqueued_ops;
+}
+
+/** Process a crypto op with length/offset in bits. */
+static int
+process_op_bit(struct rte_crypto_op *op, struct kasumi_session *session,
+ struct kasumi_qp *qp, uint16_t *accumulated_enqueued_ops)
+{
+ unsigned enqueued_op, processed_op;
+
+ switch (session->op) {
+ case KASUMI_OP_ONLY_CIPHER:
+ processed_op = process_kasumi_cipher_op_bit(op,
+ session);
+ break;
+ case KASUMI_OP_ONLY_AUTH:
+ processed_op = process_kasumi_hash_op(qp, &op, session, 1);
+ break;
+ case KASUMI_OP_CIPHER_AUTH:
+ processed_op = process_kasumi_cipher_op_bit(op, session);
+ if (processed_op == 1)
+ process_kasumi_hash_op(qp, &op, session, 1);
+ break;
+ case KASUMI_OP_AUTH_CIPHER:
+ processed_op = process_kasumi_hash_op(qp, &op, session, 1);
+ if (processed_op == 1)
+ process_kasumi_cipher_op_bit(op, session);
+ break;
+ default:
+ /* Operation not supported. */
+ processed_op = 0;
+ }
+
+ /*
+ * If there was no error/authentication failure,
+ * change status to successful.
+ */
+ if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+ /* Free session if a session-less crypto op. */
+ if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+ memset(op->sym->session, 0, sizeof(struct kasumi_session));
+ rte_cryptodev_sym_session_free(op->sym->session);
+ op->sym->session = NULL;
+ }
+
+ enqueued_op = rte_ring_enqueue_burst(qp->processed_ops, (void **)&op,
+ processed_op, NULL);
+ qp->qp_stats.enqueued_count += enqueued_op;
+ *accumulated_enqueued_ops += enqueued_op;
+
+ return enqueued_op;
+}
+
+static uint16_t
+kasumi_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct rte_crypto_op *c_ops[nb_ops];
+ struct rte_crypto_op *curr_c_op;
+
+ struct kasumi_session *prev_sess = NULL, *curr_sess = NULL;
+ struct kasumi_qp *qp = queue_pair;
+ unsigned i;
+ uint8_t burst_size = 0;
+ uint16_t enqueued_ops = 0;
+ uint8_t processed_ops;
+
+ for (i = 0; i < nb_ops; i++) {
+ curr_c_op = ops[i];
+
+#ifdef RTE_LIBRTE_PMD_KASUMI_DEBUG
+ if (!rte_pktmbuf_is_contiguous(curr_c_op->sym->m_src) ||
+ (curr_c_op->sym->m_dst != NULL &&
+ !rte_pktmbuf_is_contiguous(
+ curr_c_op->sym->m_dst))) {
+ KASUMI_LOG(ERR, "PMD supports only contiguous mbufs, "
+ "op (%p) provides noncontiguous mbuf as "
+ "source/destination buffer.", curr_c_op);
+ curr_c_op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ break;
+ }
+#endif
+
+ /* Set status as enqueued (not processed yet) by default. */
+ curr_c_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+
+ curr_sess = kasumi_get_session(qp, curr_c_op);
+ if (unlikely(curr_sess == NULL ||
+ curr_sess->op == KASUMI_OP_NOT_SUPPORTED)) {
+ curr_c_op->status =
+ RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+ break;
+ }
+
+ /* If length/offset is at bit-level, process this buffer alone. */
+ if (((curr_c_op->sym->cipher.data.length % BYTE_LEN) != 0)
+ || ((ops[i]->sym->cipher.data.offset
+ % BYTE_LEN) != 0)) {
+ /* Process the ops of the previous session. */
+ if (prev_sess != NULL) {
+ processed_ops = process_ops(c_ops, prev_sess,
+ qp, burst_size, &enqueued_ops);
+ if (processed_ops < burst_size) {
+ burst_size = 0;
+ break;
+ }
+
+ burst_size = 0;
+ prev_sess = NULL;
+ }
+
+ processed_ops = process_op_bit(curr_c_op, curr_sess,
+ qp, &enqueued_ops);
+ if (processed_ops != 1)
+ break;
+
+ continue;
+ }
+
+ /* Batch ops that share the same session. */
+ if (prev_sess == NULL) {
+ prev_sess = curr_sess;
+ c_ops[burst_size++] = curr_c_op;
+ } else if (curr_sess == prev_sess) {
+ c_ops[burst_size++] = curr_c_op;
+ /*
+ * When there are enough ops to process in a batch,
+ * process them, and start a new batch.
+ */
+ if (burst_size == KASUMI_MAX_BURST) {
+ processed_ops = process_ops(c_ops, prev_sess,
+ qp, burst_size, &enqueued_ops);
+ if (processed_ops < burst_size) {
+ burst_size = 0;
+ break;
+ }
+
+ burst_size = 0;
+ prev_sess = NULL;
+ }
+ } else {
+ /*
+ * Different session, process the ops
+ * of the previous session.
+ */
+ processed_ops = process_ops(c_ops, prev_sess,
+ qp, burst_size, &enqueued_ops);
+ if (processed_ops < burst_size) {
+ burst_size = 0;
+ break;
+ }
+
+ burst_size = 0;
+ prev_sess = curr_sess;
+
+ c_ops[burst_size++] = curr_c_op;
+ }
+ }
+
+ if (burst_size != 0) {
+ /* Process the crypto ops of the last session. */
+ processed_ops = process_ops(c_ops, prev_sess,
+ qp, burst_size, &enqueued_ops);
+ }
+
+ qp->qp_stats.enqueue_err_count += nb_ops - enqueued_ops;
+ return enqueued_ops;
+}
+
+static uint16_t
+kasumi_pmd_dequeue_burst(void *queue_pair,
+ struct rte_crypto_op **c_ops, uint16_t nb_ops)
+{
+ struct kasumi_qp *qp = queue_pair;
+
+ unsigned nb_dequeued;
+
+ nb_dequeued = rte_ring_dequeue_burst(qp->processed_ops,
+ (void **)c_ops, nb_ops, NULL);
+ qp->qp_stats.dequeued_count += nb_dequeued;
+
+ return nb_dequeued;
+}
+
+static int cryptodev_kasumi_remove(struct rte_vdev_device *vdev);
+
+static int
+cryptodev_kasumi_create(const char *name,
+ struct rte_vdev_device *vdev,
+ struct rte_cryptodev_pmd_init_params *init_params)
+{
+ struct rte_cryptodev *dev;
+ struct kasumi_private *internals;
+ uint64_t cpu_flags = 0;
+
+ dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
+ if (dev == NULL) {
+ KASUMI_LOG(ERR, "failed to create cryptodev vdev");
+ goto init_error;
+ }
+
+ /* Check CPU for supported vector instruction set */
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
+ cpu_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
+ else
+ cpu_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
+
+ dev->driver_id = cryptodev_driver_id;
+ dev->dev_ops = rte_kasumi_pmd_ops;
+
+ /* Register RX/TX burst functions for data path. */
+ dev->dequeue_burst = kasumi_pmd_dequeue_burst;
+ dev->enqueue_burst = kasumi_pmd_enqueue_burst;
+
+ dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+ cpu_flags;
+
+ internals = dev->data->dev_private;
+
+ internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
+
+ return 0;
+init_error:
+ KASUMI_LOG(ERR, "driver %s: failed",
+ init_params->name);
+
+ cryptodev_kasumi_remove(vdev);
+ return -EFAULT;
+}
+
+static int
+cryptodev_kasumi_probe(struct rte_vdev_device *vdev)
+{
+ struct rte_cryptodev_pmd_init_params init_params = {
+ "",
+ sizeof(struct kasumi_private),
+ rte_socket_id(),
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
+ };
+ const char *name;
+ const char *input_args;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+ input_args = rte_vdev_device_args(vdev);
+
+ rte_cryptodev_pmd_parse_input_args(&init_params, input_args);
+
+ return cryptodev_kasumi_create(name, vdev, &init_params);
+}
+
+static int
+cryptodev_kasumi_remove(struct rte_vdev_device *vdev)
+{
+ struct rte_cryptodev *cryptodev;
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+
+ cryptodev = rte_cryptodev_pmd_get_named_dev(name);
+ if (cryptodev == NULL)
+ return -ENODEV;
+
+ return rte_cryptodev_pmd_destroy(cryptodev);
+}
+
+static struct rte_vdev_driver cryptodev_kasumi_pmd_drv = {
+ .probe = cryptodev_kasumi_probe,
+ .remove = cryptodev_kasumi_remove
+};
+
+static struct cryptodev_driver kasumi_crypto_drv;
+
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_KASUMI_PMD, cryptodev_kasumi_pmd_drv);
+RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_KASUMI_PMD, cryptodev_kasumi_pmd);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_KASUMI_PMD,
+ "max_nb_queue_pairs=<int> "
+ "socket_id=<int>");
+RTE_PMD_REGISTER_CRYPTO_DRIVER(kasumi_crypto_drv,
+ cryptodev_kasumi_pmd_drv.driver, cryptodev_driver_id);
+
+RTE_INIT(kasumi_init_log)
+{
+ kasumi_logtype_driver = rte_log_register("pmd.crypto.kasumi");
+}
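
kasumi_pmd_enqueue_burst() above batches consecutive ops that share a session, flushing a batch when it reaches KASUMI_MAX_BURST (4), when the session changes, or when an op has a bit-level cipher length/offset and must be processed alone. A self-contained sketch of just that grouping decision, using an invented sketch_op type and integer session ids instead of the real rte_crypto structures, behaves as follows:

#include <stdio.h>

#define SKETCH_MAX_BURST 4
#define BYTE_LEN 8

struct sketch_op {
	int session_id;		/* stand-in for the kasumi_session pointer */
	unsigned int len_bits;	/* cipher.data.length, in bits */
	unsigned int off_bits;	/* cipher.data.offset, in bits */
};

/* Walk the ops the same way the enqueue loop does and report batches. */
static void
sketch_enqueue(const struct sketch_op *ops, int nb_ops)
{
	int prev_sess = -1, burst = 0, i;

	for (i = 0; i < nb_ops; i++) {
		if (ops[i].len_bits % BYTE_LEN || ops[i].off_bits % BYTE_LEN) {
			if (burst)
				printf("flush batch of %d (session %d)\n",
				       burst, prev_sess);
			printf("bit-level op %d processed alone\n", i);
			prev_sess = -1;
			burst = 0;
			continue;
		}
		if (prev_sess == -1 || ops[i].session_id == prev_sess) {
			prev_sess = ops[i].session_id;
			if (++burst == SKETCH_MAX_BURST) {
				printf("flush full batch of %d (session %d)\n",
				       burst, prev_sess);
				prev_sess = -1;
				burst = 0;
			}
		} else {
			printf("flush batch of %d (session %d)\n",
			       burst, prev_sess);
			prev_sess = ops[i].session_id;
			burst = 1;
		}
	}
	if (burst)
		printf("flush final batch of %d (session %d)\n",
		       burst, prev_sess);
}

int main(void)
{
	struct sketch_op ops[] = {
		{ 1, 512, 0 }, { 1, 512, 0 },
		{ 1, 100, 0 },	/* 100 bits: not byte aligned, bit-level path */
		{ 2, 512, 0 }, { 2, 512, 0 },
	};

	sketch_enqueue(ops, 5);
	return 0;
}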
diff --git a/src/spdk/dpdk/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c b/src/spdk/dpdk/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c
new file mode 100644
index 00000000..9e4bf1b5
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c
@@ -0,0 +1,319 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2018 Intel Corporation
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "rte_kasumi_pmd_private.h"
+
+static const struct rte_cryptodev_capabilities kasumi_pmd_capabilities[] = {
+ { /* KASUMI (F9) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_KASUMI_F9,
+ .block_size = 8,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 4,
+ .max = 4,
+ .increment = 0
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* KASUMI (F8) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_KASUMI_F8,
+ .block_size = 8,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+/** Configure device */
+static int
+kasumi_pmd_config(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused struct rte_cryptodev_config *config)
+{
+ return 0;
+}
+
+/** Start device */
+static int
+kasumi_pmd_start(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+/** Stop device */
+static void
+kasumi_pmd_stop(__rte_unused struct rte_cryptodev *dev)
+{
+}
+
+/** Close device */
+static int
+kasumi_pmd_close(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+
+/** Get device statistics */
+static void
+kasumi_pmd_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct kasumi_qp *qp = dev->data->queue_pairs[qp_id];
+
+ stats->enqueued_count += qp->qp_stats.enqueued_count;
+ stats->dequeued_count += qp->qp_stats.dequeued_count;
+
+ stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
+ stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
+ }
+}
+
+/** Reset device statistics */
+static void
+kasumi_pmd_stats_reset(struct rte_cryptodev *dev)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct kasumi_qp *qp = dev->data->queue_pairs[qp_id];
+
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ }
+}
+
+
+/** Get device info */
+static void
+kasumi_pmd_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *dev_info)
+{
+ struct kasumi_private *internals = dev->data->dev_private;
+
+ if (dev_info != NULL) {
+ dev_info->driver_id = dev->driver_id;
+ dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
+ /* No limit of number of sessions */
+ dev_info->sym.max_nb_sessions = 0;
+ dev_info->feature_flags = dev->feature_flags;
+ dev_info->capabilities = kasumi_pmd_capabilities;
+ }
+}
+
+/** Release queue pair */
+static int
+kasumi_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+ struct kasumi_qp *qp = dev->data->queue_pairs[qp_id];
+
+ if (qp != NULL) {
+ rte_ring_free(qp->processed_ops);
+ rte_free(qp);
+ dev->data->queue_pairs[qp_id] = NULL;
+ }
+ return 0;
+}
+
+/** set a unique name for the queue pair based on its name, dev_id and qp_id */
+static int
+kasumi_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
+ struct kasumi_qp *qp)
+{
+ unsigned n = snprintf(qp->name, sizeof(qp->name),
+ "kasumi_pmd_%u_qp_%u",
+ dev->data->dev_id, qp->id);
+
+ if (n >= sizeof(qp->name))
+ return -1;
+
+ return 0;
+}
+
+/** Create a ring to place processed ops on */
+static struct rte_ring *
+kasumi_pmd_qp_create_processed_ops_ring(struct kasumi_qp *qp,
+ unsigned ring_size, int socket_id)
+{
+ struct rte_ring *r;
+
+ r = rte_ring_lookup(qp->name);
+ if (r) {
+ if (rte_ring_get_size(r) == ring_size) {
+ KASUMI_LOG(INFO, "Reusing existing ring %s"
+ " for processed packets",
+ qp->name);
+ return r;
+ }
+
+ KASUMI_LOG(ERR, "Unable to reuse existing ring %s"
+ " for processed packets",
+ qp->name);
+ return NULL;
+ }
+
+ return rte_ring_create(qp->name, ring_size, socket_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+}
+
+/** Setup a queue pair */
+static int
+kasumi_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id, struct rte_mempool *session_pool)
+{
+ struct kasumi_qp *qp = NULL;
+
+ /* Free memory prior to re-allocation if needed. */
+ if (dev->data->queue_pairs[qp_id] != NULL)
+ kasumi_pmd_qp_release(dev, qp_id);
+
+ /* Allocate the queue pair data structure. */
+ qp = rte_zmalloc_socket("KASUMI PMD Queue Pair", sizeof(*qp),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (qp == NULL)
+ return (-ENOMEM);
+
+ qp->id = qp_id;
+ dev->data->queue_pairs[qp_id] = qp;
+
+ if (kasumi_pmd_qp_set_unique_name(dev, qp))
+ goto qp_setup_cleanup;
+
+ qp->processed_ops = kasumi_pmd_qp_create_processed_ops_ring(qp,
+ qp_conf->nb_descriptors, socket_id);
+ if (qp->processed_ops == NULL)
+ goto qp_setup_cleanup;
+
+ qp->sess_mp = session_pool;
+
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+
+ return 0;
+
+qp_setup_cleanup:
+ rte_free(qp);
+
+ return -1;
+}
+
+/** Return the number of allocated queue pairs */
+static uint32_t
+kasumi_pmd_qp_count(struct rte_cryptodev *dev)
+{
+ return dev->data->nb_queue_pairs;
+}
+
+/** Returns the size of the KASUMI session structure */
+static unsigned
+kasumi_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+ return sizeof(struct kasumi_session);
+}
+
+/** Configure a KASUMI session from a crypto xform chain */
+static int
+kasumi_pmd_sym_session_configure(struct rte_cryptodev *dev __rte_unused,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mempool)
+{
+ void *sess_private_data;
+ int ret;
+
+ if (unlikely(sess == NULL)) {
+ KASUMI_LOG(ERR, "invalid session struct");
+ return -EINVAL;
+ }
+
+ if (rte_mempool_get(mempool, &sess_private_data)) {
+ KASUMI_LOG(ERR,
+ "Couldn't get object from session mempool");
+ return -ENOMEM;
+ }
+
+ ret = kasumi_set_session_parameters(sess_private_data, xform);
+ if (ret != 0) {
+ KASUMI_LOG(ERR, "failed configure session parameters");
+
+ /* Return session to mempool */
+ rte_mempool_put(mempool, sess_private_data);
+ return ret;
+ }
+
+ set_sym_session_private_data(sess, dev->driver_id,
+ sess_private_data);
+
+ return 0;
+}
+
+/** Clear the memory of session so it doesn't leave key material behind */
+static void
+kasumi_pmd_sym_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
+{
+ uint8_t index = dev->driver_id;
+ void *sess_priv = get_sym_session_private_data(sess, index);
+
+ /* Zero out the whole structure */
+ if (sess_priv) {
+ memset(sess_priv, 0, sizeof(struct kasumi_session));
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+ set_sym_session_private_data(sess, index, NULL);
+ rte_mempool_put(sess_mp, sess_priv);
+ }
+}
+
+struct rte_cryptodev_ops kasumi_pmd_ops = {
+ .dev_configure = kasumi_pmd_config,
+ .dev_start = kasumi_pmd_start,
+ .dev_stop = kasumi_pmd_stop,
+ .dev_close = kasumi_pmd_close,
+
+ .stats_get = kasumi_pmd_stats_get,
+ .stats_reset = kasumi_pmd_stats_reset,
+
+ .dev_infos_get = kasumi_pmd_info_get,
+
+ .queue_pair_setup = kasumi_pmd_qp_setup,
+ .queue_pair_release = kasumi_pmd_qp_release,
+ .queue_pair_count = kasumi_pmd_qp_count,
+
+ .sym_session_get_size = kasumi_pmd_sym_session_get_size,
+ .sym_session_configure = kasumi_pmd_sym_session_configure,
+ .sym_session_clear = kasumi_pmd_sym_session_clear
+};
+
+struct rte_cryptodev_ops *rte_kasumi_pmd_ops = &kasumi_pmd_ops;
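
The kasumi_pmd_capabilities table above (like the dpaa_sec table earlier in this patch) is consumed by helpers such as rte_cryptodev_sym_capability_check_cipher()/_auth(), which roughly accept a key or digest size when it lies in [min, max] and is reachable from min in steps of increment, with increment 0 meaning a single fixed size. A self-contained sketch of that range check, with a hypothetical sketch_range type standing in for struct rte_crypto_param_range:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for struct rte_crypto_param_range. */
struct sketch_range {
	uint16_t min;
	uint16_t max;
	uint16_t increment;
};

static bool
sketch_range_check(const struct sketch_range *r, uint16_t value)
{
	if (r->increment == 0)
		return value == r->min;		/* single fixed size */
	if (value < r->min || value > r->max)
		return false;
	return (value - r->min) % r->increment == 0;
}

int main(void)
{
	/* KASUMI F8 key: exactly 16 bytes (min == max, increment 0). */
	struct sketch_range f8_key = { 16, 16, 0 };
	/* dpaa_sec AES key: 16..32 bytes in steps of 8 (16, 24, 32). */
	struct sketch_range aes_key = { 16, 32, 8 };

	printf("F8 key 16: %d, 24: %d\n",
	       sketch_range_check(&f8_key, 16), sketch_range_check(&f8_key, 24));
	printf("AES key 24: %d, 20: %d\n",
	       sketch_range_check(&aes_key, 24), sketch_range_check(&aes_key, 20));
	return 0;
}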
diff --git a/src/spdk/dpdk/drivers/crypto/kasumi/rte_kasumi_pmd_private.h b/src/spdk/dpdk/drivers/crypto/kasumi/rte_kasumi_pmd_private.h
new file mode 100644
index 00000000..488777ca
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/kasumi/rte_kasumi_pmd_private.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2018 Intel Corporation
+ */
+
+#ifndef _RTE_KASUMI_PMD_PRIVATE_H_
+#define _RTE_KASUMI_PMD_PRIVATE_H_
+
+#include <sso_kasumi.h>
+
+#define CRYPTODEV_NAME_KASUMI_PMD crypto_kasumi
+/**< KASUMI PMD device name */
+
+/** KASUMI PMD LOGTYPE DRIVER */
+int kasumi_logtype_driver;
+
+#define KASUMI_LOG(level, fmt, ...) \
+ rte_log(RTE_LOG_ ## level, kasumi_logtype_driver, \
+ "%s() line %u: " fmt "\n", __func__, __LINE__, \
+ ## __VA_ARGS__)
+
+#define KASUMI_DIGEST_LENGTH 4
+
+/** private data structure for each virtual KASUMI device */
+struct kasumi_private {
+ unsigned max_nb_queue_pairs;
+ /**< Max number of queue pairs supported by device */
+};
+
+/** KASUMI buffer queue pair */
+struct kasumi_qp {
+ uint16_t id;
+ /**< Queue Pair Identifier */
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ /**< Unique Queue Pair Name */
+ struct rte_ring *processed_ops;
+ /**< Ring for placing processed ops */
+ struct rte_mempool *sess_mp;
+ /**< Session Mempool */
+ struct rte_cryptodev_stats qp_stats;
+ /**< Queue pair statistics */
+ uint8_t temp_digest[KASUMI_DIGEST_LENGTH];
+ /**< Buffer used to store the digest generated
+ * by the driver when verifying a digest provided
+ * by the user (using authentication verify operation)
+ */
+} __rte_cache_aligned;
+
+enum kasumi_operation {
+ KASUMI_OP_ONLY_CIPHER,
+ KASUMI_OP_ONLY_AUTH,
+ KASUMI_OP_CIPHER_AUTH,
+ KASUMI_OP_AUTH_CIPHER,
+ KASUMI_OP_NOT_SUPPORTED
+};
+
+/** KASUMI private session structure */
+struct kasumi_session {
+ /* Keys have to be 16-byte aligned */
+ sso_kasumi_key_sched_t pKeySched_cipher;
+ sso_kasumi_key_sched_t pKeySched_hash;
+ enum kasumi_operation op;
+ enum rte_crypto_auth_operation auth_op;
+ uint16_t cipher_iv_offset;
+} __rte_cache_aligned;
+
+
+int
+kasumi_set_session_parameters(struct kasumi_session *sess,
+ const struct rte_crypto_sym_xform *xform);
+
+
+/** device specific operations function pointer structure */
+struct rte_cryptodev_ops *rte_kasumi_pmd_ops;
+
+#endif /* _RTE_KASUMI_PMD_PRIVATE_H_ */
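
For reference, the xform chain that kasumi_set_session_parameters() classifies as KASUMI_OP_CIPHER_AUTH is a KASUMI F8 cipher transform whose next pointer leads to a KASUMI F9 auth transform. The sketch below builds such a chain; it assumes the DPDK headers bundled in this tree are on the include path, the key buffers are caller-provided dummies, and the IV offset follows the common convention of placing the IV right after the symmetric op in the crypto op's private area:

#include <string.h>
#include <stdint.h>
#include <rte_crypto.h>

/* Build the F8-then-F9 chain that the KASUMI PMD accepts as CIPHER_AUTH. */
static void
sketch_build_kasumi_chain(struct rte_crypto_sym_xform *cipher,
			  struct rte_crypto_sym_xform *auth,
			  uint8_t *key_f8, uint8_t *key_f9)
{
	memset(cipher, 0, sizeof(*cipher));
	memset(auth, 0, sizeof(*auth));

	cipher->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	cipher->cipher.algo = RTE_CRYPTO_CIPHER_KASUMI_F8;
	cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
	cipher->cipher.key.data = key_f8;
	cipher->cipher.key.length = 16;		/* KASUMI_KEY_LENGTH */
	/* IV stored right after the sym op inside the crypto op. */
	cipher->cipher.iv.offset = sizeof(struct rte_crypto_op) +
				   sizeof(struct rte_crypto_sym_op);
	cipher->cipher.iv.length = 8;		/* KASUMI_IV_LENGTH */
	cipher->next = auth;

	auth->type = RTE_CRYPTO_SYM_XFORM_AUTH;
	auth->auth.algo = RTE_CRYPTO_AUTH_KASUMI_F9;
	auth->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
	auth->auth.key.data = key_f9;
	auth->auth.key.length = 16;
	auth->auth.digest_length = 4;		/* KASUMI_DIGEST_LENGTH */
	auth->next = NULL;
}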
diff --git a/src/spdk/dpdk/drivers/crypto/kasumi/rte_pmd_kasumi_version.map b/src/spdk/dpdk/drivers/crypto/kasumi/rte_pmd_kasumi_version.map
new file mode 100644
index 00000000..8ffeca93
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/kasumi/rte_pmd_kasumi_version.map
@@ -0,0 +1,3 @@
+DPDK_16.07 {
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/crypto/meson.build b/src/spdk/dpdk/drivers/crypto/meson.build
new file mode 100644
index 00000000..d64ca418
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/meson.build
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+drivers = ['ccp', 'dpaa_sec', 'dpaa2_sec', 'mvsam',
+ 'null', 'openssl', 'qat', 'virtio']
+
+std_deps = ['cryptodev'] # cryptodev pulls in all other needed deps
+config_flag_fmt = 'RTE_LIBRTE_@0@_PMD'
+driver_name_fmt = 'rte_pmd_@0@'
diff --git a/src/spdk/dpdk/drivers/crypto/mvsam/Makefile b/src/spdk/dpdk/drivers/crypto/mvsam/Makefile
new file mode 100644
index 00000000..c3dc72c1
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/mvsam/Makefile
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Marvell International Ltd.
+# Copyright(c) 2017 Semihalf.
+# All rights reserved.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+ifneq ($(MAKECMDGOALS),clean)
+ifneq ($(MAKECMDGOALS),config)
+ifeq ($(LIBMUSDK_PATH),)
+$(error "Please define LIBMUSDK_PATH environment variable")
+endif
+endif
+endif
+
+# library name
+LIB = librte_pmd_mvsam_crypto.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -I$(LIBMUSDK_PATH)/include
+CFLAGS += -DMVCONF_TYPES_PUBLIC
+CFLAGS += -DMVCONF_DMA_PHYS_ADDR_T_PUBLIC
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_pmd_mvsam_version.map
+
+# external library dependencies
+LDLIBS += -L$(LIBMUSDK_PATH)/lib -lmusdk
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool
+LDLIBS += -lrte_cryptodev
+LDLIBS += -lrte_bus_vdev
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_MVSAM_CRYPTO) += rte_mrvl_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_MVSAM_CRYPTO) += rte_mrvl_pmd_ops.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/crypto/mvsam/meson.build b/src/spdk/dpdk/drivers/crypto/mvsam/meson.build
new file mode 100644
index 00000000..3c8ea3cf
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/mvsam/meson.build
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Marvell International Ltd.
+# Copyright(c) 2018 Semihalf.
+# All rights reserved.
+
+path = get_option('lib_musdk_dir')
+lib_dir = path + '/lib'
+inc_dir = path + '/include'
+
+lib = cc.find_library('libmusdk', dirs: [lib_dir], required: false)
+if not lib.found()
+ build = false
+else
+ ext_deps += lib
+ includes += include_directories(inc_dir)
+ cflags += ['-DMVCONF_TYPES_PUBLIC', '-DMVCONF_DMA_PHYS_ADDR_T_PUBLIC']
+endif
+
+sources = files('rte_mrvl_pmd.c', 'rte_mrvl_pmd_ops.c')
+
+deps += ['bus_vdev']
diff --git a/src/spdk/dpdk/drivers/crypto/mvsam/rte_mrvl_compat.h b/src/spdk/dpdk/drivers/crypto/mvsam/rte_mrvl_compat.h
new file mode 100644
index 00000000..4ab28d39
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/mvsam/rte_mrvl_compat.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Marvell International Ltd.
+ * Copyright(c) 2017 Semihalf.
+ * All rights reserved.
+ */
+
+#ifndef _RTE_MRVL_COMPAT_H_
+#define _RTE_MRVL_COMPAT_H_
+
+/* Unfortunately, container_of is defined by both DPDK and MUSDK,
+ * so keep only one definition.
+ *
+ * Note that it is not used in this PMD anyway.
+ */
+#ifdef container_of
+#undef container_of
+#endif
+#include "env/mv_autogen_comp_flags.h"
+#include "drivers/mv_sam.h"
+#include "drivers/mv_sam_cio.h"
+#include "drivers/mv_sam_session.h"
+
+#endif /* _RTE_MRVL_COMPAT_H_ */
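
The #undef above exists because both DPDK's rte_common.h and MUSDK define container_of, and including both without a guard would trigger a macro redefinition warning or error. A small self-contained sketch of the same guard pattern, with both definitions written inline here to stand in for the two libraries:

#include <stddef.h>
#include <stdio.h>

/* First "library" defines container_of (as DPDK's rte_common.h does). */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Before pulling in a second library that also defines it, drop ours so the
 * compiler does not complain about a conflicting redefinition. */
#ifdef container_of
#undef container_of
#endif
/* ...the second library's (here identical) definition follows... */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct outer {
	int a;
	int b;
};

int main(void)
{
	struct outer o = { 1, 2 };
	struct outer *p = container_of(&o.b, struct outer, b);

	printf("recovered a=%d\n", p->a);
	return 0;
}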
diff --git a/src/spdk/dpdk/drivers/crypto/mvsam/rte_mrvl_pmd.c b/src/spdk/dpdk/drivers/crypto/mvsam/rte_mrvl_pmd.c
new file mode 100644
index 00000000..73eff757
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/mvsam/rte_mrvl_pmd.c
@@ -0,0 +1,937 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Marvell International Ltd.
+ * Copyright(c) 2017 Semihalf.
+ * All rights reserved.
+ */
+
+#include <rte_common.h>
+#include <rte_hexdump.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_bus_vdev.h>
+#include <rte_malloc.h>
+#include <rte_cpuflags.h>
+
+#include "rte_mrvl_pmd_private.h"
+
+#define MRVL_MUSDK_DMA_MEMSIZE 41943040 /* 40 MiB */
+
+#define MRVL_PMD_MAX_NB_SESS_ARG ("max_nb_sessions")
+#define MRVL_PMD_DEFAULT_MAX_NB_SESSIONS 2048
+
+static uint8_t cryptodev_driver_id;
+
+struct mrvl_pmd_init_params {
+ struct rte_cryptodev_pmd_init_params common;
+ uint32_t max_nb_sessions;
+};
+
+const char *mrvl_pmd_valid_params[] = {
+ RTE_CRYPTODEV_PMD_NAME_ARG,
+ RTE_CRYPTODEV_PMD_MAX_NB_QP_ARG,
+ RTE_CRYPTODEV_PMD_SOCKET_ID_ARG,
+ MRVL_PMD_MAX_NB_SESS_ARG
+};
+
+/**
+ * Flags whether a particular crypto algorithm is supported by the PMD/MUSDK.
+ *
+ * Not Supported is deliberately the default value (0), so only the map
+ * sizes need to be defined; non-initialized entries are automatically
+ * treated as not supported.
+ */
+enum algo_supported {
+ ALGO_NOT_SUPPORTED = 0,
+ ALGO_SUPPORTED = 1,
+};
+
+/** Map elements for cipher mapping.*/
+struct cipher_params_mapping {
+ enum algo_supported supported; /**< On/Off switch */
+ enum sam_cipher_alg cipher_alg; /**< Cipher algorithm */
+ enum sam_cipher_mode cipher_mode; /**< Cipher mode */
+ unsigned int max_key_len; /**< Maximum key length (in bytes)*/
+}
+/* Keep the structure small so multiple map entries fit in one cache line. */
+__rte_aligned(32);
+
+/** Map elements for auth mapping.*/
+struct auth_params_mapping {
+ enum algo_supported supported; /**< On/off switch */
+ enum sam_auth_alg auth_alg; /**< Auth algorithm */
+}
+/* Keep the structure small so multiple map entries fit in one cache line. */
+__rte_aligned(32);
+
+/**
+ * Map of supported cipher algorithms.
+ */
+static const
+struct cipher_params_mapping cipher_map[RTE_CRYPTO_CIPHER_LIST_END] = {
+ [RTE_CRYPTO_CIPHER_3DES_CBC] = {
+ .supported = ALGO_SUPPORTED,
+ .cipher_alg = SAM_CIPHER_3DES,
+ .cipher_mode = SAM_CIPHER_CBC,
+ .max_key_len = BITS2BYTES(192) },
+ [RTE_CRYPTO_CIPHER_3DES_CTR] = {
+ .supported = ALGO_SUPPORTED,
+ .cipher_alg = SAM_CIPHER_3DES,
+ .cipher_mode = SAM_CIPHER_CTR,
+ .max_key_len = BITS2BYTES(192) },
+ [RTE_CRYPTO_CIPHER_3DES_ECB] = {
+ .supported = ALGO_SUPPORTED,
+ .cipher_alg = SAM_CIPHER_3DES,
+ .cipher_mode = SAM_CIPHER_ECB,
+ .max_key_len = BITS2BYTES(192) },
+ [RTE_CRYPTO_CIPHER_AES_CBC] = {
+ .supported = ALGO_SUPPORTED,
+ .cipher_alg = SAM_CIPHER_AES,
+ .cipher_mode = SAM_CIPHER_CBC,
+ .max_key_len = BITS2BYTES(256) },
+ [RTE_CRYPTO_CIPHER_AES_CTR] = {
+ .supported = ALGO_SUPPORTED,
+ .cipher_alg = SAM_CIPHER_AES,
+ .cipher_mode = SAM_CIPHER_CTR,
+ .max_key_len = BITS2BYTES(256) },
+};
+
+/**
+ * Map of supported auth algorithms.
+ */
+static const
+struct auth_params_mapping auth_map[RTE_CRYPTO_AUTH_LIST_END] = {
+ [RTE_CRYPTO_AUTH_MD5_HMAC] = {
+ .supported = ALGO_SUPPORTED,
+ .auth_alg = SAM_AUTH_HMAC_MD5 },
+ [RTE_CRYPTO_AUTH_MD5] = {
+ .supported = ALGO_SUPPORTED,
+ .auth_alg = SAM_AUTH_HASH_MD5 },
+ [RTE_CRYPTO_AUTH_SHA1_HMAC] = {
+ .supported = ALGO_SUPPORTED,
+ .auth_alg = SAM_AUTH_HMAC_SHA1 },
+ [RTE_CRYPTO_AUTH_SHA1] = {
+ .supported = ALGO_SUPPORTED,
+ .auth_alg = SAM_AUTH_HASH_SHA1 },
+ [RTE_CRYPTO_AUTH_SHA224] = {
+ .supported = ALGO_SUPPORTED,
+ .auth_alg = SAM_AUTH_HASH_SHA2_224 },
+ [RTE_CRYPTO_AUTH_SHA256_HMAC] = {
+ .supported = ALGO_SUPPORTED,
+ .auth_alg = SAM_AUTH_HMAC_SHA2_256 },
+ [RTE_CRYPTO_AUTH_SHA256] = {
+ .supported = ALGO_SUPPORTED,
+ .auth_alg = SAM_AUTH_HASH_SHA2_256 },
+ [RTE_CRYPTO_AUTH_SHA384_HMAC] = {
+ .supported = ALGO_SUPPORTED,
+ .auth_alg = SAM_AUTH_HMAC_SHA2_384 },
+ [RTE_CRYPTO_AUTH_SHA384] = {
+ .supported = ALGO_SUPPORTED,
+ .auth_alg = SAM_AUTH_HASH_SHA2_384 },
+ [RTE_CRYPTO_AUTH_SHA512_HMAC] = {
+ .supported = ALGO_SUPPORTED,
+ .auth_alg = SAM_AUTH_HMAC_SHA2_512 },
+ [RTE_CRYPTO_AUTH_SHA512] = {
+ .supported = ALGO_SUPPORTED,
+ .auth_alg = SAM_AUTH_HASH_SHA2_512 },
+ [RTE_CRYPTO_AUTH_AES_GMAC] = {
+ .supported = ALGO_SUPPORTED,
+ .auth_alg = SAM_AUTH_AES_GMAC },
+};
+
+/**
+ * Map of supported aead algorithms.
+ */
+static const
+struct cipher_params_mapping aead_map[RTE_CRYPTO_AEAD_LIST_END] = {
+ [RTE_CRYPTO_AEAD_AES_GCM] = {
+ .supported = ALGO_SUPPORTED,
+ .cipher_alg = SAM_CIPHER_AES,
+ .cipher_mode = SAM_CIPHER_GCM,
+ .max_key_len = BITS2BYTES(256) },
+};
+
+/*
+ *-----------------------------------------------------------------------------
+ * Forward declarations.
+ *-----------------------------------------------------------------------------
+ */
+static int cryptodev_mrvl_crypto_uninit(struct rte_vdev_device *vdev);
+
+/*
+ *-----------------------------------------------------------------------------
+ * Session Preparation.
+ *-----------------------------------------------------------------------------
+ */
+
+/**
+ * Get xform chain order.
+ *
+ * @param xform Pointer to configuration structure chain for crypto operations.
+ * @returns Order of crypto operations.
+ */
+static enum mrvl_crypto_chain_order
+mrvl_crypto_get_chain_order(const struct rte_crypto_sym_xform *xform)
+{
+	/* Currently, Marvell supports at most 2 operations in a chain */
+ if (xform->next != NULL && xform->next->next != NULL)
+ return MRVL_CRYPTO_CHAIN_NOT_SUPPORTED;
+
+ if (xform->next != NULL) {
+ if ((xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) &&
+ (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER))
+ return MRVL_CRYPTO_CHAIN_AUTH_CIPHER;
+
+ if ((xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) &&
+ (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH))
+ return MRVL_CRYPTO_CHAIN_CIPHER_AUTH;
+ } else {
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ return MRVL_CRYPTO_CHAIN_AUTH_ONLY;
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+ return MRVL_CRYPTO_CHAIN_CIPHER_ONLY;
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD)
+ return MRVL_CRYPTO_CHAIN_COMBINED;
+ }
+ return MRVL_CRYPTO_CHAIN_NOT_SUPPORTED;
+}
+
+/**
+ * Set session parameters for cipher part.
+ *
+ * @param sess Crypto session pointer.
+ * @param cipher_xform Pointer to configuration structure for cipher operations.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_crypto_set_cipher_session_parameters(struct mrvl_crypto_session *sess,
+ const struct rte_crypto_sym_xform *cipher_xform)
+{
+ /* Make sure we've got proper struct */
+ if (cipher_xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ MRVL_CRYPTO_LOG_ERR("Wrong xform struct provided!");
+ return -EINVAL;
+ }
+
+ /* See if map data is present and valid */
+	if ((cipher_xform->cipher.algo >= RTE_DIM(cipher_map)) ||
+ (cipher_map[cipher_xform->cipher.algo].supported
+ != ALGO_SUPPORTED)) {
+ MRVL_CRYPTO_LOG_ERR("Cipher algorithm not supported!");
+ return -EINVAL;
+ }
+
+ sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;
+
+ sess->sam_sess_params.dir =
+ (cipher_xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
+ SAM_DIR_ENCRYPT : SAM_DIR_DECRYPT;
+ sess->sam_sess_params.cipher_alg =
+ cipher_map[cipher_xform->cipher.algo].cipher_alg;
+ sess->sam_sess_params.cipher_mode =
+ cipher_map[cipher_xform->cipher.algo].cipher_mode;
+
+ /* Assume IV will be passed together with data. */
+ sess->sam_sess_params.cipher_iv = NULL;
+
+ /* Get max key length. */
+ if (cipher_xform->cipher.key.length >
+ cipher_map[cipher_xform->cipher.algo].max_key_len) {
+ MRVL_CRYPTO_LOG_ERR("Wrong key length!");
+ return -EINVAL;
+ }
+
+ sess->sam_sess_params.cipher_key_len = cipher_xform->cipher.key.length;
+ sess->sam_sess_params.cipher_key = cipher_xform->cipher.key.data;
+
+ return 0;
+}
+
+/**
+ * Set session parameters for authentication part.
+ *
+ * @param sess Crypto session pointer.
+ * @param auth_xform Pointer to configuration structure for auth operations.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_crypto_set_auth_session_parameters(struct mrvl_crypto_session *sess,
+ const struct rte_crypto_sym_xform *auth_xform)
+{
+ /* Make sure we've got proper struct */
+ if (auth_xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
+ MRVL_CRYPTO_LOG_ERR("Wrong xform struct provided!");
+ return -EINVAL;
+ }
+
+ /* See if map data is present and valid */
+	if ((auth_xform->auth.algo >= RTE_DIM(auth_map)) ||
+ (auth_map[auth_xform->auth.algo].supported != ALGO_SUPPORTED)) {
+ MRVL_CRYPTO_LOG_ERR("Auth algorithm not supported!");
+ return -EINVAL;
+ }
+
+ sess->sam_sess_params.dir =
+ (auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
+ SAM_DIR_ENCRYPT : SAM_DIR_DECRYPT;
+ sess->sam_sess_params.auth_alg =
+ auth_map[auth_xform->auth.algo].auth_alg;
+ sess->sam_sess_params.u.basic.auth_icv_len =
+ auth_xform->auth.digest_length;
+ /* auth_key must be NULL if auth algorithm does not use HMAC */
+ sess->sam_sess_params.auth_key = auth_xform->auth.key.length ?
+ auth_xform->auth.key.data : NULL;
+ sess->sam_sess_params.auth_key_len = auth_xform->auth.key.length;
+
+ return 0;
+}
+
+/**
+ * Set session parameters for aead part.
+ *
+ * @param sess Crypto session pointer.
+ * @param aead_xform Pointer to configuration structure for aead operations.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_crypto_set_aead_session_parameters(struct mrvl_crypto_session *sess,
+ const struct rte_crypto_sym_xform *aead_xform)
+{
+ /* Make sure we've got proper struct */
+ if (aead_xform->type != RTE_CRYPTO_SYM_XFORM_AEAD) {
+ MRVL_CRYPTO_LOG_ERR("Wrong xform struct provided!");
+ return -EINVAL;
+ }
+
+ /* See if map data is present and valid */
+	if ((aead_xform->aead.algo >= RTE_DIM(aead_map)) ||
+ (aead_map[aead_xform->aead.algo].supported
+ != ALGO_SUPPORTED)) {
+ MRVL_CRYPTO_LOG_ERR("AEAD algorithm not supported!");
+ return -EINVAL;
+ }
+
+ sess->sam_sess_params.dir =
+ (aead_xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
+ SAM_DIR_ENCRYPT : SAM_DIR_DECRYPT;
+ sess->sam_sess_params.cipher_alg =
+ aead_map[aead_xform->aead.algo].cipher_alg;
+ sess->sam_sess_params.cipher_mode =
+ aead_map[aead_xform->aead.algo].cipher_mode;
+
+ /* Assume IV will be passed together with data. */
+ sess->sam_sess_params.cipher_iv = NULL;
+
+ /* Get max key length. */
+ if (aead_xform->aead.key.length >
+ aead_map[aead_xform->aead.algo].max_key_len) {
+ MRVL_CRYPTO_LOG_ERR("Wrong key length!");
+ return -EINVAL;
+ }
+
+ sess->sam_sess_params.cipher_key = aead_xform->aead.key.data;
+ sess->sam_sess_params.cipher_key_len = aead_xform->aead.key.length;
+
+ if (sess->sam_sess_params.cipher_mode == SAM_CIPHER_GCM)
+ sess->sam_sess_params.auth_alg = SAM_AUTH_AES_GCM;
+
+ sess->sam_sess_params.u.basic.auth_icv_len =
+ aead_xform->aead.digest_length;
+
+ sess->sam_sess_params.u.basic.auth_aad_len =
+ aead_xform->aead.aad_length;
+
+ return 0;
+}
+
+/**
+ * Parse crypto transform chain and setup session parameters.
+ *
+ * @param sess Pointer to crypto session
+ * @param xform Pointer to configuration structure chain for crypto operations.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+int
+mrvl_crypto_set_session_parameters(struct mrvl_crypto_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ const struct rte_crypto_sym_xform *cipher_xform = NULL;
+ const struct rte_crypto_sym_xform *auth_xform = NULL;
+ const struct rte_crypto_sym_xform *aead_xform = NULL;
+
+ /* Filter out spurious/broken requests */
+ if (xform == NULL)
+ return -EINVAL;
+
+ sess->chain_order = mrvl_crypto_get_chain_order(xform);
+ switch (sess->chain_order) {
+ case MRVL_CRYPTO_CHAIN_CIPHER_AUTH:
+ cipher_xform = xform;
+ auth_xform = xform->next;
+ break;
+ case MRVL_CRYPTO_CHAIN_AUTH_CIPHER:
+ auth_xform = xform;
+ cipher_xform = xform->next;
+ break;
+ case MRVL_CRYPTO_CHAIN_CIPHER_ONLY:
+ cipher_xform = xform;
+ break;
+ case MRVL_CRYPTO_CHAIN_AUTH_ONLY:
+ auth_xform = xform;
+ break;
+ case MRVL_CRYPTO_CHAIN_COMBINED:
+ aead_xform = xform;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if ((cipher_xform != NULL) &&
+ (mrvl_crypto_set_cipher_session_parameters(
+ sess, cipher_xform) < 0)) {
+ MRVL_CRYPTO_LOG_ERR("Invalid/unsupported cipher parameters");
+ return -EINVAL;
+ }
+
+ if ((auth_xform != NULL) &&
+ (mrvl_crypto_set_auth_session_parameters(
+ sess, auth_xform) < 0)) {
+ MRVL_CRYPTO_LOG_ERR("Invalid/unsupported auth parameters");
+ return -EINVAL;
+ }
+
+ if ((aead_xform != NULL) &&
+ (mrvl_crypto_set_aead_session_parameters(
+ sess, aead_xform) < 0)) {
+ MRVL_CRYPTO_LOG_ERR("Invalid/unsupported aead parameters");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ *-----------------------------------------------------------------------------
+ * Process Operations
+ *-----------------------------------------------------------------------------
+ */
+
+/**
+ * Prepare a single request.
+ *
+ * This function translates a DPDK crypto request into one
+ * understood by MUSDK's SAM. If this is the first request in a session,
+ * it starts the session.
+ *
+ * @param request Pointer to a pre-allocated and zeroed request buffer [Out].
+ * @param src_bd Pointer to pre-allocated source descriptor [Out].
+ * @param dst_bd Pointer to pre-allocated destination descriptor [Out].
+ * @param op Pointer to DPDK crypto operation struct [In].
+ */
+static inline int
+mrvl_request_prepare(struct sam_cio_op_params *request,
+ struct sam_buf_info *src_bd,
+ struct sam_buf_info *dst_bd,
+ struct rte_crypto_op *op)
+{
+ struct mrvl_crypto_session *sess;
+ struct rte_mbuf *dst_mbuf;
+ uint8_t *digest;
+
+ if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
+ MRVL_CRYPTO_LOG_ERR("MRVL CRYPTO PMD only supports session "
+ "oriented requests, op (%p) is sessionless.",
+ op);
+ return -EINVAL;
+ }
+
+ sess = (struct mrvl_crypto_session *)get_sym_session_private_data(
+ op->sym->session, cryptodev_driver_id);
+ if (unlikely(sess == NULL)) {
+ MRVL_CRYPTO_LOG_ERR("Session was not created for this device");
+ return -EINVAL;
+ }
+
+	/*
+	 * If the application passed a NULL dst buffer, it expects the
+	 * result to be written back into the src buffer.
+	 */
+ dst_mbuf = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src;
+
+ request->sa = sess->sam_sess;
+ request->cookie = op;
+
+	/* Only single-segment buffers are supported. */
+ request->num_bufs = 1;
+ request->src = src_bd;
+ src_bd->vaddr = rte_pktmbuf_mtod(op->sym->m_src, void *);
+ src_bd->paddr = rte_pktmbuf_iova(op->sym->m_src);
+ src_bd->len = rte_pktmbuf_data_len(op->sym->m_src);
+
+ /* Empty source. */
+ if (rte_pktmbuf_data_len(op->sym->m_src) == 0) {
+ /* EIP does not support 0 length buffers. */
+ MRVL_CRYPTO_LOG_ERR("Buffer length == 0 not supported!");
+ return -1;
+ }
+
+ /* Empty destination. */
+ if (rte_pktmbuf_data_len(dst_mbuf) == 0) {
+ /* Make dst buffer fit at least source data. */
+ if (rte_pktmbuf_append(dst_mbuf,
+ rte_pktmbuf_data_len(op->sym->m_src)) == NULL) {
+ MRVL_CRYPTO_LOG_ERR("Unable to set big enough dst buffer!");
+ return -1;
+ }
+ }
+
+ request->dst = dst_bd;
+ dst_bd->vaddr = rte_pktmbuf_mtod(dst_mbuf, void *);
+ dst_bd->paddr = rte_pktmbuf_iova(dst_mbuf);
+
+ /*
+ * We can use all available space in dst_mbuf,
+ * not only what's used currently.
+ */
+ dst_bd->len = dst_mbuf->buf_len - rte_pktmbuf_headroom(dst_mbuf);
+
+ if (sess->chain_order == MRVL_CRYPTO_CHAIN_COMBINED) {
+ request->cipher_len = op->sym->aead.data.length;
+ request->cipher_offset = op->sym->aead.data.offset;
+ request->cipher_iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+ sess->cipher_iv_offset);
+
+ request->auth_aad = op->sym->aead.aad.data;
+ request->auth_offset = request->cipher_offset;
+ request->auth_len = request->cipher_len;
+ } else {
+ request->cipher_len = op->sym->cipher.data.length;
+ request->cipher_offset = op->sym->cipher.data.offset;
+ request->cipher_iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+ sess->cipher_iv_offset);
+
+ request->auth_offset = op->sym->auth.data.offset;
+ request->auth_len = op->sym->auth.data.length;
+ }
+
+ digest = sess->chain_order == MRVL_CRYPTO_CHAIN_COMBINED ?
+ op->sym->aead.digest.data : op->sym->auth.digest.data;
+ if (digest == NULL) {
+		/* No digest requested - nothing more to prepare. */
+ return 0;
+ }
+
+ request->auth_icv_offset = request->auth_offset + request->auth_len;
+
+	/*
+	 * EIP supports only scenarios where the ICV (digest buffer) is placed
+	 * at auth_icv_offset. Any other placement risks errors.
+	 */
+ if (sess->sam_sess_params.dir == SAM_DIR_ENCRYPT) {
+ /*
+ * This should be the most common case anyway,
+ * EIP will overwrite DST buffer at auth_icv_offset.
+ */
+ if (rte_pktmbuf_mtod_offset(
+ dst_mbuf, uint8_t *,
+ request->auth_icv_offset) == digest) {
+ return 0;
+ }
+ } else {/* sess->sam_sess_params.dir == SAM_DIR_DECRYPT */
+ /*
+ * EIP will look for digest at auth_icv_offset
+ * offset in SRC buffer.
+ */
+ if (rte_pktmbuf_mtod_offset(
+ op->sym->m_src, uint8_t *,
+ request->auth_icv_offset) == digest) {
+ return 0;
+ }
+ }
+
+	/*
+	 * If we got here, the digest pointer is not at the
+	 * expected place.
+	 */
+ return -1;
+}
+
+/*
+ *-----------------------------------------------------------------------------
+ * PMD Framework handlers
+ *-----------------------------------------------------------------------------
+ */
+
+/**
+ * Enqueue burst.
+ *
+ * @param queue_pair Pointer to queue pair.
+ * @param ops Pointer to ops requests array.
+ * @param nb_ops Number of elements in ops requests array.
+ * @returns Number of elements consumed from ops.
+ */
+static uint16_t
+mrvl_crypto_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ uint16_t iter_ops = 0;
+ uint16_t to_enq = 0;
+ uint16_t consumed = 0;
+ int ret;
+ struct sam_cio_op_params requests[nb_ops];
+	/*
+	 * DPDK uses single-segment buffers, so the descriptors can stay simple.
+	 * SAM does not store bd pointers, so on-stack scope is enough.
+	 */
+ struct sam_buf_info src_bd[nb_ops];
+ struct sam_buf_info dst_bd[nb_ops];
+ struct mrvl_crypto_qp *qp = (struct mrvl_crypto_qp *)queue_pair;
+
+ if (nb_ops == 0)
+ return 0;
+
+ /* Prepare the burst. */
+ memset(&requests, 0, sizeof(requests));
+
+ /* Iterate through */
+ for (; iter_ops < nb_ops; ++iter_ops) {
+ if (mrvl_request_prepare(&requests[iter_ops],
+ &src_bd[iter_ops],
+ &dst_bd[iter_ops],
+ ops[iter_ops]) < 0) {
+ MRVL_CRYPTO_LOG_ERR(
+ "Error while parameters preparation!");
+ qp->stats.enqueue_err_count++;
+ ops[iter_ops]->status = RTE_CRYPTO_OP_STATUS_ERROR;
+
+			/*
+			 * The number of handled ops is increased even when
+			 * handling the op resulted in an error.
+			 */
+ ++consumed;
+ break;
+ }
+
+ ops[iter_ops]->status =
+ RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+
+ /* Increase the number of ops to enqueue. */
+ ++to_enq;
+ } /* for (; iter_ops < nb_ops;... */
+
+ if (to_enq > 0) {
+ /* Send the burst */
+ ret = sam_cio_enq(qp->cio, requests, &to_enq);
+ consumed += to_enq;
+ if (ret < 0) {
+ /*
+ * Trust SAM that to_enq now holds the number of ops
+ * actually accepted (the value is used here unmodified).
+ */
+ qp->stats.enqueue_err_count += to_enq;
+ for (iter_ops = 0; iter_ops < to_enq; ++iter_ops)
+ ops[iter_ops]->status =
+ RTE_CRYPTO_OP_STATUS_ERROR;
+ }
+ }
+
+ qp->stats.enqueued_count += to_enq;
+ return consumed;
+}
+
+/**
+ * Dequeue burst.
+ *
+ * @param queue_pair Pointer to queue pair.
+ * @param ops Pointer to ops requests array.
+ * @param nb_ops Number of elements in ops requests array.
+ * @returns Number of elements dequeued.
+ */
+static uint16_t
+mrvl_crypto_pmd_dequeue_burst(void *queue_pair,
+ struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ int ret;
+ struct mrvl_crypto_qp *qp = queue_pair;
+ struct sam_cio *cio = qp->cio;
+ struct sam_cio_op_result results[nb_ops];
+ uint16_t i;
+
+ ret = sam_cio_deq(cio, results, &nb_ops);
+ if (ret < 0) {
+ /* Count all dequeued as error. */
+ qp->stats.dequeue_err_count += nb_ops;
+
+ /* But act as if they were dequeued anyway. */
+ qp->stats.dequeued_count += nb_ops;
+
+ return 0;
+ }
+
+ /* Unpack and check results. */
+ for (i = 0; i < nb_ops; ++i) {
+ ops[i] = results[i].cookie;
+
+ switch (results[i].status) {
+ case SAM_CIO_OK:
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ break;
+ case SAM_CIO_ERR_ICV:
+ MRVL_CRYPTO_LOG_DBG("CIO returned SAM_CIO_ERR_ICV.");
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ break;
+ default:
+ MRVL_CRYPTO_LOG_DBG(
+ "CIO returned Error: %d", results[i].status);
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ break;
+ }
+ }
+
+ qp->stats.dequeued_count += nb_ops;
+ return nb_ops;
+}
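+
+/*
+ * Illustrative sketch (application side, not part of the driver): the two
+ * handlers above are reached through the generic cryptodev burst API, e.g.
+ * (dev_id, qp_id, ops and nb_ops are placeholders):
+ *
+ *   uint16_t sent = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, nb_ops);
+ *   uint16_t done = 0;
+ *
+ *   while (done < sent)
+ *       done += rte_cryptodev_dequeue_burst(dev_id, qp_id,
+ *                                           ops + done, sent - done);
+ */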
+
+/**
+ * Create a new crypto device.
+ *
+ * @param name Driver name.
+ * @param vdev Pointer to device structure.
+ * @param init_params Pointer to initialization parameters.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+cryptodev_mrvl_crypto_create(const char *name,
+ struct rte_vdev_device *vdev,
+ struct mrvl_pmd_init_params *init_params)
+{
+ struct rte_cryptodev *dev;
+ struct mrvl_crypto_private *internals;
+ struct sam_init_params sam_params;
+ int ret;
+
+ dev = rte_cryptodev_pmd_create(name, &vdev->device,
+ &init_params->common);
+ if (dev == NULL) {
+ MRVL_CRYPTO_LOG_ERR("failed to create cryptodev vdev");
+ goto init_error;
+ }
+
+ dev->driver_id = cryptodev_driver_id;
+ dev->dev_ops = rte_mrvl_crypto_pmd_ops;
+
+ /* Register rx/tx burst functions for data path. */
+ dev->enqueue_burst = mrvl_crypto_pmd_enqueue_burst;
+ dev->dequeue_burst = mrvl_crypto_pmd_dequeue_burst;
+
+ dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+ RTE_CRYPTODEV_FF_HW_ACCELERATED;
+
+ /* Set up the device's private data. */
+ internals = dev->data->dev_private;
+
+ internals->max_nb_qpairs = init_params->common.max_nb_queue_pairs;
+ internals->max_nb_sessions = init_params->max_nb_sessions;
+
+ /*
+ * ret == -EEXIST is not an error; it means DMA memory
+ * has already been initialized.
+ */
+ ret = mv_sys_dma_mem_init(MRVL_MUSDK_DMA_MEMSIZE);
+ if (ret < 0) {
+ if (ret != -EEXIST)
+ return ret;
+
+ MRVL_CRYPTO_LOG_INFO(
+ "DMA memory has been already initialized by a different driver.");
+ }
+
+ sam_params.max_num_sessions = internals->max_nb_sessions;
+
+ return sam_init(&sam_params);
+
+init_error:
+ MRVL_CRYPTO_LOG_ERR(
+ "driver %s: %s failed", init_params->common.name, __func__);
+
+ cryptodev_mrvl_crypto_uninit(vdev);
+ return -EFAULT;
+}
+
+/** Parse an integer argument. */
+static int
+parse_integer_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ int *i = (int *) extra_args;
+
+ *i = atoi(value);
+ if (*i < 0) {
+ MRVL_CRYPTO_LOG_ERR("Argument has to be positive.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/** Parse the device name argument. */
+static int
+parse_name_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ struct rte_cryptodev_pmd_init_params *params = extra_args;
+
+ if (strlen(value) >= RTE_CRYPTODEV_NAME_MAX_LEN - 1) {
+ MRVL_CRYPTO_LOG_ERR("Invalid name %s, should be less than "
+ "%u bytes.\n", value,
+ RTE_CRYPTODEV_NAME_MAX_LEN - 1);
+ return -EINVAL;
+ }
+
+ strncpy(params->name, value, RTE_CRYPTODEV_NAME_MAX_LEN);
+
+ return 0;
+}
+
+static int
+mrvl_pmd_parse_input_args(struct mrvl_pmd_init_params *params,
+ const char *input_args)
+{
+ struct rte_kvargs *kvlist = NULL;
+ int ret = 0;
+
+ if (params == NULL)
+ return -EINVAL;
+
+ if (input_args) {
+ kvlist = rte_kvargs_parse(input_args,
+ mrvl_pmd_valid_params);
+ if (kvlist == NULL)
+ return -1;
+
+ /* Common VDEV parameters */
+ ret = rte_kvargs_process(kvlist,
+ RTE_CRYPTODEV_PMD_MAX_NB_QP_ARG,
+ &parse_integer_arg,
+ &params->common.max_nb_queue_pairs);
+ if (ret < 0)
+ goto free_kvlist;
+
+ ret = rte_kvargs_process(kvlist,
+ RTE_CRYPTODEV_PMD_SOCKET_ID_ARG,
+ &parse_integer_arg,
+ &params->common.socket_id);
+ if (ret < 0)
+ goto free_kvlist;
+
+ ret = rte_kvargs_process(kvlist,
+ RTE_CRYPTODEV_PMD_NAME_ARG,
+ &parse_name_arg,
+ &params->common);
+ if (ret < 0)
+ goto free_kvlist;
+
+ ret = rte_kvargs_process(kvlist,
+ MRVL_PMD_MAX_NB_SESS_ARG,
+ &parse_integer_arg,
+ params);
+ if (ret < 0)
+ goto free_kvlist;
+
+ }
+
+free_kvlist:
+ rte_kvargs_free(kvlist);
+ return ret;
+}
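+
+/*
+ * Illustrative sketch (not part of the driver): the arguments parsed above
+ * come from the --vdev EAL option, e.g. an application could be launched
+ * with a device string such as:
+ *
+ *   --vdev="crypto_mvsam,max_nb_queue_pairs=2,max_nb_sessions=1024,socket_id=0"
+ *
+ * in which case this function receives the string
+ * "max_nb_queue_pairs=2,max_nb_sessions=1024,socket_id=0".
+ */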
+
+/**
+ * Initialize the crypto device.
+ *
+ * @param vdev Pointer to device structure.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+cryptodev_mrvl_crypto_init(struct rte_vdev_device *vdev)
+{
+ struct mrvl_pmd_init_params init_params = {
+ .common = {
+ .name = "",
+ .private_data_size =
+ sizeof(struct mrvl_crypto_private),
+ .max_nb_queue_pairs =
+ sam_get_num_inst() * SAM_HW_RING_NUM,
+ .socket_id = rte_socket_id()
+ },
+ .max_nb_sessions = MRVL_PMD_DEFAULT_MAX_NB_SESSIONS
+ };
+
+ const char *name, *args;
+ int ret;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+ args = rte_vdev_device_args(vdev);
+
+ ret = mrvl_pmd_parse_input_args(&init_params, args);
+ if (ret) {
+ RTE_LOG(ERR, PMD,
+ "Failed to parse initialisation arguments[%s]\n",
+ args);
+ return -EINVAL;
+ }
+
+ return cryptodev_mrvl_crypto_create(name, vdev, &init_params);
+}
+
+/**
+ * Uninitialize the crypto device
+ *
+ * @param vdev Pointer to device structure.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+cryptodev_mrvl_crypto_uninit(struct rte_vdev_device *vdev)
+{
+ struct rte_cryptodev *cryptodev;
+ const char *name = rte_vdev_device_name(vdev);
+
+ if (name == NULL)
+ return -EINVAL;
+
+ RTE_LOG(INFO, PMD,
+ "Closing Marvell crypto device %s on numa socket %u\n",
+ name, rte_socket_id());
+
+ sam_deinit();
+
+ cryptodev = rte_cryptodev_pmd_get_named_dev(name);
+ if (cryptodev == NULL)
+ return -ENODEV;
+
+ return rte_cryptodev_pmd_destroy(cryptodev);
+}
+
+/**
+ * Basic driver handlers for use in the constructor.
+ */
+static struct rte_vdev_driver cryptodev_mrvl_pmd_drv = {
+ .probe = cryptodev_mrvl_crypto_init,
+ .remove = cryptodev_mrvl_crypto_uninit
+};
+
+static struct cryptodev_driver mrvl_crypto_drv;
+
+/* Register the driver in constructor. */
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_MRVL_PMD, cryptodev_mrvl_pmd_drv);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_MRVL_PMD,
+ "max_nb_queue_pairs=<int> "
+ "max_nb_sessions=<int> "
+ "socket_id=<int>");
+RTE_PMD_REGISTER_CRYPTO_DRIVER(mrvl_crypto_drv, cryptodev_mrvl_pmd_drv.driver,
+ cryptodev_driver_id);
diff --git a/src/spdk/dpdk/drivers/crypto/mvsam/rte_mrvl_pmd_ops.c b/src/spdk/dpdk/drivers/crypto/mvsam/rte_mrvl_pmd_ops.c
new file mode 100644
index 00000000..c045562c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/mvsam/rte_mrvl_pmd_ops.c
@@ -0,0 +1,722 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Marvell International Ltd.
+ * Copyright(c) 2017 Semihalf.
+ * All rights reserved.
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "rte_mrvl_pmd_private.h"
+
+/**
+ * Capabilities list to be used in reporting to DPDK.
+ */
+static const struct rte_cryptodev_capabilities
+ mrvl_crypto_pmd_capabilities[] = {
+ { /* MD5 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_MD5_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* MD5 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_MD5,
+ .block_size = 64,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* SHA1 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 20,
+ .max = 20,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* SHA1 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA1,
+ .block_size = 64,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 20,
+ .max = 20,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* SHA224 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA224,
+ .block_size = 64,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 28,
+ .max = 28,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* SHA256 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 32,
+ .max = 32,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* SHA256 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA256,
+ .block_size = 64,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 32,
+ .max = 32,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* SHA384 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
+ .block_size = 128,
+ .key_size = {
+ .min = 1,
+ .max = 128,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 48,
+ .max = 48,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* SHA384 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA384,
+ .block_size = 128,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 48,
+ .max = 48,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* SHA512 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
+ .block_size = 128,
+ .key_size = {
+ .min = 1,
+ .max = 128,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 64,
+ .max = 64,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* SHA512 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA512,
+ .block_size = 128,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 64,
+ .max = 64,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* AES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* AES CTR */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_CTR,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* AES GCM */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+ {.aead = {
+ .algo = RTE_CRYPTO_AEAD_AES_GCM,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .digest_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .aad_size = {
+ .min = 8,
+ .max = 12,
+ .increment = 4
+ },
+ .iv_size = {
+ .min = 12,
+ .max = 16,
+ .increment = 4
+ }
+ }, }
+ }, }
+ },
+ { /* AES GMAC (AUTH) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_AES_GMAC,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .digest_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 65532,
+ .increment = 4
+ }
+ }, }
+ }, }
+ },
+ { /* 3DES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_3DES_CBC,
+ .block_size = 8,
+ .key_size = {
+ .min = 24,
+ .max = 24,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* 3DES CTR */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_3DES_CTR,
+ .block_size = 8,
+ .key_size = {
+ .min = 24,
+ .max = 24,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+
+/**
+ * Configure device (PMD ops callback).
+ *
+ * @param dev Pointer to the device structure.
+ * @param config Pointer to configuration structure.
+ * @returns 0. Always.
+ */
+static int
+mrvl_crypto_pmd_config(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused struct rte_cryptodev_config *config)
+{
+ return 0;
+}
+
+/**
+ * Start device (PMD ops callback).
+ *
+ * @param dev Pointer to the device structure.
+ * @returns 0. Always.
+ */
+static int
+mrvl_crypto_pmd_start(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+/**
+ * Stop device (PMD ops callback).
+ *
+ * @param dev Pointer to the device structure.
+ * @returns 0. Always.
+ */
+static void
+mrvl_crypto_pmd_stop(__rte_unused struct rte_cryptodev *dev)
+{
+}
+
+/**
+ * Get device statistics (PMD ops callback).
+ *
+ * @param dev Pointer to the device structure.
+ * @param stats Pointer to statistics structure [out].
+ */
+static void
+mrvl_crypto_pmd_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct mrvl_crypto_qp *qp = dev->data->queue_pairs[qp_id];
+
+ stats->enqueued_count += qp->stats.enqueued_count;
+ stats->dequeued_count += qp->stats.dequeued_count;
+
+ stats->enqueue_err_count += qp->stats.enqueue_err_count;
+ stats->dequeue_err_count += qp->stats.dequeue_err_count;
+ }
+}
+
+/**
+ * Reset device statistics (PMD ops callback).
+ *
+ * @param dev Pointer to the device structure.
+ */
+static void
+mrvl_crypto_pmd_stats_reset(struct rte_cryptodev *dev)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct mrvl_crypto_qp *qp = dev->data->queue_pairs[qp_id];
+
+ memset(&qp->stats, 0, sizeof(qp->stats));
+ }
+}
+
+/**
+ * Get device info (PMD ops callback).
+ *
+ * @param dev Pointer to the device structure.
+ * @param dev_info Pointer to the device info structure [out].
+ */
+static void
+mrvl_crypto_pmd_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *dev_info)
+{
+ struct mrvl_crypto_private *internals = dev->data->dev_private;
+
+ if (dev_info != NULL) {
+ dev_info->driver_id = dev->driver_id;
+ dev_info->feature_flags = dev->feature_flags;
+ dev_info->capabilities = mrvl_crypto_pmd_capabilities;
+ dev_info->max_nb_queue_pairs = internals->max_nb_qpairs;
+ dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
+ }
+}
+
+/**
+ * Release queue pair (PMD ops callback).
+ *
+ * @param dev Pointer to the device structure.
+ * @param qp_id ID of Queue Pair to release.
+ * @returns 0. Always.
+ */
+static int
+mrvl_crypto_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+ struct mrvl_crypto_qp *qp =
+ (struct mrvl_crypto_qp *)dev->data->queue_pairs[qp_id];
+
+ if (dev->data->queue_pairs[qp_id] != NULL) {
+ sam_cio_flush(qp->cio);
+ sam_cio_deinit(qp->cio);
+ rte_free(dev->data->queue_pairs[qp_id]);
+ dev->data->queue_pairs[qp_id] = NULL;
+ }
+
+ return 0;
+}
+
+/**
+ * Close device (PMD ops callback).
+ *
+ * @param dev Pointer to the device structure.
+ * @returns 0. Always.
+ */
+static int
+mrvl_crypto_pmd_close(struct rte_cryptodev *dev)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++)
+ mrvl_crypto_pmd_qp_release(dev, qp_id);
+
+ return 0;
+}
+
+/**
+ * Setup a queue pair (PMD ops callback).
+ *
+ * @param dev Pointer to the device structure.
+ * @param qp_id ID of the Queue Pair.
+ * @param qp_conf Queue pair configuration (nb of descriptors).
+ * @param socket_id NUMA socket to allocate memory on.
+ * @returns 0 upon success, negative value otherwise.
+ */
+static int
+mrvl_crypto_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id, struct rte_mempool *session_pool)
+{
+ struct mrvl_crypto_qp *qp = NULL;
+ char match[RTE_CRYPTODEV_NAME_MAX_LEN];
+ unsigned int n;
+
+ /* Allocate the queue pair data structure. */
+ qp = rte_zmalloc_socket("MRVL Crypto PMD Queue Pair", sizeof(*qp),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (qp == NULL)
+ return -ENOMEM;
+
+ /* Free the old qp before setup, if one exists. */
+ if (dev->data->queue_pairs[qp_id] != NULL)
+ mrvl_crypto_pmd_qp_release(dev, qp_id);
+
+ do { /* Error handling block */
+
+ /*
+ * This extra check is necessary due to a bug in
+ * the crypto library.
+ */
+ int num = sam_get_num_inst();
+ if (num == 0) {
+ MRVL_CRYPTO_LOG_ERR("No crypto engines detected.\n");
+ return -1;
+ }
+
+ /*
+ * In case two crypto engines are enabled, qps will
+ * be evenly spread among them. Even and odd qps will
+ * be handled by cio-0 and cio-1 respectively. qp-cio mapping
+ * will look as follows:
+ *
+ * qp: 0 1 2 3
+ * cio-x:y: cio-0:0, cio-1:0, cio-0:1, cio-1:1
+ *
+ * qp: 4 5 6 7
+ * cio-x:y: cio-0:2, cio-1:2, cio-0:3, cio-1:3
+ *
+ * In case just one engine is enabled, the mapping will look as
+ * follows:
+ * qp: 0 1 2 3
+ * cio-x:y: cio-0:0, cio-0:1, cio-0:2, cio-0:3
+ */
+ n = snprintf(match, sizeof(match), "cio-%u:%u",
+ qp_id % num, qp_id / num);
+
+ if (n >= sizeof(match))
+ break;
+
+ qp->cio_params.match = match;
+ qp->cio_params.size = qp_conf->nb_descriptors;
+
+ if (sam_cio_init(&qp->cio_params, &qp->cio) < 0)
+ break;
+
+ qp->sess_mp = session_pool;
+
+ memset(&qp->stats, 0, sizeof(qp->stats));
+ dev->data->queue_pairs[qp_id] = qp;
+ return 0;
+ } while (0);
+
+ rte_free(qp);
+ return -1;
+}
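+
+/*
+ * Illustrative sketch (application side, not part of the driver): this
+ * callback is reached through the generic cryptodev setup path, e.g.
+ * (dev_id and session_pool are placeholders):
+ *
+ *   struct rte_cryptodev_config conf = {
+ *       .nb_queue_pairs = 1,
+ *       .socket_id = rte_socket_id(),
+ *   };
+ *   struct rte_cryptodev_qp_conf qp_conf = { .nb_descriptors = 2048 };
+ *
+ *   rte_cryptodev_configure(dev_id, &conf);
+ *   rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
+ *                                  rte_socket_id(), session_pool);
+ */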
+
+/** Return the number of allocated queue pairs (PMD ops callback).
+ *
+ * @param dev Pointer to the device structure.
+ * @returns Number of allocated queue pairs.
+ */
+static uint32_t
+mrvl_crypto_pmd_qp_count(struct rte_cryptodev *dev)
+{
+ return dev->data->nb_queue_pairs;
+}
+
+/** Returns the size of the session structure (PMD ops callback).
+ *
+ * @param dev Pointer to the device structure [Unused].
+ * @returns Size of Marvell crypto session.
+ */
+static unsigned
+mrvl_crypto_pmd_sym_session_get_size(__rte_unused struct rte_cryptodev *dev)
+{
+ return sizeof(struct mrvl_crypto_session);
+}
+
+/** Configure the session from a crypto xform chain (PMD ops callback).
+ *
+ * @param dev Pointer to the device structure.
+ * @param xform Pointer to the crypto configuration structure.
+ * @param sess Pointer to the empty session structure.
+ * @returns 0 upon success, negative value otherwise.
+ */
+static int
+mrvl_crypto_pmd_sym_session_configure(__rte_unused struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mp)
+{
+ struct mrvl_crypto_session *mrvl_sess;
+ void *sess_private_data;
+ int ret;
+
+ if (sess == NULL) {
+ MRVL_CRYPTO_LOG_ERR("Invalid session struct.");
+ return -EINVAL;
+ }
+
+ if (rte_mempool_get(mp, &sess_private_data)) {
+ CDEV_LOG_ERR("Couldn't get object from session mempool.");
+ return -ENOMEM;
+ }
+
+ ret = mrvl_crypto_set_session_parameters(sess_private_data, xform);
+ if (ret != 0) {
+ MRVL_CRYPTO_LOG_ERR("Failed to configure session parameters.");
+
+ /* Return session to mempool */
+ rte_mempool_put(mp, sess_private_data);
+ return ret;
+ }
+
+ set_sym_session_private_data(sess, dev->driver_id, sess_private_data);
+
+ mrvl_sess = (struct mrvl_crypto_session *)sess_private_data;
+ if (sam_session_create(&mrvl_sess->sam_sess_params,
+ &mrvl_sess->sam_sess) < 0) {
+ MRVL_CRYPTO_LOG_DBG("Failed to create session!");
+ return -EIO;
+ }
+
+ return 0;
+}
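+
+/*
+ * Illustrative sketch (application side, not part of the driver): this
+ * callback is reached when an application initializes a session for this
+ * device, e.g. (dev_id, xform and sess_mp are placeholders):
+ *
+ *   struct rte_cryptodev_sym_session *s =
+ *       rte_cryptodev_sym_session_create(sess_mp);
+ *
+ *   if (s == NULL ||
+ *       rte_cryptodev_sym_session_init(dev_id, s, &xform, sess_mp) < 0)
+ *       ... handle the error ...
+ */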
+
+/**
+ * Clear the memory of session so it doesn't leave key material behind.
+ *
+ * @param dev Pointer to the device structure.
+ * @param sess Pointer to the session to clear.
+ */
+static void
+mrvl_crypto_pmd_sym_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
+{
+
+ uint8_t index = dev->driver_id;
+ void *sess_priv = get_sym_session_private_data(sess, index);
+
+ /* Zero out the whole structure */
+ if (sess_priv) {
+ struct mrvl_crypto_session *mrvl_sess =
+ (struct mrvl_crypto_session *)sess_priv;
+
+ if (mrvl_sess->sam_sess &&
+ sam_session_destroy(mrvl_sess->sam_sess) < 0) {
+ MRVL_CRYPTO_LOG_INFO("Error while destroying session!");
+ }
+
+ memset(sess_priv, 0, sizeof(struct mrvl_crypto_session));
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+ set_sym_session_private_data(sess, index, NULL);
+ rte_mempool_put(sess_mp, sess_priv);
+ }
+}
+
+/**
+ * PMD handlers for crypto ops.
+ */
+static struct rte_cryptodev_ops mrvl_crypto_pmd_ops = {
+ .dev_configure = mrvl_crypto_pmd_config,
+ .dev_start = mrvl_crypto_pmd_start,
+ .dev_stop = mrvl_crypto_pmd_stop,
+ .dev_close = mrvl_crypto_pmd_close,
+
+ .dev_infos_get = mrvl_crypto_pmd_info_get,
+
+ .stats_get = mrvl_crypto_pmd_stats_get,
+ .stats_reset = mrvl_crypto_pmd_stats_reset,
+
+ .queue_pair_setup = mrvl_crypto_pmd_qp_setup,
+ .queue_pair_release = mrvl_crypto_pmd_qp_release,
+ .queue_pair_count = mrvl_crypto_pmd_qp_count,
+
+ .sym_session_get_size = mrvl_crypto_pmd_sym_session_get_size,
+ .sym_session_configure = mrvl_crypto_pmd_sym_session_configure,
+ .sym_session_clear = mrvl_crypto_pmd_sym_session_clear
+};
+
+struct rte_cryptodev_ops *rte_mrvl_crypto_pmd_ops = &mrvl_crypto_pmd_ops;
diff --git a/src/spdk/dpdk/drivers/crypto/mvsam/rte_mrvl_pmd_private.h b/src/spdk/dpdk/drivers/crypto/mvsam/rte_mrvl_pmd_private.h
new file mode 100644
index 00000000..c16d95b4
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/mvsam/rte_mrvl_pmd_private.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Marvell International Ltd.
+ * Copyright(c) 2017 Semihalf.
+ * All rights reserved.
+ */
+
+#ifndef _RTE_MRVL_PMD_PRIVATE_H_
+#define _RTE_MRVL_PMD_PRIVATE_H_
+
+#include "rte_mrvl_compat.h"
+
+#define CRYPTODEV_NAME_MRVL_PMD crypto_mvsam
+/**< Marvell PMD device name */
+
+#define MRVL_CRYPTO_LOG_ERR(fmt, args...) \
+ RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_MRVL_PMD), \
+ __func__, __LINE__, ## args)
+
+#ifdef RTE_LIBRTE_PMD_MRVL_CRYPTO_DEBUG
+#define MRVL_CRYPTO_LOG_INFO(fmt, args...) \
+ RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_MRVL_PMD), \
+ __func__, __LINE__, ## args)
+
+#define MRVL_CRYPTO_LOG_DBG(fmt, args...) \
+ RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_MRVL_PMD), \
+ __func__, __LINE__, ## args)
+
+#else
+#define MRVL_CRYPTO_LOG_INFO(fmt, args...)
+#define MRVL_CRYPTO_LOG_DBG(fmt, args...)
+#endif
+
+/**
+ * Handy bits->bytes conversion macro.
+ */
+#define BITS2BYTES(x) ((x) >> 3)
+
+/** The operation order mode enumerator. */
+enum mrvl_crypto_chain_order {
+ MRVL_CRYPTO_CHAIN_CIPHER_ONLY,
+ MRVL_CRYPTO_CHAIN_AUTH_ONLY,
+ MRVL_CRYPTO_CHAIN_CIPHER_AUTH,
+ MRVL_CRYPTO_CHAIN_AUTH_CIPHER,
+ MRVL_CRYPTO_CHAIN_COMBINED,
+ MRVL_CRYPTO_CHAIN_NOT_SUPPORTED,
+};
+
+/** Private data structure for each crypto device. */
+struct mrvl_crypto_private {
+ unsigned int max_nb_qpairs; /**< Max number of queue pairs */
+ unsigned int max_nb_sessions; /**< Max number of sessions */
+};
+
+/** MRVL crypto queue pair structure. */
+struct mrvl_crypto_qp {
+ /** SAM CIO (MUSDK Queue Pair equivalent).*/
+ struct sam_cio *cio;
+
+ /** Session Mempool. */
+ struct rte_mempool *sess_mp;
+
+ /** Queue pair statistics. */
+ struct rte_cryptodev_stats stats;
+
+ /** CIO initialization parameters.*/
+ struct sam_cio_params cio_params;
+} __rte_cache_aligned;
+
+/** MRVL crypto private session structure. */
+struct mrvl_crypto_session {
+ /** Crypto operations chain order. */
+ enum mrvl_crypto_chain_order chain_order;
+
+ /** Session initialization parameters. */
+ struct sam_session_params sam_sess_params;
+
+ /** SAM session pointer. */
+ struct sam_sa *sam_sess;
+
+ /** Cipher IV offset. */
+ uint16_t cipher_iv_offset;
+} __rte_cache_aligned;
+
+/** Set and validate MRVL crypto session parameters */
+extern int
+mrvl_crypto_set_session_parameters(struct mrvl_crypto_session *sess,
+ const struct rte_crypto_sym_xform *xform);
+
+/** device specific operations function pointer structure */
+extern struct rte_cryptodev_ops *rte_mrvl_crypto_pmd_ops;
+
+#endif /* _RTE_MRVL_PMD_PRIVATE_H_ */
diff --git a/src/spdk/dpdk/drivers/crypto/mvsam/rte_pmd_mvsam_version.map b/src/spdk/dpdk/drivers/crypto/mvsam/rte_pmd_mvsam_version.map
new file mode 100644
index 00000000..a7530317
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/mvsam/rte_pmd_mvsam_version.map
@@ -0,0 +1,3 @@
+DPDK_17.11 {
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/crypto/null/Makefile b/src/spdk/dpdk/drivers/crypto/null/Makefile
new file mode 100644
index 00000000..9e6400c1
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/null/Makefile
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2016 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+
+# library name
+LIB = librte_pmd_null_crypto.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_cryptodev
+LDLIBS += -lrte_bus_vdev
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_pmd_null_crypto_version.map
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += null_crypto_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += null_crypto_pmd_ops.c
+
+# export include files
+SYMLINK-y-include +=
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/crypto/null/meson.build b/src/spdk/dpdk/drivers/crypto/null/meson.build
new file mode 100644
index 00000000..502336da
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/null/meson.build
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+deps += 'bus_vdev'
+name = 'null_crypto'
+sources = files('null_crypto_pmd.c', 'null_crypto_pmd_ops.c')
diff --git a/src/spdk/dpdk/drivers/crypto/null/null_crypto_pmd.c b/src/spdk/dpdk/drivers/crypto/null/null_crypto_pmd.c
new file mode 100644
index 00000000..6e29a21a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/null/null_crypto_pmd.c
@@ -0,0 +1,253 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Intel Corporation
+ */
+
+#include <rte_common.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_bus_vdev.h>
+#include <rte_malloc.h>
+
+#include "null_crypto_pmd_private.h"
+
+static uint8_t cryptodev_driver_id;
+
+/** verify and set session parameters */
+int
+null_crypto_set_session_parameters(
+ struct null_crypto_session *sess __rte_unused,
+ const struct rte_crypto_sym_xform *xform)
+{
+ if (xform == NULL) {
+ return -EINVAL;
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next == NULL) {
+ /* Authentication Only */
+ if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL)
+ return 0;
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ /* Authentication then Cipher */
+ if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL &&
+ xform->next->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
+ return 0;
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next == NULL) {
+ /* Cipher Only */
+ if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
+ return 0;
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ /* Cipher then Authentication */
+ if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL &&
+ xform->next->auth.algo == RTE_CRYPTO_AUTH_NULL)
+ return 0;
+ }
+
+ return -ENOTSUP;
+}
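+
+/*
+ * Illustrative sketch (not part of the driver): a cipher-then-auth chain
+ * accepted by the checks above could be built as:
+ *
+ *   struct rte_crypto_sym_xform auth = {
+ *       .type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ *       .auth = { .algo = RTE_CRYPTO_AUTH_NULL },
+ *       .next = NULL,
+ *   };
+ *   struct rte_crypto_sym_xform cipher = {
+ *       .type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ *       .cipher = { .algo = RTE_CRYPTO_CIPHER_NULL },
+ *       .next = &auth,
+ *   };
+ */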
+
+/** Process crypto operation for mbuf */
+static int
+process_op(const struct null_crypto_qp *qp, struct rte_crypto_op *op,
+ struct null_crypto_session *sess __rte_unused)
+{
+ /* set status as successful by default */
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+ /* Free session if a session-less crypto op. */
+ if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+ memset(op->sym->session, 0,
+ sizeof(struct null_crypto_session));
+ rte_cryptodev_sym_session_free(op->sym->session);
+ op->sym->session = NULL;
+ }
+
+ /*
+ * if crypto session and operation are valid just enqueue the packet
+ * in the processed ring
+ */
+ return rte_ring_enqueue(qp->processed_pkts, (void *)op);
+}
+
+static struct null_crypto_session *
+get_session(struct null_crypto_qp *qp, struct rte_crypto_op *op)
+{
+ struct null_crypto_session *sess = NULL;
+ struct rte_crypto_sym_op *sym_op = op->sym;
+
+ if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+ if (likely(sym_op->session != NULL))
+ sess = (struct null_crypto_session *)
+ get_sym_session_private_data(
+ sym_op->session, cryptodev_driver_id);
+ } else {
+ void *_sess = NULL;
+ void *_sess_private_data = NULL;
+
+ if (rte_mempool_get(qp->sess_mp, (void **)&_sess))
+ return NULL;
+
+ if (rte_mempool_get(qp->sess_mp, (void **)&_sess_private_data)) {
+ /* Return the already taken session object to the mempool. */
+ rte_mempool_put(qp->sess_mp, _sess);
+ return NULL;
+ }
+
+ sess = (struct null_crypto_session *)_sess_private_data;
+
+ if (unlikely(null_crypto_set_session_parameters(sess,
+ sym_op->xform) != 0)) {
+ rte_mempool_put(qp->sess_mp, _sess);
+ rte_mempool_put(qp->sess_mp, _sess_private_data);
+ sess = NULL;
+ }
+ sym_op->session = (struct rte_cryptodev_sym_session *)_sess;
+ set_sym_session_private_data(op->sym->session,
+ cryptodev_driver_id, _sess_private_data);
+ }
+
+ return sess;
+}
+
+/** Enqueue burst */
+static uint16_t
+null_crypto_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct null_crypto_session *sess;
+ struct null_crypto_qp *qp = queue_pair;
+
+ int i, retval;
+
+ for (i = 0; i < nb_ops; i++) {
+ sess = get_session(qp, ops[i]);
+ if (unlikely(sess == NULL))
+ goto enqueue_err;
+
+ retval = process_op(qp, ops[i], sess);
+ if (unlikely(retval < 0))
+ goto enqueue_err;
+ }
+
+ qp->qp_stats.enqueued_count += i;
+ return i;
+
+enqueue_err:
+ if (ops[i])
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+
+ qp->qp_stats.enqueue_err_count++;
+ return i;
+}
+
+/** Dequeue burst */
+static uint16_t
+null_crypto_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct null_crypto_qp *qp = queue_pair;
+
+ unsigned nb_dequeued;
+
+ nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
+ (void **)ops, nb_ops, NULL);
+ qp->qp_stats.dequeued_count += nb_dequeued;
+
+ return nb_dequeued;
+}
+
+/** Create crypto device */
+static int
+cryptodev_null_create(const char *name,
+ struct rte_vdev_device *vdev,
+ struct rte_cryptodev_pmd_init_params *init_params)
+{
+ struct rte_cryptodev *dev;
+ struct null_crypto_private *internals;
+ dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
+ if (dev == NULL) {
+ NULL_LOG(ERR, "failed to create cryptodev vdev");
+ return -EFAULT;
+ }
+
+ dev->driver_id = cryptodev_driver_id;
+ dev->dev_ops = null_crypto_pmd_ops;
+
+ /* register rx/tx burst functions for data path */
+ dev->dequeue_burst = null_crypto_pmd_dequeue_burst;
+ dev->enqueue_burst = null_crypto_pmd_enqueue_burst;
+
+ dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+ RTE_CRYPTODEV_FF_IN_PLACE_SGL;
+
+ internals = dev->data->dev_private;
+
+ internals->max_nb_qpairs = init_params->max_nb_queue_pairs;
+
+ return 0;
+}
+
+/** Initialise null crypto device */
+static int
+cryptodev_null_probe(struct rte_vdev_device *dev)
+{
+ struct rte_cryptodev_pmd_init_params init_params = {
+ "",
+ sizeof(struct null_crypto_private),
+ rte_socket_id(),
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
+ };
+ const char *name, *args;
+ int retval;
+
+ name = rte_vdev_device_name(dev);
+ if (name == NULL)
+ return -EINVAL;
+
+ args = rte_vdev_device_args(dev);
+
+ retval = rte_cryptodev_pmd_parse_input_args(&init_params, args);
+ if (retval) {
+ NULL_LOG(ERR,
+ "Failed to parse initialisation arguments[%s]",
+ args);
+ return -EINVAL;
+ }
+
+ return cryptodev_null_create(name, dev, &init_params);
+}
+
+static int
+cryptodev_null_remove_dev(struct rte_vdev_device *vdev)
+{
+ struct rte_cryptodev *cryptodev;
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+
+ cryptodev = rte_cryptodev_pmd_get_named_dev(name);
+ if (cryptodev == NULL)
+ return -ENODEV;
+
+ return rte_cryptodev_pmd_destroy(cryptodev);
+}
+
+static struct rte_vdev_driver cryptodev_null_pmd_drv = {
+ .probe = cryptodev_null_probe,
+ .remove = cryptodev_null_remove_dev,
+};
+
+static struct cryptodev_driver null_crypto_drv;
+
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_NULL_PMD, cryptodev_null_pmd_drv);
+RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_NULL_PMD, cryptodev_null_pmd);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_NULL_PMD,
+ "max_nb_queue_pairs=<int> "
+ "socket_id=<int>");
+RTE_PMD_REGISTER_CRYPTO_DRIVER(null_crypto_drv, cryptodev_null_pmd_drv.driver,
+ cryptodev_driver_id);
+
+RTE_INIT(null_init_log)
+{
+ null_logtype_driver = rte_log_register("pmd.crypto.null");
+}
diff --git a/src/spdk/dpdk/drivers/crypto/null/null_crypto_pmd_ops.c b/src/spdk/dpdk/drivers/crypto/null/null_crypto_pmd_ops.c
new file mode 100644
index 00000000..bb2b6e14
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/null/null_crypto_pmd_ops.c
@@ -0,0 +1,331 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Intel Corporation
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "null_crypto_pmd_private.h"
+
+static const struct rte_cryptodev_capabilities null_crypto_pmd_capabilities[] = {
+ { /* NULL (AUTH) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_NULL,
+ .block_size = 1,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .iv_size = { 0 }
+ }, },
+ }, },
+ },
+ { /* NULL (CIPHER) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_NULL,
+ .block_size = 1,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .iv_size = { 0 }
+ }, },
+ }, }
+ },
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+/** Configure device */
+static int
+null_crypto_pmd_config(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused struct rte_cryptodev_config *config)
+{
+ return 0;
+}
+
+/** Start device */
+static int
+null_crypto_pmd_start(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+/** Stop device */
+static void
+null_crypto_pmd_stop(__rte_unused struct rte_cryptodev *dev)
+{
+}
+
+/** Close device */
+static int
+null_crypto_pmd_close(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+/** Get device statistics */
+static void
+null_crypto_pmd_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct null_crypto_qp *qp = dev->data->queue_pairs[qp_id];
+
+ stats->enqueued_count += qp->qp_stats.enqueued_count;
+ stats->dequeued_count += qp->qp_stats.dequeued_count;
+
+ stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
+ stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
+ }
+}
+
+/** Reset device statistics */
+static void
+null_crypto_pmd_stats_reset(struct rte_cryptodev *dev)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct null_crypto_qp *qp = dev->data->queue_pairs[qp_id];
+
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ }
+}
+
+
+/** Get device info */
+static void
+null_crypto_pmd_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *dev_info)
+{
+ struct null_crypto_private *internals = dev->data->dev_private;
+
+ if (dev_info != NULL) {
+ dev_info->driver_id = dev->driver_id;
+ dev_info->max_nb_queue_pairs = internals->max_nb_qpairs;
+ /* No limit on the number of sessions */
+ dev_info->sym.max_nb_sessions = 0;
+ dev_info->feature_flags = dev->feature_flags;
+ dev_info->capabilities = null_crypto_pmd_capabilities;
+ }
+}
+
+/** Release queue pair */
+static int
+null_crypto_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+ if (dev->data->queue_pairs[qp_id] != NULL) {
+ rte_free(dev->data->queue_pairs[qp_id]);
+ dev->data->queue_pairs[qp_id] = NULL;
+ }
+ return 0;
+}
+
+/** Set a unique name for the queue pair based on the dev_id and qp_id */
+static int
+null_crypto_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
+ struct null_crypto_qp *qp)
+{
+ unsigned n = snprintf(qp->name, sizeof(qp->name),
+ "null_crypto_pmd_%u_qp_%u",
+ dev->data->dev_id, qp->id);
+
+ if (n >= sizeof(qp->name))
+ return -1;
+
+ return 0;
+}
+
+/** Create a ring to place processed packets on */
+static struct rte_ring *
+null_crypto_pmd_qp_create_processed_pkts_ring(struct null_crypto_qp *qp,
+ unsigned ring_size, int socket_id)
+{
+ struct rte_ring *r;
+
+ r = rte_ring_lookup(qp->name);
+ if (r) {
+ if (rte_ring_get_size(r) >= ring_size) {
+ NULL_LOG(INFO,
+ "Reusing existing ring %s for "
+ " processed packets", qp->name);
+ return r;
+ }
+
+ NULL_LOG(INFO,
+ "Unable to reuse existing ring %s for "
+ " processed packets", qp->name);
+ return NULL;
+ }
+
+ return rte_ring_create(qp->name, ring_size, socket_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+}
+
+/** Setup a queue pair */
+static int
+null_crypto_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id, struct rte_mempool *session_pool)
+{
+ struct null_crypto_private *internals = dev->data->dev_private;
+ struct null_crypto_qp *qp;
+ int retval;
+
+ if (qp_id >= internals->max_nb_qpairs) {
+ NULL_LOG(ERR, "Invalid qp_id %u, greater than maximum "
+ "number of queue pairs supported (%u).",
+ qp_id, internals->max_nb_qpairs);
+ return (-EINVAL);
+ }
+
+ /* Free memory prior to re-allocation if needed. */
+ if (dev->data->queue_pairs[qp_id] != NULL)
+ null_crypto_pmd_qp_release(dev, qp_id);
+
+ /* Allocate the queue pair data structure. */
+ qp = rte_zmalloc_socket("Null Crypto PMD Queue Pair", sizeof(*qp),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (qp == NULL) {
+ NULL_LOG(ERR, "Failed to allocate queue pair memory");
+ return (-ENOMEM);
+ }
+
+ qp->id = qp_id;
+ dev->data->queue_pairs[qp_id] = qp;
+
+ retval = null_crypto_pmd_qp_set_unique_name(dev, qp);
+ if (retval) {
+ NULL_LOG(ERR, "Failed to create unique name for null "
+ "crypto device");
+
+ goto qp_setup_cleanup;
+ }
+
+ qp->processed_pkts = null_crypto_pmd_qp_create_processed_pkts_ring(qp,
+ qp_conf->nb_descriptors, socket_id);
+ if (qp->processed_pkts == NULL) {
+ NULL_LOG(ERR, "Failed to create unique name for null "
+ "crypto device");
+ goto qp_setup_cleanup;
+ }
+
+ qp->sess_mp = session_pool;
+
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+
+ return 0;
+
+qp_setup_cleanup:
+ if (qp)
+ rte_free(qp);
+
+ return -1;
+}
+
+/** Return the number of allocated queue pairs */
+static uint32_t
+null_crypto_pmd_qp_count(struct rte_cryptodev *dev)
+{
+ return dev->data->nb_queue_pairs;
+}
+
+/** Returns the size of the NULL crypto session structure */
+static unsigned
+null_crypto_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+ return sizeof(struct null_crypto_session);
+}
+
+/** Configure a null crypto session from a crypto xform chain */
+static int
+null_crypto_pmd_sym_session_configure(struct rte_cryptodev *dev __rte_unused,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mp)
+{
+ void *sess_private_data;
+ int ret;
+
+ if (unlikely(sess == NULL)) {
+ NULL_LOG(ERR, "invalid session struct");
+ return -EINVAL;
+ }
+
+ if (rte_mempool_get(mp, &sess_private_data)) {
+ NULL_LOG(ERR,
+ "Couldn't get object from session mempool");
+ return -ENOMEM;
+ }
+
+ ret = null_crypto_set_session_parameters(sess_private_data, xform);
+ if (ret != 0) {
+ NULL_LOG(ERR, "failed configure session parameters");
+
+ /* Return session to mempool */
+ rte_mempool_put(mp, sess_private_data);
+ return ret;
+ }
+
+ set_sym_session_private_data(sess, dev->driver_id,
+ sess_private_data);
+
+ return 0;
+}
+
+/** Clear the memory of session so it doesn't leave key material behind */
+static void
+null_crypto_pmd_sym_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
+{
+ uint8_t index = dev->driver_id;
+ void *sess_priv = get_sym_session_private_data(sess, index);
+
+ /* Zero out the whole structure */
+ if (sess_priv) {
+ memset(sess_priv, 0, sizeof(struct null_crypto_session));
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+ set_sym_session_private_data(sess, index, NULL);
+ rte_mempool_put(sess_mp, sess_priv);
+ }
+}
+
+struct rte_cryptodev_ops pmd_ops = {
+ .dev_configure = null_crypto_pmd_config,
+ .dev_start = null_crypto_pmd_start,
+ .dev_stop = null_crypto_pmd_stop,
+ .dev_close = null_crypto_pmd_close,
+
+ .stats_get = null_crypto_pmd_stats_get,
+ .stats_reset = null_crypto_pmd_stats_reset,
+
+ .dev_infos_get = null_crypto_pmd_info_get,
+
+ .queue_pair_setup = null_crypto_pmd_qp_setup,
+ .queue_pair_release = null_crypto_pmd_qp_release,
+ .queue_pair_count = null_crypto_pmd_qp_count,
+
+ .sym_session_get_size = null_crypto_pmd_sym_session_get_size,
+ .sym_session_configure = null_crypto_pmd_sym_session_configure,
+ .sym_session_clear = null_crypto_pmd_sym_session_clear
+};
+
+struct rte_cryptodev_ops *null_crypto_pmd_ops = &pmd_ops;
diff --git a/src/spdk/dpdk/drivers/crypto/null/null_crypto_pmd_private.h b/src/spdk/dpdk/drivers/crypto/null/null_crypto_pmd_private.h
new file mode 100644
index 00000000..d5905afd
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/null/null_crypto_pmd_private.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Intel Corporation
+ */
+
+#ifndef _NULL_CRYPTO_PMD_PRIVATE_H_
+#define _NULL_CRYPTO_PMD_PRIVATE_H_
+
+#define CRYPTODEV_NAME_NULL_PMD crypto_null
+/**< Null crypto PMD device name */
+
+int null_logtype_driver;
+
+#define NULL_LOG(level, fmt, ...) \
+ rte_log(RTE_LOG_ ## level, null_logtype_driver, \
+ "%s() line %u: "fmt "\n", __func__, __LINE__, \
+ ## __VA_ARGS__)
+
+
+/** private data structure for each NULL crypto device */
+struct null_crypto_private {
+ unsigned max_nb_qpairs; /**< Max number of queue pairs */
+};
+
+/** NULL crypto queue pair */
+struct null_crypto_qp {
+ uint16_t id;
+ /**< Queue Pair Identifier */
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ /**< Unique Queue Pair Name */
+ struct rte_ring *processed_pkts;
+ /**< Ring for placing process packets */
+ struct rte_mempool *sess_mp;
+ /**< Session Mempool */
+ struct rte_cryptodev_stats qp_stats;
+ /**< Queue pair statistics */
+} __rte_cache_aligned;
+
+
+/** NULL crypto private session structure */
+struct null_crypto_session {
+ uint32_t reserved;
+} __rte_cache_aligned;
+
+/** Set and validate NULL crypto session parameters */
+extern int
+null_crypto_set_session_parameters(struct null_crypto_session *sess,
+ const struct rte_crypto_sym_xform *xform);
+
+/** device specific operations function pointer structure */
+extern struct rte_cryptodev_ops *null_crypto_pmd_ops;
+
+#endif /* _NULL_CRYPTO_PMD_PRIVATE_H_ */
diff --git a/src/spdk/dpdk/drivers/crypto/null/rte_pmd_null_crypto_version.map b/src/spdk/dpdk/drivers/crypto/null/rte_pmd_null_crypto_version.map
new file mode 100644
index 00000000..dc4d417b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/null/rte_pmd_null_crypto_version.map
@@ -0,0 +1,3 @@
+DPDK_16.04 {
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/crypto/openssl/Makefile b/src/spdk/dpdk/drivers/crypto/openssl/Makefile
new file mode 100644
index 00000000..8fe086b9
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/openssl/Makefile
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2016 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_pmd_openssl.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_pmd_openssl_version.map
+
+# external library dependencies
+LDLIBS += -lcrypto
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_cryptodev
+LDLIBS += -lrte_bus_vdev
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OPENSSL) += rte_openssl_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OPENSSL) += rte_openssl_pmd_ops.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/crypto/openssl/compat.h b/src/spdk/dpdk/drivers/crypto/openssl/compat.h
new file mode 100644
index 00000000..45f9a33d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/openssl/compat.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium Networks
+ */
+
+#ifndef __RTA_COMPAT_H__
+#define __RTA_COMPAT_H__
+
+#if (OPENSSL_VERSION_NUMBER < 0x10100000L)
+
+#define set_rsa_params(rsa, p, q, ret) \
+ do {rsa->p = p; rsa->q = q; ret = 0; } while (0)
+
+#define set_rsa_crt_params(rsa, dmp1, dmq1, iqmp, ret) \
+ do { \
+ rsa->dmp1 = dmp1; \
+ rsa->dmq1 = dmq1; \
+ rsa->iqmp = iqmp; \
+ ret = 0; \
+ } while (0)
+
+#define set_rsa_keys(rsa, n, e, d, ret) \
+ do { \
+ rsa->n = n; rsa->e = e; rsa->d = d; ret = 0; \
+ } while (0)
+
+#define set_dh_params(dh, p, g, ret) \
+ do { \
+ dh->p = p; \
+ dh->q = NULL; \
+ dh->g = g; \
+ ret = 0; \
+ } while (0)
+
+#define set_dh_priv_key(dh, priv_key, ret) \
+ do { dh->priv_key = priv_key; ret = 0; } while (0)
+
+#define set_dsa_params(dsa, p, q, g, ret) \
+ do { dsa->p = p; dsa->q = q; dsa->g = g; ret = 0; } while (0)
+
+#define get_dh_pub_key(dh, pub_key) \
+ (pub_key = dh->pub_key)
+
+#define get_dh_priv_key(dh, priv_key) \
+ (priv_key = dh->priv_key)
+
+#define set_dsa_sign(sign, r, s) \
+ do { sign->r = r; sign->s = s; } while (0)
+
+#define get_dsa_sign(sign, r, s) \
+ do { r = sign->r; s = sign->s; } while (0)
+
+#define set_dsa_keys(dsa, pub, priv, ret) \
+ do { dsa->pub_key = pub; dsa->priv_key = priv; ret = 0; } while (0)
+
+#define set_dsa_pub_key(dsa, pub_key) \
+ (dsa->pub_key = pub_key)
+
+#define get_dsa_priv_key(dsa, priv_key) \
+ (priv_key = dsa->priv_key)
+
+#else
+
+#define set_rsa_params(rsa, p, q, ret) \
+ (ret = !RSA_set0_factors(rsa, p, q))
+
+#define set_rsa_crt_params(rsa, dmp1, dmq1, iqmp, ret) \
+ (ret = !RSA_set0_crt_params(rsa, dmp1, dmq1, iqmp))
+
+/* n, e must be non-null, d can be NULL */
+#define set_rsa_keys(rsa, n, e, d, ret) \
+ (ret = !RSA_set0_key(rsa, n, e, d))
+
+#define set_dh_params(dh, p, g, ret) \
+ (ret = !DH_set0_pqg(dh, p, NULL, g))
+
+#define set_dh_priv_key(dh, priv_key, ret) \
+ (ret = !DH_set0_key(dh, NULL, priv_key))
+
+#define get_dh_pub_key(dh, pub_key) \
+ (DH_get0_key(dh_key, &pub_key, NULL))
+
+#define get_dh_priv_key(dh, priv_key) \
+ (DH_get0_key(dh_key, NULL, &priv_key))
+
+#define set_dsa_params(dsa, p, q, g, ret) \
+ (ret = !DSA_set0_pqg(dsa, p, q, g))
+
+#define set_dsa_priv_key(dsa, priv_key) \
+ (DSA_set0_key(dsa, NULL, priv_key))
+
+#define set_dsa_sign(sign, r, s) \
+ (DSA_SIG_set0(sign, r, s))
+
+#define get_dsa_sign(sign, r, s) \
+ (DSA_SIG_get0(sign, &r, &s))
+
+#define set_dsa_keys(dsa, pub, priv, ret) \
+ (ret = !DSA_set0_key(dsa, pub, priv))
+
+#define set_dsa_pub_key(dsa, pub_key) \
+ (DSA_set0_key(dsa, pub_key, NULL))
+
+#define get_dsa_priv_key(dsa, priv_key) \
+ (DSA_get0_key(dsa, NULL, &priv_key))
+
+#endif /* version < 10100000 */
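+
+/*
+ * Illustrative sketch (not part of the driver): the wrappers above let the
+ * PMD assign key material without checking the OpenSSL version, e.g. for RSA
+ * (n, e and d are caller-provided BIGNUMs):
+ *
+ *   RSA *rsa = RSA_new();
+ *   int ret;
+ *
+ *   set_rsa_keys(rsa, n, e, d, ret);
+ *   if (ret != 0)
+ *       ... setting the key failed ...
+ */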
+
+#endif /* __RTA_COMPAT_H__ */
diff --git a/src/spdk/dpdk/drivers/crypto/openssl/meson.build b/src/spdk/dpdk/drivers/crypto/openssl/meson.build
new file mode 100644
index 00000000..c2a0dd8b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/openssl/meson.build
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+dep = dependency('libcrypto', required: false)
+if not dep.found()
+ build = false
+endif
+deps += 'bus_vdev'
+sources = files('rte_openssl_pmd.c', 'rte_openssl_pmd_ops.c')
+ext_deps += dep
+pkgconfig_extra_libs += '-lcrypto'
diff --git a/src/spdk/dpdk/drivers/crypto/openssl/rte_openssl_pmd.c b/src/spdk/dpdk/drivers/crypto/openssl/rte_openssl_pmd.c
new file mode 100644
index 00000000..7d263aba
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/openssl/rte_openssl_pmd.c
@@ -0,0 +1,2194 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Intel Corporation
+ */
+
+#include <rte_common.h>
+#include <rte_hexdump.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_bus_vdev.h>
+#include <rte_malloc.h>
+#include <rte_cpuflags.h>
+
+#include <openssl/hmac.h>
+#include <openssl/evp.h>
+
+#include "rte_openssl_pmd_private.h"
+#include "compat.h"
+
+#define DES_BLOCK_SIZE 8
+
+static uint8_t cryptodev_driver_id;
+
+#if (OPENSSL_VERSION_NUMBER < 0x10100000L)
+static HMAC_CTX *HMAC_CTX_new(void)
+{
+ HMAC_CTX *ctx = OPENSSL_malloc(sizeof(*ctx));
+
+ if (ctx != NULL)
+ HMAC_CTX_init(ctx);
+ return ctx;
+}
+
+static void HMAC_CTX_free(HMAC_CTX *ctx)
+{
+ if (ctx != NULL) {
+ HMAC_CTX_cleanup(ctx);
+ OPENSSL_free(ctx);
+ }
+}
+#endif
+
+static int cryptodev_openssl_remove(struct rte_vdev_device *vdev);
+
+/*----------------------------------------------------------------------------*/
+
+/**
+ * Increment the counter by 1.
+ * The counter is treated as a 64-bit big-endian integer.
+ */
+static void
+ctr_inc(uint8_t *ctr)
+{
+ uint64_t *ctr64 = (uint64_t *)ctr;
+
+ *ctr64 = __builtin_bswap64(*ctr64);
+ (*ctr64)++;
+ *ctr64 = __builtin_bswap64(*ctr64);
+}
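+
+/*
+ * Illustrative sketch (not part of the driver): ctr_inc() treats its
+ * argument as a big-endian 64-bit counter, so for example:
+ *
+ *   uint8_t ctr[8] = { 0, 0, 0, 0, 0, 0, 0, 0xff };
+ *
+ *   ctr_inc(ctr);
+ *   // ctr is now { 0, 0, 0, 0, 0, 0, 0x01, 0x00 }, i.e. 255 + 1 = 256
+ */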
+
+/*
+ *------------------------------------------------------------------------------
+ * Session Prepare
+ *------------------------------------------------------------------------------
+ */
+
+/** Get xform chain order */
+static enum openssl_chain_order
+openssl_get_chain_order(const struct rte_crypto_sym_xform *xform)
+{
+ enum openssl_chain_order res = OPENSSL_CHAIN_NOT_SUPPORTED;
+
+ if (xform != NULL) {
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ if (xform->next == NULL)
+ res = OPENSSL_CHAIN_ONLY_AUTH;
+ else if (xform->next->type ==
+ RTE_CRYPTO_SYM_XFORM_CIPHER)
+ res = OPENSSL_CHAIN_AUTH_CIPHER;
+ }
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ if (xform->next == NULL)
+ res = OPENSSL_CHAIN_ONLY_CIPHER;
+ else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ res = OPENSSL_CHAIN_CIPHER_AUTH;
+ }
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD)
+ res = OPENSSL_CHAIN_COMBINED;
+ }
+
+ return res;
+}
+
+/** Get session cipher key from input cipher key */
+static void
+get_cipher_key(uint8_t *input_key, int keylen, uint8_t *session_key)
+{
+ memcpy(session_key, input_key, keylen);
+}
+
+/** Expand the input key into a standard 24-byte EDE key (K1-K2-K3) */
+static int
+get_cipher_key_ede(uint8_t *key, int keylen, uint8_t *key_ede)
+{
+ int res = 0;
+
+ /* Initialize keys - 24 bytes: [key1-key2-key3] */
+ switch (keylen) {
+ case 24:
+ memcpy(key_ede, key, 24);
+ break;
+ case 16:
+ /* K3 = K1 */
+ memcpy(key_ede, key, 16);
+ memcpy(key_ede + 16, key, 8);
+ break;
+ case 8:
+ /* K1 = K2 = K3 (DES compatibility) */
+ memcpy(key_ede, key, 8);
+ memcpy(key_ede + 8, key, 8);
+ memcpy(key_ede + 16, key, 8);
+ break;
+ default:
+ OPENSSL_LOG(ERR, "Unsupported key size");
+ res = -EINVAL;
+ }
+
+ return res;
+}
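+
+/*
+ * Illustrative sketch (not part of the driver): for a 16-byte input key
+ * K1||K2, the expansion above yields the 24-byte EDE key K1||K2||K1
+ * (2-key 3DES), e.g.:
+ *
+ *   uint8_t key[16];     // K1 = key[0..7], K2 = key[8..15]
+ *   uint8_t key_ede[24];
+ *
+ *   get_cipher_key_ede(key, sizeof(key), key_ede);
+ *   // key_ede now holds K1 || K2 || K1
+ */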
+
+/** Get the appropriate OpenSSL EVP cipher for the input cipher algorithm */
+static uint8_t
+get_cipher_algo(enum rte_crypto_cipher_algorithm sess_algo, size_t keylen,
+ const EVP_CIPHER **algo)
+{
+ int res = 0;
+
+ if (algo != NULL) {
+ switch (sess_algo) {
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ switch (keylen) {
+ case 8:
+ *algo = EVP_des_cbc();
+ break;
+ case 16:
+ *algo = EVP_des_ede_cbc();
+ break;
+ case 24:
+ *algo = EVP_des_ede3_cbc();
+ break;
+ default:
+ res = -EINVAL;
+ }
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_CTR:
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ switch (keylen) {
+ case 16:
+ *algo = EVP_aes_128_cbc();
+ break;
+ case 24:
+ *algo = EVP_aes_192_cbc();
+ break;
+ case 32:
+ *algo = EVP_aes_256_cbc();
+ break;
+ default:
+ res = -EINVAL;
+ }
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ switch (keylen) {
+ case 16:
+ *algo = EVP_aes_128_ctr();
+ break;
+ case 24:
+ *algo = EVP_aes_192_ctr();
+ break;
+ case 32:
+ *algo = EVP_aes_256_ctr();
+ break;
+ default:
+ res = -EINVAL;
+ }
+ break;
+ default:
+ res = -EINVAL;
+ break;
+ }
+ } else {
+ res = -EINVAL;
+ }
+
+ return res;
+}
+
+/** Get the appropriate OpenSSL EVP digest for the input auth algorithm */
+static uint8_t
+get_auth_algo(enum rte_crypto_auth_algorithm sessalgo,
+ const EVP_MD **algo)
+{
+ int res = 0;
+
+ if (algo != NULL) {
+ switch (sessalgo) {
+ case RTE_CRYPTO_AUTH_MD5:
+ case RTE_CRYPTO_AUTH_MD5_HMAC:
+ *algo = EVP_md5();
+ break;
+ case RTE_CRYPTO_AUTH_SHA1:
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ *algo = EVP_sha1();
+ break;
+ case RTE_CRYPTO_AUTH_SHA224:
+ case RTE_CRYPTO_AUTH_SHA224_HMAC:
+ *algo = EVP_sha224();
+ break;
+ case RTE_CRYPTO_AUTH_SHA256:
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ *algo = EVP_sha256();
+ break;
+ case RTE_CRYPTO_AUTH_SHA384:
+ case RTE_CRYPTO_AUTH_SHA384_HMAC:
+ *algo = EVP_sha384();
+ break;
+ case RTE_CRYPTO_AUTH_SHA512:
+ case RTE_CRYPTO_AUTH_SHA512_HMAC:
+ *algo = EVP_sha512();
+ break;
+ default:
+ res = -EINVAL;
+ break;
+ }
+ } else {
+ res = -EINVAL;
+ }
+
+ return res;
+}
+
+/** Get the appropriate OpenSSL EVP cipher for the input AEAD algorithm */
+static uint8_t
+get_aead_algo(enum rte_crypto_aead_algorithm sess_algo, size_t keylen,
+ const EVP_CIPHER **algo)
+{
+ int res = 0;
+
+ if (algo != NULL) {
+ switch (sess_algo) {
+ case RTE_CRYPTO_AEAD_AES_GCM:
+ switch (keylen) {
+ case 16:
+ *algo = EVP_aes_128_gcm();
+ break;
+ case 24:
+ *algo = EVP_aes_192_gcm();
+ break;
+ case 32:
+ *algo = EVP_aes_256_gcm();
+ break;
+ default:
+ res = -EINVAL;
+ }
+ break;
+ case RTE_CRYPTO_AEAD_AES_CCM:
+ switch (keylen) {
+ case 16:
+ *algo = EVP_aes_128_ccm();
+ break;
+ case 24:
+ *algo = EVP_aes_192_ccm();
+ break;
+ case 32:
+ *algo = EVP_aes_256_ccm();
+ break;
+ default:
+ res = -EINVAL;
+ }
+ break;
+ default:
+ res = -EINVAL;
+ break;
+ }
+ } else {
+ res = -EINVAL;
+ }
+
+ return res;
+}
+
+/* Set session AEAD encryption parameters */
+static int
+openssl_set_sess_aead_enc_param(struct openssl_session *sess,
+ enum rte_crypto_aead_algorithm algo,
+ uint8_t tag_len, uint8_t *key)
+{
+ int iv_type = 0;
+ unsigned int do_ccm;
+
+ sess->cipher.direction = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
+ sess->auth.operation = RTE_CRYPTO_AUTH_OP_GENERATE;
+
+ /* Select AEAD algo */
+ switch (algo) {
+ case RTE_CRYPTO_AEAD_AES_GCM:
+ iv_type = EVP_CTRL_GCM_SET_IVLEN;
+ if (tag_len != 16)
+ return -EINVAL;
+ do_ccm = 0;
+ break;
+ case RTE_CRYPTO_AEAD_AES_CCM:
+ iv_type = EVP_CTRL_CCM_SET_IVLEN;
+ /* Digest size can be 4, 6, 8, 10, 12, 14 or 16 bytes */
+ if (tag_len < 4 || tag_len > 16 || (tag_len & 1) == 1)
+ return -EINVAL;
+ do_ccm = 1;
+ break;
+ default:
+ return -ENOTSUP;
+ }
+
+ sess->cipher.mode = OPENSSL_CIPHER_LIB;
+ sess->cipher.ctx = EVP_CIPHER_CTX_new();
+
+ if (get_aead_algo(algo, sess->cipher.key.length,
+ &sess->cipher.evp_algo) != 0)
+ return -EINVAL;
+
+ get_cipher_key(key, sess->cipher.key.length, sess->cipher.key.data);
+
+ sess->chain_order = OPENSSL_CHAIN_COMBINED;
+
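+ /* Initialise the cipher first; IV length and CCM tag length
+ * must be set via ctrl calls before the key is loaded
+ */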
+ if (EVP_EncryptInit_ex(sess->cipher.ctx, sess->cipher.evp_algo,
+ NULL, NULL, NULL) <= 0)
+ return -EINVAL;
+
+ if (EVP_CIPHER_CTX_ctrl(sess->cipher.ctx, iv_type, sess->iv.length,
+ NULL) <= 0)
+ return -EINVAL;
+
+ if (do_ccm)
+ EVP_CIPHER_CTX_ctrl(sess->cipher.ctx, EVP_CTRL_CCM_SET_TAG,
+ tag_len, NULL);
+
+ if (EVP_EncryptInit_ex(sess->cipher.ctx, NULL, NULL, key, NULL) <= 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+/* Set session AEAD decryption parameters */
+static int
+openssl_set_sess_aead_dec_param(struct openssl_session *sess,
+ enum rte_crypto_aead_algorithm algo,
+ uint8_t tag_len, uint8_t *key)
+{
+ int iv_type = 0;
+ unsigned int do_ccm = 0;
+
+ sess->cipher.direction = RTE_CRYPTO_CIPHER_OP_DECRYPT;
+ sess->auth.operation = RTE_CRYPTO_AUTH_OP_VERIFY;
+
+ /* Select AEAD algo */
+ switch (algo) {
+ case RTE_CRYPTO_AEAD_AES_GCM:
+ iv_type = EVP_CTRL_GCM_SET_IVLEN;
+ if (tag_len != 16)
+ return -EINVAL;
+ break;
+ case RTE_CRYPTO_AEAD_AES_CCM:
+ iv_type = EVP_CTRL_CCM_SET_IVLEN;
+ /* Digest size can be 4, 6, 8, 10, 12, 14 or 16 bytes */
+ if (tag_len < 4 || tag_len > 16 || (tag_len & 1) == 1)
+ return -EINVAL;
+ do_ccm = 1;
+ break;
+ default:
+ return -ENOTSUP;
+ }
+
+ sess->cipher.mode = OPENSSL_CIPHER_LIB;
+ sess->cipher.ctx = EVP_CIPHER_CTX_new();
+
+ if (get_aead_algo(algo, sess->cipher.key.length,
+ &sess->cipher.evp_algo) != 0)
+ return -EINVAL;
+
+ get_cipher_key(key, sess->cipher.key.length, sess->cipher.key.data);
+
+ sess->chain_order = OPENSSL_CHAIN_COMBINED;
+
+ if (EVP_DecryptInit_ex(sess->cipher.ctx, sess->cipher.evp_algo,
+ NULL, NULL, NULL) <= 0)
+ return -EINVAL;
+
+ if (EVP_CIPHER_CTX_ctrl(sess->cipher.ctx, iv_type,
+ sess->iv.length, NULL) <= 0)
+ return -EINVAL;
+
+ if (do_ccm)
+ EVP_CIPHER_CTX_ctrl(sess->cipher.ctx, EVP_CTRL_CCM_SET_TAG,
+ tag_len, NULL);
+
+ if (EVP_DecryptInit_ex(sess->cipher.ctx, NULL, NULL, key, NULL) <= 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+/** Set session cipher parameters */
+static int
+openssl_set_session_cipher_parameters(struct openssl_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ /* Select cipher direction */
+ sess->cipher.direction = xform->cipher.op;
+ /* Select cipher key */
+ sess->cipher.key.length = xform->cipher.key.length;
+
+ /* Set IV parameters */
+ sess->iv.offset = xform->cipher.iv.offset;
+ sess->iv.length = xform->cipher.iv.length;
+
+ /* Select cipher algo */
+ switch (xform->cipher.algo) {
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ sess->cipher.mode = OPENSSL_CIPHER_LIB;
+ sess->cipher.algo = xform->cipher.algo;
+ sess->cipher.ctx = EVP_CIPHER_CTX_new();
+
+ if (get_cipher_algo(sess->cipher.algo, sess->cipher.key.length,
+ &sess->cipher.evp_algo) != 0)
+ return -EINVAL;
+
+ get_cipher_key(xform->cipher.key.data, sess->cipher.key.length,
+ sess->cipher.key.data);
+ if (sess->cipher.direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
+ if (EVP_EncryptInit_ex(sess->cipher.ctx,
+ sess->cipher.evp_algo,
+ NULL, xform->cipher.key.data,
+ NULL) != 1) {
+ return -EINVAL;
+ }
+ } else if (sess->cipher.direction ==
+ RTE_CRYPTO_CIPHER_OP_DECRYPT) {
+ if (EVP_DecryptInit_ex(sess->cipher.ctx,
+ sess->cipher.evp_algo,
+ NULL, xform->cipher.key.data,
+ NULL) != 1) {
+ return -EINVAL;
+ }
+ }
+
+ break;
+
+ case RTE_CRYPTO_CIPHER_3DES_CTR:
+ sess->cipher.mode = OPENSSL_CIPHER_DES3CTR;
+ sess->cipher.ctx = EVP_CIPHER_CTX_new();
+
+ if (get_cipher_key_ede(xform->cipher.key.data,
+ sess->cipher.key.length,
+ sess->cipher.key.data) != 0)
+ return -EINVAL;
+ break;
+
+ case RTE_CRYPTO_CIPHER_DES_CBC:
+ sess->cipher.algo = xform->cipher.algo;
+ sess->cipher.ctx = EVP_CIPHER_CTX_new();
+ sess->cipher.evp_algo = EVP_des_cbc();
+
+ get_cipher_key(xform->cipher.key.data, sess->cipher.key.length,
+ sess->cipher.key.data);
+ if (sess->cipher.direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
+ if (EVP_EncryptInit_ex(sess->cipher.ctx,
+ sess->cipher.evp_algo,
+ NULL, xform->cipher.key.data,
+ NULL) != 1) {
+ return -EINVAL;
+ }
+ } else if (sess->cipher.direction ==
+ RTE_CRYPTO_CIPHER_OP_DECRYPT) {
+ if (EVP_DecryptInit_ex(sess->cipher.ctx,
+ sess->cipher.evp_algo,
+ NULL, xform->cipher.key.data,
+ NULL) != 1) {
+ return -EINVAL;
+ }
+ }
+
+ break;
+
+ case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
+ sess->cipher.algo = xform->cipher.algo;
+ sess->chain_order = OPENSSL_CHAIN_CIPHER_BPI;
+ sess->cipher.ctx = EVP_CIPHER_CTX_new();
+ sess->cipher.evp_algo = EVP_des_cbc();
+
+ sess->cipher.bpi_ctx = EVP_CIPHER_CTX_new();
+ /* IV will be ECB encrypted whether direction is encrypt or decrypt */
+ if (EVP_EncryptInit_ex(sess->cipher.bpi_ctx, EVP_des_ecb(),
+ NULL, xform->cipher.key.data, 0) != 1)
+ return -EINVAL;
+
+ get_cipher_key(xform->cipher.key.data, sess->cipher.key.length,
+ sess->cipher.key.data);
+ if (sess->cipher.direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
+ if (EVP_EncryptInit_ex(sess->cipher.ctx,
+ sess->cipher.evp_algo,
+ NULL, xform->cipher.key.data,
+ NULL) != 1) {
+ return -EINVAL;
+ }
+ } else if (sess->cipher.direction ==
+ RTE_CRYPTO_CIPHER_OP_DECRYPT) {
+ if (EVP_DecryptInit_ex(sess->cipher.ctx,
+ sess->cipher.evp_algo,
+ NULL, xform->cipher.key.data,
+ NULL) != 1) {
+ return -EINVAL;
+ }
+ }
+
+ break;
+ default:
+ sess->cipher.algo = RTE_CRYPTO_CIPHER_NULL;
+ return -ENOTSUP;
+ }
+
+ return 0;
+}
+
+/* Set session auth parameters */
+static int
+openssl_set_session_auth_parameters(struct openssl_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ /* Select auth generate/verify */
+ sess->auth.operation = xform->auth.op;
+ sess->auth.algo = xform->auth.algo;
+
+ sess->auth.digest_length = xform->auth.digest_length;
+
+ /* Select auth algo */
+ switch (xform->auth.algo) {
+ case RTE_CRYPTO_AUTH_AES_GMAC:
+ /*
+ * OpenSSL requires GMAC to be a GCM operation
+ * with no cipher data length
+ */
+ sess->cipher.key.length = xform->auth.key.length;
+
+ /* Set IV parameters */
+ sess->iv.offset = xform->auth.iv.offset;
+ sess->iv.length = xform->auth.iv.length;
+
+ if (sess->auth.operation == RTE_CRYPTO_AUTH_OP_GENERATE)
+ return openssl_set_sess_aead_enc_param(sess,
+ RTE_CRYPTO_AEAD_AES_GCM,
+ xform->auth.digest_length,
+ xform->auth.key.data);
+ else
+ return openssl_set_sess_aead_dec_param(sess,
+ RTE_CRYPTO_AEAD_AES_GCM,
+ xform->auth.digest_length,
+ xform->auth.key.data);
+ break;
+
+ case RTE_CRYPTO_AUTH_MD5:
+ case RTE_CRYPTO_AUTH_SHA1:
+ case RTE_CRYPTO_AUTH_SHA224:
+ case RTE_CRYPTO_AUTH_SHA256:
+ case RTE_CRYPTO_AUTH_SHA384:
+ case RTE_CRYPTO_AUTH_SHA512:
+ sess->auth.mode = OPENSSL_AUTH_AS_AUTH;
+ if (get_auth_algo(xform->auth.algo,
+ &sess->auth.auth.evp_algo) != 0)
+ return -EINVAL;
+ sess->auth.auth.ctx = EVP_MD_CTX_create();
+ break;
+
+ case RTE_CRYPTO_AUTH_MD5_HMAC:
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ case RTE_CRYPTO_AUTH_SHA224_HMAC:
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ case RTE_CRYPTO_AUTH_SHA384_HMAC:
+ case RTE_CRYPTO_AUTH_SHA512_HMAC:
+ sess->auth.mode = OPENSSL_AUTH_AS_HMAC;
+ sess->auth.hmac.ctx = HMAC_CTX_new();
+ if (get_auth_algo(xform->auth.algo,
+ &sess->auth.hmac.evp_algo) != 0)
+ return -EINVAL;
+
+ if (HMAC_Init_ex(sess->auth.hmac.ctx,
+ xform->auth.key.data,
+ xform->auth.key.length,
+ sess->auth.hmac.evp_algo, NULL) != 1)
+ return -EINVAL;
+ break;
+
+ default:
+ return -ENOTSUP;
+ }
+
+ return 0;
+}
+
+/* Set session AEAD parameters */
+static int
+openssl_set_session_aead_parameters(struct openssl_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ /* Select cipher key */
+ sess->cipher.key.length = xform->aead.key.length;
+
+ /* Set IV parameters */
+ if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_CCM)
+ /*
+ * For AES-CCM, the actual IV is placed
+ * one byte after the start of the IV field,
+ * according to the API.
+ */
+ sess->iv.offset = xform->aead.iv.offset + 1;
+ else
+ sess->iv.offset = xform->aead.iv.offset;
+
+ sess->iv.length = xform->aead.iv.length;
+
+ sess->auth.aad_length = xform->aead.aad_length;
+ sess->auth.digest_length = xform->aead.digest_length;
+
+ sess->aead_algo = xform->aead.algo;
+ /* Select cipher direction */
+ if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
+ return openssl_set_sess_aead_enc_param(sess, xform->aead.algo,
+ xform->aead.digest_length, xform->aead.key.data);
+ else
+ return openssl_set_sess_aead_dec_param(sess, xform->aead.algo,
+ xform->aead.digest_length, xform->aead.key.data);
+}
+
+/** Parse crypto xform chain and set private session parameters */
+int
+openssl_set_session_parameters(struct openssl_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ const struct rte_crypto_sym_xform *cipher_xform = NULL;
+ const struct rte_crypto_sym_xform *auth_xform = NULL;
+ const struct rte_crypto_sym_xform *aead_xform = NULL;
+ int ret;
+
+ sess->chain_order = openssl_get_chain_order(xform);
+ switch (sess->chain_order) {
+ case OPENSSL_CHAIN_ONLY_CIPHER:
+ cipher_xform = xform;
+ break;
+ case OPENSSL_CHAIN_ONLY_AUTH:
+ auth_xform = xform;
+ break;
+ case OPENSSL_CHAIN_CIPHER_AUTH:
+ cipher_xform = xform;
+ auth_xform = xform->next;
+ break;
+ case OPENSSL_CHAIN_AUTH_CIPHER:
+ auth_xform = xform;
+ cipher_xform = xform->next;
+ break;
+ case OPENSSL_CHAIN_COMBINED:
+ aead_xform = xform;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Default IV length = 0 */
+ sess->iv.length = 0;
+
+ /* cipher_xform must be checked before auth_xform */
+ if (cipher_xform) {
+ ret = openssl_set_session_cipher_parameters(
+ sess, cipher_xform);
+ if (ret != 0) {
+ OPENSSL_LOG(ERR,
+ "Invalid/unsupported cipher parameters");
+ return ret;
+ }
+ }
+
+ if (auth_xform) {
+ ret = openssl_set_session_auth_parameters(sess, auth_xform);
+ if (ret != 0) {
+ OPENSSL_LOG(ERR,
+ "Invalid/unsupported auth parameters");
+ return ret;
+ }
+ }
+
+ if (aead_xform) {
+ ret = openssl_set_session_aead_parameters(sess, aead_xform);
+ if (ret != 0) {
+ OPENSSL_LOG(ERR,
+ "Invalid/unsupported AEAD parameters");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/** Reset private session parameters */
+void
+openssl_reset_session(struct openssl_session *sess)
+{
+ EVP_CIPHER_CTX_free(sess->cipher.ctx);
+
+ if (sess->chain_order == OPENSSL_CHAIN_CIPHER_BPI)
+ EVP_CIPHER_CTX_free(sess->cipher.bpi_ctx);
+
+ switch (sess->auth.mode) {
+ case OPENSSL_AUTH_AS_AUTH:
+ EVP_MD_CTX_destroy(sess->auth.auth.ctx);
+ break;
+ case OPENSSL_AUTH_AS_HMAC:
+ EVP_PKEY_free(sess->auth.hmac.pkey);
+ HMAC_CTX_free(sess->auth.hmac.ctx);
+ break;
+ default:
+ break;
+ }
+}
+
+/** Provide session for operation */
+static void *
+get_session(struct openssl_qp *qp, struct rte_crypto_op *op)
+{
+ struct openssl_session *sess = NULL;
+ struct openssl_asym_session *asym_sess = NULL;
+
+ if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+ if (op->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
+ /* get existing session */
+ if (likely(op->sym->session != NULL))
+ sess = (struct openssl_session *)
+ get_sym_session_private_data(
+ op->sym->session,
+ cryptodev_driver_id);
+ } else {
+ if (likely(op->asym->session != NULL))
+ asym_sess = (struct openssl_asym_session *)
+ get_asym_session_private_data(
+ op->asym->session,
+ cryptodev_driver_id);
+ if (asym_sess == NULL)
+ op->status =
+ RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+ return asym_sess;
+ }
+ } else {
+ /* sessionless asymmetric not supported */
+ if (op->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC)
+ return NULL;
+
+ /* provide internal session */
+ void *_sess = NULL;
+ void *_sess_private_data = NULL;
+
+ if (rte_mempool_get(qp->sess_mp, (void **)&_sess))
+ return NULL;
+
+ if (rte_mempool_get(qp->sess_mp, (void **)&_sess_private_data))
+ return NULL;
+
+ sess = (struct openssl_session *)_sess_private_data;
+
+ if (unlikely(openssl_set_session_parameters(sess,
+ op->sym->xform) != 0)) {
+ rte_mempool_put(qp->sess_mp, _sess);
+ rte_mempool_put(qp->sess_mp, _sess_private_data);
+ sess = NULL;
+ }
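+ /* Attach the temporary session to the op so process_op()
+ * can return it to the mempool after processing
+ */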
+ op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
+ set_sym_session_private_data(op->sym->session,
+ cryptodev_driver_id, _sess_private_data);
+ }
+
+ if (sess == NULL)
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+
+ return sess;
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Process Operations
+ *------------------------------------------------------------------------------
+ */
+static inline int
+process_openssl_encryption_update(struct rte_mbuf *mbuf_src, int offset,
+ uint8_t **dst, int srclen, EVP_CIPHER_CTX *ctx)
+{
+ struct rte_mbuf *m;
+ int dstlen;
+ int l, n = srclen;
+ uint8_t *src;
+
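+ /* Walk the segment chain to the mbuf containing the requested offset */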
+ for (m = mbuf_src; m != NULL && offset > rte_pktmbuf_data_len(m);
+ m = m->next)
+ offset -= rte_pktmbuf_data_len(m);
+
+ if (m == 0)
+ return -1;
+
+ src = rte_pktmbuf_mtod_offset(m, uint8_t *, offset);
+
+ l = rte_pktmbuf_data_len(m) - offset;
+ if (srclen <= l) {
+ if (EVP_EncryptUpdate(ctx, *dst, &dstlen, src, srclen) <= 0)
+ return -1;
+ *dst += l;
+ return 0;
+ }
+
+ if (EVP_EncryptUpdate(ctx, *dst, &dstlen, src, l) <= 0)
+ return -1;
+
+ *dst += dstlen;
+ n -= l;
+
+ for (m = m->next; (m != NULL) && (n > 0); m = m->next) {
+ src = rte_pktmbuf_mtod(m, uint8_t *);
+ l = rte_pktmbuf_data_len(m) < n ? rte_pktmbuf_data_len(m) : n;
+ if (EVP_EncryptUpdate(ctx, *dst, &dstlen, src, l) <= 0)
+ return -1;
+ *dst += dstlen;
+ n -= l;
+ }
+
+ return 0;
+}
+
+static inline int
+process_openssl_decryption_update(struct rte_mbuf *mbuf_src, int offset,
+ uint8_t **dst, int srclen, EVP_CIPHER_CTX *ctx)
+{
+ struct rte_mbuf *m;
+ int dstlen;
+ int l, n = srclen;
+ uint8_t *src;
+
+ for (m = mbuf_src; m != NULL && offset > rte_pktmbuf_data_len(m);
+ m = m->next)
+ offset -= rte_pktmbuf_data_len(m);
+
+ if (m == 0)
+ return -1;
+
+ src = rte_pktmbuf_mtod_offset(m, uint8_t *, offset);
+
+ l = rte_pktmbuf_data_len(m) - offset;
+ if (srclen <= l) {
+ if (EVP_DecryptUpdate(ctx, *dst, &dstlen, src, srclen) <= 0)
+ return -1;
+ *dst += l;
+ return 0;
+ }
+
+ if (EVP_DecryptUpdate(ctx, *dst, &dstlen, src, l) <= 0)
+ return -1;
+
+ *dst += dstlen;
+ n -= l;
+
+ for (m = m->next; (m != NULL) && (n > 0); m = m->next) {
+ src = rte_pktmbuf_mtod(m, uint8_t *);
+ l = rte_pktmbuf_data_len(m) < n ? rte_pktmbuf_data_len(m) : n;
+ if (EVP_DecryptUpdate(ctx, *dst, &dstlen, src, l) <= 0)
+ return -1;
+ *dst += dstlen;
+ n -= l;
+ }
+
+ return 0;
+}
+
+/** Process standard openssl cipher encryption */
+static int
+process_openssl_cipher_encrypt(struct rte_mbuf *mbuf_src, uint8_t *dst,
+ int offset, uint8_t *iv, int srclen, EVP_CIPHER_CTX *ctx)
+{
+ int totlen;
+
+ if (EVP_EncryptInit_ex(ctx, NULL, NULL, NULL, iv) <= 0)
+ goto process_cipher_encrypt_err;
+
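+ /* Disable OpenSSL padding; only the exact source length is processed */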
+ EVP_CIPHER_CTX_set_padding(ctx, 0);
+
+ if (process_openssl_encryption_update(mbuf_src, offset, &dst,
+ srclen, ctx))
+ goto process_cipher_encrypt_err;
+
+ if (EVP_EncryptFinal_ex(ctx, dst, &totlen) <= 0)
+ goto process_cipher_encrypt_err;
+
+ return 0;
+
+process_cipher_encrypt_err:
+ OPENSSL_LOG(ERR, "Process openssl cipher encrypt failed");
+ return -EINVAL;
+}
+
+/** Process openssl cipher BPI encryption (ECB-encrypted IV XORed with the data) */
+static int
+process_openssl_cipher_bpi_encrypt(uint8_t *src, uint8_t *dst,
+ uint8_t *iv, int srclen,
+ EVP_CIPHER_CTX *ctx)
+{
+ uint8_t i;
+ uint8_t encrypted_iv[DES_BLOCK_SIZE];
+ int encrypted_ivlen;
+
+ if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen,
+ iv, DES_BLOCK_SIZE) <= 0)
+ goto process_cipher_encrypt_err;
+
+ for (i = 0; i < srclen; i++)
+ *(dst + i) = *(src + i) ^ (encrypted_iv[i]);
+
+ return 0;
+
+process_cipher_encrypt_err:
+ OPENSSL_LOG(ERR, "Process openssl cipher bpi encrypt failed");
+ return -EINVAL;
+}
+
+/** Process standard openssl cipher decryption */
+static int
+process_openssl_cipher_decrypt(struct rte_mbuf *mbuf_src, uint8_t *dst,
+ int offset, uint8_t *iv, int srclen, EVP_CIPHER_CTX *ctx)
+{
+ int totlen;
+
+ if (EVP_DecryptInit_ex(ctx, NULL, NULL, NULL, iv) <= 0)
+ goto process_cipher_decrypt_err;
+
+ EVP_CIPHER_CTX_set_padding(ctx, 0);
+
+ if (process_openssl_decryption_update(mbuf_src, offset, &dst,
+ srclen, ctx))
+ goto process_cipher_decrypt_err;
+
+ if (EVP_DecryptFinal_ex(ctx, dst, &totlen) <= 0)
+ goto process_cipher_decrypt_err;
+ return 0;
+
+process_cipher_decrypt_err:
+ OPENSSL_LOG(ERR, "Process openssl cipher decrypt failed");
+ return -EINVAL;
+}
+
+/** Process 3DES-CTR encryption/decryption */
+static int
+process_openssl_cipher_des3ctr(struct rte_mbuf *mbuf_src, uint8_t *dst,
+ int offset, uint8_t *iv, uint8_t *key, int srclen,
+ EVP_CIPHER_CTX *ctx)
+{
+ uint8_t ebuf[8], ctr[8];
+ int unused, n;
+ struct rte_mbuf *m;
+ uint8_t *src;
+ int l;
+
+ for (m = mbuf_src; m != NULL && offset > rte_pktmbuf_data_len(m);
+ m = m->next)
+ offset -= rte_pktmbuf_data_len(m);
+
+ if (m == 0)
+ goto process_cipher_des3ctr_err;
+
+ src = rte_pktmbuf_mtod_offset(m, uint8_t *, offset);
+ l = rte_pktmbuf_data_len(m) - offset;
+
+ /* We use 3DES encryption also for decryption.
+ * IV is not important for 3DES ecb
+ */
+ if (EVP_EncryptInit_ex(ctx, EVP_des_ede3_ecb(), NULL, key, NULL) <= 0)
+ goto process_cipher_des3ctr_err;
+
+ memcpy(ctr, iv, 8);
+
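+ /* ECB-encrypt the counter to produce 8 keystream bytes at a time
+ * and XOR them with the source, following the mbuf chain
+ */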
+ for (n = 0; n < srclen; n++) {
+ if (n % 8 == 0) {
+ if (EVP_EncryptUpdate(ctx,
+ (unsigned char *)&ebuf, &unused,
+ (const unsigned char *)&ctr, 8) <= 0)
+ goto process_cipher_des3ctr_err;
+ ctr_inc(ctr);
+ }
+ dst[n] = *(src++) ^ ebuf[n % 8];
+
+ l--;
+ if (!l) {
+ m = m->next;
+ if (m) {
+ src = rte_pktmbuf_mtod(m, uint8_t *);
+ l = rte_pktmbuf_data_len(m);
+ }
+ }
+ }
+
+ return 0;
+
+process_cipher_des3ctr_err:
+ OPENSSL_LOG(ERR, "Process openssl cipher des 3 ede ctr failed");
+ return -EINVAL;
+}
+
+/** Process AES-GCM encrypt algorithm */
+static int
+process_openssl_auth_encryption_gcm(struct rte_mbuf *mbuf_src, int offset,
+ int srclen, uint8_t *aad, int aadlen, uint8_t *iv,
+ uint8_t *dst, uint8_t *tag, EVP_CIPHER_CTX *ctx)
+{
+ int len = 0, unused = 0;
+ uint8_t empty[] = {};
+
+ if (EVP_EncryptInit_ex(ctx, NULL, NULL, NULL, iv) <= 0)
+ goto process_auth_encryption_gcm_err;
+
+ if (aadlen > 0)
+ if (EVP_EncryptUpdate(ctx, NULL, &len, aad, aadlen) <= 0)
+ goto process_auth_encryption_gcm_err;
+
+ if (srclen > 0)
+ if (process_openssl_encryption_update(mbuf_src, offset, &dst,
+ srclen, ctx))
+ goto process_auth_encryption_gcm_err;
+
+ /* Workaround for OpenSSL bug in versions lower than 1.0.1f */
+ if (EVP_EncryptUpdate(ctx, empty, &unused, empty, 0) <= 0)
+ goto process_auth_encryption_gcm_err;
+
+ if (EVP_EncryptFinal_ex(ctx, dst, &len) <= 0)
+ goto process_auth_encryption_gcm_err;
+
+ if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_GCM_GET_TAG, 16, tag) <= 0)
+ goto process_auth_encryption_gcm_err;
+
+ return 0;
+
+process_auth_encryption_gcm_err:
+ OPENSSL_LOG(ERR, "Process openssl auth encryption gcm failed");
+ return -EINVAL;
+}
+
+/** Process AES-CCM encrypt algorithm */
+static int
+process_openssl_auth_encryption_ccm(struct rte_mbuf *mbuf_src, int offset,
+ int srclen, uint8_t *aad, int aadlen, uint8_t *iv,
+ uint8_t *dst, uint8_t *tag, uint8_t taglen, EVP_CIPHER_CTX *ctx)
+{
+ int len = 0;
+
+ if (EVP_EncryptInit_ex(ctx, NULL, NULL, NULL, iv) <= 0)
+ goto process_auth_encryption_ccm_err;
+
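+ /* CCM requires the total payload length before any AAD or data */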
+ if (EVP_EncryptUpdate(ctx, NULL, &len, NULL, srclen) <= 0)
+ goto process_auth_encryption_ccm_err;
+
+ if (aadlen > 0)
+ /*
+ * For AES-CCM, the actual AAD is placed
+ * 18 bytes after the start of the AAD field,
+ * according to the API.
+ */
+ if (EVP_EncryptUpdate(ctx, NULL, &len, aad + 18, aadlen) <= 0)
+ goto process_auth_encryption_ccm_err;
+
+ if (srclen > 0)
+ if (process_openssl_encryption_update(mbuf_src, offset, &dst,
+ srclen, ctx))
+ goto process_auth_encryption_ccm_err;
+
+ if (EVP_EncryptFinal_ex(ctx, dst, &len) <= 0)
+ goto process_auth_encryption_ccm_err;
+
+ if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_CCM_GET_TAG, taglen, tag) <= 0)
+ goto process_auth_encryption_ccm_err;
+
+ return 0;
+
+process_auth_encryption_ccm_err:
+ OPENSSL_LOG(ERR, "Process openssl auth encryption ccm failed");
+ return -EINVAL;
+}
+
+/** Process AES-GCM decrypt algorithm */
+static int
+process_openssl_auth_decryption_gcm(struct rte_mbuf *mbuf_src, int offset,
+ int srclen, uint8_t *aad, int aadlen, uint8_t *iv,
+ uint8_t *dst, uint8_t *tag, EVP_CIPHER_CTX *ctx)
+{
+ int len = 0, unused = 0;
+ uint8_t empty[] = {};
+
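+ /* Set the expected tag before decrypting; EVP_DecryptFinal_ex
+ * then reports the authentication result
+ */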
+ if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_GCM_SET_TAG, 16, tag) <= 0)
+ goto process_auth_decryption_gcm_err;
+
+ if (EVP_DecryptInit_ex(ctx, NULL, NULL, NULL, iv) <= 0)
+ goto process_auth_decryption_gcm_err;
+
+ if (aadlen > 0)
+ if (EVP_DecryptUpdate(ctx, NULL, &len, aad, aadlen) <= 0)
+ goto process_auth_decryption_gcm_err;
+
+ if (srclen > 0)
+ if (process_openssl_decryption_update(mbuf_src, offset, &dst,
+ srclen, ctx))
+ goto process_auth_decryption_gcm_err;
+
+ /* Workaround for OpenSSL bug in versions lower than 1.0.1f */
+ if (EVP_DecryptUpdate(ctx, empty, &unused, empty, 0) <= 0)
+ goto process_auth_decryption_gcm_err;
+
+ if (EVP_DecryptFinal_ex(ctx, dst, &len) <= 0)
+ return -EFAULT;
+
+ return 0;
+
+process_auth_decryption_gcm_err:
+ OPENSSL_LOG(ERR, "Process openssl auth decryption gcm failed");
+ return -EINVAL;
+}
+
+/** Process AES-CCM decrypt algorithm */
+static int
+process_openssl_auth_decryption_ccm(struct rte_mbuf *mbuf_src, int offset,
+ int srclen, uint8_t *aad, int aadlen, uint8_t *iv,
+ uint8_t *dst, uint8_t *tag, uint8_t tag_len,
+ EVP_CIPHER_CTX *ctx)
+{
+ int len = 0;
+
+ if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_CCM_SET_TAG, tag_len, tag) <= 0)
+ goto process_auth_decryption_ccm_err;
+
+ if (EVP_DecryptInit_ex(ctx, NULL, NULL, NULL, iv) <= 0)
+ goto process_auth_decryption_ccm_err;
+
+ if (EVP_DecryptUpdate(ctx, NULL, &len, NULL, srclen) <= 0)
+ goto process_auth_decryption_ccm_err;
+
+ if (aadlen > 0)
+ /*
+ * For AES-CCM, the actual AAD is placed
+ * 18 bytes after the start of the AAD field,
+ * according to the API.
+ */
+ if (EVP_DecryptUpdate(ctx, NULL, &len, aad + 18, aadlen) <= 0)
+ goto process_auth_decryption_ccm_err;
+
+ if (srclen > 0)
+ if (process_openssl_decryption_update(mbuf_src, offset, &dst,
+ srclen, ctx))
+ return -EFAULT;
+
+ return 0;
+
+process_auth_decryption_ccm_err:
+ OPENSSL_LOG(ERR, "Process openssl auth decryption ccm failed");
+ return -EINVAL;
+}
+
+/** Process standard openssl auth algorithms */
+static int
+process_openssl_auth(struct rte_mbuf *mbuf_src, uint8_t *dst, int offset,
+ __rte_unused uint8_t *iv, __rte_unused EVP_PKEY * pkey,
+ int srclen, EVP_MD_CTX *ctx, const EVP_MD *algo)
+{
+ unsigned int dstlen;
+ struct rte_mbuf *m;
+ int l, n = srclen;
+ uint8_t *src;
+
+ for (m = mbuf_src; m != NULL && offset > rte_pktmbuf_data_len(m);
+ m = m->next)
+ offset -= rte_pktmbuf_data_len(m);
+
+ if (m == 0)
+ goto process_auth_err;
+
+ if (EVP_DigestInit_ex(ctx, algo, NULL) <= 0)
+ goto process_auth_err;
+
+ src = rte_pktmbuf_mtod_offset(m, uint8_t *, offset);
+
+ l = rte_pktmbuf_data_len(m) - offset;
+ if (srclen <= l) {
+ if (EVP_DigestUpdate(ctx, (char *)src, srclen) <= 0)
+ goto process_auth_err;
+ goto process_auth_final;
+ }
+
+ if (EVP_DigestUpdate(ctx, (char *)src, l) <= 0)
+ goto process_auth_err;
+
+ n -= l;
+
+ for (m = m->next; (m != NULL) && (n > 0); m = m->next) {
+ src = rte_pktmbuf_mtod(m, uint8_t *);
+ l = rte_pktmbuf_data_len(m) < n ? rte_pktmbuf_data_len(m) : n;
+ if (EVP_DigestUpdate(ctx, (char *)src, l) <= 0)
+ goto process_auth_err;
+ n -= l;
+ }
+
+process_auth_final:
+ if (EVP_DigestFinal_ex(ctx, dst, &dstlen) <= 0)
+ goto process_auth_err;
+ return 0;
+
+process_auth_err:
+ OPENSSL_LOG(ERR, "Process openssl auth failed");
+ return -EINVAL;
+}
+
+/** Process standard openssl auth algorithms with hmac */
+static int
+process_openssl_auth_hmac(struct rte_mbuf *mbuf_src, uint8_t *dst, int offset,
+ int srclen, HMAC_CTX *ctx)
+{
+ unsigned int dstlen;
+ struct rte_mbuf *m;
+ int l, n = srclen;
+ uint8_t *src;
+
+ for (m = mbuf_src; m != NULL && offset > rte_pktmbuf_data_len(m);
+ m = m->next)
+ offset -= rte_pktmbuf_data_len(m);
+
+ if (m == 0)
+ goto process_auth_err;
+
+ src = rte_pktmbuf_mtod_offset(m, uint8_t *, offset);
+
+ l = rte_pktmbuf_data_len(m) - offset;
+ if (srclen <= l) {
+ if (HMAC_Update(ctx, (unsigned char *)src, srclen) != 1)
+ goto process_auth_err;
+ goto process_auth_final;
+ }
+
+ if (HMAC_Update(ctx, (unsigned char *)src, l) != 1)
+ goto process_auth_err;
+
+ n -= l;
+
+ for (m = m->next; (m != NULL) && (n > 0); m = m->next) {
+ src = rte_pktmbuf_mtod(m, uint8_t *);
+ l = rte_pktmbuf_data_len(m) < n ? rte_pktmbuf_data_len(m) : n;
+ if (HMAC_Update(ctx, (unsigned char *)src, l) != 1)
+ goto process_auth_err;
+ n -= l;
+ }
+
+process_auth_final:
+ if (HMAC_Final(ctx, dst, &dstlen) != 1)
+ goto process_auth_err;
+
+ if (unlikely(HMAC_Init_ex(ctx, NULL, 0, NULL, NULL) != 1))
+ goto process_auth_err;
+
+ return 0;
+
+process_auth_err:
+ OPENSSL_LOG(ERR, "Process openssl auth failed");
+ return -EINVAL;
+}
+
+/*----------------------------------------------------------------------------*/
+
+/** Process auth/cipher combined operation */
+static void
+process_openssl_combined_op
+ (struct rte_crypto_op *op, struct openssl_session *sess,
+ struct rte_mbuf *mbuf_src, struct rte_mbuf *mbuf_dst)
+{
+ /* cipher */
+ uint8_t *dst = NULL, *iv, *tag, *aad;
+ int srclen, aadlen, status = -1;
+ uint32_t offset;
+ uint8_t taglen;
+
+ /*
+ * Segmented destination buffer is not supported for
+ * encryption/decryption
+ */
+ if (!rte_pktmbuf_is_contiguous(mbuf_dst)) {
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return;
+ }
+
+ iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+ sess->iv.offset);
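+ /* AES-GMAC runs as GCM with the whole auth region as AAD and no payload */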
+ if (sess->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
+ srclen = 0;
+ offset = op->sym->auth.data.offset;
+ aadlen = op->sym->auth.data.length;
+ aad = rte_pktmbuf_mtod_offset(mbuf_src, uint8_t *,
+ op->sym->auth.data.offset);
+ tag = op->sym->auth.digest.data;
+ if (tag == NULL)
+ tag = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
+ offset + aadlen);
+ } else {
+ srclen = op->sym->aead.data.length;
+ dst = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
+ op->sym->aead.data.offset);
+ offset = op->sym->aead.data.offset;
+ aad = op->sym->aead.aad.data;
+ aadlen = sess->auth.aad_length;
+ tag = op->sym->aead.digest.data;
+ if (tag == NULL)
+ tag = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
+ offset + srclen);
+ }
+
+ taglen = sess->auth.digest_length;
+
+ if (sess->cipher.direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
+ if (sess->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC ||
+ sess->aead_algo == RTE_CRYPTO_AEAD_AES_GCM)
+ status = process_openssl_auth_encryption_gcm(
+ mbuf_src, offset, srclen,
+ aad, aadlen, iv,
+ dst, tag, sess->cipher.ctx);
+ else
+ status = process_openssl_auth_encryption_ccm(
+ mbuf_src, offset, srclen,
+ aad, aadlen, iv,
+ dst, tag, taglen, sess->cipher.ctx);
+
+ } else {
+ if (sess->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC ||
+ sess->aead_algo == RTE_CRYPTO_AEAD_AES_GCM)
+ status = process_openssl_auth_decryption_gcm(
+ mbuf_src, offset, srclen,
+ aad, aadlen, iv,
+ dst, tag, sess->cipher.ctx);
+ else
+ status = process_openssl_auth_decryption_ccm(
+ mbuf_src, offset, srclen,
+ aad, aadlen, iv,
+ dst, tag, taglen, sess->cipher.ctx);
+ }
+
+ if (status != 0) {
+ if (status == (-EFAULT) &&
+ sess->auth.operation ==
+ RTE_CRYPTO_AUTH_OP_VERIFY)
+ op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ else
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ }
+}
+
+/** Process cipher operation */
+static void
+process_openssl_cipher_op
+ (struct rte_crypto_op *op, struct openssl_session *sess,
+ struct rte_mbuf *mbuf_src, struct rte_mbuf *mbuf_dst)
+{
+ uint8_t *dst, *iv;
+ int srclen, status;
+
+ /*
+ * Segmented destination buffer is not supported for
+ * encryption/decryption
+ */
+ if (!rte_pktmbuf_is_contiguous(mbuf_dst)) {
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return;
+ }
+
+ srclen = op->sym->cipher.data.length;
+ dst = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
+ op->sym->cipher.data.offset);
+
+ iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+ sess->iv.offset);
+
+ if (sess->cipher.mode == OPENSSL_CIPHER_LIB)
+ if (sess->cipher.direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+ status = process_openssl_cipher_encrypt(mbuf_src, dst,
+ op->sym->cipher.data.offset, iv,
+ srclen, sess->cipher.ctx);
+ else
+ status = process_openssl_cipher_decrypt(mbuf_src, dst,
+ op->sym->cipher.data.offset, iv,
+ srclen, sess->cipher.ctx);
+ else
+ status = process_openssl_cipher_des3ctr(mbuf_src, dst,
+ op->sym->cipher.data.offset, iv,
+ sess->cipher.key.data, srclen,
+ sess->cipher.ctx);
+
+ if (status != 0)
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+}
+
+/** Process DOCSIS BPI cipher operation */
+static void
+process_openssl_docsis_bpi_op(struct rte_crypto_op *op,
+ struct openssl_session *sess, struct rte_mbuf *mbuf_src,
+ struct rte_mbuf *mbuf_dst)
+{
+ uint8_t *src, *dst, *iv;
+ uint8_t block_size, last_block_len;
+ int srclen, status = 0;
+
+ srclen = op->sym->cipher.data.length;
+ src = rte_pktmbuf_mtod_offset(mbuf_src, uint8_t *,
+ op->sym->cipher.data.offset);
+ dst = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
+ op->sym->cipher.data.offset);
+
+ iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+ sess->iv.offset);
+
+ block_size = DES_BLOCK_SIZE;
+
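+ /* Full DES blocks use CBC; a trailing partial block uses residual
+ * block termination (ECB-encrypted IV XORed with the data)
+ */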
+ last_block_len = srclen % block_size;
+ if (sess->cipher.direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
+ /* Input shorter than a block: ECB-encrypt the IV and XOR it with the data */
+ if (srclen < block_size) {
+ status = process_openssl_cipher_bpi_encrypt(src, dst,
+ iv, srclen,
+ sess->cipher.bpi_ctx);
+ } else {
+ srclen -= last_block_len;
+ /* Encrypt the block-aligned part of the stream with CBC mode */
+ status = process_openssl_cipher_encrypt(mbuf_src, dst,
+ op->sym->cipher.data.offset, iv,
+ srclen, sess->cipher.ctx);
+ if (last_block_len) {
+ /* Point at last block */
+ dst += srclen;
+ /*
+ * IV is the last encrypted block from
+ * the previous operation
+ */
+ iv = dst - block_size;
+ src += srclen;
+ srclen = last_block_len;
+ /* Encrypt the last frame with ECB mode */
+ status |= process_openssl_cipher_bpi_encrypt(src,
+ dst, iv,
+ srclen, sess->cipher.bpi_ctx);
+ }
+ }
+ } else {
+ /* Input shorter than a block: decrypt with ECB mode (encrypt, as it is the same operation) */
+ if (srclen < block_size) {
+ status = process_openssl_cipher_bpi_encrypt(src, dst,
+ iv,
+ srclen,
+ sess->cipher.bpi_ctx);
+ } else {
+ if (last_block_len) {
+ /* Point at last block */
+ dst += srclen - last_block_len;
+ src += srclen - last_block_len;
+ /*
+ * IV is the last full block
+ */
+ iv = src - block_size;
+ /*
+ * Decrypt the last frame with ECB mode
+ * (encrypt, as it is the same operation)
+ */
+ status = process_openssl_cipher_bpi_encrypt(src,
+ dst, iv,
+ last_block_len, sess->cipher.bpi_ctx);
+ /* Prepare parameters for CBC mode op */
+ iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+ sess->iv.offset);
+ dst += last_block_len - srclen;
+ srclen -= last_block_len;
+ }
+
+ /* Decrypt with CBC mode */
+ status |= process_openssl_cipher_decrypt(mbuf_src, dst,
+ op->sym->cipher.data.offset, iv,
+ srclen, sess->cipher.ctx);
+ }
+ }
+
+ if (status != 0)
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+}
+
+/** Process auth operation */
+static void
+process_openssl_auth_op(struct openssl_qp *qp, struct rte_crypto_op *op,
+ struct openssl_session *sess, struct rte_mbuf *mbuf_src,
+ struct rte_mbuf *mbuf_dst)
+{
+ uint8_t *dst;
+ int srclen, status;
+
+ srclen = op->sym->auth.data.length;
+
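+ /* For verify, compute the digest into a scratch buffer and
+ * compare it against the digest supplied in the op
+ */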
+ if (sess->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY)
+ dst = qp->temp_digest;
+ else {
+ dst = op->sym->auth.digest.data;
+ if (dst == NULL)
+ dst = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
+ op->sym->auth.data.offset +
+ op->sym->auth.data.length);
+ }
+
+ switch (sess->auth.mode) {
+ case OPENSSL_AUTH_AS_AUTH:
+ status = process_openssl_auth(mbuf_src, dst,
+ op->sym->auth.data.offset, NULL, NULL, srclen,
+ sess->auth.auth.ctx, sess->auth.auth.evp_algo);
+ break;
+ case OPENSSL_AUTH_AS_HMAC:
+ status = process_openssl_auth_hmac(mbuf_src, dst,
+ op->sym->auth.data.offset, srclen,
+ sess->auth.hmac.ctx);
+ break;
+ default:
+ status = -1;
+ break;
+ }
+
+ if (sess->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
+ if (memcmp(dst, op->sym->auth.digest.data,
+ sess->auth.digest_length) != 0) {
+ op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ }
+ }
+
+ if (status != 0)
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+}
+
+/* process dsa sign operation */
+static int
+process_openssl_dsa_sign_op(struct rte_crypto_op *cop,
+ struct openssl_asym_session *sess)
+{
+ struct rte_crypto_dsa_op_param *op = &cop->asym->dsa;
+ DSA *dsa = sess->u.s.dsa;
+ DSA_SIG *sign = NULL;
+
+ sign = DSA_do_sign(op->message.data,
+ op->message.length,
+ dsa);
+
+ if (sign == NULL) {
+ OPENSSL_LOG(ERR, "%s:%d\n", __func__, __LINE__);
+ cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ } else {
+ const BIGNUM *r = NULL, *s = NULL;
+ get_dsa_sign(sign, r, s);
+
+ op->r.length = BN_bn2bin(r, op->r.data);
+ op->s.length = BN_bn2bin(s, op->s.data);
+ cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ }
+
+ DSA_SIG_free(sign);
+
+ return 0;
+}
+
+/* process dsa verify operation */
+static int
+process_openssl_dsa_verify_op(struct rte_crypto_op *cop,
+ struct openssl_asym_session *sess)
+{
+ struct rte_crypto_dsa_op_param *op = &cop->asym->dsa;
+ DSA *dsa = sess->u.s.dsa;
+ int ret;
+ DSA_SIG *sign = DSA_SIG_new();
+ BIGNUM *r = NULL, *s = NULL;
+ BIGNUM *pub_key = NULL;
+
+ if (sign == NULL) {
+ OPENSSL_LOG(ERR, " %s:%d\n", __func__, __LINE__);
+ cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return -1;
+ }
+
+ r = BN_bin2bn(op->r.data,
+ op->r.length,
+ r);
+ s = BN_bin2bn(op->s.data,
+ op->s.length,
+ s);
+ pub_key = BN_bin2bn(op->y.data,
+ op->y.length,
+ pub_key);
+ if (!r || !s || !pub_key) {
+ if (r)
+ BN_free(r);
+ if (s)
+ BN_free(s);
+ if (pub_key)
+ BN_free(pub_key);
+
+ cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return -1;
+ }
+ set_dsa_sign(sign, r, s);
+ set_dsa_pub_key(dsa, pub_key);
+
+ ret = DSA_do_verify(op->message.data,
+ op->message.length,
+ sign,
+ dsa);
+
+ if (ret != 1)
+ cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ else
+ cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+ DSA_SIG_free(sign);
+
+ return 0;
+}
+
+/* process dh operation */
+static int
+process_openssl_dh_op(struct rte_crypto_op *cop,
+ struct openssl_asym_session *sess)
+{
+ struct rte_crypto_dh_op_param *op = &cop->asym->dh;
+ DH *dh_key = sess->u.dh.dh_key;
+ BIGNUM *priv_key = NULL;
+ int ret = 0;
+
+ if (sess->u.dh.key_op &
+ (1 << RTE_CRYPTO_ASYM_OP_SHARED_SECRET_COMPUTE)) {
+ /* compute shared secret using peer public key
+ * and current private key
+ * shared secret = peer_key ^ priv_key mod p
+ */
+ BIGNUM *peer_key = NULL;
+
+ /* copy private key and peer key and compute shared secret */
+ peer_key = BN_bin2bn(op->pub_key.data,
+ op->pub_key.length,
+ peer_key);
+ if (peer_key == NULL) {
+ cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return -1;
+ }
+ priv_key = BN_bin2bn(op->priv_key.data,
+ op->priv_key.length,
+ priv_key);
+ if (priv_key == NULL) {
+ BN_free(peer_key);
+ cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return -1;
+ }
+ set_dh_priv_key(dh_key, priv_key, ret);
+ if (ret) {
+ OPENSSL_LOG(ERR, "Failed to set private key\n");
+ cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ BN_free(peer_key);
+ BN_free(priv_key);
+ return 0;
+ }
+
+ ret = DH_compute_key(
+ op->shared_secret.data,
+ peer_key, dh_key);
+ if (ret < 0) {
+ cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ BN_free(peer_key);
+ /* The private key is already loaded into dh_key;
+ * do not free it directly here, DH_free() will
+ * release it later.
+ */
+ return 0;
+ }
+ cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ op->shared_secret.length = ret;
+ BN_free(peer_key);
+ return 0;
+ }
+
+ /*
+ * other options are public and private key generations.
+ *
+ * if user provides private key,
+ * then first set DH with user provided private key
+ */
+ if ((sess->u.dh.key_op &
+ (1 << RTE_CRYPTO_ASYM_OP_PUBLIC_KEY_GENERATE)) &&
+ !(sess->u.dh.key_op &
+ (1 << RTE_CRYPTO_ASYM_OP_PRIVATE_KEY_GENERATE))) {
+ /* generate public key using user-provided private key
+ * pub_key = g ^ priv_key mod p
+ */
+
+ /* load private key into DH */
+ priv_key = BN_bin2bn(op->priv_key.data,
+ op->priv_key.length,
+ priv_key);
+ if (priv_key == NULL) {
+ cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return -1;
+ }
+ set_dh_priv_key(dh_key, priv_key, ret);
+ if (ret) {
+ OPENSSL_LOG(ERR, "Failed to set private key\n");
+ cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ BN_free(priv_key);
+ return 0;
+ }
+ }
+
+ /* generate public and private key pair.
+ *
+ * if private key already set, generates only public key.
+ *
+ * if private key is not already set, then set it to random value
+ * and update internal private key.
+ */
+ if (!DH_generate_key(dh_key)) {
+ cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return 0;
+ }
+
+ if (sess->u.dh.key_op & (1 << RTE_CRYPTO_ASYM_OP_PUBLIC_KEY_GENERATE)) {
+ const BIGNUM *pub_key = NULL;
+
+ OPENSSL_LOG(DEBUG, "%s:%d update public key\n",
+ __func__, __LINE__);
+
+ /* get the generated keys */
+ get_dh_pub_key(dh_key, pub_key);
+
+ /* output public key */
+ op->pub_key.length = BN_bn2bin(pub_key,
+ op->pub_key.data);
+ }
+
+ if (sess->u.dh.key_op &
+ (1 << RTE_CRYPTO_ASYM_OP_PRIVATE_KEY_GENERATE)) {
+ const BIGNUM *priv_key = NULL;
+
+ OPENSSL_LOG(DEBUG, "%s:%d updated priv key\n",
+ __func__, __LINE__);
+
+ /* get the generated keys */
+ get_dh_priv_key(dh_key, priv_key);
+
+ /* provide generated private key back to user */
+ op->priv_key.length = BN_bn2bin(priv_key,
+ op->priv_key.data);
+ }
+
+ cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+ return 0;
+}
+
+/* process modinv operation */
+static int
+process_openssl_modinv_op(struct rte_crypto_op *cop,
+ struct openssl_asym_session *sess)
+{
+ struct rte_crypto_asym_op *op = cop->asym;
+ BIGNUM *base = BN_CTX_get(sess->u.m.ctx);
+ BIGNUM *res = BN_CTX_get(sess->u.m.ctx);
+
+ if (unlikely(base == NULL || res == NULL)) {
+ if (base)
+ BN_free(base);
+ if (res)
+ BN_free(res);
+ cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return -1;
+ }
+
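+ /* Convert the operand to a BIGNUM and invert it modulo the session modulus */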
+ base = BN_bin2bn((const unsigned char *)op->modinv.base.data,
+ op->modinv.base.length, base);
+
+ if (BN_mod_inverse(res, base, sess->u.m.modulus, sess->u.m.ctx)) {
+ cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ op->modinv.base.length = BN_bn2bin(res, op->modinv.base.data);
+ } else {
+ cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ }
+
+ return 0;
+}
+
+/* process modexp operation */
+static int
+process_openssl_modexp_op(struct rte_crypto_op *cop,
+ struct openssl_asym_session *sess)
+{
+ struct rte_crypto_asym_op *op = cop->asym;
+ BIGNUM *base = BN_CTX_get(sess->u.e.ctx);
+ BIGNUM *res = BN_CTX_get(sess->u.e.ctx);
+
+ if (unlikely(base == NULL || res == NULL)) {
+ if (base)
+ BN_free(base);
+ if (res)
+ BN_free(res);
+ cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return -1;
+ }
+
+ base = BN_bin2bn((const unsigned char *)op->modinv.base.data,
+ op->modinv.base.length, base);
+
+ if (BN_mod_exp(res, base, sess->u.e.exp,
+ sess->u.e.mod, sess->u.e.ctx)) {
+ op->modinv.base.length = BN_bn2bin(res, op->modinv.base.data);
+ cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ } else {
+ cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ }
+
+ return 0;
+}
+
+/* process rsa operations */
+static int
+process_openssl_rsa_op(struct rte_crypto_op *cop,
+ struct openssl_asym_session *sess)
+{
+ int ret = 0;
+ struct rte_crypto_asym_op *op = cop->asym;
+ RSA *rsa = sess->u.r.rsa;
+ uint32_t pad = (op->rsa.pad);
+
+ switch (pad) {
+ case RTE_CRYPTO_RSA_PKCS1_V1_5_BT0:
+ case RTE_CRYPTO_RSA_PKCS1_V1_5_BT1:
+ case RTE_CRYPTO_RSA_PKCS1_V1_5_BT2:
+ pad = RSA_PKCS1_PADDING;
+ break;
+ case RTE_CRYPTO_RSA_PADDING_NONE:
+ pad = RSA_NO_PADDING;
+ break;
+ default:
+ cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ OPENSSL_LOG(ERR,
+ "rsa pad type not supported %d\n", pad);
+ return 0;
+ }
+
+ switch (op->rsa.op_type) {
+ case RTE_CRYPTO_ASYM_OP_ENCRYPT:
+ ret = RSA_public_encrypt(op->rsa.message.length,
+ op->rsa.message.data,
+ op->rsa.message.data,
+ rsa,
+ pad);
+
+ if (ret > 0)
+ op->rsa.message.length = ret;
+ OPENSSL_LOG(DEBUG,
+ "length of encrypted text %d\n", ret);
+ break;
+
+ case RTE_CRYPTO_ASYM_OP_DECRYPT:
+ ret = RSA_private_decrypt(op->rsa.message.length,
+ op->rsa.message.data,
+ op->rsa.message.data,
+ rsa,
+ pad);
+ if (ret > 0)
+ op->rsa.message.length = ret;
+ break;
+
+ case RTE_CRYPTO_ASYM_OP_SIGN:
+ ret = RSA_private_encrypt(op->rsa.message.length,
+ op->rsa.message.data,
+ op->rsa.sign.data,
+ rsa,
+ pad);
+ if (ret > 0)
+ op->rsa.sign.length = ret;
+ break;
+
+ case RTE_CRYPTO_ASYM_OP_VERIFY:
+ ret = RSA_public_decrypt(op->rsa.sign.length,
+ op->rsa.sign.data,
+ op->rsa.sign.data,
+ rsa,
+ pad);
+
+ OPENSSL_LOG(DEBUG,
+ "Length of public_decrypt %d "
+ "length of message %zd\n",
+ ret, op->rsa.message.length);
+
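+ /* The plaintext recovered from the signature must match the message */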
+ if (memcmp(op->rsa.sign.data, op->rsa.message.data,
+ op->rsa.message.length)) {
+ OPENSSL_LOG(ERR,
+ "RSA sign Verification failed");
+ return -1;
+ }
+ break;
+
+ default:
+ /* allow ops with invalid args to be pushed to
+ * completion queue
+ */
+ cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ break;
+ }
+
+ if (ret < 0)
+ cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+
+ return 0;
+}
+
+static int
+process_asym_op(struct openssl_qp *qp, struct rte_crypto_op *op,
+ struct openssl_asym_session *sess)
+{
+ int retval = 0;
+
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+
+ switch (sess->xfrm_type) {
+ case RTE_CRYPTO_ASYM_XFORM_RSA:
+ retval = process_openssl_rsa_op(op, sess);
+ break;
+ case RTE_CRYPTO_ASYM_XFORM_MODEX:
+ retval = process_openssl_modexp_op(op, sess);
+ break;
+ case RTE_CRYPTO_ASYM_XFORM_MODINV:
+ retval = process_openssl_modinv_op(op, sess);
+ break;
+ case RTE_CRYPTO_ASYM_XFORM_DH:
+ retval = process_openssl_dh_op(op, sess);
+ break;
+ case RTE_CRYPTO_ASYM_XFORM_DSA:
+ if (op->asym->dsa.op_type == RTE_CRYPTO_ASYM_OP_SIGN)
+ retval = process_openssl_dsa_sign_op(op, sess);
+ else if (op->asym->dsa.op_type ==
+ RTE_CRYPTO_ASYM_OP_VERIFY)
+ retval =
+ process_openssl_dsa_verify_op(op, sess);
+ else
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ break;
+ default:
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ break;
+ }
+ if (!retval) {
+ /* op processed so push to completion queue as processed */
+ retval = rte_ring_enqueue(qp->processed_ops, (void *)op);
+ if (retval)
+ /* return error if failed to put in completion queue */
+ retval = -1;
+ }
+
+ return retval;
+}
+
+/** Process crypto operation for mbuf */
+static int
+process_op(struct openssl_qp *qp, struct rte_crypto_op *op,
+ struct openssl_session *sess)
+{
+ struct rte_mbuf *msrc, *mdst;
+ int retval;
+
+ msrc = op->sym->m_src;
+ mdst = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src;
+
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+
+ switch (sess->chain_order) {
+ case OPENSSL_CHAIN_ONLY_CIPHER:
+ process_openssl_cipher_op(op, sess, msrc, mdst);
+ break;
+ case OPENSSL_CHAIN_ONLY_AUTH:
+ process_openssl_auth_op(qp, op, sess, msrc, mdst);
+ break;
+ case OPENSSL_CHAIN_CIPHER_AUTH:
+ process_openssl_cipher_op(op, sess, msrc, mdst);
+ process_openssl_auth_op(qp, op, sess, mdst, mdst);
+ break;
+ case OPENSSL_CHAIN_AUTH_CIPHER:
+ process_openssl_auth_op(qp, op, sess, msrc, mdst);
+ process_openssl_cipher_op(op, sess, msrc, mdst);
+ break;
+ case OPENSSL_CHAIN_COMBINED:
+ process_openssl_combined_op(op, sess, msrc, mdst);
+ break;
+ case OPENSSL_CHAIN_CIPHER_BPI:
+ process_openssl_docsis_bpi_op(op, sess, msrc, mdst);
+ break;
+ default:
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ break;
+ }
+
+ /* Free session if a session-less crypto op */
+ if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+ openssl_reset_session(sess);
+ memset(sess, 0, sizeof(struct openssl_session));
+ memset(op->sym->session, 0,
+ rte_cryptodev_sym_get_header_session_size());
+ rte_mempool_put(qp->sess_mp, sess);
+ rte_mempool_put(qp->sess_mp, op->sym->session);
+ op->sym->session = NULL;
+ }
+
+ if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+ if (op->status != RTE_CRYPTO_OP_STATUS_ERROR)
+ retval = rte_ring_enqueue(qp->processed_ops, (void *)op);
+ else
+ retval = -1;
+
+ return retval;
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * PMD Framework
+ *------------------------------------------------------------------------------
+ */
+
+/** Enqueue burst */
+static uint16_t
+openssl_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ void *sess;
+ struct openssl_qp *qp = queue_pair;
+ int i, retval;
+
+ for (i = 0; i < nb_ops; i++) {
+ sess = get_session(qp, ops[i]);
+ if (unlikely(sess == NULL))
+ goto enqueue_err;
+
+ if (ops[i]->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC)
+ retval = process_op(qp, ops[i],
+ (struct openssl_session *) sess);
+ else
+ retval = process_asym_op(qp, ops[i],
+ (struct openssl_asym_session *) sess);
+ if (unlikely(retval < 0))
+ goto enqueue_err;
+ }
+
+ qp->stats.enqueued_count += i;
+ return i;
+
+enqueue_err:
+ qp->stats.enqueue_err_count++;
+ return i;
+}
+
+/** Dequeue burst */
+static uint16_t
+openssl_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct openssl_qp *qp = queue_pair;
+
+ unsigned int nb_dequeued = 0;
+
+ nb_dequeued = rte_ring_dequeue_burst(qp->processed_ops,
+ (void **)ops, nb_ops, NULL);
+ qp->stats.dequeued_count += nb_dequeued;
+
+ return nb_dequeued;
+}
+
+/** Create OPENSSL crypto device */
+static int
+cryptodev_openssl_create(const char *name,
+ struct rte_vdev_device *vdev,
+ struct rte_cryptodev_pmd_init_params *init_params)
+{
+ struct rte_cryptodev *dev;
+ struct openssl_private *internals;
+
+ dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
+ if (dev == NULL) {
+ OPENSSL_LOG(ERR, "failed to create cryptodev vdev");
+ goto init_error;
+ }
+
+ dev->driver_id = cryptodev_driver_id;
+ dev->dev_ops = rte_openssl_pmd_ops;
+
+ /* register rx/tx burst functions for data path */
+ dev->dequeue_burst = openssl_pmd_dequeue_burst;
+ dev->enqueue_burst = openssl_pmd_enqueue_burst;
+
+ dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+ RTE_CRYPTODEV_FF_CPU_AESNI |
+ RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
+ RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
+ RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO;
+
+ /* Set up device private data */
+ internals = dev->data->dev_private;
+
+ internals->max_nb_qpairs = init_params->max_nb_queue_pairs;
+
+ return 0;
+
+init_error:
+ OPENSSL_LOG(ERR, "driver %s: create failed",
+ init_params->name);
+
+ cryptodev_openssl_remove(vdev);
+ return -EFAULT;
+}
+
+/** Initialise OPENSSL crypto device */
+static int
+cryptodev_openssl_probe(struct rte_vdev_device *vdev)
+{
+ struct rte_cryptodev_pmd_init_params init_params = {
+ "",
+ sizeof(struct openssl_private),
+ rte_socket_id(),
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
+ };
+ const char *name;
+ const char *input_args;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+ input_args = rte_vdev_device_args(vdev);
+
+ rte_cryptodev_pmd_parse_input_args(&init_params, input_args);
+
+ return cryptodev_openssl_create(name, vdev, &init_params);
+}
+
+/** Uninitialise OPENSSL crypto device */
+static int
+cryptodev_openssl_remove(struct rte_vdev_device *vdev)
+{
+ struct rte_cryptodev *cryptodev;
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+
+ cryptodev = rte_cryptodev_pmd_get_named_dev(name);
+ if (cryptodev == NULL)
+ return -ENODEV;
+
+ return rte_cryptodev_pmd_destroy(cryptodev);
+}
+
+static struct rte_vdev_driver cryptodev_openssl_pmd_drv = {
+ .probe = cryptodev_openssl_probe,
+ .remove = cryptodev_openssl_remove
+};
+
+static struct cryptodev_driver openssl_crypto_drv;
+
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_OPENSSL_PMD,
+ cryptodev_openssl_pmd_drv);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_OPENSSL_PMD,
+ "max_nb_queue_pairs=<int> "
+ "socket_id=<int>");
+RTE_PMD_REGISTER_CRYPTO_DRIVER(openssl_crypto_drv,
+ cryptodev_openssl_pmd_drv.driver, cryptodev_driver_id);
+
+RTE_INIT(openssl_init_log)
+{
+ openssl_logtype_driver = rte_log_register("pmd.crypto.openssl");
+}
diff --git a/src/spdk/dpdk/drivers/crypto/openssl/rte_openssl_pmd_ops.c b/src/spdk/dpdk/drivers/crypto/openssl/rte_openssl_pmd_ops.c
new file mode 100644
index 00000000..de228439
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/openssl/rte_openssl_pmd_ops.c
@@ -0,0 +1,1264 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Intel Corporation
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "rte_openssl_pmd_private.h"
+#include "compat.h"
+
+
+static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
+ { /* MD5 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_MD5_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* MD5 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_MD5,
+ .block_size = 64,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA1 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 20,
+ .max = 20,
+ .increment = 0
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA1 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA1,
+ .block_size = 64,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 20,
+ .max = 20,
+ .increment = 0
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA224 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA224_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 28,
+ .max = 28,
+ .increment = 0
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA224 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA224,
+ .block_size = 64,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 28,
+ .max = 28,
+ .increment = 0
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA256 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 32,
+ .max = 32,
+ .increment = 0
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA256 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA256,
+ .block_size = 64,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 32,
+ .max = 32,
+ .increment = 0
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA384 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
+ .block_size = 128,
+ .key_size = {
+ .min = 1,
+ .max = 128,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 48,
+ .max = 48,
+ .increment = 0
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA384 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA384,
+ .block_size = 128,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 48,
+ .max = 48,
+ .increment = 0
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA512 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
+ .block_size = 128,
+ .key_size = {
+ .min = 1,
+ .max = 128,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 64,
+ .max = 64,
+ .increment = 0
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA512 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA512,
+ .block_size = 128,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 64,
+ .max = 64,
+ .increment = 0
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* AES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* AES CTR */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_CTR,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* AES GCM */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+ {.aead = {
+ .algo = RTE_CRYPTO_AEAD_AES_GCM,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .digest_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .aad_size = {
+ .min = 0,
+ .max = 65535,
+ .increment = 1
+ },
+ .iv_size = {
+ .min = 12,
+ .max = 16,
+ .increment = 4
+ },
+ }, }
+ }, }
+ },
+ { /* AES CCM */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+ {.aead = {
+ .algo = RTE_CRYPTO_AEAD_AES_CCM,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .digest_size = {
+ .min = 4,
+ .max = 16,
+ .increment = 2
+ },
+ .aad_size = {
+ .min = 0,
+ .max = 65535,
+ .increment = 1
+ },
+ .iv_size = {
+ .min = 7,
+ .max = 13,
+ .increment = 1
+ },
+ }, }
+ }, }
+ },
+ { /* AES GMAC (AUTH) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_AES_GMAC,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .digest_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 12,
+ .max = 16,
+ .increment = 4
+ }
+ }, }
+ }, }
+ },
+ { /* 3DES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_3DES_CBC,
+ .block_size = 8,
+ .key_size = {
+ .min = 8,
+ .max = 24,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* 3DES CTR */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_3DES_CTR,
+ .block_size = 8,
+ .key_size = {
+ .min = 16,
+ .max = 24,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* DES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_DES_CBC,
+ .block_size = 8,
+ .key_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* DES DOCSIS BPI */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_DES_DOCSISBPI,
+ .block_size = 8,
+ .key_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* RSA */
+ .op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,
+ {.asym = {
+ .xform_capa = {
+ .xform_type = RTE_CRYPTO_ASYM_XFORM_RSA,
+ .op_types = ((1 << RTE_CRYPTO_ASYM_OP_SIGN) |
+ (1 << RTE_CRYPTO_ASYM_OP_VERIFY) |
+ (1 << RTE_CRYPTO_ASYM_OP_ENCRYPT) |
+ (1 << RTE_CRYPTO_ASYM_OP_DECRYPT)),
+ {
+ .modlen = {
+ /* min length is based on openssl rsa keygen */
+ .min = 30,
+ /* value 0 symbolizes no limit on max length */
+ .max = 0,
+ .increment = 1
+ }, }
+ }
+ },
+ }
+ },
+ { /* modexp */
+ .op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,
+ {.asym = {
+ .xform_capa = {
+ .xform_type = RTE_CRYPTO_ASYM_XFORM_MODEX,
+ .op_types = 0,
+ {
+ .modlen = {
+ /* value 0 symbolizes no limit on min length */
+ .min = 0,
+ /* value 0 symbolizes no limit on max length */
+ .max = 0,
+ .increment = 1
+ }, }
+ }
+ },
+ }
+ },
+ { /* modinv */
+ .op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,
+ {.asym = {
+ .xform_capa = {
+ .xform_type = RTE_CRYPTO_ASYM_XFORM_MODINV,
+ .op_types = 0,
+ {
+ .modlen = {
+ /* value 0 symbolizes no limit on min length */
+ .min = 0,
+ /* value 0 symbolizes no limit on max length */
+ .max = 0,
+ .increment = 1
+ }, }
+ }
+ },
+ }
+ },
+ { /* dh */
+ .op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,
+ {.asym = {
+ .xform_capa = {
+ .xform_type = RTE_CRYPTO_ASYM_XFORM_DH,
+				.op_types =
+				((1 << RTE_CRYPTO_ASYM_OP_PRIVATE_KEY_GENERATE) |
+				(1 << RTE_CRYPTO_ASYM_OP_PUBLIC_KEY_GENERATE) |
+				(1 <<
+				RTE_CRYPTO_ASYM_OP_SHARED_SECRET_COMPUTE)),
+ {
+ .modlen = {
+ /* value 0 symbolizes no limit on min length */
+ .min = 0,
+ /* value 0 symbolizes no limit on max length */
+ .max = 0,
+ .increment = 1
+ }, }
+ }
+ },
+ }
+ },
+ { /* dsa */
+ .op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,
+ {.asym = {
+ .xform_capa = {
+ .xform_type = RTE_CRYPTO_ASYM_XFORM_DSA,
+ .op_types =
+ ((1<<RTE_CRYPTO_ASYM_OP_SIGN) |
+ (1 << RTE_CRYPTO_ASYM_OP_VERIFY)),
+ {
+ .modlen = {
+ /* value 0 symbolizes no limit on min length */
+ .min = 0,
+ /* value 0 symbolizes no limit on max length */
+ .max = 0,
+ .increment = 1
+ }, }
+ }
+ },
+ }
+ },
+
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
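Each entry above advertises an algorithm together with {min, max, increment} ranges for its key, digest, AAD and IV sizes; an increment of 0 means a single fixed size, and in the asymmetric entries a modlen of 0 means no limit. A minimal sketch of how an application could probe the AES GCM entry through the generic rte_cryptodev capability helpers before building a session (the device id and the concrete sizes checked are illustrative assumptions):

#include <rte_cryptodev.h>

static int
ex_openssl_supports_gcm(uint8_t dev_id)
{
	const struct rte_cryptodev_symmetric_capability *cap;
	struct rte_cryptodev_sym_capability_idx idx = {
		.type = RTE_CRYPTO_SYM_XFORM_AEAD,
		.algo.aead = RTE_CRYPTO_AEAD_AES_GCM,
	};

	cap = rte_cryptodev_sym_capability_get(dev_id, &idx);
	if (cap == NULL)
		return 0;	/* algorithm not advertised by this device */

	/* 32-byte key, 16-byte digest, 64-byte AAD, 12-byte IV */
	return rte_cryptodev_sym_capability_check_aead(cap, 32, 16, 64, 12) == 0;
}

The check walks exactly these ranges, so a 24-byte key would also pass (min 16, max 32, increment 8) while a 20-byte key would not.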
+
+
+/** Configure device */
+static int
+openssl_pmd_config(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused struct rte_cryptodev_config *config)
+{
+ return 0;
+}
+
+/** Start device */
+static int
+openssl_pmd_start(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+/** Stop device */
+static void
+openssl_pmd_stop(__rte_unused struct rte_cryptodev *dev)
+{
+}
+
+/** Close device */
+static int
+openssl_pmd_close(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+
+/** Get device statistics */
+static void
+openssl_pmd_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct openssl_qp *qp = dev->data->queue_pairs[qp_id];
+
+ stats->enqueued_count += qp->stats.enqueued_count;
+ stats->dequeued_count += qp->stats.dequeued_count;
+
+ stats->enqueue_err_count += qp->stats.enqueue_err_count;
+ stats->dequeue_err_count += qp->stats.dequeue_err_count;
+ }
+}
+
+/** Reset device statistics */
+static void
+openssl_pmd_stats_reset(struct rte_cryptodev *dev)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct openssl_qp *qp = dev->data->queue_pairs[qp_id];
+
+ memset(&qp->stats, 0, sizeof(qp->stats));
+ }
+}
+
+
+/** Get device info */
+static void
+openssl_pmd_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *dev_info)
+{
+ struct openssl_private *internals = dev->data->dev_private;
+
+ if (dev_info != NULL) {
+ dev_info->driver_id = dev->driver_id;
+ dev_info->feature_flags = dev->feature_flags;
+ dev_info->capabilities = openssl_pmd_capabilities;
+ dev_info->max_nb_queue_pairs = internals->max_nb_qpairs;
+ /* No limit of number of sessions */
+ dev_info->sym.max_nb_sessions = 0;
+ }
+}
+
+/** Release queue pair */
+static int
+openssl_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+ if (dev->data->queue_pairs[qp_id] != NULL) {
+ rte_free(dev->data->queue_pairs[qp_id]);
+ dev->data->queue_pairs[qp_id] = NULL;
+ }
+ return 0;
+}
+
+/** Set a unique name for the queue pair, based on its dev_id and qp_id */
+static int
+openssl_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
+ struct openssl_qp *qp)
+{
+ unsigned int n = snprintf(qp->name, sizeof(qp->name),
+ "openssl_pmd_%u_qp_%u",
+ dev->data->dev_id, qp->id);
+
+ if (n >= sizeof(qp->name))
+ return -1;
+
+ return 0;
+}
+
+
+/** Create a ring to place processed operations on */
+static struct rte_ring *
+openssl_pmd_qp_create_processed_ops_ring(struct openssl_qp *qp,
+ unsigned int ring_size, int socket_id)
+{
+ struct rte_ring *r;
+
+ r = rte_ring_lookup(qp->name);
+ if (r) {
+ if (rte_ring_get_size(r) >= ring_size) {
+ OPENSSL_LOG(INFO,
+ "Reusing existing ring %s for processed ops",
+ qp->name);
+ return r;
+ }
+
+ OPENSSL_LOG(ERR,
+ "Unable to reuse existing ring %s for processed ops",
+ qp->name);
+ return NULL;
+ }
+
+ return rte_ring_create(qp->name, ring_size, socket_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+}
+
+
+/** Setup a queue pair */
+static int
+openssl_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id, struct rte_mempool *session_pool)
+{
+ struct openssl_qp *qp = NULL;
+
+ /* Free memory prior to re-allocation if needed. */
+ if (dev->data->queue_pairs[qp_id] != NULL)
+ openssl_pmd_qp_release(dev, qp_id);
+
+ /* Allocate the queue pair data structure. */
+ qp = rte_zmalloc_socket("OPENSSL PMD Queue Pair", sizeof(*qp),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (qp == NULL)
+ return -ENOMEM;
+
+ qp->id = qp_id;
+ dev->data->queue_pairs[qp_id] = qp;
+
+ if (openssl_pmd_qp_set_unique_name(dev, qp))
+ goto qp_setup_cleanup;
+
+ qp->processed_ops = openssl_pmd_qp_create_processed_ops_ring(qp,
+ qp_conf->nb_descriptors, socket_id);
+ if (qp->processed_ops == NULL)
+ goto qp_setup_cleanup;
+
+ qp->sess_mp = session_pool;
+
+ memset(&qp->stats, 0, sizeof(qp->stats));
+
+ return 0;
+
+qp_setup_cleanup:
+ if (qp)
+ rte_free(qp);
+
+ return -1;
+}
+
+/** Return the number of allocated queue pairs */
+static uint32_t
+openssl_pmd_qp_count(struct rte_cryptodev *dev)
+{
+ return dev->data->nb_queue_pairs;
+}
+
+/** Returns the size of the symmetric session structure */
+static unsigned
+openssl_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+ return sizeof(struct openssl_session);
+}
+
+/** Returns the size of the asymmetric session structure */
+static unsigned
+openssl_pmd_asym_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+ return sizeof(struct openssl_asym_session);
+}
+
+/** Configure the session from a crypto xform chain */
+static int
+openssl_pmd_sym_session_configure(struct rte_cryptodev *dev __rte_unused,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mempool)
+{
+ void *sess_private_data;
+ int ret;
+
+ if (unlikely(sess == NULL)) {
+ OPENSSL_LOG(ERR, "invalid session struct");
+ return -EINVAL;
+ }
+
+ if (rte_mempool_get(mempool, &sess_private_data)) {
+ OPENSSL_LOG(ERR,
+ "Couldn't get object from session mempool");
+ return -ENOMEM;
+ }
+
+ ret = openssl_set_session_parameters(sess_private_data, xform);
+ if (ret != 0) {
+		OPENSSL_LOG(ERR, "failed to configure session parameters");
+
+ /* Return session to mempool */
+ rte_mempool_put(mempool, sess_private_data);
+ return ret;
+ }
+
+ set_sym_session_private_data(sess, dev->driver_id,
+ sess_private_data);
+
+ return 0;
+}
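The configure path above pulls a driver-private object from the supplied mempool, parses the xform chain via openssl_set_session_parameters() and attaches the result to the generic session under this driver id. A minimal sketch of the application-side sequence that ends up here, assuming a pre-created session mempool and the usual convention of carrying the IV just after the symmetric op (the helper name and the AES-CBC parameters are illustrative):

#include <rte_cryptodev.h>
#include <rte_crypto_sym.h>

/* IV carried immediately after the symmetric op (a common DPDK convention) */
#define EX_IV_OFFSET (sizeof(struct rte_crypto_op) + sizeof(struct rte_crypto_sym_op))

static struct rte_cryptodev_sym_session *
ex_create_aes_cbc_session(uint8_t dev_id, struct rte_mempool *sess_pool,
		uint8_t *key, uint16_t key_len)
{
	struct rte_crypto_sym_xform xform = {
		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
		.next = NULL,
		.cipher = {
			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
			.key = { .data = key, .length = key_len },
			.iv = { .offset = EX_IV_OFFSET, .length = 16 },
		},
	};
	struct rte_cryptodev_sym_session *sess;

	sess = rte_cryptodev_sym_session_create(sess_pool);
	if (sess == NULL)
		return NULL;

	/* dispatches into openssl_pmd_sym_session_configure() for this PMD */
	if (rte_cryptodev_sym_session_init(dev_id, sess, &xform, sess_pool) < 0) {
		rte_cryptodev_sym_session_free(sess);
		return NULL;
	}

	return sess;
}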
+
+static int openssl_set_asym_session_parameters(
+ struct openssl_asym_session *asym_session,
+ struct rte_crypto_asym_xform *xform)
+{
+ int ret = 0;
+
+ if ((xform->xform_type != RTE_CRYPTO_ASYM_XFORM_DH) &&
+ (xform->next != NULL)) {
+ OPENSSL_LOG(ERR, "chained xfrms are not supported on %s",
+ rte_crypto_asym_xform_strings[xform->xform_type]);
+ return -1;
+ }
+
+ switch (xform->xform_type) {
+ case RTE_CRYPTO_ASYM_XFORM_RSA:
+ {
+ BIGNUM *n = NULL;
+ BIGNUM *e = NULL;
+ BIGNUM *d = NULL;
+ BIGNUM *p = NULL, *q = NULL, *dmp1 = NULL;
+ BIGNUM *iqmp = NULL, *dmq1 = NULL;
+
+ /* copy xfrm data into rsa struct */
+ n = BN_bin2bn((const unsigned char *)xform->rsa.n.data,
+ xform->rsa.n.length, n);
+ e = BN_bin2bn((const unsigned char *)xform->rsa.e.data,
+ xform->rsa.e.length, e);
+
+ if (!n || !e)
+ goto err_rsa;
+
+ RSA *rsa = RSA_new();
+ if (rsa == NULL)
+ goto err_rsa;
+
+ if (xform->rsa.key_type == RTE_RSA_KEY_TYPE_EXP) {
+ d = BN_bin2bn(
+ (const unsigned char *)xform->rsa.d.data,
+ xform->rsa.d.length,
+ d);
+ if (!d) {
+ RSA_free(rsa);
+ goto err_rsa;
+ }
+ } else {
+ p = BN_bin2bn((const unsigned char *)
+ xform->rsa.qt.p.data,
+ xform->rsa.qt.p.length,
+ p);
+ q = BN_bin2bn((const unsigned char *)
+ xform->rsa.qt.q.data,
+ xform->rsa.qt.q.length,
+ q);
+ dmp1 = BN_bin2bn((const unsigned char *)
+ xform->rsa.qt.dP.data,
+ xform->rsa.qt.dP.length,
+ dmp1);
+ dmq1 = BN_bin2bn((const unsigned char *)
+ xform->rsa.qt.dQ.data,
+ xform->rsa.qt.dQ.length,
+ dmq1);
+ iqmp = BN_bin2bn((const unsigned char *)
+ xform->rsa.qt.qInv.data,
+ xform->rsa.qt.qInv.length,
+ iqmp);
+
+ if (!p || !q || !dmp1 || !dmq1 || !iqmp) {
+ RSA_free(rsa);
+ goto err_rsa;
+ }
+ set_rsa_params(rsa, p, q, ret);
+ if (ret) {
+ OPENSSL_LOG(ERR,
+ "failed to set rsa params\n");
+ RSA_free(rsa);
+ goto err_rsa;
+ }
+ set_rsa_crt_params(rsa, dmp1, dmq1, iqmp, ret);
+ if (ret) {
+ OPENSSL_LOG(ERR,
+ "failed to set crt params\n");
+ RSA_free(rsa);
+				/*
+				 * Set the already-populated params to NULL,
+				 * as they are freed by the call to RSA_free
+				 */
+ p = q = NULL;
+ goto err_rsa;
+ }
+ }
+
+ set_rsa_keys(rsa, n, e, d, ret);
+ if (ret) {
+ OPENSSL_LOG(ERR, "Failed to load rsa keys\n");
+ RSA_free(rsa);
+ return -1;
+ }
+ asym_session->u.r.rsa = rsa;
+ asym_session->xfrm_type = RTE_CRYPTO_ASYM_XFORM_RSA;
+ break;
+err_rsa:
+ if (n)
+ BN_free(n);
+ if (e)
+ BN_free(e);
+ if (d)
+ BN_free(d);
+ if (p)
+ BN_free(p);
+ if (q)
+ BN_free(q);
+ if (dmp1)
+ BN_free(dmp1);
+ if (dmq1)
+ BN_free(dmq1);
+ if (iqmp)
+ BN_free(iqmp);
+
+ return -1;
+ }
+ case RTE_CRYPTO_ASYM_XFORM_MODEX:
+ {
+ struct rte_crypto_modex_xform *xfrm = &(xform->modex);
+
+ BN_CTX *ctx = BN_CTX_new();
+ if (ctx == NULL) {
+ OPENSSL_LOG(ERR,
+ " failed to allocate resources\n");
+ return -1;
+ }
+ BN_CTX_start(ctx);
+ BIGNUM *mod = BN_CTX_get(ctx);
+ BIGNUM *exp = BN_CTX_get(ctx);
+ if (mod == NULL || exp == NULL) {
+ BN_CTX_end(ctx);
+ BN_CTX_free(ctx);
+ return -1;
+ }
+
+ mod = BN_bin2bn((const unsigned char *)
+ xfrm->modulus.data,
+ xfrm->modulus.length, mod);
+ exp = BN_bin2bn((const unsigned char *)
+ xfrm->exponent.data,
+ xfrm->exponent.length, exp);
+ asym_session->u.e.ctx = ctx;
+ asym_session->u.e.mod = mod;
+ asym_session->u.e.exp = exp;
+ asym_session->xfrm_type = RTE_CRYPTO_ASYM_XFORM_MODEX;
+ break;
+ }
+ case RTE_CRYPTO_ASYM_XFORM_MODINV:
+ {
+ struct rte_crypto_modinv_xform *xfrm = &(xform->modinv);
+
+ BN_CTX *ctx = BN_CTX_new();
+ if (ctx == NULL) {
+ OPENSSL_LOG(ERR,
+ " failed to allocate resources\n");
+ return -1;
+ }
+ BN_CTX_start(ctx);
+ BIGNUM *mod = BN_CTX_get(ctx);
+ if (mod == NULL) {
+ BN_CTX_end(ctx);
+ BN_CTX_free(ctx);
+ return -1;
+ }
+
+ mod = BN_bin2bn((const unsigned char *)
+ xfrm->modulus.data,
+ xfrm->modulus.length,
+ mod);
+ asym_session->u.m.ctx = ctx;
+ asym_session->u.m.modulus = mod;
+ asym_session->xfrm_type = RTE_CRYPTO_ASYM_XFORM_MODINV;
+ break;
+ }
+ case RTE_CRYPTO_ASYM_XFORM_DH:
+ {
+ BIGNUM *p = NULL;
+ BIGNUM *g = NULL;
+
+ p = BN_bin2bn((const unsigned char *)
+ xform->dh.p.data,
+ xform->dh.p.length,
+ p);
+ g = BN_bin2bn((const unsigned char *)
+ xform->dh.g.data,
+ xform->dh.g.length,
+ g);
+ if (!p || !g)
+ goto err_dh;
+
+ DH *dh = DH_new();
+ if (dh == NULL) {
+ OPENSSL_LOG(ERR,
+ "failed to allocate resources\n");
+ goto err_dh;
+ }
+ set_dh_params(dh, p, g, ret);
+ if (ret) {
+ DH_free(dh);
+ goto err_dh;
+ }
+
+		/*
+		 * Set up the xform for:
+		 * public key generation, or
+		 * private key generation, or
+		 * both public and private key generation
+		 */
+ asym_session->u.dh.key_op = (1 << xform->dh.type);
+
+ if (xform->dh.type ==
+ RTE_CRYPTO_ASYM_OP_PRIVATE_KEY_GENERATE) {
+ /* check if next is pubkey */
+ if ((xform->next != NULL) &&
+ (xform->next->xform_type ==
+ RTE_CRYPTO_ASYM_XFORM_DH) &&
+ (xform->next->dh.type ==
+ RTE_CRYPTO_ASYM_OP_PUBLIC_KEY_GENERATE)
+ ) {
+				/*
+				 * Set up op as public/private
+				 * key pair generation
+				 */
+ asym_session->u.dh.key_op |=
+ (1 <<
+ RTE_CRYPTO_ASYM_OP_PUBLIC_KEY_GENERATE);
+ }
+ }
+ asym_session->u.dh.dh_key = dh;
+ asym_session->xfrm_type = RTE_CRYPTO_ASYM_XFORM_DH;
+ break;
+
+err_dh:
+ OPENSSL_LOG(ERR, " failed to set dh params\n");
+ if (p)
+ BN_free(p);
+ if (g)
+ BN_free(g);
+ return -1;
+ }
+ case RTE_CRYPTO_ASYM_XFORM_DSA:
+ {
+ BIGNUM *p = NULL, *g = NULL;
+ BIGNUM *q = NULL, *priv_key = NULL;
+ BIGNUM *pub_key = BN_new();
+ BN_zero(pub_key);
+
+ p = BN_bin2bn((const unsigned char *)
+ xform->dsa.p.data,
+ xform->dsa.p.length,
+ p);
+
+ g = BN_bin2bn((const unsigned char *)
+ xform->dsa.g.data,
+ xform->dsa.g.length,
+ g);
+
+ q = BN_bin2bn((const unsigned char *)
+ xform->dsa.q.data,
+ xform->dsa.q.length,
+ q);
+ if (!p || !q || !g)
+ goto err_dsa;
+
+ priv_key = BN_bin2bn((const unsigned char *)
+ xform->dsa.x.data,
+ xform->dsa.x.length,
+ priv_key);
+ if (priv_key == NULL)
+ goto err_dsa;
+
+ DSA *dsa = DSA_new();
+ if (dsa == NULL) {
+ OPENSSL_LOG(ERR,
+ " failed to allocate resources\n");
+ goto err_dsa;
+ }
+
+ set_dsa_params(dsa, p, q, g, ret);
+ if (ret) {
+ DSA_free(dsa);
+			OPENSSL_LOG(ERR, "Failed to set dsa params\n");
+ goto err_dsa;
+ }
+
+		/*
+		 * OpenSSL 1.1.0 mandates that the public key cannot be
+		 * NULL in the very first call, so set a dummy public key
+		 * here. For consistency, the same approach is used for
+		 * both library versions.
+		 */
+		/* Set a dummy public key for the very first call */
+ set_dsa_keys(dsa, pub_key, priv_key, ret);
+ if (ret) {
+ DSA_free(dsa);
+ OPENSSL_LOG(ERR, "Failed to set keys\n");
+ return -1;
+ }
+ asym_session->u.s.dsa = dsa;
+ asym_session->xfrm_type = RTE_CRYPTO_ASYM_XFORM_DSA;
+ break;
+
+err_dsa:
+ if (p)
+ BN_free(p);
+ if (q)
+ BN_free(q);
+ if (g)
+ BN_free(g);
+ if (priv_key)
+ BN_free(priv_key);
+ if (pub_key)
+ BN_free(pub_key);
+ return -1;
+ }
+ default:
+ return -1;
+ }
+
+ return 0;
+}
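The MODEX branch above only converts the modulus and exponent into BIGNUMs and stores them in the private session; the modular exponentiation itself runs later on the enqueue path. A minimal sketch of the xform an application would fill in to reach that branch, with the operands passed as big-endian byte strings exactly as the BN_bin2bn() calls expect (the helper name and buffers are illustrative):

#include <string.h>
#include <rte_crypto_asym.h>

static void
ex_build_modex_xform(struct rte_crypto_asym_xform *xform,
		uint8_t *modulus, size_t mod_len,
		uint8_t *exponent, size_t exp_len)
{
	memset(xform, 0, sizeof(*xform));
	xform->next = NULL;
	xform->xform_type = RTE_CRYPTO_ASYM_XFORM_MODEX;

	/* big-endian byte strings, handed to BN_bin2bn() by the PMD */
	xform->modex.modulus.data = modulus;
	xform->modex.modulus.length = mod_len;
	xform->modex.exponent.data = exponent;
	xform->modex.exponent.length = exp_len;
}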
+
+/** Configure the session from a crypto xform chain */
+static int
+openssl_pmd_asym_session_configure(struct rte_cryptodev *dev __rte_unused,
+ struct rte_crypto_asym_xform *xform,
+ struct rte_cryptodev_asym_session *sess,
+ struct rte_mempool *mempool)
+{
+ void *asym_sess_private_data;
+ int ret;
+
+ if (unlikely(sess == NULL)) {
+ OPENSSL_LOG(ERR, "invalid asymmetric session struct");
+ return -EINVAL;
+ }
+
+ if (rte_mempool_get(mempool, &asym_sess_private_data)) {
+ CDEV_LOG_ERR(
+ "Couldn't get object from session mempool");
+ return -ENOMEM;
+ }
+
+ ret = openssl_set_asym_session_parameters(asym_sess_private_data,
+ xform);
+ if (ret != 0) {
+		OPENSSL_LOG(ERR, "failed to configure session parameters");
+
+ /* Return session to mempool */
+ rte_mempool_put(mempool, asym_sess_private_data);
+ return ret;
+ }
+
+ set_asym_session_private_data(sess, dev->driver_id,
+ asym_sess_private_data);
+
+ return 0;
+}
+
+/** Clear the memory of session so it doesn't leave key material behind */
+static void
+openssl_pmd_sym_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
+{
+ uint8_t index = dev->driver_id;
+ void *sess_priv = get_sym_session_private_data(sess, index);
+
+ /* Zero out the whole structure */
+ if (sess_priv) {
+ openssl_reset_session(sess_priv);
+ memset(sess_priv, 0, sizeof(struct openssl_session));
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+ set_sym_session_private_data(sess, index, NULL);
+ rte_mempool_put(sess_mp, sess_priv);
+ }
+}
+
+static void openssl_reset_asym_session(struct openssl_asym_session *sess)
+{
+ switch (sess->xfrm_type) {
+ case RTE_CRYPTO_ASYM_XFORM_RSA:
+ if (sess->u.r.rsa)
+ RSA_free(sess->u.r.rsa);
+ break;
+ case RTE_CRYPTO_ASYM_XFORM_MODEX:
+ if (sess->u.e.ctx) {
+ BN_CTX_end(sess->u.e.ctx);
+ BN_CTX_free(sess->u.e.ctx);
+ }
+ break;
+ case RTE_CRYPTO_ASYM_XFORM_MODINV:
+ if (sess->u.m.ctx) {
+ BN_CTX_end(sess->u.m.ctx);
+ BN_CTX_free(sess->u.m.ctx);
+ }
+ break;
+ case RTE_CRYPTO_ASYM_XFORM_DH:
+ if (sess->u.dh.dh_key)
+ DH_free(sess->u.dh.dh_key);
+ break;
+ case RTE_CRYPTO_ASYM_XFORM_DSA:
+ if (sess->u.s.dsa)
+ DSA_free(sess->u.s.dsa);
+ break;
+ default:
+ break;
+ }
+}
+
+/** Clear the memory of asymmetric session
+ * so it doesn't leave key material behind
+ */
+static void
+openssl_pmd_asym_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_asym_session *sess)
+{
+ uint8_t index = dev->driver_id;
+ void *sess_priv = get_asym_session_private_data(sess, index);
+
+ /* Zero out the whole structure */
+ if (sess_priv) {
+ openssl_reset_asym_session(sess_priv);
+ memset(sess_priv, 0, sizeof(struct openssl_asym_session));
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+ set_asym_session_private_data(sess, index, NULL);
+ rte_mempool_put(sess_mp, sess_priv);
+ }
+}
+
+struct rte_cryptodev_ops openssl_pmd_ops = {
+ .dev_configure = openssl_pmd_config,
+ .dev_start = openssl_pmd_start,
+ .dev_stop = openssl_pmd_stop,
+ .dev_close = openssl_pmd_close,
+
+ .stats_get = openssl_pmd_stats_get,
+ .stats_reset = openssl_pmd_stats_reset,
+
+ .dev_infos_get = openssl_pmd_info_get,
+
+ .queue_pair_setup = openssl_pmd_qp_setup,
+ .queue_pair_release = openssl_pmd_qp_release,
+ .queue_pair_count = openssl_pmd_qp_count,
+
+ .sym_session_get_size = openssl_pmd_sym_session_get_size,
+ .asym_session_get_size = openssl_pmd_asym_session_get_size,
+ .sym_session_configure = openssl_pmd_sym_session_configure,
+ .asym_session_configure = openssl_pmd_asym_session_configure,
+ .sym_session_clear = openssl_pmd_sym_session_clear,
+ .asym_session_clear = openssl_pmd_asym_session_clear
+};
+
+struct rte_cryptodev_ops *rte_openssl_pmd_ops = &openssl_pmd_ops;
diff --git a/src/spdk/dpdk/drivers/crypto/openssl/rte_openssl_pmd_private.h b/src/spdk/dpdk/drivers/crypto/openssl/rte_openssl_pmd_private.h
new file mode 100644
index 00000000..a8f2c848
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/openssl/rte_openssl_pmd_private.h
@@ -0,0 +1,185 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Intel Corporation
+ */
+
+#ifndef _OPENSSL_PMD_PRIVATE_H_
+#define _OPENSSL_PMD_PRIVATE_H_
+
+#include <openssl/evp.h>
+#include <openssl/hmac.h>
+#include <openssl/des.h>
+#include <openssl/rsa.h>
+#include <openssl/dh.h>
+#include <openssl/dsa.h>
+
+#define CRYPTODEV_NAME_OPENSSL_PMD crypto_openssl
+/**< Open SSL Crypto PMD device name */
+
+/** OPENSSL PMD LOGTYPE DRIVER */
+int openssl_logtype_driver;
+#define OPENSSL_LOG(level, fmt, ...) \
+ rte_log(RTE_LOG_ ## level, openssl_logtype_driver, \
+ "%s() line %u: " fmt "\n", __func__, __LINE__, \
+ ## __VA_ARGS__)
+
+/* Maximum length for digest (SHA-512 needs 64 bytes) */
+#define DIGEST_LENGTH_MAX 64
+
+/** OPENSSL operation order mode enumerator */
+enum openssl_chain_order {
+ OPENSSL_CHAIN_ONLY_CIPHER,
+ OPENSSL_CHAIN_ONLY_AUTH,
+ OPENSSL_CHAIN_CIPHER_BPI,
+ OPENSSL_CHAIN_CIPHER_AUTH,
+ OPENSSL_CHAIN_AUTH_CIPHER,
+ OPENSSL_CHAIN_COMBINED,
+ OPENSSL_CHAIN_NOT_SUPPORTED
+};
+
+/** OPENSSL cipher mode enumerator */
+enum openssl_cipher_mode {
+ OPENSSL_CIPHER_LIB,
+ OPENSSL_CIPHER_DES3CTR,
+};
+
+/** OPENSSL auth mode enumerator */
+enum openssl_auth_mode {
+ OPENSSL_AUTH_AS_AUTH,
+ OPENSSL_AUTH_AS_HMAC,
+};
+
+/** private data structure for each OPENSSL crypto device */
+struct openssl_private {
+ unsigned int max_nb_qpairs;
+ /**< Max number of queue pairs */
+};
+
+/** OPENSSL crypto queue pair */
+struct openssl_qp {
+ uint16_t id;
+ /**< Queue Pair Identifier */
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ /**< Unique Queue Pair Name */
+ struct rte_ring *processed_ops;
+ /**< Ring for placing process packets */
+ struct rte_mempool *sess_mp;
+ /**< Session Mempool */
+ struct rte_cryptodev_stats stats;
+ /**< Queue pair statistics */
+ uint8_t temp_digest[DIGEST_LENGTH_MAX];
+ /**< Buffer used to store the digest generated
+ * by the driver when verifying a digest provided
+ * by the user (using authentication verify operation)
+ */
+} __rte_cache_aligned;
+
+/** OPENSSL crypto private session structure */
+struct openssl_session {
+ enum openssl_chain_order chain_order;
+ /**< chain order mode */
+
+ struct {
+ uint16_t length;
+ uint16_t offset;
+ } iv;
+ /**< IV parameters */
+
+ enum rte_crypto_aead_algorithm aead_algo;
+ /**< AEAD algorithm */
+
+ /** Cipher Parameters */
+ struct {
+ enum rte_crypto_cipher_operation direction;
+ /**< cipher operation direction */
+ enum openssl_cipher_mode mode;
+ /**< cipher operation mode */
+ enum rte_crypto_cipher_algorithm algo;
+ /**< cipher algorithm */
+
+ struct {
+ uint8_t data[32];
+ /**< key data */
+ size_t length;
+ /**< key length in bytes */
+ } key;
+
+ const EVP_CIPHER *evp_algo;
+ /**< pointer to EVP algorithm function */
+ EVP_CIPHER_CTX *ctx;
+ /**< pointer to EVP context structure */
+ EVP_CIPHER_CTX *bpi_ctx;
+ } cipher;
+
+ /** Authentication Parameters */
+ struct {
+ enum rte_crypto_auth_operation operation;
+ /**< auth operation generate or verify */
+ enum openssl_auth_mode mode;
+ /**< auth operation mode */
+ enum rte_crypto_auth_algorithm algo;
+ /**< cipher algorithm */
+
+ union {
+ struct {
+ const EVP_MD *evp_algo;
+ /**< pointer to EVP algorithm function */
+ EVP_MD_CTX *ctx;
+ /**< pointer to EVP context structure */
+ } auth;
+
+ struct {
+ EVP_PKEY *pkey;
+ /**< pointer to EVP key */
+ const EVP_MD *evp_algo;
+ /**< pointer to EVP algorithm function */
+ HMAC_CTX *ctx;
+ /**< pointer to EVP context structure */
+ } hmac;
+ };
+
+ uint16_t aad_length;
+ /**< AAD length */
+ uint16_t digest_length;
+ /**< digest length */
+ } auth;
+
+} __rte_cache_aligned;
+
+/** OPENSSL crypto private asymmetric session structure */
+struct openssl_asym_session {
+ enum rte_crypto_asym_xform_type xfrm_type;
+ union {
+ struct rsa {
+ RSA *rsa;
+ } r;
+ struct exp {
+ BIGNUM *exp;
+ BIGNUM *mod;
+ BN_CTX *ctx;
+ } e;
+ struct mod {
+ BIGNUM *modulus;
+ BN_CTX *ctx;
+ } m;
+ struct dh {
+ DH *dh_key;
+ uint32_t key_op;
+ } dh;
+ struct {
+ DSA *dsa;
+ } s;
+ } u;
+} __rte_cache_aligned;
+/** Set and validate OPENSSL crypto session parameters */
+extern int
+openssl_set_session_parameters(struct openssl_session *sess,
+ const struct rte_crypto_sym_xform *xform);
+
+/** Reset OPENSSL crypto session parameters */
+extern void
+openssl_reset_session(struct openssl_session *sess);
+
+/** device specific operations function pointer structure */
+extern struct rte_cryptodev_ops *rte_openssl_pmd_ops;
+
+#endif /* _OPENSSL_PMD_PRIVATE_H_ */
diff --git a/src/spdk/dpdk/drivers/crypto/openssl/rte_pmd_openssl_version.map b/src/spdk/dpdk/drivers/crypto/openssl/rte_pmd_openssl_version.map
new file mode 100644
index 00000000..cc5829e3
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/openssl/rte_pmd_openssl_version.map
@@ -0,0 +1,3 @@
+DPDK_16.11 {
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/crypto/qat/README b/src/spdk/dpdk/drivers/crypto/qat/README
new file mode 100644
index 00000000..444ae605
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/qat/README
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2015-2018 Intel Corporation
+
+The Makefile for the crypto QAT PMD is in the common/qat directory.
+The build for the QAT driver is done from there, as only one library is built for
+the whole QAT PCI device and that library includes all the services
+(crypto, compression) which are enabled on the device.
diff --git a/src/spdk/dpdk/drivers/crypto/qat/meson.build b/src/spdk/dpdk/drivers/crypto/qat/meson.build
new file mode 100644
index 00000000..9cc98d2c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/qat/meson.build
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017-2018 Intel Corporation
+
+# This does not build the QAT driver; that is done in the compression
+# driver, which comes later. Here we just add our source files to the list.
+build = false
+dep = dependency('libcrypto', required: false)
+qat_includes += include_directories('.')
+qat_deps += 'cryptodev'
+if dep.found()
+	# Add our source files to the list
+ qat_sources += files('qat_sym_pmd.c',
+ 'qat_sym.c',
+ 'qat_sym_session.c')
+ qat_ext_deps += dep
+ pkgconfig_extra_libs += '-lcrypto'
+ qat_cflags += '-DBUILD_QAT_SYM'
+endif
diff --git a/src/spdk/dpdk/drivers/crypto/qat/qat_sym.c b/src/spdk/dpdk/drivers/crypto/qat/qat_sym.c
new file mode 100644
index 00000000..10cdf2e1
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/qat/qat_sym.c
@@ -0,0 +1,569 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+
+#include <openssl/evp.h>
+
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_crypto_sym.h>
+#include <rte_bus_pci.h>
+#include <rte_byteorder.h>
+
+#include "qat_sym.h"
+
+/** Decrypt a single partial block
+ * Depends on openssl libcrypto
+ * Uses ECB+XOR to do CFB encryption, same result, more performant
+ */
+static inline int
+bpi_cipher_decrypt(uint8_t *src, uint8_t *dst,
+ uint8_t *iv, int ivlen, int srclen,
+ void *bpi_ctx)
+{
+ EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
+ int encrypted_ivlen;
+ uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
+ uint8_t *encr = encrypted_iv;
+
+ /* ECB method: encrypt (not decrypt!) the IV, then XOR with plaintext */
+ if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
+ <= 0)
+ goto cipher_decrypt_err;
+
+ for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
+ *dst = *src ^ *encr;
+
+ return 0;
+
+cipher_decrypt_err:
+ QAT_DP_LOG(ERR, "libcrypto ECB cipher decrypt for BPI IV failed");
+ return -EINVAL;
+}
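For a partial (runt) block this is exactly CFB: the keystream is the block cipher's encryption of the IV in both directions, which is why EVP_EncryptUpdate() is used even on the decrypt path and why one ECB encryption of the IV followed by a byte-wise XOR reproduces the CFB result:

	ciphertext = plaintext  XOR E_K(IV)	(CFB, first/partial block, encrypt)
	plaintext  = ciphertext XOR E_K(IV)	(CFB, first/partial block, decrypt)

Because fewer than one full block is processed, no feedback of previous ciphertext into a further keystream block is needed.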
+
+
+static inline uint32_t
+qat_bpicipher_preprocess(struct qat_sym_session *ctx,
+ struct rte_crypto_op *op)
+{
+ int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ uint8_t last_block_len = block_len > 0 ?
+ sym_op->cipher.data.length % block_len : 0;
+
+ if (last_block_len &&
+ ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
+
+ /* Decrypt last block */
+ uint8_t *last_block, *dst, *iv;
+ uint32_t last_block_offset = sym_op->cipher.data.offset +
+ sym_op->cipher.data.length - last_block_len;
+ last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
+ uint8_t *, last_block_offset);
+
+ if (unlikely(sym_op->m_dst != NULL))
+ /* out-of-place operation (OOP) */
+ dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
+ uint8_t *, last_block_offset);
+ else
+ dst = last_block;
+
+ if (last_block_len < sym_op->cipher.data.length)
+ /* use previous block ciphertext as IV */
+ iv = last_block - block_len;
+ else
+ /* runt block, i.e. less than one full block */
+ iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+ ctx->cipher_iv.offset);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src before pre-process:",
+ last_block, last_block_len);
+ if (sym_op->m_dst != NULL)
+ QAT_DP_HEXDUMP_LOG(DEBUG, "BPI:dst before pre-process:",
+ dst, last_block_len);
+#endif
+ bpi_cipher_decrypt(last_block, dst, iv, block_len,
+ last_block_len, ctx->bpi_ctx);
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src after pre-process:",
+ last_block, last_block_len);
+ if (sym_op->m_dst != NULL)
+ QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: dst after pre-process:",
+ dst, last_block_len);
+#endif
+ }
+
+ return sym_op->cipher.data.length - last_block_len;
+}
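A worked example of the split this function performs, assuming DOCSIS with DES and its 8-byte block size: for a 100-byte cipher region, last_block_len = 100 % 8 = 4, so on the decrypt path the trailing 4 bytes are decrypted here in software (using the preceding ciphertext block as the CFB IV, or the operation IV if the whole region is a runt), and the function returns 100 - 4 = 96; only those complete-block bytes are then handed to the hardware as cipher_len.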
+
+static inline void
+set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
+ struct icp_qat_fw_la_cipher_req_params *cipher_param,
+ struct rte_crypto_op *op,
+ struct icp_qat_fw_la_bulk_req *qat_req)
+{
+ /* copy IV into request if it fits */
+ if (iv_length <= sizeof(cipher_param->u.cipher_IV_array)) {
+ rte_memcpy(cipher_param->u.cipher_IV_array,
+ rte_crypto_op_ctod_offset(op, uint8_t *,
+ iv_offset),
+ iv_length);
+ } else {
+ ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
+ qat_req->comn_hdr.serv_specif_flags,
+ ICP_QAT_FW_CIPH_IV_64BIT_PTR);
+ cipher_param->u.s.cipher_IV_ptr =
+ rte_crypto_op_ctophys_offset(op,
+ iv_offset);
+ }
+}
+
+/** Setting the IV for CCM is a special case: the 0th byte is set to q-1,
+ * where q is the number of bytes used to pad the nonce out to the 16-byte block
+ */
+static inline void
+set_cipher_iv_ccm(uint16_t iv_length, uint16_t iv_offset,
+ struct icp_qat_fw_la_cipher_req_params *cipher_param,
+ struct rte_crypto_op *op, uint8_t q, uint8_t aad_len_field_sz)
+{
+ rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array) +
+ ICP_QAT_HW_CCM_NONCE_OFFSET,
+ rte_crypto_op_ctod_offset(op, uint8_t *,
+ iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
+ iv_length);
+ *(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
+ q - ICP_QAT_HW_CCM_NONCE_OFFSET;
+
+ if (aad_len_field_sz)
+ rte_memcpy(&op->sym->aead.aad.data[ICP_QAT_HW_CCM_NONCE_OFFSET],
+ rte_crypto_op_ctod_offset(op, uint8_t *,
+ iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
+ iv_length);
+}
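The arithmetic matches the CCM counter-block layout of NIST SP 800-38C: one flags byte, an n-byte nonce and a q-byte length field share the 16-byte block, and the flags byte carries q-1. Assuming the usual values behind the constants used above (a nonce offset of 1 and ICP_QAT_HW_CCM_NQ_CONST equal to 15), a 12-byte nonce gives q = 15 - 12 = 3, so byte 0 of cipher_IV_array is set to 2 and the nonce is copied in starting at byte 1; when an AAD length field is present the same nonce is mirrored into the B0 area of the AAD buffer.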
+
+int
+qat_sym_build_request(void *in_op, uint8_t *out_msg,
+ void *op_cookie, enum qat_device_gen qat_dev_gen)
+{
+ int ret = 0;
+ struct qat_sym_session *ctx;
+ struct icp_qat_fw_la_cipher_req_params *cipher_param;
+ struct icp_qat_fw_la_auth_req_params *auth_param;
+ register struct icp_qat_fw_la_bulk_req *qat_req;
+ uint8_t do_auth = 0, do_cipher = 0, do_aead = 0;
+ uint32_t cipher_len = 0, cipher_ofs = 0;
+ uint32_t auth_len = 0, auth_ofs = 0;
+ uint32_t min_ofs = 0;
+ uint64_t src_buf_start = 0, dst_buf_start = 0;
+ uint8_t do_sgl = 0;
+ struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
+ struct qat_sym_op_cookie *cookie =
+ (struct qat_sym_op_cookie *)op_cookie;
+
+ if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
+ QAT_DP_LOG(ERR, "QAT PMD only supports symmetric crypto "
+ "operation requests, op (%p) is not a "
+ "symmetric operation.", op);
+ return -EINVAL;
+ }
+
+ if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
+ QAT_DP_LOG(ERR, "QAT PMD only supports session oriented"
+ " requests, op (%p) is sessionless.", op);
+ return -EINVAL;
+ }
+
+ ctx = (struct qat_sym_session *)get_sym_session_private_data(
+ op->sym->session, cryptodev_qat_driver_id);
+
+ if (unlikely(ctx == NULL)) {
+ QAT_DP_LOG(ERR, "Session was not created for this device");
+ return -EINVAL;
+ }
+
+ if (unlikely(ctx->min_qat_dev_gen > qat_dev_gen)) {
+ QAT_DP_LOG(ERR, "Session alg not supported on this device gen");
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+ return -EINVAL;
+ }
+
+ qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+ rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
+ qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
+ cipher_param = (void *)&qat_req->serv_specif_rqpars;
+ auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
+
+ if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
+ ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
+ /* AES-GCM or AES-CCM */
+ if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+ ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
+ (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
+ && ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
+ && ctx->qat_hash_alg ==
+ ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
+ do_aead = 1;
+ } else {
+ do_auth = 1;
+ do_cipher = 1;
+ }
+ } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
+ do_auth = 1;
+ do_cipher = 0;
+ } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
+ do_auth = 0;
+ do_cipher = 1;
+ }
+
+ if (do_cipher) {
+
+ if (ctx->qat_cipher_alg ==
+ ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+ ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI ||
+ ctx->qat_cipher_alg ==
+ ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+
+ if (unlikely(
+ (op->sym->cipher.data.length % BYTE_LENGTH != 0) ||
+ (op->sym->cipher.data.offset % BYTE_LENGTH != 0))) {
+ QAT_DP_LOG(ERR,
+ "SNOW3G/KASUMI/ZUC in QAT PMD only supports byte aligned values");
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ return -EINVAL;
+ }
+ cipher_len = op->sym->cipher.data.length >> 3;
+ cipher_ofs = op->sym->cipher.data.offset >> 3;
+
+ } else if (ctx->bpi_ctx) {
+			/* DOCSIS - only send complete blocks to the device.
+			 * Any partial block is processed using CFB mode.
+			 * Even with 0 complete blocks, still send to the device
+			 * so it reaches the rx queue for post-processing and dequeuing
+			 */
+ cipher_len = qat_bpicipher_preprocess(ctx, op);
+ cipher_ofs = op->sym->cipher.data.offset;
+ } else {
+ cipher_len = op->sym->cipher.data.length;
+ cipher_ofs = op->sym->cipher.data.offset;
+ }
+
+ set_cipher_iv(ctx->cipher_iv.length, ctx->cipher_iv.offset,
+ cipher_param, op, qat_req);
+ min_ofs = cipher_ofs;
+ }
+
+ if (do_auth) {
+
+ if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 ||
+ ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 ||
+ ctx->qat_hash_alg ==
+ ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3) {
+ if (unlikely(
+ (op->sym->auth.data.offset % BYTE_LENGTH != 0) ||
+ (op->sym->auth.data.length % BYTE_LENGTH != 0))) {
+ QAT_DP_LOG(ERR,
+ "For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ return -EINVAL;
+ }
+ auth_ofs = op->sym->auth.data.offset >> 3;
+ auth_len = op->sym->auth.data.length >> 3;
+
+ auth_param->u1.aad_adr =
+ rte_crypto_op_ctophys_offset(op,
+ ctx->auth_iv.offset);
+
+ } else if (ctx->qat_hash_alg ==
+ ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+ ctx->qat_hash_alg ==
+ ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
+ /* AES-GMAC */
+ set_cipher_iv(ctx->auth_iv.length,
+ ctx->auth_iv.offset,
+ cipher_param, op, qat_req);
+ auth_ofs = op->sym->auth.data.offset;
+ auth_len = op->sym->auth.data.length;
+
+ auth_param->u1.aad_adr = 0;
+ auth_param->u2.aad_sz = 0;
+
+ /*
+ * If len(iv)==12B fw computes J0
+ */
+ if (ctx->auth_iv.length == 12) {
+ ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
+ qat_req->comn_hdr.serv_specif_flags,
+ ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+
+ }
+ } else {
+ auth_ofs = op->sym->auth.data.offset;
+ auth_len = op->sym->auth.data.length;
+
+ }
+ min_ofs = auth_ofs;
+
+ if (likely(ctx->qat_hash_alg != ICP_QAT_HW_AUTH_ALGO_NULL))
+ auth_param->auth_res_addr =
+ op->sym->auth.digest.phys_addr;
+
+ }
+
+ if (do_aead) {
+		/*
+		 * This address may be used as the AAD physical pointer;
+		 * for CCM it may be redirected to the IV offset in the op
+		 */
+ rte_iova_t aad_phys_addr_aead = op->sym->aead.aad.phys_addr;
+ if (ctx->qat_hash_alg ==
+ ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+ ctx->qat_hash_alg ==
+ ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
+ /*
+ * If len(iv)==12B fw computes J0
+ */
+ if (ctx->cipher_iv.length == 12) {
+ ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
+ qat_req->comn_hdr.serv_specif_flags,
+ ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+ }
+ set_cipher_iv(ctx->cipher_iv.length,
+ ctx->cipher_iv.offset,
+ cipher_param, op, qat_req);
+
+ } else if (ctx->qat_hash_alg ==
+ ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC) {
+
+			/* In case of AES-CCM this may point to user-selected
+			 * memory or to the IV offset in the crypto_op
+			 */
+ uint8_t *aad_data = op->sym->aead.aad.data;
+			/* This is the true AAD length; it does not include
+			 * the 18 bytes of preceding data
+			 */
+ uint8_t aad_ccm_real_len = 0;
+ uint8_t aad_len_field_sz = 0;
+ uint32_t msg_len_be =
+ rte_bswap32(op->sym->aead.data.length);
+
+ if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
+ aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
+ aad_ccm_real_len = ctx->aad_len -
+ ICP_QAT_HW_CCM_AAD_B0_LEN -
+ ICP_QAT_HW_CCM_AAD_LEN_INFO;
+ } else {
+				/*
+				 * aad_len is not greater than 18, so there is no
+				 * actual AAD data; use the IV after the op for B0
+				 */
+ aad_data = rte_crypto_op_ctod_offset(op,
+ uint8_t *,
+ ctx->cipher_iv.offset);
+ aad_phys_addr_aead =
+ rte_crypto_op_ctophys_offset(op,
+ ctx->cipher_iv.offset);
+ }
+
+ uint8_t q = ICP_QAT_HW_CCM_NQ_CONST -
+ ctx->cipher_iv.length;
+
+ aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(
+ aad_len_field_sz,
+ ctx->digest_length, q);
+
+ if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
+ memcpy(aad_data + ctx->cipher_iv.length +
+ ICP_QAT_HW_CCM_NONCE_OFFSET +
+ (q - ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
+ (uint8_t *)&msg_len_be,
+ ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
+ } else {
+ memcpy(aad_data + ctx->cipher_iv.length +
+ ICP_QAT_HW_CCM_NONCE_OFFSET,
+ (uint8_t *)&msg_len_be
+ + (ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
+ - q), q);
+ }
+
+ if (aad_len_field_sz > 0) {
+ *(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN]
+ = rte_bswap16(aad_ccm_real_len);
+
+ if ((aad_ccm_real_len + aad_len_field_sz)
+ % ICP_QAT_HW_CCM_AAD_B0_LEN) {
+ uint8_t pad_len = 0;
+ uint8_t pad_idx = 0;
+
+ pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
+ ((aad_ccm_real_len + aad_len_field_sz) %
+ ICP_QAT_HW_CCM_AAD_B0_LEN);
+ pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
+ aad_ccm_real_len + aad_len_field_sz;
+ memset(&aad_data[pad_idx],
+ 0, pad_len);
+ }
+
+ }
+
+ set_cipher_iv_ccm(ctx->cipher_iv.length,
+ ctx->cipher_iv.offset,
+ cipher_param, op, q,
+ aad_len_field_sz);
+
+ }
+
+ cipher_len = op->sym->aead.data.length;
+ cipher_ofs = op->sym->aead.data.offset;
+ auth_len = op->sym->aead.data.length;
+ auth_ofs = op->sym->aead.data.offset;
+
+ auth_param->u1.aad_adr = aad_phys_addr_aead;
+ auth_param->auth_res_addr = op->sym->aead.digest.phys_addr;
+ min_ofs = op->sym->aead.data.offset;
+ }
+
+ if (op->sym->m_src->next || (op->sym->m_dst && op->sym->m_dst->next))
+ do_sgl = 1;
+
+ /* adjust for chain case */
+ if (do_cipher && do_auth)
+ min_ofs = cipher_ofs < auth_ofs ? cipher_ofs : auth_ofs;
+
+ if (unlikely(min_ofs >= rte_pktmbuf_data_len(op->sym->m_src) && do_sgl))
+ min_ofs = 0;
+
+ if (unlikely(op->sym->m_dst != NULL)) {
+ /* Out-of-place operation (OOP)
+ * Don't align DMA start. DMA the minimum data-set
+ * so as not to overwrite data in dest buffer
+ */
+ src_buf_start =
+ rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs);
+ dst_buf_start =
+ rte_pktmbuf_iova_offset(op->sym->m_dst, min_ofs);
+
+ } else {
+ /* In-place operation
+ * Start DMA at nearest aligned address below min_ofs
+ */
+ src_buf_start =
+ rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs)
+ & QAT_64_BTYE_ALIGN_MASK;
+
+ if (unlikely((rte_pktmbuf_iova(op->sym->m_src) -
+ rte_pktmbuf_headroom(op->sym->m_src))
+ > src_buf_start)) {
+ /* alignment has pushed addr ahead of start of mbuf
+ * so revert and take the performance hit
+ */
+ src_buf_start =
+ rte_pktmbuf_iova_offset(op->sym->m_src,
+ min_ofs);
+ }
+ dst_buf_start = src_buf_start;
+ }
+
+ if (do_cipher || do_aead) {
+ cipher_param->cipher_offset =
+ (uint32_t)rte_pktmbuf_iova_offset(
+ op->sym->m_src, cipher_ofs) - src_buf_start;
+ cipher_param->cipher_length = cipher_len;
+ } else {
+ cipher_param->cipher_offset = 0;
+ cipher_param->cipher_length = 0;
+ }
+
+ if (do_auth || do_aead) {
+ auth_param->auth_off = (uint32_t)rte_pktmbuf_iova_offset(
+ op->sym->m_src, auth_ofs) - src_buf_start;
+ auth_param->auth_len = auth_len;
+ } else {
+ auth_param->auth_off = 0;
+ auth_param->auth_len = 0;
+ }
+
+ qat_req->comn_mid.dst_length =
+ qat_req->comn_mid.src_length =
+ (cipher_param->cipher_offset + cipher_param->cipher_length)
+ > (auth_param->auth_off + auth_param->auth_len) ?
+ (cipher_param->cipher_offset + cipher_param->cipher_length)
+ : (auth_param->auth_off + auth_param->auth_len);
+
+ if (do_sgl) {
+
+ ICP_QAT_FW_COMN_PTR_TYPE_SET(qat_req->comn_hdr.comn_req_flags,
+ QAT_COMN_PTR_TYPE_SGL);
+ ret = qat_sgl_fill_array(op->sym->m_src,
+ (int64_t)(src_buf_start - rte_pktmbuf_iova(op->sym->m_src)),
+ &cookie->qat_sgl_src,
+ qat_req->comn_mid.src_length,
+ QAT_SYM_SGL_MAX_NUMBER);
+
+ if (unlikely(ret)) {
+ QAT_DP_LOG(ERR, "QAT PMD Cannot fill sgl array");
+ return ret;
+ }
+
+ if (likely(op->sym->m_dst == NULL))
+ qat_req->comn_mid.dest_data_addr =
+ qat_req->comn_mid.src_data_addr =
+ cookie->qat_sgl_src_phys_addr;
+ else {
+ ret = qat_sgl_fill_array(op->sym->m_dst,
+ (int64_t)(dst_buf_start -
+ rte_pktmbuf_iova(op->sym->m_dst)),
+ &cookie->qat_sgl_dst,
+ qat_req->comn_mid.dst_length,
+ QAT_SYM_SGL_MAX_NUMBER);
+
+ if (unlikely(ret)) {
+ QAT_DP_LOG(ERR, "QAT PMD can't fill sgl array");
+ return ret;
+ }
+
+ qat_req->comn_mid.src_data_addr =
+ cookie->qat_sgl_src_phys_addr;
+ qat_req->comn_mid.dest_data_addr =
+ cookie->qat_sgl_dst_phys_addr;
+ }
+ } else {
+ qat_req->comn_mid.src_data_addr = src_buf_start;
+ qat_req->comn_mid.dest_data_addr = dst_buf_start;
+ }
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
+ sizeof(struct icp_qat_fw_la_bulk_req));
+ QAT_DP_HEXDUMP_LOG(DEBUG, "src_data:",
+ rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
+ rte_pktmbuf_data_len(op->sym->m_src));
+ if (do_cipher) {
+ uint8_t *cipher_iv_ptr = rte_crypto_op_ctod_offset(op,
+ uint8_t *,
+ ctx->cipher_iv.offset);
+ QAT_DP_HEXDUMP_LOG(DEBUG, "cipher iv:", cipher_iv_ptr,
+ ctx->cipher_iv.length);
+ }
+
+ if (do_auth) {
+ if (ctx->auth_iv.length) {
+ uint8_t *auth_iv_ptr = rte_crypto_op_ctod_offset(op,
+ uint8_t *,
+ ctx->auth_iv.offset);
+ QAT_DP_HEXDUMP_LOG(DEBUG, "auth iv:", auth_iv_ptr,
+ ctx->auth_iv.length);
+ }
+ QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", op->sym->auth.digest.data,
+ ctx->digest_length);
+ }
+
+ if (do_aead) {
+ QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", op->sym->aead.digest.data,
+ ctx->digest_length);
+ QAT_DP_HEXDUMP_LOG(DEBUG, "aad:", op->sym->aead.aad.data,
+ ctx->aad_len);
+ }
+#endif
+ return 0;
+}
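A short worked example of the in-place offset arithmetic above, with illustrative addresses: if the mbuf data sits at IOVA 0x2010, auth starts at offset 4 and cipher at offset 20, then min_ofs is 4 and the candidate DMA start is IOVA 0x2014 rounded down to a 64-byte boundary (per QAT_64_BTYE_ALIGN_MASK), i.e. 0x2000. Provided that does not fall before the start of the mbuf headroom it is kept, giving auth_off = 0x2014 - 0x2000 = 0x14 and cipher_offset = 0x2024 - 0x2000 = 0x24; src_length and dst_length then become whichever of (cipher_offset + cipher_length) and (auth_off + auth_len) is larger.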
diff --git a/src/spdk/dpdk/drivers/crypto/qat/qat_sym.h b/src/spdk/dpdk/drivers/crypto/qat/qat_sym.h
new file mode 100644
index 00000000..bc6426c3
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/qat/qat_sym.h
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+
+#ifndef _QAT_SYM_H_
+#define _QAT_SYM_H_
+
+#include <rte_cryptodev_pmd.h>
+
+#ifdef BUILD_QAT_SYM
+#include <openssl/evp.h>
+
+#include "qat_common.h"
+#include "qat_sym_session.h"
+#include "qat_sym_pmd.h"
+#include "qat_logs.h"
+
+#define BYTE_LENGTH 8
+/* BPI is only used for partial blocks of DES and AES,
+ * so the AES block length can be assumed as the max length for iv, src and dst
+ */
+#define BPI_MAX_ENCR_IV_LEN ICP_QAT_HW_AES_BLK_SZ
+
+/*
+ * Maximum number of SGL entries
+ */
+#define QAT_SYM_SGL_MAX_NUMBER 16
+
+struct qat_sym_session;
+
+struct qat_sym_sgl {
+ qat_sgl_hdr;
+ struct qat_flat_buf buffers[QAT_SYM_SGL_MAX_NUMBER];
+} __rte_packed __rte_cache_aligned;
+
+struct qat_sym_op_cookie {
+ struct qat_sym_sgl qat_sgl_src;
+ struct qat_sym_sgl qat_sgl_dst;
+ phys_addr_t qat_sgl_src_phys_addr;
+ phys_addr_t qat_sgl_dst_phys_addr;
+};
+
+int
+qat_sym_build_request(void *in_op, uint8_t *out_msg,
+ void *op_cookie, enum qat_device_gen qat_dev_gen);
+
+
+/** Encrypt a single partial block
+ * Depends on openssl libcrypto
+ * Uses ECB+XOR to do CFB encryption, same result, more performant
+ */
+static inline int
+bpi_cipher_encrypt(uint8_t *src, uint8_t *dst,
+ uint8_t *iv, int ivlen, int srclen,
+ void *bpi_ctx)
+{
+ EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
+ int encrypted_ivlen;
+ uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
+ uint8_t *encr = encrypted_iv;
+
+ /* ECB method: encrypt the IV, then XOR this with plaintext */
+ if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
+ <= 0)
+ goto cipher_encrypt_err;
+
+ for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
+ *dst = *src ^ *encr;
+
+ return 0;
+
+cipher_encrypt_err:
+ QAT_DP_LOG(ERR, "libcrypto ECB cipher encrypt failed");
+ return -EINVAL;
+}
+
+static inline uint32_t
+qat_bpicipher_postprocess(struct qat_sym_session *ctx,
+ struct rte_crypto_op *op)
+{
+ int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ uint8_t last_block_len = block_len > 0 ?
+ sym_op->cipher.data.length % block_len : 0;
+
+ if (last_block_len > 0 &&
+ ctx->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) {
+
+ /* Encrypt last block */
+ uint8_t *last_block, *dst, *iv;
+ uint32_t last_block_offset;
+
+ last_block_offset = sym_op->cipher.data.offset +
+ sym_op->cipher.data.length - last_block_len;
+ last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
+ uint8_t *, last_block_offset);
+
+ if (unlikely(sym_op->m_dst != NULL))
+ /* out-of-place operation (OOP) */
+ dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
+ uint8_t *, last_block_offset);
+ else
+ dst = last_block;
+
+ if (last_block_len < sym_op->cipher.data.length)
+ /* use previous block ciphertext as IV */
+ iv = dst - block_len;
+ else
+ /* runt block, i.e. less than one full block */
+ iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+ ctx->cipher_iv.offset);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src before post-process:",
+ last_block, last_block_len);
+ if (sym_op->m_dst != NULL)
+ QAT_DP_HEXDUMP_LOG(DEBUG,
+ "BPI: dst before post-process:",
+ dst, last_block_len);
+#endif
+ bpi_cipher_encrypt(last_block, dst, iv, block_len,
+ last_block_len, ctx->bpi_ctx);
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src after post-process:",
+ last_block, last_block_len);
+ if (sym_op->m_dst != NULL)
+ QAT_DP_HEXDUMP_LOG(DEBUG,
+ "BPI: dst after post-process:",
+ dst, last_block_len);
+#endif
+ }
+ return sym_op->cipher.data.length - last_block_len;
+}
+
+static inline void
+qat_sym_process_response(void **op, uint8_t *resp)
+{
+
+ struct icp_qat_fw_comn_resp *resp_msg =
+ (struct icp_qat_fw_comn_resp *)resp;
+ struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t)
+ (resp_msg->opaque_data);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ QAT_DP_HEXDUMP_LOG(DEBUG, "qat_response:", (uint8_t *)resp_msg,
+ sizeof(struct icp_qat_fw_comn_resp));
+#endif
+
+ if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
+ ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
+ resp_msg->comn_hdr.comn_status)) {
+
+ rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ } else {
+ struct qat_sym_session *sess = (struct qat_sym_session *)
+ get_sym_session_private_data(
+ rx_op->sym->session,
+ cryptodev_qat_driver_id);
+
+
+ if (sess->bpi_ctx)
+ qat_bpicipher_postprocess(sess, rx_op);
+ rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ }
+ *op = (void *)rx_op;
+}
+#else
+
+static inline void
+qat_sym_process_response(void **op __rte_unused, uint8_t *resp __rte_unused)
+{
+}
+#endif
+#endif /* _QAT_SYM_H_ */
diff --git a/src/spdk/dpdk/drivers/crypto/qat/qat_sym_capabilities.h b/src/spdk/dpdk/drivers/crypto/qat/qat_sym_capabilities.h
new file mode 100644
index 00000000..eea08bc7
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/qat/qat_sym_capabilities.h
@@ -0,0 +1,557 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017-2018 Intel Corporation
+ */
+
+#ifndef _QAT_SYM_CAPABILITIES_H_
+#define _QAT_SYM_CAPABILITIES_H_
+
+#define QAT_BASE_GEN1_SYM_CAPABILITIES \
+ { /* SHA1 HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA1_HMAC, \
+ .block_size = 64, \
+ .key_size = { \
+ .min = 1, \
+ .max = 64, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 1, \
+ .max = 20, \
+ .increment = 1 \
+ }, \
+ .iv_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA224 HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA224_HMAC, \
+ .block_size = 64, \
+ .key_size = { \
+ .min = 1, \
+ .max = 64, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 1, \
+ .max = 28, \
+ .increment = 1 \
+ }, \
+ .iv_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA256 HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA256_HMAC, \
+ .block_size = 64, \
+ .key_size = { \
+ .min = 1, \
+ .max = 64, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 1, \
+ .max = 32, \
+ .increment = 1 \
+ }, \
+ .iv_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA384 HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA384_HMAC, \
+ .block_size = 128, \
+ .key_size = { \
+ .min = 1, \
+ .max = 128, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 1, \
+ .max = 48, \
+ .increment = 1 \
+ }, \
+ .iv_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA512 HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA512_HMAC, \
+ .block_size = 128, \
+ .key_size = { \
+ .min = 1, \
+ .max = 128, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 1, \
+ .max = 64, \
+ .increment = 1 \
+ }, \
+ .iv_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* MD5 HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_MD5_HMAC, \
+ .block_size = 64, \
+ .key_size = { \
+ .min = 1, \
+ .max = 64, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 1, \
+ .max = 16, \
+ .increment = 1 \
+ }, \
+ .iv_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* AES XCBC MAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 }, \
+ .iv_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* AES CCM */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD, \
+ {.aead = { \
+ .algo = RTE_CRYPTO_AEAD_AES_CCM, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 4, \
+ .max = 16, \
+ .increment = 2 \
+ }, \
+ .aad_size = { \
+ .min = 0, \
+ .max = 224, \
+ .increment = 1 \
+ }, \
+ .iv_size = { \
+ .min = 7, \
+ .max = 13, \
+ .increment = 1 \
+ }, \
+ }, } \
+ }, } \
+ }, \
+ { /* AES GCM */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD, \
+ {.aead = { \
+ .algo = RTE_CRYPTO_AEAD_AES_GCM, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 32, \
+ .increment = 8 \
+ }, \
+ .digest_size = { \
+ .min = 8, \
+ .max = 16, \
+ .increment = 4 \
+ }, \
+ .aad_size = { \
+ .min = 0, \
+ .max = 240, \
+ .increment = 1 \
+ }, \
+ .iv_size = { \
+ .min = 12, \
+ .max = 12, \
+ .increment = 0 \
+ }, \
+ }, } \
+ }, } \
+ }, \
+ { /* AES GMAC (AUTH) */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_AES_GMAC, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 32, \
+ .increment = 8 \
+ }, \
+ .digest_size = { \
+ .min = 8, \
+ .max = 16, \
+ .increment = 4 \
+ }, \
+ .iv_size = { \
+ .min = 12, \
+ .max = 12, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* SNOW 3G (UIA2) */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 4, \
+ .max = 4, \
+ .increment = 0 \
+ }, \
+ .iv_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* AES CBC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_AES_CBC, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 32, \
+ .increment = 8 \
+ }, \
+ .iv_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* AES DOCSIS BPI */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_AES_DOCSISBPI,\
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ }, \
+ .iv_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* SNOW 3G (UEA2) */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ }, \
+ .iv_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* AES CTR */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_AES_CTR, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 32, \
+ .increment = 8 \
+ }, \
+ .iv_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* NULL (AUTH) */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_NULL, \
+ .block_size = 1, \
+ .key_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ }, \
+ .iv_size = { 0 } \
+ }, }, \
+ }, }, \
+ }, \
+ { /* NULL (CIPHER) */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_NULL, \
+ .block_size = 1, \
+ .key_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ }, \
+ .iv_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ } \
+ }, }, \
+ }, } \
+ }, \
+ { /* KASUMI (F8) */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_KASUMI_F8, \
+ .block_size = 8, \
+ .key_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ }, \
+ .iv_size = { \
+ .min = 8, \
+ .max = 8, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* KASUMI (F9) */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_KASUMI_F9, \
+ .block_size = 8, \
+ .key_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 4, \
+ .max = 4, \
+ .increment = 0 \
+ }, \
+ .iv_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* 3DES CBC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_3DES_CBC, \
+ .block_size = 8, \
+ .key_size = { \
+ .min = 8, \
+ .max = 24, \
+ .increment = 8 \
+ }, \
+ .iv_size = { \
+ .min = 8, \
+ .max = 8, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* 3DES CTR */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_3DES_CTR, \
+ .block_size = 8, \
+ .key_size = { \
+ .min = 16, \
+ .max = 24, \
+ .increment = 8 \
+ }, \
+ .iv_size = { \
+ .min = 8, \
+ .max = 8, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* DES CBC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_DES_CBC, \
+ .block_size = 8, \
+ .key_size = { \
+ .min = 8, \
+ .max = 8, \
+ .increment = 0 \
+ }, \
+ .iv_size = { \
+ .min = 8, \
+ .max = 8, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* DES DOCSISBPI */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_DES_DOCSISBPI,\
+ .block_size = 8, \
+ .key_size = { \
+ .min = 8, \
+ .max = 8, \
+ .increment = 0 \
+ }, \
+ .iv_size = { \
+ .min = 8, \
+ .max = 8, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }
+
+#define QAT_EXTRA_GEN2_SYM_CAPABILITIES \
+ { /* ZUC (EEA3) */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_ZUC_EEA3, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ }, \
+ .iv_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* ZUC (EIA3) */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_ZUC_EIA3, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 4, \
+ .max = 4, \
+ .increment = 0 \
+ }, \
+ .iv_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }
+
+#endif /* _QAT_SYM_CAPABILITIES_H_ */
diff --git a/src/spdk/dpdk/drivers/crypto/qat/qat_sym_pmd.c b/src/spdk/dpdk/drivers/crypto/qat/qat_sym_pmd.c
new file mode 100644
index 00000000..96f442e8
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/qat/qat_sym_pmd.c
@@ -0,0 +1,331 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+
+#include <rte_bus_pci.h>
+#include <rte_common.h>
+#include <rte_dev.h>
+#include <rte_malloc.h>
+#include <rte_pci.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "qat_logs.h"
+#include "qat_sym.h"
+#include "qat_sym_session.h"
+#include "qat_sym_pmd.h"
+
+uint8_t cryptodev_qat_driver_id;
+
+static const struct rte_cryptodev_capabilities qat_gen1_sym_capabilities[] = {
+ QAT_BASE_GEN1_SYM_CAPABILITIES,
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+static const struct rte_cryptodev_capabilities qat_gen2_sym_capabilities[] = {
+ QAT_BASE_GEN1_SYM_CAPABILITIES,
+ QAT_EXTRA_GEN2_SYM_CAPABILITIES,
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+static int qat_sym_qp_release(struct rte_cryptodev *dev,
+ uint16_t queue_pair_id);
+
+static int qat_sym_dev_config(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused struct rte_cryptodev_config *config)
+{
+ return 0;
+}
+
+static int qat_sym_dev_start(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+static void qat_sym_dev_stop(__rte_unused struct rte_cryptodev *dev)
+{
+ return;
+}
+
+static int qat_sym_dev_close(struct rte_cryptodev *dev)
+{
+ int i, ret;
+
+ for (i = 0; i < dev->data->nb_queue_pairs; i++) {
+ ret = qat_sym_qp_release(dev, i);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void qat_sym_dev_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *info)
+{
+ struct qat_sym_dev_private *internals = dev->data->dev_private;
+ const struct qat_qp_hw_data *sym_hw_qps =
+ qat_gen_config[internals->qat_dev->qat_dev_gen]
+ .qp_hw_data[QAT_SERVICE_SYMMETRIC];
+
+ if (info != NULL) {
+ info->max_nb_queue_pairs =
+ qat_qps_per_service(sym_hw_qps, QAT_SERVICE_SYMMETRIC);
+ info->feature_flags = dev->feature_flags;
+ info->capabilities = internals->qat_dev_capabilities;
+ info->driver_id = cryptodev_qat_driver_id;
+ /* No limit on the number of sessions */
+ info->sym.max_nb_sessions = 0;
+ }
+}
+
+static void qat_sym_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ struct qat_common_stats qat_stats = {0};
+ struct qat_sym_dev_private *qat_priv;
+
+ if (stats == NULL || dev == NULL) {
+ QAT_LOG(ERR, "invalid ptr: stats %p, dev %p", stats, dev);
+ return;
+ }
+ qat_priv = dev->data->dev_private;
+
+ qat_stats_get(qat_priv->qat_dev, &qat_stats, QAT_SERVICE_SYMMETRIC);
+ stats->enqueued_count = qat_stats.enqueued_count;
+ stats->dequeued_count = qat_stats.dequeued_count;
+ stats->enqueue_err_count = qat_stats.enqueue_err_count;
+ stats->dequeue_err_count = qat_stats.dequeue_err_count;
+}
+
+static void qat_sym_stats_reset(struct rte_cryptodev *dev)
+{
+ struct qat_sym_dev_private *qat_priv;
+
+ if (dev == NULL) {
+ QAT_LOG(ERR, "invalid cryptodev ptr %p", dev);
+ return;
+ }
+ qat_priv = dev->data->dev_private;
+
+ qat_stats_reset(qat_priv->qat_dev, QAT_SERVICE_SYMMETRIC);
+
+}
+
+static int qat_sym_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
+{
+ struct qat_sym_dev_private *qat_private = dev->data->dev_private;
+
+ QAT_LOG(DEBUG, "Release sym qp %u on device %d",
+ queue_pair_id, dev->data->dev_id);
+
+ qat_private->qat_dev->qps_in_use[QAT_SERVICE_SYMMETRIC][queue_pair_id]
+ = NULL;
+
+ return qat_qp_release((struct qat_qp **)
+ &(dev->data->queue_pairs[queue_pair_id]));
+}
+
+static int qat_sym_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id, struct rte_mempool *session_pool __rte_unused)
+{
+ struct qat_qp *qp;
+ int ret = 0;
+ uint32_t i;
+ struct qat_qp_config qat_qp_conf;
+
+ struct qat_qp **qp_addr =
+ (struct qat_qp **)&(dev->data->queue_pairs[qp_id]);
+ struct qat_sym_dev_private *qat_private = dev->data->dev_private;
+ const struct qat_qp_hw_data *sym_hw_qps =
+ qat_gen_config[qat_private->qat_dev->qat_dev_gen]
+ .qp_hw_data[QAT_SERVICE_SYMMETRIC];
+ const struct qat_qp_hw_data *qp_hw_data = sym_hw_qps + qp_id;
+
+ /* If qp is already in use free ring memory and qp metadata. */
+ if (*qp_addr != NULL) {
+ ret = qat_sym_qp_release(dev, qp_id);
+ if (ret < 0)
+ return ret;
+ }
+ if (qp_id >= qat_qps_per_service(sym_hw_qps, QAT_SERVICE_SYMMETRIC)) {
+ QAT_LOG(ERR, "qp_id %u invalid for this device", qp_id);
+ return -EINVAL;
+ }
+
+ qat_qp_conf.hw = qp_hw_data;
+ qat_qp_conf.build_request = qat_sym_build_request;
+ qat_qp_conf.cookie_size = sizeof(struct qat_sym_op_cookie);
+ qat_qp_conf.nb_descriptors = qp_conf->nb_descriptors;
+ qat_qp_conf.socket_id = socket_id;
+ qat_qp_conf.service_str = "sym";
+
+ ret = qat_qp_setup(qat_private->qat_dev, qp_addr, qp_id, &qat_qp_conf);
+ if (ret != 0)
+ return ret;
+
+ /* store a link to the qp in the qat_pci_device */
+ qat_private->qat_dev->qps_in_use[QAT_SERVICE_SYMMETRIC][qp_id]
+ = *qp_addr;
+
+ qp = (struct qat_qp *)*qp_addr;
+
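+ /* Pre-compute the IOVA of each cookie's src/dst SGL tables here so the
+ * data path never has to translate these addresses per operation.
+ */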
+ for (i = 0; i < qp->nb_descriptors; i++) {
+
+ struct qat_sym_op_cookie *cookie =
+ qp->op_cookies[i];
+
+ cookie->qat_sgl_src_phys_addr =
+ rte_mempool_virt2iova(cookie) +
+ offsetof(struct qat_sym_op_cookie,
+ qat_sgl_src);
+
+ cookie->qat_sgl_dst_phys_addr =
+ rte_mempool_virt2iova(cookie) +
+ offsetof(struct qat_sym_op_cookie,
+ qat_sgl_dst);
+ }
+
+ return ret;
+}
+
+static struct rte_cryptodev_ops crypto_qat_ops = {
+
+ /* Device related operations */
+ .dev_configure = qat_sym_dev_config,
+ .dev_start = qat_sym_dev_start,
+ .dev_stop = qat_sym_dev_stop,
+ .dev_close = qat_sym_dev_close,
+ .dev_infos_get = qat_sym_dev_info_get,
+
+ .stats_get = qat_sym_stats_get,
+ .stats_reset = qat_sym_stats_reset,
+ .queue_pair_setup = qat_sym_qp_setup,
+ .queue_pair_release = qat_sym_qp_release,
+ .queue_pair_count = NULL,
+
+ /* Crypto related operations */
+ .sym_session_get_size = qat_sym_session_get_private_size,
+ .sym_session_configure = qat_sym_session_configure,
+ .sym_session_clear = qat_sym_session_clear
+};
+
+static uint16_t
+qat_sym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ return qat_enqueue_op_burst(qp, (void **)ops, nb_ops);
+}
+
+static uint16_t
+qat_sym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ return qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
+}
+
+/* An rte_driver is needed in the registration of both the device and the
+ * driver with cryptodev.
+ * The actual QAT PCI device's rte_driver can't be used because its name
+ * represents the whole PCI device with all its services. Think of this as a
+ * holder for a name for the crypto part of the PCI device.
+ */
+static const char qat_sym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD);
+static const struct rte_driver cryptodev_qat_sym_driver = {
+ .name = qat_sym_drv_name,
+ .alias = qat_sym_drv_name
+};
+
+int
+qat_sym_dev_create(struct qat_pci_device *qat_pci_dev)
+{
+ struct rte_cryptodev_pmd_init_params init_params = {
+ .name = "",
+ .socket_id = qat_pci_dev->pci_dev->device.numa_node,
+ .private_data_size = sizeof(struct qat_sym_dev_private)
+ };
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ struct rte_cryptodev *cryptodev;
+ struct qat_sym_dev_private *internals;
+
+ snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
+ qat_pci_dev->name, "sym");
+ QAT_LOG(DEBUG, "Creating QAT SYM device %s", name);
+
+ /* Populate subset device to use in cryptodev device creation */
+ qat_pci_dev->sym_rte_dev.driver = &cryptodev_qat_sym_driver;
+ qat_pci_dev->sym_rte_dev.numa_node =
+ qat_pci_dev->pci_dev->device.numa_node;
+ qat_pci_dev->sym_rte_dev.devargs = NULL;
+
+ cryptodev = rte_cryptodev_pmd_create(name,
+ &(qat_pci_dev->sym_rte_dev), &init_params);
+
+ if (cryptodev == NULL)
+ return -ENODEV;
+
+ qat_pci_dev->sym_rte_dev.name = cryptodev->data->name;
+ cryptodev->driver_id = cryptodev_qat_driver_id;
+ cryptodev->dev_ops = &crypto_qat_ops;
+
+ cryptodev->enqueue_burst = qat_sym_pmd_enqueue_op_burst;
+ cryptodev->dequeue_burst = qat_sym_pmd_dequeue_op_burst;
+
+ cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_HW_ACCELERATED |
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+ RTE_CRYPTODEV_FF_IN_PLACE_SGL |
+ RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
+ RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
+ RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
+ RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
+
+ internals = cryptodev->data->dev_private;
+ internals->qat_dev = qat_pci_dev;
+ qat_pci_dev->sym_dev = internals;
+
+ internals->sym_dev_id = cryptodev->data->dev_id;
+ switch (qat_pci_dev->qat_dev_gen) {
+ case QAT_GEN1:
+ internals->qat_dev_capabilities = qat_gen1_sym_capabilities;
+ break;
+ case QAT_GEN2:
+ internals->qat_dev_capabilities = qat_gen2_sym_capabilities;
+ break;
+ default:
+ internals->qat_dev_capabilities = qat_gen2_sym_capabilities;
+ QAT_LOG(DEBUG,
+ "QAT gen %d capabilities unknown, default to GEN2",
+ qat_pci_dev->qat_dev_gen);
+ break;
+ }
+
+ QAT_LOG(DEBUG, "Created QAT SYM device %s as cryptodev instance %d",
+ cryptodev->data->name, internals->sym_dev_id);
+ return 0;
+}
+
+int
+qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev)
+{
+ struct rte_cryptodev *cryptodev;
+
+ if (qat_pci_dev == NULL)
+ return -ENODEV;
+ if (qat_pci_dev->sym_dev == NULL)
+ return 0;
+
+ /* free crypto device */
+ cryptodev = rte_cryptodev_pmd_get_dev(qat_pci_dev->sym_dev->sym_dev_id);
+ rte_cryptodev_pmd_destroy(cryptodev);
+ qat_pci_dev->sym_rte_dev.name = NULL;
+ qat_pci_dev->sym_dev = NULL;
+
+ return 0;
+}
+
+
+static struct cryptodev_driver qat_crypto_drv;
+RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
+ cryptodev_qat_sym_driver,
+ cryptodev_qat_driver_id);
diff --git a/src/spdk/dpdk/drivers/crypto/qat/qat_sym_pmd.h b/src/spdk/dpdk/drivers/crypto/qat/qat_sym_pmd.h
new file mode 100644
index 00000000..d3432854
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/qat/qat_sym_pmd.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+
+#ifndef _QAT_SYM_PMD_H_
+#define _QAT_SYM_PMD_H_
+
+#ifdef BUILD_QAT_SYM
+
+#include <rte_cryptodev.h>
+
+#include "qat_sym_capabilities.h"
+#include "qat_device.h"
+
+/**< Intel(R) QAT Symmetric Crypto PMD device name */
+#define CRYPTODEV_NAME_QAT_SYM_PMD crypto_qat
+
+extern uint8_t cryptodev_qat_driver_id;
+
+/** Private data structure for a QAT device.
+ * This QAT device offers only the symmetric crypto service; there can be
+ * one of these on each qat_pci_device (VF). In future there may also be
+ * private data structures for other services.
+ */
+struct qat_sym_dev_private {
+ struct qat_pci_device *qat_dev;
+ /**< The qat pci device hosting the service */
+ uint8_t sym_dev_id;
+ /**< Device instance for this rte_cryptodev */
+ const struct rte_cryptodev_capabilities *qat_dev_capabilities;
+ /* QAT device symmetric crypto capabilities */
+};
+
+int
+qat_sym_dev_create(struct qat_pci_device *qat_pci_dev);
+
+int
+qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev);
+
+#endif
+#endif /* _QAT_SYM_PMD_H_ */
diff --git a/src/spdk/dpdk/drivers/crypto/qat/qat_sym_session.c b/src/spdk/dpdk/drivers/crypto/qat/qat_sym_session.c
new file mode 100644
index 00000000..1d58220a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/qat/qat_sym_session.c
@@ -0,0 +1,1725 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+
+#include <openssl/sha.h> /* Needed to calculate pre-compute values */
+#include <openssl/aes.h> /* Needed to calculate pre-compute values */
+#include <openssl/md5.h> /* Needed to calculate pre-compute values */
+#include <openssl/evp.h> /* Needed for bpi runt block processing */
+
+#include <rte_memcpy.h>
+#include <rte_common.h>
+#include <rte_spinlock.h>
+#include <rte_byteorder.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_crypto_sym.h>
+
+#include "qat_logs.h"
+#include "qat_sym_session.h"
+#include "qat_sym_pmd.h"
+
+/** Frees a previously created context.
+ * Depends on OpenSSL libcrypto.
+ */
+static void
+bpi_cipher_ctx_free(void *bpi_ctx)
+{
+ if (bpi_ctx != NULL)
+ EVP_CIPHER_CTX_free((EVP_CIPHER_CTX *)bpi_ctx);
+}
+
+/** Creates an AES or DES context in ECB mode.
+ * Depends on OpenSSL libcrypto.
+ */
+static int
+bpi_cipher_ctx_init(enum rte_crypto_cipher_algorithm cryptodev_algo,
+ enum rte_crypto_cipher_operation direction __rte_unused,
+ uint8_t *key, void **ctx)
+{
+ const EVP_CIPHER *algo = NULL;
+ int ret;
+ *ctx = EVP_CIPHER_CTX_new();
+
+ if (*ctx == NULL) {
+ ret = -ENOMEM;
+ goto ctx_init_err;
+ }
+
+ if (cryptodev_algo == RTE_CRYPTO_CIPHER_DES_DOCSISBPI)
+ algo = EVP_des_ecb();
+ else
+ algo = EVP_aes_128_ecb();
+
+ /* IV will be ECB encrypted whether direction is encrypt or decrypt */
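+ /* DOCSIS BPI handles the trailing partial block CFB-style, which only
+ * ever uses the block cipher in its forward direction, so one
+ * encrypt-direction ECB context serves both encrypt and decrypt.
+ */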
+ if (EVP_EncryptInit_ex(*ctx, algo, NULL, key, 0) != 1) {
+ ret = -EINVAL;
+ goto ctx_init_err;
+ }
+
+ return 0;
+
+ctx_init_err:
+ if (*ctx != NULL)
+ EVP_CIPHER_CTX_free(*ctx);
+ return ret;
+}
+
+static int
+qat_is_cipher_alg_supported(enum rte_crypto_cipher_algorithm algo,
+ struct qat_sym_dev_private *internals)
+{
+ int i = 0;
+ const struct rte_cryptodev_capabilities *capability;
+
+ while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
+ RTE_CRYPTO_OP_TYPE_UNDEFINED) {
+ if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
+ continue;
+
+ if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
+ continue;
+
+ if (capability->sym.cipher.algo == algo)
+ return 1;
+ }
+ return 0;
+}
+
+static int
+qat_is_auth_alg_supported(enum rte_crypto_auth_algorithm algo,
+ struct qat_sym_dev_private *internals)
+{
+ int i = 0;
+ const struct rte_cryptodev_capabilities *capability;
+
+ while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
+ RTE_CRYPTO_OP_TYPE_UNDEFINED) {
+ if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
+ continue;
+
+ if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
+ continue;
+
+ if (capability->sym.auth.algo == algo)
+ return 1;
+ }
+ return 0;
+}
+
+void
+qat_sym_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
+{
+ uint8_t index = dev->driver_id;
+ void *sess_priv = get_sym_session_private_data(sess, index);
+ struct qat_sym_session *s = (struct qat_sym_session *)sess_priv;
+
+ if (sess_priv) {
+ if (s->bpi_ctx)
+ bpi_cipher_ctx_free(s->bpi_ctx);
+ memset(s, 0, qat_sym_session_get_private_size(dev));
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+
+ set_sym_session_private_data(sess, index, NULL);
+ rte_mempool_put(sess_mp, sess_priv);
+ }
+}
+
+static int
+qat_get_cmd_id(const struct rte_crypto_sym_xform *xform)
+{
+ /* Cipher Only */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL)
+ return ICP_QAT_FW_LA_CMD_CIPHER;
+
+ /* Authentication Only */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL)
+ return ICP_QAT_FW_LA_CMD_AUTH;
+
+ /* AEAD */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+ /* AES-GCM and AES-CCM work in opposite orders: GCM first
+ * encrypts and then generates the hash, whereas AES-CCM first
+ * generates the hash and then encrypts. A similar relation
+ * applies to decryption.
+ */
+ if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
+ if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
+ return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
+ else
+ return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
+ else
+ if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
+ return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
+ else
+ return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
+ }
+
+ if (xform->next == NULL)
+ return -1;
+
+ /* Cipher then Authenticate */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
+
+ /* Authenticate then Cipher */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+ return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
+
+ return -1;
+}
+
+static struct rte_crypto_auth_xform *
+qat_get_auth_xform(struct rte_crypto_sym_xform *xform)
+{
+ do {
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ return &xform->auth;
+
+ xform = xform->next;
+ } while (xform);
+
+ return NULL;
+}
+
+static struct rte_crypto_cipher_xform *
+qat_get_cipher_xform(struct rte_crypto_sym_xform *xform)
+{
+ do {
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+ return &xform->cipher;
+
+ xform = xform->next;
+ } while (xform);
+
+ return NULL;
+}
+
+int
+qat_sym_session_configure_cipher(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct qat_sym_session *session)
+{
+ struct qat_sym_dev_private *internals = dev->data->dev_private;
+ struct rte_crypto_cipher_xform *cipher_xform = NULL;
+ int ret;
+
+ /* Get cipher xform from crypto xform chain */
+ cipher_xform = qat_get_cipher_xform(xform);
+
+ session->cipher_iv.offset = cipher_xform->iv.offset;
+ session->cipher_iv.length = cipher_xform->iv.length;
+
+ switch (cipher_xform->algo) {
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ if (qat_sym_validate_aes_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ QAT_LOG(ERR, "Invalid AES cipher key size");
+ ret = -EINVAL;
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ if (qat_sym_validate_aes_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ QAT_LOG(ERR, "Invalid AES cipher key size");
+ ret = -EINVAL;
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
+ if (qat_sym_validate_snow3g_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ QAT_LOG(ERR, "Invalid SNOW 3G cipher key size");
+ ret = -EINVAL;
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_NULL:
+ session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_KASUMI_F8:
+ if (qat_sym_validate_kasumi_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ QAT_LOG(ERR, "Invalid KASUMI cipher key size");
+ ret = -EINVAL;
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_F8_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ if (qat_sym_validate_3des_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ QAT_LOG(ERR, "Invalid 3DES cipher key size");
+ ret = -EINVAL;
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_DES_CBC:
+ if (qat_sym_validate_des_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ QAT_LOG(ERR, "Invalid DES cipher key size");
+ ret = -EINVAL;
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_CTR:
+ if (qat_sym_validate_3des_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ QAT_LOG(ERR, "Invalid 3DES cipher key size");
+ ret = -EINVAL;
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
+ ret = bpi_cipher_ctx_init(
+ cipher_xform->algo,
+ cipher_xform->op,
+ cipher_xform->key.data,
+ &session->bpi_ctx);
+ if (ret != 0) {
+ QAT_LOG(ERR, "failed to create DES BPI ctx");
+ goto error_out;
+ }
+ if (qat_sym_validate_des_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ QAT_LOG(ERR, "Invalid DES cipher key size");
+ ret = -EINVAL;
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
+ ret = bpi_cipher_ctx_init(
+ cipher_xform->algo,
+ cipher_xform->op,
+ cipher_xform->key.data,
+ &session->bpi_ctx);
+ if (ret != 0) {
+ QAT_LOG(ERR, "failed to create AES BPI ctx");
+ goto error_out;
+ }
+ if (qat_sym_validate_aes_docsisbpi_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ QAT_LOG(ERR, "Invalid AES DOCSISBPI key size");
+ ret = -EINVAL;
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_ZUC_EEA3:
+ if (!qat_is_cipher_alg_supported(
+ cipher_xform->algo, internals)) {
+ QAT_LOG(ERR, "%s not supported on this device",
+ rte_crypto_cipher_algorithm_strings
+ [cipher_xform->algo]);
+ ret = -ENOTSUP;
+ goto error_out;
+ }
+ if (qat_sym_validate_zuc_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ QAT_LOG(ERR, "Invalid ZUC cipher key size");
+ ret = -EINVAL;
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_ECB:
+ case RTE_CRYPTO_CIPHER_AES_ECB:
+ case RTE_CRYPTO_CIPHER_AES_F8:
+ case RTE_CRYPTO_CIPHER_AES_XTS:
+ case RTE_CRYPTO_CIPHER_ARC4:
+ QAT_LOG(ERR, "Crypto QAT PMD: Unsupported Cipher alg %u",
+ cipher_xform->algo);
+ ret = -ENOTSUP;
+ goto error_out;
+ default:
+ QAT_LOG(ERR, "Crypto: Undefined Cipher specified %u\n",
+ cipher_xform->algo);
+ ret = -EINVAL;
+ goto error_out;
+ }
+
+ if (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+ session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
+ else
+ session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
+
+ if (qat_sym_session_aead_create_cd_cipher(session,
+ cipher_xform->key.data,
+ cipher_xform->key.length)) {
+ ret = -EINVAL;
+ goto error_out;
+ }
+
+ return 0;
+
+error_out:
+ if (session->bpi_ctx) {
+ bpi_cipher_ctx_free(session->bpi_ctx);
+ session->bpi_ctx = NULL;
+ }
+ return ret;
+}
+
+int
+qat_sym_session_configure(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mempool)
+{
+ void *sess_private_data;
+ int ret;
+
+ if (rte_mempool_get(mempool, &sess_private_data)) {
+ CDEV_LOG_ERR(
+ "Couldn't get object from session mempool");
+ return -ENOMEM;
+ }
+
+ ret = qat_sym_session_set_parameters(dev, xform, sess_private_data);
+ if (ret != 0) {
+ QAT_LOG(ERR,
+ "Crypto QAT PMD: failed to configure session parameters");
+
+ /* Return session to mempool */
+ rte_mempool_put(mempool, sess_private_data);
+ return ret;
+ }
+
+ set_sym_session_private_data(sess, dev->driver_id,
+ sess_private_data);
+
+ return 0;
+}
+
+int
+qat_sym_session_set_parameters(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform, void *session_private)
+{
+ struct qat_sym_session *session = session_private;
+ int ret;
+ int qat_cmd_id;
+
+ /* Set context descriptor physical address */
+ session->cd_paddr = rte_mempool_virt2iova(session) +
+ offsetof(struct qat_sym_session, cd);
+
+ session->min_qat_dev_gen = QAT_GEN1;
+
+ /* Get requested QAT command id */
+ qat_cmd_id = qat_get_cmd_id(xform);
+ if (qat_cmd_id < 0 || qat_cmd_id >= ICP_QAT_FW_LA_CMD_DELIMITER) {
+ QAT_LOG(ERR, "Unsupported xform chain requested");
+ return -ENOTSUP;
+ }
+ session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
+ switch (session->qat_cmd) {
+ case ICP_QAT_FW_LA_CMD_CIPHER:
+ ret = qat_sym_session_configure_cipher(dev, xform, session);
+ if (ret < 0)
+ return ret;
+ break;
+ case ICP_QAT_FW_LA_CMD_AUTH:
+ ret = qat_sym_session_configure_auth(dev, xform, session);
+ if (ret < 0)
+ return ret;
+ break;
+ case ICP_QAT_FW_LA_CMD_CIPHER_HASH:
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+ ret = qat_sym_session_configure_aead(xform,
+ session);
+ if (ret < 0)
+ return ret;
+ } else {
+ ret = qat_sym_session_configure_cipher(dev,
+ xform, session);
+ if (ret < 0)
+ return ret;
+ ret = qat_sym_session_configure_auth(dev,
+ xform, session);
+ if (ret < 0)
+ return ret;
+ }
+ break;
+ case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+ ret = qat_sym_session_configure_aead(xform,
+ session);
+ if (ret < 0)
+ return ret;
+ } else {
+ ret = qat_sym_session_configure_auth(dev,
+ xform, session);
+ if (ret < 0)
+ return ret;
+ ret = qat_sym_session_configure_cipher(dev,
+ xform, session);
+ if (ret < 0)
+ return ret;
+ }
+ break;
+ case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
+ case ICP_QAT_FW_LA_CMD_TRNG_TEST:
+ case ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE:
+ case ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE:
+ case ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE:
+ case ICP_QAT_FW_LA_CMD_MGF1:
+ case ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP:
+ case ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP:
+ case ICP_QAT_FW_LA_CMD_DELIMITER:
+ QAT_LOG(ERR, "Unsupported Service %u",
+ session->qat_cmd);
+ return -ENOTSUP;
+ default:
+ QAT_LOG(ERR, "Unsupported Service %u",
+ session->qat_cmd);
+ return -ENOTSUP;
+ }
+
+ return 0;
+}
+
+int
+qat_sym_session_configure_auth(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct qat_sym_session *session)
+{
+ struct rte_crypto_auth_xform *auth_xform = qat_get_auth_xform(xform);
+ struct qat_sym_dev_private *internals = dev->data->dev_private;
+ uint8_t *key_data = auth_xform->key.data;
+ uint8_t key_length = auth_xform->key.length;
+
+ switch (auth_xform->algo) {
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1;
+ break;
+ case RTE_CRYPTO_AUTH_SHA224_HMAC:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA224;
+ break;
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA256;
+ break;
+ case RTE_CRYPTO_AUTH_SHA384_HMAC:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA384;
+ break;
+ case RTE_CRYPTO_AUTH_SHA512_HMAC:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA512;
+ break;
+ case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
+ break;
+ case RTE_CRYPTO_AUTH_AES_GMAC:
+ if (qat_sym_validate_aes_key(auth_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ QAT_LOG(ERR, "Invalid AES key size");
+ return -EINVAL;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
+
+ break;
+ case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2;
+ break;
+ case RTE_CRYPTO_AUTH_MD5_HMAC:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_MD5;
+ break;
+ case RTE_CRYPTO_AUTH_NULL:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_NULL;
+ break;
+ case RTE_CRYPTO_AUTH_KASUMI_F9:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_KASUMI_F9;
+ break;
+ case RTE_CRYPTO_AUTH_ZUC_EIA3:
+ if (!qat_is_auth_alg_supported(auth_xform->algo, internals)) {
+ QAT_LOG(ERR, "%s not supported on this device",
+ rte_crypto_auth_algorithm_strings
+ [auth_xform->algo]);
+ return -ENOTSUP;
+ }
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3;
+ break;
+ case RTE_CRYPTO_AUTH_SHA1:
+ case RTE_CRYPTO_AUTH_SHA256:
+ case RTE_CRYPTO_AUTH_SHA512:
+ case RTE_CRYPTO_AUTH_SHA224:
+ case RTE_CRYPTO_AUTH_SHA384:
+ case RTE_CRYPTO_AUTH_MD5:
+ case RTE_CRYPTO_AUTH_AES_CMAC:
+ case RTE_CRYPTO_AUTH_AES_CBC_MAC:
+ QAT_LOG(ERR, "Crypto: Unsupported hash alg %u",
+ auth_xform->algo);
+ return -ENOTSUP;
+ default:
+ QAT_LOG(ERR, "Crypto: Undefined Hash algo %u specified",
+ auth_xform->algo);
+ return -EINVAL;
+ }
+
+ session->auth_iv.offset = auth_xform->iv.offset;
+ session->auth_iv.length = auth_xform->iv.length;
+
+ if (auth_xform->algo == RTE_CRYPTO_AUTH_AES_GMAC) {
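+ /* AES-GMAC is handled as GCM with no payload to cipher: the content
+ * descriptor needs both a cipher config block (carrying the AES key)
+ * and an auth config block, and the command is restored to
+ * authentication-only afterwards.
+ */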
+ if (auth_xform->op == RTE_CRYPTO_AUTH_OP_GENERATE) {
+ session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
+ session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
+ /*
+ * The cipher descriptor content must be created first,
+ * then the authentication content.
+ */
+
+ if (qat_sym_session_aead_create_cd_cipher(session,
+ auth_xform->key.data,
+ auth_xform->key.length))
+ return -EINVAL;
+
+ if (qat_sym_session_aead_create_cd_auth(session,
+ key_data,
+ key_length,
+ 0,
+ auth_xform->digest_length,
+ auth_xform->op))
+ return -EINVAL;
+ } else {
+ session->qat_cmd = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
+ session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
+ /*
+ * The authentication descriptor content must be created
+ * first, then the cipher content.
+ */
+
+ if (qat_sym_session_aead_create_cd_auth(session,
+ key_data,
+ key_length,
+ 0,
+ auth_xform->digest_length,
+ auth_xform->op))
+ return -EINVAL;
+
+ if (qat_sym_session_aead_create_cd_cipher(session,
+ auth_xform->key.data,
+ auth_xform->key.length))
+ return -EINVAL;
+ }
+ /* Restore to authentication only */
+ session->qat_cmd = ICP_QAT_FW_LA_CMD_AUTH;
+ } else {
+ if (qat_sym_session_aead_create_cd_auth(session,
+ key_data,
+ key_length,
+ 0,
+ auth_xform->digest_length,
+ auth_xform->op))
+ return -EINVAL;
+ }
+
+ session->digest_length = auth_xform->digest_length;
+ return 0;
+}
+
+int
+qat_sym_session_configure_aead(struct rte_crypto_sym_xform *xform,
+ struct qat_sym_session *session)
+{
+ struct rte_crypto_aead_xform *aead_xform = &xform->aead;
+ enum rte_crypto_auth_operation crypto_operation;
+
+ /*
+ * Store AEAD IV parameters as cipher IV,
+ * to avoid unnecessary memory usage
+ */
+ session->cipher_iv.offset = xform->aead.iv.offset;
+ session->cipher_iv.length = xform->aead.iv.length;
+
+ switch (aead_xform->algo) {
+ case RTE_CRYPTO_AEAD_AES_GCM:
+ if (qat_sym_validate_aes_key(aead_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ QAT_LOG(ERR, "Invalid AES key size");
+ return -EINVAL;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
+ break;
+ case RTE_CRYPTO_AEAD_AES_CCM:
+ if (qat_sym_validate_aes_key(aead_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ QAT_LOG(ERR, "Invalid AES key size");
+ return -EINVAL;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC;
+ break;
+ default:
+ QAT_LOG(ERR, "Crypto: Undefined AEAD specified %u\n",
+ aead_xform->algo);
+ return -EINVAL;
+ }
+
+ if ((aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
+ aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) ||
+ (aead_xform->op == RTE_CRYPTO_AEAD_OP_DECRYPT &&
+ aead_xform->algo == RTE_CRYPTO_AEAD_AES_CCM)) {
+ session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
+ /*
+ * The cipher descriptor content must be created first,
+ * then the authentication content.
+ */
+ crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ?
+ RTE_CRYPTO_AUTH_OP_GENERATE : RTE_CRYPTO_AUTH_OP_VERIFY;
+
+ if (qat_sym_session_aead_create_cd_cipher(session,
+ aead_xform->key.data,
+ aead_xform->key.length))
+ return -EINVAL;
+
+ if (qat_sym_session_aead_create_cd_auth(session,
+ aead_xform->key.data,
+ aead_xform->key.length,
+ aead_xform->aad_length,
+ aead_xform->digest_length,
+ crypto_operation))
+ return -EINVAL;
+ } else {
+ session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
+ /*
+ * The authentication descriptor content must be created
+ * first, then the cipher content.
+ */
+
+ crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ?
+ RTE_CRYPTO_AUTH_OP_VERIFY : RTE_CRYPTO_AUTH_OP_GENERATE;
+
+ if (qat_sym_session_aead_create_cd_auth(session,
+ aead_xform->key.data,
+ aead_xform->key.length,
+ aead_xform->aad_length,
+ aead_xform->digest_length,
+ crypto_operation))
+ return -EINVAL;
+
+ if (qat_sym_session_aead_create_cd_cipher(session,
+ aead_xform->key.data,
+ aead_xform->key.length))
+ return -EINVAL;
+ }
+
+ session->digest_length = aead_xform->digest_length;
+ return 0;
+}
+
+unsigned int qat_sym_session_get_private_size(
+ struct rte_cryptodev *dev __rte_unused)
+{
+ return RTE_ALIGN_CEIL(sizeof(struct qat_sym_session), 8);
+}
+
+/* returns block size in bytes per cipher algo */
+int qat_cipher_get_block_size(enum icp_qat_hw_cipher_algo qat_cipher_alg)
+{
+ switch (qat_cipher_alg) {
+ case ICP_QAT_HW_CIPHER_ALGO_DES:
+ return ICP_QAT_HW_DES_BLK_SZ;
+ case ICP_QAT_HW_CIPHER_ALGO_3DES:
+ return ICP_QAT_HW_3DES_BLK_SZ;
+ case ICP_QAT_HW_CIPHER_ALGO_AES128:
+ case ICP_QAT_HW_CIPHER_ALGO_AES192:
+ case ICP_QAT_HW_CIPHER_ALGO_AES256:
+ return ICP_QAT_HW_AES_BLK_SZ;
+ default:
+ QAT_LOG(ERR, "invalid block cipher alg %u", qat_cipher_alg);
+ return -EFAULT;
+ };
+ return -EFAULT;
+}
+
+/*
+ * Returns the size in bytes per hash algo for the state1 size field in cd_ctrl.
+ * This is the digest size rounded up to the nearest quadword.
+ */
+static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg)
+{
+ switch (qat_hash_alg) {
+ case ICP_QAT_HW_AUTH_ALGO_SHA1:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA1_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_SHA224:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA224_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_SHA256:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA256_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_SHA384:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA384_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_SHA512:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_GALOIS_128_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_MD5:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_MD5_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_KASUMI_F9_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_NULL:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_NULL_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
+ /* return maximum state1 size in this case */
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
+ default:
+ QAT_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
+ return -EFAULT;
+ };
+ return -EFAULT;
+}
+
+/* returns digest size in bytes per hash algo */
+static int qat_hash_get_digest_size(enum icp_qat_hw_auth_algo qat_hash_alg)
+{
+ switch (qat_hash_alg) {
+ case ICP_QAT_HW_AUTH_ALGO_SHA1:
+ return ICP_QAT_HW_SHA1_STATE1_SZ;
+ case ICP_QAT_HW_AUTH_ALGO_SHA224:
+ return ICP_QAT_HW_SHA224_STATE1_SZ;
+ case ICP_QAT_HW_AUTH_ALGO_SHA256:
+ return ICP_QAT_HW_SHA256_STATE1_SZ;
+ case ICP_QAT_HW_AUTH_ALGO_SHA384:
+ return ICP_QAT_HW_SHA384_STATE1_SZ;
+ case ICP_QAT_HW_AUTH_ALGO_SHA512:
+ return ICP_QAT_HW_SHA512_STATE1_SZ;
+ case ICP_QAT_HW_AUTH_ALGO_MD5:
+ return ICP_QAT_HW_MD5_STATE1_SZ;
+ case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
+ /* return maximum digest size in this case */
+ return ICP_QAT_HW_SHA512_STATE1_SZ;
+ default:
+ QAT_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
+ return -EFAULT;
+ };
+ return -EFAULT;
+}
+
+/* returns block size in bytes per hash algo */
+static int qat_hash_get_block_size(enum icp_qat_hw_auth_algo qat_hash_alg)
+{
+ switch (qat_hash_alg) {
+ case ICP_QAT_HW_AUTH_ALGO_SHA1:
+ return SHA_CBLOCK;
+ case ICP_QAT_HW_AUTH_ALGO_SHA224:
+ return SHA256_CBLOCK;
+ case ICP_QAT_HW_AUTH_ALGO_SHA256:
+ return SHA256_CBLOCK;
+ case ICP_QAT_HW_AUTH_ALGO_SHA384:
+ return SHA512_CBLOCK;
+ case ICP_QAT_HW_AUTH_ALGO_SHA512:
+ return SHA512_CBLOCK;
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+ return 16;
+ case ICP_QAT_HW_AUTH_ALGO_MD5:
+ return MD5_CBLOCK;
+ case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
+ /* return maximum block size in this case */
+ return SHA512_CBLOCK;
+ default:
+ QAT_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
+ return -EFAULT;
+ };
+ return -EFAULT;
+}
+
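+/* The partial_hash_* helpers below run a single raw compression-function
+ * pass (SHAx_Transform/MD5_Transform) over one block and copy out the
+ * resulting intermediate state, i.e. a hash with no length padding or
+ * finalisation. This is the pre-computed inner/outer state that the HMAC
+ * precompute below relies on.
+ */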
+static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
+{
+ SHA_CTX ctx;
+
+ if (!SHA1_Init(&ctx))
+ return -EFAULT;
+ SHA1_Transform(&ctx, data_in);
+ rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH);
+ return 0;
+}
+
+static int partial_hash_sha224(uint8_t *data_in, uint8_t *data_out)
+{
+ SHA256_CTX ctx;
+
+ if (!SHA224_Init(&ctx))
+ return -EFAULT;
+ SHA256_Transform(&ctx, data_in);
+ rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
+ return 0;
+}
+
+static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
+{
+ SHA256_CTX ctx;
+
+ if (!SHA256_Init(&ctx))
+ return -EFAULT;
+ SHA256_Transform(&ctx, data_in);
+ rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
+ return 0;
+}
+
+static int partial_hash_sha384(uint8_t *data_in, uint8_t *data_out)
+{
+ SHA512_CTX ctx;
+
+ if (!SHA384_Init(&ctx))
+ return -EFAULT;
+ SHA512_Transform(&ctx, data_in);
+ rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
+ return 0;
+}
+
+static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
+{
+ SHA512_CTX ctx;
+
+ if (!SHA512_Init(&ctx))
+ return -EFAULT;
+ SHA512_Transform(&ctx, data_in);
+ rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
+ return 0;
+}
+
+static int partial_hash_md5(uint8_t *data_in, uint8_t *data_out)
+{
+ MD5_CTX ctx;
+
+ if (!MD5_Init(&ctx))
+ return -EFAULT;
+ MD5_Transform(&ctx, data_in);
+ rte_memcpy(data_out, &ctx, MD5_DIGEST_LENGTH);
+
+ return 0;
+}
+
+static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
+ uint8_t *data_in,
+ uint8_t *data_out)
+{
+ int digest_size;
+ uint8_t digest[qat_hash_get_digest_size(
+ ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
+ uint32_t *hash_state_out_be32;
+ uint64_t *hash_state_out_be64;
+ int i;
+
+ digest_size = qat_hash_get_digest_size(hash_alg);
+ if (digest_size <= 0)
+ return -EFAULT;
+
+ hash_state_out_be32 = (uint32_t *)data_out;
+ hash_state_out_be64 = (uint64_t *)data_out;
+
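+ /* Convert each word of the OpenSSL context state to big-endian before
+ * writing it out (32-bit words for SHA-1/224/256, 64-bit words for
+ * SHA-384/512); the MD5 state is copied as-is.
+ */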
+ switch (hash_alg) {
+ case ICP_QAT_HW_AUTH_ALGO_SHA1:
+ if (partial_hash_sha1(data_in, digest))
+ return -EFAULT;
+ for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
+ *hash_state_out_be32 =
+ rte_bswap32(*(((uint32_t *)digest)+i));
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_SHA224:
+ if (partial_hash_sha224(data_in, digest))
+ return -EFAULT;
+ for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
+ *hash_state_out_be32 =
+ rte_bswap32(*(((uint32_t *)digest)+i));
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_SHA256:
+ if (partial_hash_sha256(data_in, digest))
+ return -EFAULT;
+ for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
+ *hash_state_out_be32 =
+ rte_bswap32(*(((uint32_t *)digest)+i));
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_SHA384:
+ if (partial_hash_sha384(data_in, digest))
+ return -EFAULT;
+ for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
+ *hash_state_out_be64 =
+ rte_bswap64(*(((uint64_t *)digest)+i));
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_SHA512:
+ if (partial_hash_sha512(data_in, digest))
+ return -EFAULT;
+ for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
+ *hash_state_out_be64 =
+ rte_bswap64(*(((uint64_t *)digest)+i));
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_MD5:
+ if (partial_hash_md5(data_in, data_out))
+ return -EFAULT;
+ break;
+ default:
+ QAT_LOG(ERR, "invalid hash alg %u", hash_alg);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+#define HMAC_IPAD_VALUE 0x36
+#define HMAC_OPAD_VALUE 0x5c
+#define HASH_XCBC_PRECOMP_KEY_NUM 3
+
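+/*
+ * Pre-computes the state material placed in the content descriptor:
+ * - AES-XCBC-MAC: the derived keys K1..K3 (AES-ECB of the fixed
+ *   0x01/0x02/0x03 seed blocks under the auth key),
+ * - GCM/GMAC: the hash subkey H (AES-ECB encryption of a zero block),
+ * - HMAC: the partial hashes of (key XOR ipad) and (key XOR opad).
+ */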
+static int qat_sym_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
+ const uint8_t *auth_key,
+ uint16_t auth_keylen,
+ uint8_t *p_state_buf,
+ uint16_t *p_state_len)
+{
+ int block_size;
+ uint8_t ipad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
+ uint8_t opad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
+ int i;
+
+ if (hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) {
+ static uint8_t qat_aes_xcbc_key_seed[
+ ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = {
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
+ 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
+ };
+
+ uint8_t *in = NULL;
+ uint8_t *out = p_state_buf;
+ int x;
+ AES_KEY enc_key;
+
+ in = rte_zmalloc("working mem for key",
+ ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ, 16);
+ if (in == NULL) {
+ QAT_LOG(ERR, "Failed to alloc memory");
+ return -ENOMEM;
+ }
+
+ rte_memcpy(in, qat_aes_xcbc_key_seed,
+ ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
+ for (x = 0; x < HASH_XCBC_PRECOMP_KEY_NUM; x++) {
+ if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
+ &enc_key) != 0) {
+ rte_free(in -
+ (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ));
+ memset(out -
+ (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ),
+ 0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
+ return -EFAULT;
+ }
+ AES_encrypt(in, out, &enc_key);
+ in += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
+ out += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
+ }
+ *p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
+ rte_free(in - x*ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ);
+ return 0;
+ } else if ((hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
+ (hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
+ uint8_t *in = NULL;
+ uint8_t *out = p_state_buf;
+ AES_KEY enc_key;
+
+ memset(p_state_buf, 0, ICP_QAT_HW_GALOIS_H_SZ +
+ ICP_QAT_HW_GALOIS_LEN_A_SZ +
+ ICP_QAT_HW_GALOIS_E_CTR0_SZ);
+ in = rte_zmalloc("working mem for key",
+ ICP_QAT_HW_GALOIS_H_SZ, 16);
+ if (in == NULL) {
+ QAT_LOG(ERR, "Failed to alloc memory");
+ return -ENOMEM;
+ }
+
+ memset(in, 0, ICP_QAT_HW_GALOIS_H_SZ);
+ if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
+ &enc_key) != 0) {
+ /* free the working buffer so it is not leaked on failure */
+ rte_free(in);
+ return -EFAULT;
+ }
+ AES_encrypt(in, out, &enc_key);
+ *p_state_len = ICP_QAT_HW_GALOIS_H_SZ +
+ ICP_QAT_HW_GALOIS_LEN_A_SZ +
+ ICP_QAT_HW_GALOIS_E_CTR0_SZ;
+ rte_free(in);
+ return 0;
+ }
+
+ block_size = qat_hash_get_block_size(hash_alg);
+ if (block_size <= 0)
+ return -EFAULT;
+ /* init ipad and opad from key and xor with fixed values */
+ memset(ipad, 0, block_size);
+ memset(opad, 0, block_size);
+
+ if (auth_keylen > (unsigned int)block_size) {
+ QAT_LOG(ERR, "invalid keylen %u", auth_keylen);
+ return -EFAULT;
+ }
+ rte_memcpy(ipad, auth_key, auth_keylen);
+ rte_memcpy(opad, auth_key, auth_keylen);
+
+ for (i = 0; i < block_size; i++) {
+ uint8_t *ipad_ptr = ipad + i;
+ uint8_t *opad_ptr = opad + i;
+ *ipad_ptr ^= HMAC_IPAD_VALUE;
+ *opad_ptr ^= HMAC_OPAD_VALUE;
+ }
+
+ /* do partial hash of ipad and copy to state1 */
+ if (partial_hash_compute(hash_alg, ipad, p_state_buf)) {
+ memset(ipad, 0, block_size);
+ memset(opad, 0, block_size);
+ QAT_LOG(ERR, "ipad precompute failed");
+ return -EFAULT;
+ }
+
+ /*
+ * State len is a multiple of 8, so may be larger than the digest.
+ * Put the partial hash of opad state_len bytes after state1
+ */
+ *p_state_len = qat_hash_get_state1_size(hash_alg);
+ if (partial_hash_compute(hash_alg, opad, p_state_buf + *p_state_len)) {
+ memset(ipad, 0, block_size);
+ memset(opad, 0, block_size);
+ QAT_LOG(ERR, "opad precompute failed");
+ return -EFAULT;
+ }
+
+ /* don't leave data lying around */
+ memset(ipad, 0, block_size);
+ memset(opad, 0, block_size);
+ return 0;
+}
+
+static void
+qat_sym_session_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
+ enum qat_sym_proto_flag proto_flags)
+{
+ header->hdr_flags =
+ ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
+ header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
+ header->comn_req_flags =
+ ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
+ QAT_COMN_PTR_TYPE_FLAT);
+ ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_PARTIAL_NONE);
+ ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
+ ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
+
+ switch (proto_flags) {
+ case QAT_CRYPTO_PROTO_FLAG_NONE:
+ ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_NO_PROTO);
+ break;
+ case QAT_CRYPTO_PROTO_FLAG_CCM:
+ ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_CCM_PROTO);
+ break;
+ case QAT_CRYPTO_PROTO_FLAG_GCM:
+ ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_GCM_PROTO);
+ break;
+ case QAT_CRYPTO_PROTO_FLAG_SNOW3G:
+ ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_SNOW_3G_PROTO);
+ break;
+ case QAT_CRYPTO_PROTO_FLAG_ZUC:
+ ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_ZUC_3G_PROTO);
+ break;
+ }
+
+ ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_NO_UPDATE_STATE);
+ ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
+}
+
+/*
+ * SNOW 3G and ZUC should never use this function; they set their protocol
+ * flag directly in both the cipher and auth parts of the content
+ * descriptor building functions.
+ */
+static enum qat_sym_proto_flag
+qat_get_crypto_proto_flag(uint16_t flags)
+{
+ int proto = ICP_QAT_FW_LA_PROTO_GET(flags);
+ enum qat_sym_proto_flag qat_proto_flag =
+ QAT_CRYPTO_PROTO_FLAG_NONE;
+
+ switch (proto) {
+ case ICP_QAT_FW_LA_GCM_PROTO:
+ qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM;
+ break;
+ case ICP_QAT_FW_LA_CCM_PROTO:
+ qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_CCM;
+ break;
+ }
+
+ return qat_proto_flag;
+}
+
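+/*
+ * Builds the cipher part of the content descriptor: the hardware cipher
+ * config word followed by the (possibly padded) cipher key, plus the
+ * cipher control-header fields (key size, state size, config offset).
+ */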
+int qat_sym_session_aead_create_cd_cipher(struct qat_sym_session *cdesc,
+ uint8_t *cipherkey,
+ uint32_t cipherkeylen)
+{
+ struct icp_qat_hw_cipher_algo_blk *cipher;
+ struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
+ struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
+ struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+ void *ptr = &req_tmpl->cd_ctrl;
+ struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
+ struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
+ enum icp_qat_hw_cipher_convert key_convert;
+ enum qat_sym_proto_flag qat_proto_flag =
+ QAT_CRYPTO_PROTO_FLAG_NONE;
+ uint32_t total_key_size;
+ uint16_t cipher_offset, cd_size;
+ uint32_t wordIndex = 0;
+ uint32_t *temp_key = NULL;
+
+ if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
+ cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
+ ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
+ ICP_QAT_FW_SLICE_CIPHER);
+ ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
+ ICP_QAT_FW_SLICE_DRAM_WR);
+ ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_NO_RET_AUTH_RES);
+ ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
+ cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
+ } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
+ cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
+ ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
+ ICP_QAT_FW_SLICE_CIPHER);
+ ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
+ ICP_QAT_FW_SLICE_AUTH);
+ ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
+ ICP_QAT_FW_SLICE_AUTH);
+ ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
+ ICP_QAT_FW_SLICE_DRAM_WR);
+ cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
+ } else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
+ QAT_LOG(ERR, "Invalid param, must be a cipher command.");
+ return -EFAULT;
+ }
+
+ if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
+ /*
+ * CTR streaming ciphers are a special case: decrypt == encrypt.
+ * Override the default values set previously.
+ */
+ cdesc->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
+ key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
+ } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2
+ || cdesc->qat_cipher_alg ==
+ ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)
+ key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
+ else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT)
+ key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
+ else
+ key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
+
+ if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+ total_key_size = ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ +
+ ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
+ cipher_cd_ctrl->cipher_state_sz =
+ ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
+ qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;
+
+ } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
+ total_key_size = ICP_QAT_HW_KASUMI_F8_KEY_SZ;
+ cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_KASUMI_BLK_SZ >> 3;
+ cipher_cd_ctrl->cipher_padding_sz =
+ (2 * ICP_QAT_HW_KASUMI_BLK_SZ) >> 3;
+ } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES) {
+ total_key_size = ICP_QAT_HW_3DES_KEY_SZ;
+ cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_3DES_BLK_SZ >> 3;
+ qat_proto_flag =
+ qat_get_crypto_proto_flag(header->serv_specif_flags);
+ } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_DES) {
+ total_key_size = ICP_QAT_HW_DES_KEY_SZ;
+ cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_DES_BLK_SZ >> 3;
+ qat_proto_flag =
+ qat_get_crypto_proto_flag(header->serv_specif_flags);
+ } else if (cdesc->qat_cipher_alg ==
+ ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+ total_key_size = ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ +
+ ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
+ cipher_cd_ctrl->cipher_state_sz =
+ ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
+ qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
+ cdesc->min_qat_dev_gen = QAT_GEN2;
+ } else {
+ total_key_size = cipherkeylen;
+ cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
+ qat_proto_flag =
+ qat_get_crypto_proto_flag(header->serv_specif_flags);
+ }
+ cipher_cd_ctrl->cipher_key_sz = total_key_size >> 3;
+ cipher_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
+ cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;
+
+ header->service_cmd_id = cdesc->qat_cmd;
+ qat_sym_session_init_common_hdr(header, qat_proto_flag);
+
+ cipher = (struct icp_qat_hw_cipher_algo_blk *)cdesc->cd_cur_ptr;
+ cipher->cipher_config.val =
+ ICP_QAT_HW_CIPHER_CONFIG_BUILD(cdesc->qat_mode,
+ cdesc->qat_cipher_alg, key_convert,
+ cdesc->qat_dir);
+
+ if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
+ temp_key = (uint32_t *)(cdesc->cd_cur_ptr +
+ sizeof(struct icp_qat_hw_cipher_config)
+ + cipherkeylen);
+ memcpy(cipher->key, cipherkey, cipherkeylen);
+ memcpy(temp_key, cipherkey, cipherkeylen);
+
+ /* XOR Key with KASUMI F8 key modifier at 4 bytes level */
+ for (wordIndex = 0; wordIndex < (cipherkeylen >> 2);
+ wordIndex++)
+ temp_key[wordIndex] ^= KASUMI_F8_KEY_MODIFIER_4_BYTES;
+
+ cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
+ cipherkeylen + cipherkeylen;
+ } else {
+ memcpy(cipher->key, cipherkey, cipherkeylen);
+ cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
+ cipherkeylen;
+ }
+
+ if (total_key_size > cipherkeylen) {
+ uint32_t padding_size = total_key_size-cipherkeylen;
+ if ((cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES)
+ && (cipherkeylen == QAT_3DES_KEY_SZ_OPT2)) {
+ /* K3 not provided, so use K1 = K3 */
+ memcpy(cdesc->cd_cur_ptr, cipherkey, padding_size);
+ } else if ((cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES)
+ && (cipherkeylen == QAT_3DES_KEY_SZ_OPT3)) {
+ /* K2 and K3 not provided, so use K1 = K2 = K3 */
+ memcpy(cdesc->cd_cur_ptr, cipherkey,
+ cipherkeylen);
+ memcpy(cdesc->cd_cur_ptr+cipherkeylen,
+ cipherkey, cipherkeylen);
+ } else
+ memset(cdesc->cd_cur_ptr, 0, padding_size);
+
+ cdesc->cd_cur_ptr += padding_size;
+ }
+ cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;
+ cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;
+
+ return 0;
+}
+
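+/*
+ * Builds the auth part of the content descriptor: the hardware auth setup
+ * block followed by the state1 and state2 blocks (pre-computed hash state,
+ * derived keys, the GCM hash subkey or raw keys, depending on the
+ * algorithm), plus the matching auth control-header fields.
+ */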
+int qat_sym_session_aead_create_cd_auth(struct qat_sym_session *cdesc,
+ uint8_t *authkey,
+ uint32_t authkeylen,
+ uint32_t aad_length,
+ uint32_t digestsize,
+ unsigned int operation)
+{
+ struct icp_qat_hw_auth_setup *hash;
+ struct icp_qat_hw_cipher_algo_blk *cipherconfig;
+ struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
+ struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
+ struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+ void *ptr = &req_tmpl->cd_ctrl;
+ struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
+ struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
+ struct icp_qat_fw_la_auth_req_params *auth_param =
+ (struct icp_qat_fw_la_auth_req_params *)
+ ((char *)&req_tmpl->serv_specif_rqpars +
+ sizeof(struct icp_qat_fw_la_cipher_req_params));
+ uint16_t state1_size = 0, state2_size = 0;
+ uint16_t hash_offset, cd_size;
+ uint32_t *aad_len = NULL;
+ uint32_t wordIndex = 0;
+ uint32_t *pTempKey;
+ enum qat_sym_proto_flag qat_proto_flag =
+ QAT_CRYPTO_PROTO_FLAG_NONE;
+
+ if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
+ ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
+ ICP_QAT_FW_SLICE_AUTH);
+ ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
+ ICP_QAT_FW_SLICE_DRAM_WR);
+ cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
+ } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
+ ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
+ ICP_QAT_FW_SLICE_AUTH);
+ ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
+ ICP_QAT_FW_SLICE_CIPHER);
+ ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
+ ICP_QAT_FW_SLICE_CIPHER);
+ ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
+ ICP_QAT_FW_SLICE_DRAM_WR);
+ cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
+ } else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
+ QAT_LOG(ERR, "Invalid param, must be a hash command.");
+ return -EFAULT;
+ }
+
+ if (operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
+ ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_NO_RET_AUTH_RES);
+ ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_CMP_AUTH_RES);
+ cdesc->auth_op = ICP_QAT_HW_AUTH_VERIFY;
+ } else {
+ ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_RET_AUTH_RES);
+ ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
+ cdesc->auth_op = ICP_QAT_HW_AUTH_GENERATE;
+ }
+
+ /*
+ * Setup the inner hash config
+ */
+ hash_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
+ hash = (struct icp_qat_hw_auth_setup *)cdesc->cd_cur_ptr;
+ hash->auth_config.reserved = 0;
+ hash->auth_config.config =
+ ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
+ cdesc->qat_hash_alg, digestsize);
+
+ if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2
+ || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9
+ || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3)
+ hash->auth_counter.counter = 0;
+ else
+ hash->auth_counter.counter = rte_bswap32(
+ qat_hash_get_block_size(cdesc->qat_hash_alg));
+
+ cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_auth_setup);
+
+ /*
+ * cd_cur_ptr now points at the state1 information.
+ */
+ switch (cdesc->qat_hash_alg) {
+ case ICP_QAT_HW_AUTH_ALGO_SHA1:
+ if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA1,
+ authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
+ QAT_LOG(ERR, "(SHA)precompute failed");
+ return -EFAULT;
+ }
+ state2_size = RTE_ALIGN_CEIL(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_SHA224:
+ if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA224,
+ authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
+ QAT_LOG(ERR, "(SHA)precompute failed");
+ return -EFAULT;
+ }
+ state2_size = ICP_QAT_HW_SHA224_STATE2_SZ;
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_SHA256:
+ if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA256,
+ authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
+ QAT_LOG(ERR, "(SHA)precompute failed");
+ return -EFAULT;
+ }
+ state2_size = ICP_QAT_HW_SHA256_STATE2_SZ;
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_SHA384:
+ if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA384,
+ authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
+ QAT_LOG(ERR, "(SHA)precompute failed");
+ return -EFAULT;
+ }
+ state2_size = ICP_QAT_HW_SHA384_STATE2_SZ;
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_SHA512:
+ if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA512,
+ authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
+ QAT_LOG(ERR, "(SHA)precompute failed");
+ return -EFAULT;
+ }
+ state2_size = ICP_QAT_HW_SHA512_STATE2_SZ;
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
+ state1_size = ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
+ if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC,
+ authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
+ &state2_size)) {
+ QAT_LOG(ERR, "(XCBC)precompute failed");
+ return -EFAULT;
+ }
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+ qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM;
+ state1_size = ICP_QAT_HW_GALOIS_128_STATE1_SZ;
+ if (qat_sym_do_precomputes(cdesc->qat_hash_alg,
+ authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
+ &state2_size)) {
+ QAT_LOG(ERR, "(GCM)precompute failed");
+ return -EFAULT;
+ }
+ /*
+ * Write the AAD length into bytes 16-19 of state2
+ * in big-endian format. This field is 8 bytes.
+ */
+ auth_param->u2.aad_sz =
+ RTE_ALIGN_CEIL(aad_length, 16);
+ auth_param->hash_state_sz = (auth_param->u2.aad_sz) >> 3;
+
+ aad_len = (uint32_t *)(cdesc->cd_cur_ptr +
+ ICP_QAT_HW_GALOIS_128_STATE1_SZ +
+ ICP_QAT_HW_GALOIS_H_SZ);
+ *aad_len = rte_bswap32(aad_length);
+ cdesc->aad_len = aad_length;
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
+ qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;
+ state1_size = qat_hash_get_state1_size(
+ ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2);
+ state2_size = ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ;
+ memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);
+
+ cipherconfig = (struct icp_qat_hw_cipher_algo_blk *)
+ (cdesc->cd_cur_ptr + state1_size + state2_size);
+ cipherconfig->cipher_config.val =
+ ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_ECB_MODE,
+ ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2,
+ ICP_QAT_HW_CIPHER_KEY_CONVERT,
+ ICP_QAT_HW_CIPHER_ENCRYPT);
+ memcpy(cipherconfig->key, authkey, authkeylen);
+ memset(cipherconfig->key + authkeylen,
+ 0, ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ);
+ cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
+ authkeylen + ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
+ auth_param->hash_state_sz = ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
+ hash->auth_config.config =
+ ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE0,
+ cdesc->qat_hash_alg, digestsize);
+ qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
+ state1_size = qat_hash_get_state1_size(
+ ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3);
+ state2_size = ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ;
+ memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size
+ + ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ);
+
+ memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
+ cdesc->cd_cur_ptr += state1_size + state2_size
+ + ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
+ auth_param->hash_state_sz = ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
+ cdesc->min_qat_dev_gen = QAT_GEN2;
+
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_MD5:
+ if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_MD5,
+ authkey, authkeylen, cdesc->cd_cur_ptr,
+ &state1_size)) {
+ QAT_LOG(ERR, "(MD5)precompute failed");
+ return -EFAULT;
+ }
+ state2_size = ICP_QAT_HW_MD5_STATE2_SZ;
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_NULL:
+ state1_size = qat_hash_get_state1_size(
+ ICP_QAT_HW_AUTH_ALGO_NULL);
+ state2_size = ICP_QAT_HW_NULL_STATE2_SZ;
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
+ qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_CCM;
+ state1_size = qat_hash_get_state1_size(
+ ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC);
+ state2_size = ICP_QAT_HW_AES_CBC_MAC_KEY_SZ +
+ ICP_QAT_HW_AES_CCM_CBC_E_CTR0_SZ;
+
+ if (aad_length > 0) {
+ aad_length += ICP_QAT_HW_CCM_AAD_B0_LEN +
+ ICP_QAT_HW_CCM_AAD_LEN_INFO;
+ auth_param->u2.aad_sz =
+ RTE_ALIGN_CEIL(aad_length,
+ ICP_QAT_HW_CCM_AAD_ALIGNMENT);
+ } else {
+ auth_param->u2.aad_sz = ICP_QAT_HW_CCM_AAD_B0_LEN;
+ }
+ cdesc->aad_len = aad_length;
+ hash->auth_counter.counter = 0;
+
+ hash_cd_ctrl->outer_prefix_sz = digestsize;
+ auth_param->hash_state_sz = digestsize;
+
+ memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
+ state1_size = qat_hash_get_state1_size(
+ ICP_QAT_HW_AUTH_ALGO_KASUMI_F9);
+ state2_size = ICP_QAT_HW_KASUMI_F9_STATE2_SZ;
+ memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);
+ pTempKey = (uint32_t *)(cdesc->cd_cur_ptr + state1_size
+ + authkeylen);
+ /*
+ * The Inner Hash Initial State2 block must contain IK
+ * (Initialisation Key), followed by IK XOR-ed with KM
+ * (Key Modifier): IK||(IK^KM).
+ */
+ /* write the auth key */
+ memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
+ /* initialise temp key with auth key */
+ memcpy(pTempKey, authkey, authkeylen);
+ /* XOR Key with KASUMI F9 key modifier at 4 bytes level */
+ for (wordIndex = 0; wordIndex < (authkeylen >> 2); wordIndex++)
+ pTempKey[wordIndex] ^= KASUMI_F9_KEY_MODIFIER_4_BYTES;
+ break;
+ default:
+ QAT_LOG(ERR, "Invalid HASH alg %u", cdesc->qat_hash_alg);
+ return -EFAULT;
+ }
+
+ /* Request template setup */
+ qat_sym_session_init_common_hdr(header, qat_proto_flag);
+ header->service_cmd_id = cdesc->qat_cmd;
+
+ /* Auth CD config setup */
+ hash_cd_ctrl->hash_cfg_offset = hash_offset >> 3;
+ hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
+ hash_cd_ctrl->inner_res_sz = digestsize;
+ hash_cd_ctrl->final_sz = digestsize;
+ hash_cd_ctrl->inner_state1_sz = state1_size;
+ auth_param->auth_res_sz = digestsize;
+
+ hash_cd_ctrl->inner_state2_sz = state2_size;
+ hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
+ ((sizeof(struct icp_qat_hw_auth_setup) +
+ RTE_ALIGN_CEIL(hash_cd_ctrl->inner_state1_sz, 8))
+ >> 3);
+
+ cdesc->cd_cur_ptr += state1_size + state2_size;
+ cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;
+
+ cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
+ cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;
+
+ return 0;
+}
+
+int qat_sym_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
+{
+ switch (key_len) {
+ case ICP_QAT_HW_AES_128_KEY_SZ:
+ *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
+ break;
+ case ICP_QAT_HW_AES_192_KEY_SZ:
+ *alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
+ break;
+ case ICP_QAT_HW_AES_256_KEY_SZ:
+ *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int qat_sym_validate_aes_docsisbpi_key(int key_len,
+ enum icp_qat_hw_cipher_algo *alg)
+{
+ switch (key_len) {
+ case ICP_QAT_HW_AES_128_KEY_SZ:
+ *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int qat_sym_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
+{
+ switch (key_len) {
+ case ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ:
+ *alg = ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int qat_sym_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
+{
+ switch (key_len) {
+ case ICP_QAT_HW_KASUMI_KEY_SZ:
+ *alg = ICP_QAT_HW_CIPHER_ALGO_KASUMI;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int qat_sym_validate_des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
+{
+ switch (key_len) {
+ case ICP_QAT_HW_DES_KEY_SZ:
+ *alg = ICP_QAT_HW_CIPHER_ALGO_DES;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int qat_sym_validate_3des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
+{
+ switch (key_len) {
+ case QAT_3DES_KEY_SZ_OPT1:
+ case QAT_3DES_KEY_SZ_OPT2:
+ case QAT_3DES_KEY_SZ_OPT3:
+ *alg = ICP_QAT_HW_CIPHER_ALGO_3DES;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int qat_sym_validate_zuc_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
+{
+ switch (key_len) {
+ case ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ:
+ *alg = ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
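+
+/*
+ * Typical use of the validators above (a sketch; "cipher_xform" and "session"
+ * are hypothetical names for the caller's cipher transform and the QAT
+ * session being built):
+ *
+ *   if (qat_sym_validate_aes_key(cipher_xform->key.length,
+ *           &session->qat_cipher_alg) != 0) {
+ *       QAT_LOG(ERR, "Invalid AES cipher key size");
+ *       return -EINVAL;
+ *   }
+ */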
diff --git a/src/spdk/dpdk/drivers/crypto/qat/qat_sym_session.h b/src/spdk/dpdk/drivers/crypto/qat/qat_sym_session.h
new file mode 100644
index 00000000..e8f51e5b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/qat/qat_sym_session.h
@@ -0,0 +1,145 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+#ifndef _QAT_SYM_SESSION_H_
+#define _QAT_SYM_SESSION_H_
+
+#include <rte_crypto.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "qat_common.h"
+#include "icp_qat_hw.h"
+#include "icp_qat_fw.h"
+#include "icp_qat_fw_la.h"
+
+/*
+ * Key Modifier (KM) value used in KASUMI algorithm in F9 mode to XOR
+ * Integrity Key (IK)
+ */
+#define KASUMI_F9_KEY_MODIFIER_4_BYTES 0xAAAAAAAA
+
+#define KASUMI_F8_KEY_MODIFIER_4_BYTES 0x55555555
+
+/* 3DES key sizes */
+#define QAT_3DES_KEY_SZ_OPT1 24 /* Keys are independent */
+#define QAT_3DES_KEY_SZ_OPT2 16 /* K3=K1 */
+#define QAT_3DES_KEY_SZ_OPT3 8 /* K1=K2=K3 */
+
+
+#define QAT_AES_HW_CONFIG_CBC_ENC(alg) \
+ ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
+ ICP_QAT_HW_CIPHER_NO_CONVERT, \
+ ICP_QAT_HW_CIPHER_ENCRYPT)
+
+#define QAT_AES_HW_CONFIG_CBC_DEC(alg) \
+ ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
+ ICP_QAT_HW_CIPHER_KEY_CONVERT, \
+ ICP_QAT_HW_CIPHER_DECRYPT)
+
+enum qat_sym_proto_flag {
+ QAT_CRYPTO_PROTO_FLAG_NONE = 0,
+ QAT_CRYPTO_PROTO_FLAG_CCM = 1,
+ QAT_CRYPTO_PROTO_FLAG_GCM = 2,
+ QAT_CRYPTO_PROTO_FLAG_SNOW3G = 3,
+ QAT_CRYPTO_PROTO_FLAG_ZUC = 4
+};
+
+/* Common content descriptor */
+struct qat_sym_cd {
+ struct icp_qat_hw_cipher_algo_blk cipher;
+ struct icp_qat_hw_auth_algo_blk hash;
+} __rte_packed __rte_cache_aligned;
+
+struct qat_sym_session {
+ enum icp_qat_fw_la_cmd_id qat_cmd;
+ enum icp_qat_hw_cipher_algo qat_cipher_alg;
+ enum icp_qat_hw_cipher_dir qat_dir;
+ enum icp_qat_hw_cipher_mode qat_mode;
+ enum icp_qat_hw_auth_algo qat_hash_alg;
+ enum icp_qat_hw_auth_op auth_op;
+ void *bpi_ctx;
+ struct qat_sym_cd cd;
+ uint8_t *cd_cur_ptr;
+ phys_addr_t cd_paddr;
+ struct icp_qat_fw_la_bulk_req fw_req;
+ uint8_t aad_len;
+ struct qat_crypto_instance *inst;
+ struct {
+ uint16_t offset;
+ uint16_t length;
+ } cipher_iv;
+ struct {
+ uint16_t offset;
+ uint16_t length;
+ } auth_iv;
+ uint16_t digest_length;
+ rte_spinlock_t lock; /* protects this struct */
+ enum qat_device_gen min_qat_dev_gen;
+};
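+
+/*
+ * Note: the content descriptor above is referenced by physical address
+ * (cd_paddr) from the firmware request template, while cd_cur_ptr tracks how
+ * far the cipher/hash setup blocks have been filled in as the session is
+ * built.
+ */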
+
+int
+qat_sym_session_configure(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mempool);
+
+int
+qat_sym_session_set_parameters(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform, void *session_private);
+
+int
+qat_sym_session_configure_aead(struct rte_crypto_sym_xform *xform,
+ struct qat_sym_session *session);
+
+int
+qat_sym_session_configure_cipher(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct qat_sym_session *session);
+
+int
+qat_sym_session_configure_auth(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct qat_sym_session *session);
+
+int
+qat_sym_session_aead_create_cd_cipher(struct qat_sym_session *cd,
+ uint8_t *enckey,
+ uint32_t enckeylen);
+
+int
+qat_sym_session_aead_create_cd_auth(struct qat_sym_session *cdesc,
+ uint8_t *authkey,
+ uint32_t authkeylen,
+ uint32_t aad_length,
+ uint32_t digestsize,
+ unsigned int operation);
+
+void
+qat_sym_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *session);
+
+unsigned int
+qat_sym_session_get_private_size(struct rte_cryptodev *dev);
+
+void
+qat_sym_session_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
+ enum qat_sym_proto_flag proto_flags);
+int
+qat_sym_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
+int
+qat_sym_validate_aes_docsisbpi_key(int key_len,
+ enum icp_qat_hw_cipher_algo *alg);
+int
+qat_sym_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
+int
+qat_sym_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
+int
+qat_sym_validate_3des_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
+int
+qat_sym_validate_des_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
+int
+qat_cipher_get_block_size(enum icp_qat_hw_cipher_algo qat_cipher_alg);
+int
+qat_sym_validate_zuc_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
+
+#endif /* _QAT_SYM_SESSION_H_ */
diff --git a/src/spdk/dpdk/drivers/crypto/scheduler/Makefile b/src/spdk/dpdk/drivers/crypto/scheduler/Makefile
new file mode 100644
index 00000000..a9514e33
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/scheduler/Makefile
@@ -0,0 +1,37 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_pmd_crypto_scheduler.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_cryptodev -lrte_kvargs -lrte_reorder
+LDLIBS += -lrte_bus_vdev
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_pmd_crypto_scheduler_version.map
+
+#
+# Export include files
+#
+SYMLINK-y-include += rte_cryptodev_scheduler_operations.h
+SYMLINK-y-include += rte_cryptodev_scheduler.h
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += scheduler_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += scheduler_pmd_ops.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += rte_cryptodev_scheduler.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += scheduler_roundrobin.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += scheduler_pkt_size_distr.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += scheduler_failover.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += scheduler_multicore.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/crypto/scheduler/rte_cryptodev_scheduler.c b/src/spdk/dpdk/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
new file mode 100644
index 00000000..6e4919c4
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
@@ -0,0 +1,584 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+#include <rte_reorder.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_malloc.h>
+
+#include "rte_cryptodev_scheduler.h"
+#include "scheduler_pmd_private.h"
+
+int scheduler_logtype_driver;
+
+/** Update the scheduler PMD's capabilities with the attached device's
+ * capabilities.
+ * For each device attached, the scheduler's capabilities are reduced to
+ * the common capability set of all slaves.
+ **/
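+/*
+ * For example (illustrative values): if one slave reports an HMAC-SHA1
+ * digest size range of 12..20 bytes and another reports 12..16 bytes, the
+ * merged capability advertises 12..16 bytes; an algorithm supported by only
+ * one slave is dropped from the scheduler's capability list entirely.
+ */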
+static uint32_t
+sync_caps(struct rte_cryptodev_capabilities *caps,
+ uint32_t nb_caps,
+ const struct rte_cryptodev_capabilities *slave_caps)
+{
+ uint32_t sync_nb_caps = nb_caps, nb_slave_caps = 0;
+ uint32_t i;
+
+ while (slave_caps[nb_slave_caps].op != RTE_CRYPTO_OP_TYPE_UNDEFINED)
+ nb_slave_caps++;
+
+ if (nb_caps == 0) {
+ rte_memcpy(caps, slave_caps, sizeof(*caps) * nb_slave_caps);
+ return nb_slave_caps;
+ }
+
+ for (i = 0; i < sync_nb_caps; i++) {
+ struct rte_cryptodev_capabilities *cap = &caps[i];
+ uint32_t j;
+
+ for (j = 0; j < nb_slave_caps; j++) {
+ const struct rte_cryptodev_capabilities *s_cap =
+ &slave_caps[j];
+
+ if (s_cap->op != cap->op || s_cap->sym.xform_type !=
+ cap->sym.xform_type)
+ continue;
+
+ if (s_cap->sym.xform_type ==
+ RTE_CRYPTO_SYM_XFORM_AUTH) {
+ if (s_cap->sym.auth.algo !=
+ cap->sym.auth.algo)
+ continue;
+
+ cap->sym.auth.digest_size.min =
+ s_cap->sym.auth.digest_size.min <
+ cap->sym.auth.digest_size.min ?
+ s_cap->sym.auth.digest_size.min :
+ cap->sym.auth.digest_size.min;
+ cap->sym.auth.digest_size.max =
+ s_cap->sym.auth.digest_size.max <
+ cap->sym.auth.digest_size.max ?
+ s_cap->sym.auth.digest_size.max :
+ cap->sym.auth.digest_size.max;
+
+ }
+
+ if (s_cap->sym.xform_type ==
+ RTE_CRYPTO_SYM_XFORM_CIPHER)
+ if (s_cap->sym.cipher.algo !=
+ cap->sym.cipher.algo)
+ continue;
+
+ /* no common cap found */
+ break;
+ }
+
+ if (j < nb_slave_caps)
+ continue;
+
+ /* remove an uncommon cap from the array */
+ for (j = i; j < sync_nb_caps - 1; j++)
+ rte_memcpy(&caps[j], &caps[j+1], sizeof(*cap));
+
+ memset(&caps[sync_nb_caps - 1], 0, sizeof(*cap));
+ sync_nb_caps--;
+ }
+
+ return sync_nb_caps;
+}
+
+static int
+update_scheduler_capability(struct scheduler_ctx *sched_ctx)
+{
+ struct rte_cryptodev_capabilities tmp_caps[256] = { {0} };
+ uint32_t nb_caps = 0, i;
+
+ if (sched_ctx->capabilities) {
+ rte_free(sched_ctx->capabilities);
+ sched_ctx->capabilities = NULL;
+ }
+
+ for (i = 0; i < sched_ctx->nb_slaves; i++) {
+ struct rte_cryptodev_info dev_info;
+
+ rte_cryptodev_info_get(sched_ctx->slaves[i].dev_id, &dev_info);
+
+ nb_caps = sync_caps(tmp_caps, nb_caps, dev_info.capabilities);
+ if (nb_caps == 0)
+ return -1;
+ }
+
+ sched_ctx->capabilities = rte_zmalloc_socket(NULL,
+ sizeof(struct rte_cryptodev_capabilities) *
+ (nb_caps + 1), 0, SOCKET_ID_ANY);
+ if (!sched_ctx->capabilities)
+ return -ENOMEM;
+
+ rte_memcpy(sched_ctx->capabilities, tmp_caps,
+ sizeof(struct rte_cryptodev_capabilities) * nb_caps);
+
+ return 0;
+}
+
+static void
+update_scheduler_feature_flag(struct rte_cryptodev *dev)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ uint32_t i;
+
+ dev->feature_flags = 0;
+
+ for (i = 0; i < sched_ctx->nb_slaves; i++) {
+ struct rte_cryptodev_info dev_info;
+
+ rte_cryptodev_info_get(sched_ctx->slaves[i].dev_id, &dev_info);
+
+ dev->feature_flags |= dev_info.feature_flags;
+ }
+}
+
+static void
+update_max_nb_qp(struct scheduler_ctx *sched_ctx)
+{
+ uint32_t i;
+ uint32_t max_nb_qp;
+
+ if (!sched_ctx->nb_slaves)
+ return;
+
+ max_nb_qp = sched_ctx->nb_slaves ? UINT32_MAX : 0;
+
+ for (i = 0; i < sched_ctx->nb_slaves; i++) {
+ struct rte_cryptodev_info dev_info;
+
+ rte_cryptodev_info_get(sched_ctx->slaves[i].dev_id, &dev_info);
+ max_nb_qp = dev_info.max_nb_queue_pairs < max_nb_qp ?
+ dev_info.max_nb_queue_pairs : max_nb_qp;
+ }
+
+ sched_ctx->max_nb_queue_pairs = max_nb_qp;
+}
+
+/** Attach a device to the scheduler. */
+int
+rte_cryptodev_scheduler_slave_attach(uint8_t scheduler_id, uint8_t slave_id)
+{
+ struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
+ struct scheduler_ctx *sched_ctx;
+ struct scheduler_slave *slave;
+ struct rte_cryptodev_info dev_info;
+ uint32_t i;
+
+ if (!dev) {
+ CR_SCHED_LOG(ERR, "Operation not supported");
+ return -ENOTSUP;
+ }
+
+ if (dev->driver_id != cryptodev_driver_id) {
+ CR_SCHED_LOG(ERR, "Operation not supported");
+ return -ENOTSUP;
+ }
+
+ if (dev->data->dev_started) {
+ CR_SCHED_LOG(ERR, "Illegal operation");
+ return -EBUSY;
+ }
+
+ sched_ctx = dev->data->dev_private;
+ if (sched_ctx->nb_slaves >=
+ RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES) {
+ CR_SCHED_LOG(ERR, "Too many slaves attached");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < sched_ctx->nb_slaves; i++)
+ if (sched_ctx->slaves[i].dev_id == slave_id) {
+ CR_SCHED_LOG(ERR, "Slave already added");
+ return -ENOTSUP;
+ }
+
+ slave = &sched_ctx->slaves[sched_ctx->nb_slaves];
+
+ rte_cryptodev_info_get(slave_id, &dev_info);
+
+ slave->dev_id = slave_id;
+ slave->driver_id = dev_info.driver_id;
+ sched_ctx->nb_slaves++;
+
+ if (update_scheduler_capability(sched_ctx) < 0) {
+ slave->dev_id = 0;
+ slave->driver_id = 0;
+ sched_ctx->nb_slaves--;
+
+ CR_SCHED_LOG(ERR, "capabilities update failed");
+ return -ENOTSUP;
+ }
+
+ update_scheduler_feature_flag(dev);
+
+ update_max_nb_qp(sched_ctx);
+
+ return 0;
+}
+
+int
+rte_cryptodev_scheduler_slave_detach(uint8_t scheduler_id, uint8_t slave_id)
+{
+ struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
+ struct scheduler_ctx *sched_ctx;
+ uint32_t i, slave_pos;
+
+ if (!dev) {
+ CR_SCHED_LOG(ERR, "Operation not supported");
+ return -ENOTSUP;
+ }
+
+ if (dev->driver_id != cryptodev_driver_id) {
+ CR_SCHED_LOG(ERR, "Operation not supported");
+ return -ENOTSUP;
+ }
+
+ if (dev->data->dev_started) {
+ CR_SCHED_LOG(ERR, "Illegal operation");
+ return -EBUSY;
+ }
+
+ sched_ctx = dev->data->dev_private;
+
+ for (slave_pos = 0; slave_pos < sched_ctx->nb_slaves; slave_pos++)
+ if (sched_ctx->slaves[slave_pos].dev_id == slave_id)
+ break;
+ if (slave_pos == sched_ctx->nb_slaves) {
+ CR_SCHED_LOG(ERR, "Cannot find slave");
+ return -ENOTSUP;
+ }
+
+ if (sched_ctx->ops.slave_detach(dev, slave_id) < 0) {
+ CR_SCHED_LOG(ERR, "Failed to detach slave");
+ return -ENOTSUP;
+ }
+
+ for (i = slave_pos; i < sched_ctx->nb_slaves - 1; i++) {
+ memcpy(&sched_ctx->slaves[i], &sched_ctx->slaves[i+1],
+ sizeof(struct scheduler_slave));
+ }
+ memset(&sched_ctx->slaves[sched_ctx->nb_slaves - 1], 0,
+ sizeof(struct scheduler_slave));
+ sched_ctx->nb_slaves--;
+
+ if (update_scheduler_capability(sched_ctx) < 0) {
+ CR_SCHED_LOG(ERR, "capabilities update failed");
+ return -ENOTSUP;
+ }
+
+ update_scheduler_feature_flag(dev);
+
+ update_max_nb_qp(sched_ctx);
+
+ return 0;
+}
+
+int
+rte_cryptodev_scheduler_mode_set(uint8_t scheduler_id,
+ enum rte_cryptodev_scheduler_mode mode)
+{
+ struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
+ struct scheduler_ctx *sched_ctx;
+
+ if (!dev) {
+ CR_SCHED_LOG(ERR, "Operation not supported");
+ return -ENOTSUP;
+ }
+
+ if (dev->driver_id != cryptodev_driver_id) {
+ CR_SCHED_LOG(ERR, "Operation not supported");
+ return -ENOTSUP;
+ }
+
+ if (dev->data->dev_started) {
+ CR_SCHED_LOG(ERR, "Illegal operation");
+ return -EBUSY;
+ }
+
+ sched_ctx = dev->data->dev_private;
+
+ if (mode == sched_ctx->mode)
+ return 0;
+
+ switch (mode) {
+ case CDEV_SCHED_MODE_ROUNDROBIN:
+ if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
+ roundrobin_scheduler) < 0) {
+ CR_SCHED_LOG(ERR, "Failed to load scheduler");
+ return -1;
+ }
+ break;
+ case CDEV_SCHED_MODE_PKT_SIZE_DISTR:
+ if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
+ pkt_size_based_distr_scheduler) < 0) {
+ CR_SCHED_LOG(ERR, "Failed to load scheduler");
+ return -1;
+ }
+ break;
+ case CDEV_SCHED_MODE_FAILOVER:
+ if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
+ failover_scheduler) < 0) {
+ CR_SCHED_LOG(ERR, "Failed to load scheduler");
+ return -1;
+ }
+ break;
+ case CDEV_SCHED_MODE_MULTICORE:
+ if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
+ multicore_scheduler) < 0) {
+ CR_SCHED_LOG(ERR, "Failed to load scheduler");
+ return -1;
+ }
+ break;
+ default:
+ CR_SCHED_LOG(ERR, "Not yet supported");
+ return -ENOTSUP;
+ }
+
+ return 0;
+}
+
+enum rte_cryptodev_scheduler_mode
+rte_cryptodev_scheduler_mode_get(uint8_t scheduler_id)
+{
+ struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
+ struct scheduler_ctx *sched_ctx;
+
+ if (!dev) {
+ CR_SCHED_LOG(ERR, "Operation not supported");
+ return -ENOTSUP;
+ }
+
+ if (dev->driver_id != cryptodev_driver_id) {
+ CR_SCHED_LOG(ERR, "Operation not supported");
+ return -ENOTSUP;
+ }
+
+ sched_ctx = dev->data->dev_private;
+
+ return sched_ctx->mode;
+}
+
+int
+rte_cryptodev_scheduler_ordering_set(uint8_t scheduler_id,
+ uint32_t enable_reorder)
+{
+ struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
+ struct scheduler_ctx *sched_ctx;
+
+ if (!dev) {
+ CR_SCHED_LOG(ERR, "Operation not supported");
+ return -ENOTSUP;
+ }
+
+ if (dev->driver_id != cryptodev_driver_id) {
+ CR_SCHED_LOG(ERR, "Operation not supported");
+ return -ENOTSUP;
+ }
+
+ if (dev->data->dev_started) {
+ CR_SCHED_LOG(ERR, "Illegal operation");
+ return -EBUSY;
+ }
+
+ sched_ctx = dev->data->dev_private;
+
+ sched_ctx->reordering_enabled = enable_reorder;
+
+ return 0;
+}
+
+int
+rte_cryptodev_scheduler_ordering_get(uint8_t scheduler_id)
+{
+ struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
+ struct scheduler_ctx *sched_ctx;
+
+ if (!dev) {
+ CR_SCHED_LOG(ERR, "Operation not supported");
+ return -ENOTSUP;
+ }
+
+ if (dev->driver_id != cryptodev_driver_id) {
+ CR_SCHED_LOG(ERR, "Operation not supported");
+ return -ENOTSUP;
+ }
+
+ sched_ctx = dev->data->dev_private;
+
+ return (int)sched_ctx->reordering_enabled;
+}
+
+int
+rte_cryptodev_scheduler_load_user_scheduler(uint8_t scheduler_id,
+ struct rte_cryptodev_scheduler *scheduler) {
+
+ struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
+ struct scheduler_ctx *sched_ctx;
+
+ if (!dev) {
+ CR_SCHED_LOG(ERR, "Operation not supported");
+ return -ENOTSUP;
+ }
+
+ if (dev->driver_id != cryptodev_driver_id) {
+ CR_SCHED_LOG(ERR, "Operation not supported");
+ return -ENOTSUP;
+ }
+
+ if (dev->data->dev_started) {
+ CR_SCHED_LOG(ERR, "Illegal operation");
+ return -EBUSY;
+ }
+
+ sched_ctx = dev->data->dev_private;
+
+ if (strlen(scheduler->name) > RTE_CRYPTODEV_NAME_MAX_LEN - 1) {
+ CR_SCHED_LOG(ERR, "Invalid name %s, should be less than "
+ "%u bytes.", scheduler->name,
+ RTE_CRYPTODEV_NAME_MAX_LEN);
+ return -EINVAL;
+ }
+ snprintf(sched_ctx->name, sizeof(sched_ctx->name), "%s",
+ scheduler->name);
+
+ if (strlen(scheduler->description) >
+ RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN - 1) {
+ CR_SCHED_LOG(ERR, "Invalid description %s, should be less than "
+ "%u bytes.", scheduler->description,
+ RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN - 1);
+ return -EINVAL;
+ }
+ snprintf(sched_ctx->description, sizeof(sched_ctx->description), "%s",
+ scheduler->description);
+
+ /* load scheduler instance operations functions */
+ sched_ctx->ops.config_queue_pair = scheduler->ops->config_queue_pair;
+ sched_ctx->ops.create_private_ctx = scheduler->ops->create_private_ctx;
+ sched_ctx->ops.scheduler_start = scheduler->ops->scheduler_start;
+ sched_ctx->ops.scheduler_stop = scheduler->ops->scheduler_stop;
+ sched_ctx->ops.slave_attach = scheduler->ops->slave_attach;
+ sched_ctx->ops.slave_detach = scheduler->ops->slave_detach;
+ sched_ctx->ops.option_set = scheduler->ops->option_set;
+ sched_ctx->ops.option_get = scheduler->ops->option_get;
+
+ if (sched_ctx->private_ctx) {
+ rte_free(sched_ctx->private_ctx);
+ sched_ctx->private_ctx = NULL;
+ }
+
+ if (sched_ctx->ops.create_private_ctx) {
+ int ret = (*sched_ctx->ops.create_private_ctx)(dev);
+
+ if (ret < 0) {
+ CR_SCHED_LOG(ERR, "Unable to create scheduler private "
+ "context");
+ return ret;
+ }
+ }
+
+ sched_ctx->mode = scheduler->mode;
+
+ return 0;
+}
+
+int
+rte_cryptodev_scheduler_slaves_get(uint8_t scheduler_id, uint8_t *slaves)
+{
+ struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
+ struct scheduler_ctx *sched_ctx;
+ uint32_t nb_slaves = 0;
+
+ if (!dev) {
+ CR_SCHED_LOG(ERR, "Operation not supported");
+ return -ENOTSUP;
+ }
+
+ if (dev->driver_id != cryptodev_driver_id) {
+ CR_SCHED_LOG(ERR, "Operation not supported");
+ return -ENOTSUP;
+ }
+
+ sched_ctx = dev->data->dev_private;
+
+ nb_slaves = sched_ctx->nb_slaves;
+
+ if (slaves && nb_slaves) {
+ uint32_t i;
+
+ for (i = 0; i < nb_slaves; i++)
+ slaves[i] = sched_ctx->slaves[i].dev_id;
+ }
+
+ return (int)nb_slaves;
+}
+
+int
+rte_cryptodev_scheduler_option_set(uint8_t scheduler_id,
+ enum rte_cryptodev_schedule_option_type option_type,
+ void *option)
+{
+ struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
+ struct scheduler_ctx *sched_ctx;
+
+ if (option_type == CDEV_SCHED_OPTION_NOT_SET ||
+ option_type >= CDEV_SCHED_OPTION_COUNT) {
+ CR_SCHED_LOG(ERR, "Invalid option parameter");
+ return -EINVAL;
+ }
+
+ if (!option) {
+ CR_SCHED_LOG(ERR, "Invalid option parameter");
+ return -EINVAL;
+ }
+
+ if (dev->data->dev_started) {
+ CR_SCHED_LOG(ERR, "Illegal operation");
+ return -EBUSY;
+ }
+
+ sched_ctx = dev->data->dev_private;
+
+ RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.option_set, -ENOTSUP);
+
+ return (*sched_ctx->ops.option_set)(dev, option_type, option);
+}
+
+int
+rte_cryptodev_scheduler_option_get(uint8_t scheduler_id,
+ enum rte_cryptodev_schedule_option_type option_type,
+ void *option)
+{
+ struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
+ struct scheduler_ctx *sched_ctx;
+
+ if (!dev) {
+ CR_SCHED_LOG(ERR, "Operation not supported");
+ return -ENOTSUP;
+ }
+
+ if (!option) {
+ CR_SCHED_LOG(ERR, "Invalid option parameter");
+ return -EINVAL;
+ }
+
+ if (dev->driver_id != cryptodev_driver_id) {
+ CR_SCHED_LOG(ERR, "Operation not supported");
+ return -ENOTSUP;
+ }
+
+ sched_ctx = dev->data->dev_private;
+
+ RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.option_get, -ENOTSUP);
+
+ return (*sched_ctx->ops.option_get)(dev, option_type, option);
+}
+
+RTE_INIT(scheduler_init_log)
+{
+ scheduler_logtype_driver = rte_log_register("pmd.crypto.scheduler");
+}
diff --git a/src/spdk/dpdk/drivers/crypto/scheduler/rte_cryptodev_scheduler.h b/src/spdk/dpdk/drivers/crypto/scheduler/rte_cryptodev_scheduler.h
new file mode 100644
index 00000000..3faea409
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/scheduler/rte_cryptodev_scheduler.h
@@ -0,0 +1,284 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#ifndef _RTE_CRYPTO_SCHEDULER_H
+#define _RTE_CRYPTO_SCHEDULER_H
+
+/**
+ * @file rte_cryptodev_scheduler.h
+ *
+ * RTE Cryptodev Scheduler Device
+ *
+ * The RTE Cryptodev Scheduler Device allows the aggregation of multiple (slave)
+ * Cryptodevs into a single logical crypto device, scheduling the crypto
+ * operations to the slaves according to the specified and supported mode of
+ * operation. This implementation supports four modes of operation: round
+ * robin, packet-size based distribution, fail-over, and multi-core.
+ */
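+
+/*
+ * A minimal usage sketch (assumptions: two slave cryptodevs with IDs 1 and 2
+ * already exist and "sched_id" is the scheduler vdev's cryptodev ID; the
+ * scheduler device must not be started yet):
+ *
+ *   rte_cryptodev_scheduler_slave_attach(sched_id, 1);
+ *   rte_cryptodev_scheduler_slave_attach(sched_id, 2);
+ *   rte_cryptodev_scheduler_mode_set(sched_id, CDEV_SCHED_MODE_ROUNDROBIN);
+ *   rte_cryptodev_scheduler_ordering_set(sched_id, 1);
+ *
+ * After that the scheduler is configured and started like any other cryptodev.
+ */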
+
+#include <stdint.h>
+#include "rte_cryptodev_scheduler_operations.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** Maximum number of bonded devices per device */
+#ifndef RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES
+#define RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES (8)
+#endif
+
+/** Maximum number of multi-core worker cores */
+#define RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES (RTE_MAX_LCORE - 1)
+
+/** Round-robin scheduling mode string */
+#define SCHEDULER_MODE_NAME_ROUND_ROBIN round-robin
+/** Packet-size based distribution scheduling mode string */
+#define SCHEDULER_MODE_NAME_PKT_SIZE_DISTR packet-size-distr
+/** Fail-over scheduling mode string */
+#define SCHEDULER_MODE_NAME_FAIL_OVER fail-over
+/** multi-core scheduling mode string */
+#define SCHEDULER_MODE_NAME_MULTI_CORE multi-core
+
+/**
+ * Crypto scheduler PMD operation modes
+ */
+enum rte_cryptodev_scheduler_mode {
+ CDEV_SCHED_MODE_NOT_SET = 0,
+ /** User defined mode */
+ CDEV_SCHED_MODE_USERDEFINED,
+ /** Round-robin mode */
+ CDEV_SCHED_MODE_ROUNDROBIN,
+ /** Packet-size based distribution mode */
+ CDEV_SCHED_MODE_PKT_SIZE_DISTR,
+ /** Fail-over mode */
+ CDEV_SCHED_MODE_FAILOVER,
+ /** multi-core mode */
+ CDEV_SCHED_MODE_MULTICORE,
+
+ CDEV_SCHED_MODE_COUNT /**< number of modes */
+};
+
+#define RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN (64)
+#define RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN (256)
+
+/**
+ * Crypto scheduler option types
+ */
+enum rte_cryptodev_schedule_option_type {
+ CDEV_SCHED_OPTION_NOT_SET = 0,
+ CDEV_SCHED_OPTION_THRESHOLD,
+
+ CDEV_SCHED_OPTION_COUNT
+};
+
+/**
+ * Threshold option structure
+ */
+#define RTE_CRYPTODEV_SCHEDULER_PARAM_THRES "threshold"
+struct rte_cryptodev_scheduler_threshold_option {
+ uint32_t threshold; /**< Threshold for packet-size mode */
+};
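+
+/*
+ * Example (a sketch, with sched_id as in the sketch above): set the
+ * packet-size mode threshold option to 256.
+ *
+ *   struct rte_cryptodev_scheduler_threshold_option opt = {
+ *       .threshold = 256,
+ *   };
+ *   rte_cryptodev_scheduler_option_set(sched_id, CDEV_SCHED_OPTION_THRESHOLD,
+ *           &opt);
+ */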
+
+struct rte_cryptodev_scheduler;
+
+/**
+ * Load a user defined scheduler
+ *
+ * @param scheduler_id
+ * The target scheduler device ID
+ * @param scheduler
+ * Pointer to the user defined scheduler
+ *
+ * @return
+ * - 0 if the scheduler is successfully loaded
+ * - -ENOTSUP if the operation is not supported.
+ * - -EBUSY if device is started.
+ * - -EINVAL if input values are invalid.
+ */
+int
+rte_cryptodev_scheduler_load_user_scheduler(uint8_t scheduler_id,
+ struct rte_cryptodev_scheduler *scheduler);
+
+/**
+ * Attach a crypto device to the scheduler
+ *
+ * @param scheduler_id
+ * The target scheduler device ID
+ * @param slave_id
+ * Crypto device ID to be attached
+ *
+ * @return
+ * - 0 if the slave is attached.
+ * - -ENOTSUP if the operation is not supported.
+ * - -EBUSY if device is started.
+ * - -ENOMEM if the scheduler's slave list is full.
+ */
+int
+rte_cryptodev_scheduler_slave_attach(uint8_t scheduler_id, uint8_t slave_id);
+
+/**
+ * Detach a crypto device from the scheduler
+ *
+ * @param scheduler_id
+ * The target scheduler device ID
+ * @param slave_id
+ * Crypto device ID to be detached
+ *
+ * @return
+ * - 0 if the slave is detached.
+ * - -ENOTSUP if the operation is not supported.
+ * - -EBUSY if device is started.
+ */
+int
+rte_cryptodev_scheduler_slave_detach(uint8_t scheduler_id, uint8_t slave_id);
+
+
+/**
+ * Set the scheduling mode
+ *
+ * @param scheduler_id
+ * The target scheduler device ID
+ * @param mode
+ * The scheduling mode
+ *
+ * @return
+ * - 0 if the mode is set.
+ * - -ENOTSUP if the operation is not supported.
+ * - -EBUSY if device is started.
+ */
+int
+rte_cryptodev_scheduler_mode_set(uint8_t scheduler_id,
+ enum rte_cryptodev_scheduler_mode mode);
+
+/**
+ * Get the current scheduling mode
+ *
+ * @param scheduler_id
+ * The target scheduler device ID
+ *
+ * @return mode
+ * - non-negative enumerate value: the scheduling mode
+ * - -ENOTSUP if the operation is not supported.
+ */
+enum rte_cryptodev_scheduler_mode
+rte_cryptodev_scheduler_mode_get(uint8_t scheduler_id);
+
+/**
+ * Set the crypto ops reordering feature on/off
+ *
+ * @param scheduler_id
+ * The target scheduler device ID
+ * @param enable_reorder
+ * Set the crypto op reordering feature
+ * - 0: disable reordering
+ * - 1: enable reordering
+ *
+ * @return
+ * - 0 if the ordering is set.
+ * - -ENOTSUP if the operation is not supported.
+ * - -EBUSY if device is started.
+ */
+int
+rte_cryptodev_scheduler_ordering_set(uint8_t scheduler_id,
+ uint32_t enable_reorder);
+
+/**
+ * Get the current crypto ops reordering feature
+ *
+ * @param scheduler_id
+ * The target scheduler device ID
+ *
+ * @return
+ * - 0 if reordering is disabled
+ * - 1 if reordering is enabled
+ * - -ENOTSUP if the operation is not supported.
+ */
+int
+rte_cryptodev_scheduler_ordering_get(uint8_t scheduler_id);
+
+/**
+ * Get the attached slaves' count and/or ID
+ *
+ * @param scheduler_id
+ * The target scheduler device ID
+ * @param slaves
+ * If successful, the function will write back all slaves' device IDs to it.
+ * This parameter will either be a uint8_t array of
+ * RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES elements or NULL.
+ *
+ * @return
+ * - non-negative number: the number of slaves attached
+ * - -ENOTSUP if the operation is not supported.
+ */
+int
+rte_cryptodev_scheduler_slaves_get(uint8_t scheduler_id, uint8_t *slaves);
+
+/**
+ * Set the mode specific option
+ *
+ * @param scheduler_id
+ * The target scheduler device ID
+ * @param option_type
+ * The option type enumerate
+ * @param option
+ * The specific mode's option structure
+ *
+ * @return
+ * - 0 if successful
+ * - negative integer if otherwise.
+ */
+int
+rte_cryptodev_scheduler_option_set(uint8_t scheduler_id,
+ enum rte_cryptodev_schedule_option_type option_type,
+ void *option);
+
+/**
+ * Get the mode specific option
+ *
+ * @param scheduler_id
+ * The target scheduler device ID
+ * @param option_type
+ * The option type enumerate
+ * @param option
+ * If successful, the function will write back the current option value to it.
+ *
+ * @return
+ * - 0 if successful
+ * - negative integer if otherwise.
+ */
+int
+rte_cryptodev_scheduler_option_get(uint8_t scheduler_id,
+ enum rte_cryptodev_schedule_option_type option_type,
+ void *option);
+
+typedef uint16_t (*rte_cryptodev_scheduler_burst_enqueue_t)(void *qp_ctx,
+ struct rte_crypto_op **ops, uint16_t nb_ops);
+
+typedef uint16_t (*rte_cryptodev_scheduler_burst_dequeue_t)(void *qp_ctx,
+ struct rte_crypto_op **ops, uint16_t nb_ops);
+
+/** The data structure associated with each mode of scheduler. */
+struct rte_cryptodev_scheduler {
+ const char *name; /**< Scheduler name */
+ const char *description; /**< Scheduler description */
+ enum rte_cryptodev_scheduler_mode mode; /**< Scheduling mode */
+
+ /** Pointer to scheduler operation structure */
+ struct rte_cryptodev_scheduler_ops *ops;
+};
+
+/** Round-robin mode scheduler */
+extern struct rte_cryptodev_scheduler *roundrobin_scheduler;
+/** Packet-size based distribution mode scheduler */
+extern struct rte_cryptodev_scheduler *pkt_size_based_distr_scheduler;
+/** Fail-over mode scheduler */
+extern struct rte_cryptodev_scheduler *failover_scheduler;
+/** multi-core mode scheduler */
+extern struct rte_cryptodev_scheduler *multicore_scheduler;
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _RTE_CRYPTO_SCHEDULER_H */
diff --git a/src/spdk/dpdk/drivers/crypto/scheduler/rte_cryptodev_scheduler_operations.h b/src/spdk/dpdk/drivers/crypto/scheduler/rte_cryptodev_scheduler_operations.h
new file mode 100644
index 00000000..c4369589
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/scheduler/rte_cryptodev_scheduler_operations.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#ifndef _RTE_CRYPTO_SCHEDULER_OPERATIONS_H
+#define _RTE_CRYPTO_SCHEDULER_OPERATIONS_H
+
+#include <rte_cryptodev.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef int (*rte_cryptodev_scheduler_slave_attach_t)(
+ struct rte_cryptodev *dev, uint8_t slave_id);
+typedef int (*rte_cryptodev_scheduler_slave_detach_t)(
+ struct rte_cryptodev *dev, uint8_t slave_id);
+
+typedef int (*rte_cryptodev_scheduler_start_t)(struct rte_cryptodev *dev);
+typedef int (*rte_cryptodev_scheduler_stop_t)(struct rte_cryptodev *dev);
+
+typedef int (*rte_cryptodev_scheduler_config_queue_pair)(
+ struct rte_cryptodev *dev, uint16_t qp_id);
+
+typedef int (*rte_cryptodev_scheduler_create_private_ctx)(
+ struct rte_cryptodev *dev);
+
+typedef int (*rte_cryptodev_scheduler_config_option_set)(
+ struct rte_cryptodev *dev,
+ uint32_t option_type,
+ void *option);
+
+typedef int (*rte_cryptodev_scheduler_config_option_get)(
+ struct rte_cryptodev *dev,
+ uint32_t option_type,
+ void *option);
+
+struct rte_cryptodev_scheduler_ops {
+ rte_cryptodev_scheduler_slave_attach_t slave_attach;
+ rte_cryptodev_scheduler_slave_detach_t slave_detach;
+
+ rte_cryptodev_scheduler_start_t scheduler_start;
+ rte_cryptodev_scheduler_stop_t scheduler_stop;
+
+ rte_cryptodev_scheduler_config_queue_pair config_queue_pair;
+
+ rte_cryptodev_scheduler_create_private_ctx create_private_ctx;
+
+ rte_cryptodev_scheduler_config_option_set option_set;
+ rte_cryptodev_scheduler_config_option_get option_get;
+};
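+
+/*
+ * A user defined scheduler (CDEV_SCHED_MODE_USERDEFINED) supplies an instance
+ * of this structure through rte_cryptodev_scheduler.ops, e.g. (a sketch; the
+ * my_* callbacks are hypothetical):
+ *
+ *   static struct rte_cryptodev_scheduler_ops my_sched_ops = {
+ *       my_slave_attach, my_slave_detach,
+ *       my_scheduler_start, my_scheduler_stop,
+ *       my_config_qp, my_create_private_ctx,
+ *       NULL, NULL   (option_set and option_get are optional)
+ *   };
+ */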
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _RTE_CRYPTO_SCHEDULER_OPERATIONS_H */
diff --git a/src/spdk/dpdk/drivers/crypto/scheduler/rte_pmd_crypto_scheduler_version.map b/src/spdk/dpdk/drivers/crypto/scheduler/rte_pmd_crypto_scheduler_version.map
new file mode 100644
index 00000000..5c43127c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/scheduler/rte_pmd_crypto_scheduler_version.map
@@ -0,0 +1,21 @@
+DPDK_17.02 {
+ global:
+
+ rte_cryptodev_scheduler_load_user_scheduler;
+ rte_cryptodev_scheduler_slave_attach;
+ rte_cryptodev_scheduler_slave_detach;
+ rte_cryptodev_scheduler_ordering_set;
+ rte_cryptodev_scheduler_ordering_get;
+
+};
+
+DPDK_17.05 {
+ global:
+
+ rte_cryptodev_scheduler_mode_get;
+ rte_cryptodev_scheduler_mode_set;
+ rte_cryptodev_scheduler_option_get;
+ rte_cryptodev_scheduler_option_set;
+ rte_cryptodev_scheduler_slaves_get;
+
+} DPDK_17.02;
diff --git a/src/spdk/dpdk/drivers/crypto/scheduler/scheduler_failover.c b/src/spdk/dpdk/drivers/crypto/scheduler/scheduler_failover.c
new file mode 100644
index 00000000..ddfb5b81
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/scheduler/scheduler_failover.c
@@ -0,0 +1,220 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#include <rte_cryptodev.h>
+#include <rte_malloc.h>
+
+#include "rte_cryptodev_scheduler_operations.h"
+#include "scheduler_pmd_private.h"
+
+#define PRIMARY_SLAVE_IDX 0
+#define SECONDARY_SLAVE_IDX 1
+#define NB_FAILOVER_SLAVES 2
+#define SLAVE_SWITCH_MASK (0x01)
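+
+/*
+ * The per queue pair deq_idx below is toggled between the primary and the
+ * secondary slave (via SLAVE_SWITCH_MASK) on every dequeue call, so that
+ * completions from neither slave are starved.
+ */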
+
+struct fo_scheduler_qp_ctx {
+ struct scheduler_slave primary_slave;
+ struct scheduler_slave secondary_slave;
+
+ uint8_t deq_idx;
+};
+
+static __rte_always_inline uint16_t
+failover_slave_enqueue(struct scheduler_slave *slave,
+ struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ uint16_t i, processed_ops;
+
+ for (i = 0; i < nb_ops && i < 4; i++)
+ rte_prefetch0(ops[i]->sym->session);
+
+ processed_ops = rte_cryptodev_enqueue_burst(slave->dev_id,
+ slave->qp_id, ops, nb_ops);
+ slave->nb_inflight_cops += processed_ops;
+
+ return processed_ops;
+}
+
+static uint16_t
+schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ struct fo_scheduler_qp_ctx *qp_ctx =
+ ((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
+ uint16_t enqueued_ops;
+
+ if (unlikely(nb_ops == 0))
+ return 0;
+
+ enqueued_ops = failover_slave_enqueue(&qp_ctx->primary_slave,
+ ops, nb_ops);
+
+ if (enqueued_ops < nb_ops)
+ enqueued_ops += failover_slave_enqueue(&qp_ctx->secondary_slave,
+ &ops[enqueued_ops],
+ nb_ops - enqueued_ops);
+
+ return enqueued_ops;
+}
+
+
+static uint16_t
+schedule_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct rte_ring *order_ring =
+ ((struct scheduler_qp_ctx *)qp)->order_ring;
+ uint16_t nb_ops_to_enq = get_max_enqueue_order_count(order_ring,
+ nb_ops);
+ uint16_t nb_ops_enqd = schedule_enqueue(qp, ops,
+ nb_ops_to_enq);
+
+ scheduler_order_insert(order_ring, ops, nb_ops_enqd);
+
+ return nb_ops_enqd;
+}
+
+static uint16_t
+schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ struct fo_scheduler_qp_ctx *qp_ctx =
+ ((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
+ struct scheduler_slave *slaves[NB_FAILOVER_SLAVES] = {
+ &qp_ctx->primary_slave, &qp_ctx->secondary_slave};
+ struct scheduler_slave *slave = slaves[qp_ctx->deq_idx];
+ uint16_t nb_deq_ops = 0, nb_deq_ops2 = 0;
+
+ if (slave->nb_inflight_cops) {
+ nb_deq_ops = rte_cryptodev_dequeue_burst(slave->dev_id,
+ slave->qp_id, ops, nb_ops);
+ slave->nb_inflight_cops -= nb_deq_ops;
+ }
+
+ qp_ctx->deq_idx = (~qp_ctx->deq_idx) & SLAVE_SWITCH_MASK;
+
+ if (nb_deq_ops == nb_ops)
+ return nb_deq_ops;
+
+ slave = slaves[qp_ctx->deq_idx];
+
+ if (slave->nb_inflight_cops) {
+ nb_deq_ops2 = rte_cryptodev_dequeue_burst(slave->dev_id,
+ slave->qp_id, &ops[nb_deq_ops], nb_ops - nb_deq_ops);
+ slave->nb_inflight_cops -= nb_deq_ops2;
+ }
+
+ return nb_deq_ops + nb_deq_ops2;
+}
+
+static uint16_t
+schedule_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct rte_ring *order_ring =
+ ((struct scheduler_qp_ctx *)qp)->order_ring;
+
+ schedule_dequeue(qp, ops, nb_ops);
+
+ return scheduler_order_drain(order_ring, ops, nb_ops);
+}
+
+static int
+slave_attach(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint8_t slave_id)
+{
+ return 0;
+}
+
+static int
+slave_detach(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint8_t slave_id)
+{
+ return 0;
+}
+
+static int
+scheduler_start(struct rte_cryptodev *dev)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ uint16_t i;
+
+ if (sched_ctx->nb_slaves < 2) {
+ CR_SCHED_LOG(ERR, "Number of slaves shall no less than 2");
+ return -ENOMEM;
+ }
+
+ if (sched_ctx->reordering_enabled) {
+ dev->enqueue_burst = schedule_enqueue_ordering;
+ dev->dequeue_burst = schedule_dequeue_ordering;
+ } else {
+ dev->enqueue_burst = schedule_enqueue;
+ dev->dequeue_burst = schedule_dequeue;
+ }
+
+ for (i = 0; i < dev->data->nb_queue_pairs; i++) {
+ struct fo_scheduler_qp_ctx *qp_ctx =
+ ((struct scheduler_qp_ctx *)
+ dev->data->queue_pairs[i])->private_qp_ctx;
+
+ rte_memcpy(&qp_ctx->primary_slave,
+ &sched_ctx->slaves[PRIMARY_SLAVE_IDX],
+ sizeof(struct scheduler_slave));
+ rte_memcpy(&qp_ctx->secondary_slave,
+ &sched_ctx->slaves[SECONDARY_SLAVE_IDX],
+ sizeof(struct scheduler_slave));
+ }
+
+ return 0;
+}
+
+static int
+scheduler_stop(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+static int
+scheduler_config_qp(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+ struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];
+ struct fo_scheduler_qp_ctx *fo_qp_ctx;
+
+ fo_qp_ctx = rte_zmalloc_socket(NULL, sizeof(*fo_qp_ctx), 0,
+ rte_socket_id());
+ if (!fo_qp_ctx) {
+ CR_SCHED_LOG(ERR, "failed allocate memory for private queue pair");
+ return -ENOMEM;
+ }
+
+ qp_ctx->private_qp_ctx = (void *)fo_qp_ctx;
+
+ return 0;
+}
+
+static int
+scheduler_create_private_ctx(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+struct rte_cryptodev_scheduler_ops scheduler_fo_ops = {
+ slave_attach,
+ slave_detach,
+ scheduler_start,
+ scheduler_stop,
+ scheduler_config_qp,
+ scheduler_create_private_ctx,
+ NULL, /* option_set */
+ NULL /*option_get */
+};
+
+struct rte_cryptodev_scheduler fo_scheduler = {
+ .name = "failover-scheduler",
+ .description = "scheduler which enqueues to the primary slave, "
+ "and only then enqueues to the secondary slave "
+ "upon failing on enqueuing to primary",
+ .mode = CDEV_SCHED_MODE_FAILOVER,
+ .ops = &scheduler_fo_ops
+};
+
+struct rte_cryptodev_scheduler *failover_scheduler = &fo_scheduler;
diff --git a/src/spdk/dpdk/drivers/crypto/scheduler/scheduler_multicore.c b/src/spdk/dpdk/drivers/crypto/scheduler/scheduler_multicore.c
new file mode 100644
index 00000000..d410e69d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/scheduler/scheduler_multicore.c
@@ -0,0 +1,413 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+#include <unistd.h>
+
+#include <rte_cryptodev.h>
+#include <rte_malloc.h>
+
+#include "rte_cryptodev_scheduler_operations.h"
+#include "scheduler_pmd_private.h"
+
+#define MC_SCHED_ENQ_RING_NAME_PREFIX "MCS_ENQR_"
+#define MC_SCHED_DEQ_RING_NAME_PREFIX "MCS_DEQR_"
+
+#define MC_SCHED_BUFFER_SIZE 32
+
+#define CRYPTO_OP_STATUS_BIT_COMPLETE 0x80
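+
+/*
+ * When reordering is enabled the worker cores mark finished crypto ops by
+ * setting this bit in rte_crypto_op.status; schedule_dequeue_ordering() then
+ * drains ops from the order ring only while the bit is set, clearing it on
+ * the way out.
+ */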
+
+/** multi-core scheduler context */
+struct mc_scheduler_ctx {
+ uint32_t num_workers; /**< Number of workers polling */
+ uint32_t stop_signal;
+
+ struct rte_ring *sched_enq_ring[RTE_MAX_LCORE];
+ struct rte_ring *sched_deq_ring[RTE_MAX_LCORE];
+};
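+
+/*
+ * Each worker lcore owns one enqueue ring and one dequeue ring from the
+ * arrays above; the fast-path enqueue/dequeue handlers spread bursts across
+ * the rings round-robin, while each worker moves ops between its rings and
+ * its assigned slave's queue pair.
+ */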
+
+struct mc_scheduler_qp_ctx {
+ struct scheduler_slave slaves[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
+ uint32_t nb_slaves;
+
+ uint32_t last_enq_worker_idx;
+ uint32_t last_deq_worker_idx;
+
+ struct mc_scheduler_ctx *mc_private_ctx;
+};
+
+static uint16_t
+schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ struct mc_scheduler_qp_ctx *mc_qp_ctx =
+ ((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
+ struct mc_scheduler_ctx *mc_ctx = mc_qp_ctx->mc_private_ctx;
+ uint32_t worker_idx = mc_qp_ctx->last_enq_worker_idx;
+ uint16_t i, processed_ops = 0;
+
+ if (unlikely(nb_ops == 0))
+ return 0;
+
+ for (i = 0; i < mc_ctx->num_workers && nb_ops != 0; i++) {
+ struct rte_ring *enq_ring = mc_ctx->sched_enq_ring[worker_idx];
+ uint16_t nb_queue_ops = rte_ring_enqueue_burst(enq_ring,
+ (void *)(&ops[processed_ops]), nb_ops, NULL);
+
+ nb_ops -= nb_queue_ops;
+ processed_ops += nb_queue_ops;
+
+ if (++worker_idx == mc_ctx->num_workers)
+ worker_idx = 0;
+ }
+ mc_qp_ctx->last_enq_worker_idx = worker_idx;
+
+ return processed_ops;
+}
+
+static uint16_t
+schedule_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct rte_ring *order_ring =
+ ((struct scheduler_qp_ctx *)qp)->order_ring;
+ uint16_t nb_ops_to_enq = get_max_enqueue_order_count(order_ring,
+ nb_ops);
+ uint16_t nb_ops_enqd = schedule_enqueue(qp, ops,
+ nb_ops_to_enq);
+
+ scheduler_order_insert(order_ring, ops, nb_ops_enqd);
+
+ return nb_ops_enqd;
+}
+
+
+static uint16_t
+schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ struct mc_scheduler_qp_ctx *mc_qp_ctx =
+ ((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
+ struct mc_scheduler_ctx *mc_ctx = mc_qp_ctx->mc_private_ctx;
+ uint32_t worker_idx = mc_qp_ctx->last_deq_worker_idx;
+ uint16_t i, processed_ops = 0;
+
+ for (i = 0; i < mc_ctx->num_workers && nb_ops != 0; i++) {
+ struct rte_ring *deq_ring = mc_ctx->sched_deq_ring[worker_idx];
+ uint16_t nb_deq_ops = rte_ring_dequeue_burst(deq_ring,
+ (void *)(&ops[processed_ops]), nb_ops, NULL);
+
+ nb_ops -= nb_deq_ops;
+ processed_ops += nb_deq_ops;
+ if (++worker_idx == mc_ctx->num_workers)
+ worker_idx = 0;
+ }
+
+ mc_qp_ctx->last_deq_worker_idx = worker_idx;
+
+ return processed_ops;
+
+}
+
+static uint16_t
+schedule_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct rte_ring *order_ring = ((struct scheduler_qp_ctx *)qp)->order_ring;
+ struct rte_crypto_op *op;
+ uint32_t nb_objs = rte_ring_count(order_ring);
+ uint32_t nb_ops_to_deq = 0;
+ uint32_t nb_ops_deqd = 0;
+
+ if (nb_objs > nb_ops)
+ nb_objs = nb_ops;
+
+ while (nb_ops_to_deq < nb_objs) {
+ SCHEDULER_GET_RING_OBJ(order_ring, nb_ops_to_deq, op);
+
+ if (!(op->status & CRYPTO_OP_STATUS_BIT_COMPLETE))
+ break;
+
+ op->status &= ~CRYPTO_OP_STATUS_BIT_COMPLETE;
+ nb_ops_to_deq++;
+ }
+
+ if (nb_ops_to_deq) {
+ nb_ops_deqd = rte_ring_sc_dequeue_bulk(order_ring,
+ (void **)ops, nb_ops_to_deq, NULL);
+ }
+
+ return nb_ops_deqd;
+}
+
+static int
+slave_attach(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint8_t slave_id)
+{
+ return 0;
+}
+
+static int
+slave_detach(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint8_t slave_id)
+{
+ return 0;
+}
+
+static int
+mc_scheduler_worker(struct rte_cryptodev *dev)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ struct mc_scheduler_ctx *mc_ctx = sched_ctx->private_ctx;
+ struct rte_ring *enq_ring;
+ struct rte_ring *deq_ring;
+ uint32_t core_id = rte_lcore_id();
+ int i, worker_idx = -1;
+ struct scheduler_slave *slave;
+ struct rte_crypto_op *enq_ops[MC_SCHED_BUFFER_SIZE];
+ struct rte_crypto_op *deq_ops[MC_SCHED_BUFFER_SIZE];
+ uint16_t processed_ops;
+ uint16_t pending_enq_ops = 0;
+ uint16_t pending_enq_ops_idx = 0;
+ uint16_t pending_deq_ops = 0;
+ uint16_t pending_deq_ops_idx = 0;
+ uint16_t inflight_ops = 0;
+ const uint8_t reordering_enabled = sched_ctx->reordering_enabled;
+
+ for (i = 0; i < (int)sched_ctx->nb_wc; i++) {
+ if (sched_ctx->wc_pool[i] == core_id) {
+ worker_idx = i;
+ break;
+ }
+ }
+ if (worker_idx == -1) {
+ CR_SCHED_LOG(ERR, "worker on core %u:cannot find worker index!",
+ core_id);
+ return -1;
+ }
+
+ slave = &sched_ctx->slaves[worker_idx];
+ enq_ring = mc_ctx->sched_enq_ring[worker_idx];
+ deq_ring = mc_ctx->sched_deq_ring[worker_idx];
+
+ while (!mc_ctx->stop_signal) {
+ if (pending_enq_ops) {
+ processed_ops =
+ rte_cryptodev_enqueue_burst(slave->dev_id,
+ slave->qp_id, &enq_ops[pending_enq_ops_idx],
+ pending_enq_ops);
+ pending_enq_ops -= processed_ops;
+ pending_enq_ops_idx += processed_ops;
+ inflight_ops += processed_ops;
+ } else {
+ processed_ops = rte_ring_dequeue_burst(enq_ring, (void *)enq_ops,
+ MC_SCHED_BUFFER_SIZE, NULL);
+ if (processed_ops) {
+ pending_enq_ops_idx = rte_cryptodev_enqueue_burst(
+ slave->dev_id, slave->qp_id,
+ enq_ops, processed_ops);
+ pending_enq_ops = processed_ops - pending_enq_ops_idx;
+ inflight_ops += pending_enq_ops_idx;
+ }
+ }
+
+ if (pending_deq_ops) {
+ processed_ops = rte_ring_enqueue_burst(
+ deq_ring, (void *)&deq_ops[pending_deq_ops_idx],
+ pending_deq_ops, NULL);
+ pending_deq_ops -= processed_ops;
+ pending_deq_ops_idx += processed_ops;
+ } else if (inflight_ops) {
+ processed_ops = rte_cryptodev_dequeue_burst(slave->dev_id,
+ slave->qp_id, deq_ops, MC_SCHED_BUFFER_SIZE);
+ if (processed_ops) {
+ inflight_ops -= processed_ops;
+ if (reordering_enabled) {
+ uint16_t j;
+
+ for (j = 0; j < processed_ops; j++) {
+ deq_ops[j]->status |=
+ CRYPTO_OP_STATUS_BIT_COMPLETE;
+ }
+ } else {
+ pending_deq_ops_idx = rte_ring_enqueue_burst(
+ deq_ring, (void *)deq_ops, processed_ops,
+ NULL);
+ pending_deq_ops = processed_ops -
+ pending_deq_ops_idx;
+ }
+ }
+ }
+
+ rte_pause();
+ }
+
+ return 0;
+}
+
+static int
+scheduler_start(struct rte_cryptodev *dev)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ struct mc_scheduler_ctx *mc_ctx = sched_ctx->private_ctx;
+ uint16_t i;
+
+ mc_ctx->stop_signal = 0;
+
+ for (i = 0; i < sched_ctx->nb_wc; i++)
+ rte_eal_remote_launch(
+ (lcore_function_t *)mc_scheduler_worker, dev,
+ sched_ctx->wc_pool[i]);
+
+ if (sched_ctx->reordering_enabled) {
+ dev->enqueue_burst = &schedule_enqueue_ordering;
+ dev->dequeue_burst = &schedule_dequeue_ordering;
+ } else {
+ dev->enqueue_burst = &schedule_enqueue;
+ dev->dequeue_burst = &schedule_dequeue;
+ }
+
+ for (i = 0; i < dev->data->nb_queue_pairs; i++) {
+ struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];
+ struct mc_scheduler_qp_ctx *mc_qp_ctx =
+ qp_ctx->private_qp_ctx;
+ uint32_t j;
+
+ memset(mc_qp_ctx->slaves, 0,
+ RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES *
+ sizeof(struct scheduler_slave));
+ for (j = 0; j < sched_ctx->nb_slaves; j++) {
+ mc_qp_ctx->slaves[j].dev_id =
+ sched_ctx->slaves[j].dev_id;
+ mc_qp_ctx->slaves[j].qp_id = i;
+ }
+
+ mc_qp_ctx->nb_slaves = sched_ctx->nb_slaves;
+
+ mc_qp_ctx->last_enq_worker_idx = 0;
+ mc_qp_ctx->last_deq_worker_idx = 0;
+ }
+
+ return 0;
+}
+
+static int
+scheduler_stop(struct rte_cryptodev *dev)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ struct mc_scheduler_ctx *mc_ctx = sched_ctx->private_ctx;
+ uint16_t i;
+
+ mc_ctx->stop_signal = 1;
+
+ for (i = 0; i < sched_ctx->nb_wc; i++)
+ rte_eal_wait_lcore(sched_ctx->wc_pool[i]);
+
+ return 0;
+}
+
+static int
+scheduler_config_qp(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+ struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];
+ struct mc_scheduler_qp_ctx *mc_qp_ctx;
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ struct mc_scheduler_ctx *mc_ctx = sched_ctx->private_ctx;
+
+ mc_qp_ctx = rte_zmalloc_socket(NULL, sizeof(*mc_qp_ctx), 0,
+ rte_socket_id());
+ if (!mc_qp_ctx) {
+ CR_SCHED_LOG(ERR, "failed allocate memory for private queue pair");
+ return -ENOMEM;
+ }
+
+ mc_qp_ctx->mc_private_ctx = mc_ctx;
+ qp_ctx->private_qp_ctx = (void *)mc_qp_ctx;
+
+
+ return 0;
+}
+
+static int
+scheduler_create_private_ctx(struct rte_cryptodev *dev)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ struct mc_scheduler_ctx *mc_ctx = NULL;
+ uint16_t i;
+
+ if (sched_ctx->private_ctx) {
+ rte_free(sched_ctx->private_ctx);
+ sched_ctx->private_ctx = NULL;
+ }
+
+ mc_ctx = rte_zmalloc_socket(NULL, sizeof(struct mc_scheduler_ctx), 0,
+ rte_socket_id());
+ if (!mc_ctx) {
+ CR_SCHED_LOG(ERR, "failed allocate memory");
+ return -ENOMEM;
+ }
+
+ mc_ctx->num_workers = sched_ctx->nb_wc;
+ for (i = 0; i < sched_ctx->nb_wc; i++) {
+ char r_name[16];
+
+ snprintf(r_name, sizeof(r_name), MC_SCHED_ENQ_RING_NAME_PREFIX
+ "%u_%u", dev->data->dev_id, i);
+ mc_ctx->sched_enq_ring[i] = rte_ring_lookup(r_name);
+ if (!mc_ctx->sched_enq_ring[i]) {
+ mc_ctx->sched_enq_ring[i] = rte_ring_create(r_name,
+ PER_SLAVE_BUFF_SIZE,
+ rte_socket_id(),
+ RING_F_SC_DEQ | RING_F_SP_ENQ);
+ if (!mc_ctx->sched_enq_ring[i]) {
+ CR_SCHED_LOG(ERR, "Cannot create ring for worker %u",
+ i);
+ goto exit;
+ }
+ }
+ snprintf(r_name, sizeof(r_name), MC_SCHED_DEQ_RING_NAME_PREFIX
+ "%u_%u", dev->data->dev_id, i);
+ mc_ctx->sched_deq_ring[i] = rte_ring_lookup(r_name);
+ if (!mc_ctx->sched_deq_ring[i]) {
+ mc_ctx->sched_deq_ring[i] = rte_ring_create(r_name,
+ PER_SLAVE_BUFF_SIZE,
+ rte_socket_id(),
+ RING_F_SC_DEQ | RING_F_SP_ENQ);
+ if (!mc_ctx->sched_deq_ring[i]) {
+ CR_SCHED_LOG(ERR, "Cannot create ring for worker %u",
+ i);
+ goto exit;
+ }
+ }
+ }
+
+ sched_ctx->private_ctx = (void *)mc_ctx;
+
+ return 0;
+
+exit:
+ for (i = 0; i < sched_ctx->nb_wc; i++) {
+ rte_ring_free(mc_ctx->sched_enq_ring[i]);
+ rte_ring_free(mc_ctx->sched_deq_ring[i]);
+ }
+ rte_free(mc_ctx);
+
+ return -1;
+}
+
+struct rte_cryptodev_scheduler_ops scheduler_mc_ops = {
+ slave_attach,
+ slave_detach,
+ scheduler_start,
+ scheduler_stop,
+ scheduler_config_qp,
+ scheduler_create_private_ctx,
+ NULL, /* option_set */
+ NULL /* option_get */
+};
+
+struct rte_cryptodev_scheduler mc_scheduler = {
+ .name = "multicore-scheduler",
+ .description = "scheduler which will run burst across multiple cpu cores",
+ .mode = CDEV_SCHED_MODE_MULTICORE,
+ .ops = &scheduler_mc_ops
+};
+
+struct rte_cryptodev_scheduler *multicore_scheduler = &mc_scheduler;
diff --git a/src/spdk/dpdk/drivers/crypto/scheduler/scheduler_pkt_size_distr.c b/src/spdk/dpdk/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
new file mode 100644
index 00000000..74129b66
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
@@ -0,0 +1,420 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#include <rte_cryptodev.h>
+#include <rte_malloc.h>
+
+#include "rte_cryptodev_scheduler_operations.h"
+#include "scheduler_pmd_private.h"
+
+#define DEF_PKT_SIZE_THRESHOLD (0xffffff80)
+#define SLAVE_IDX_SWITCH_MASK (0x01)
+#define PRIMARY_SLAVE_IDX 0
+#define SECONDARY_SLAVE_IDX 1
+#define NB_PKT_SIZE_SLAVES 2
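+
+/*
+ * Note: the threshold is applied as a bit mask on the job length. With the
+ * default mask above, a job shorter than 128 bytes (no bit in common with the
+ * mask) is scheduled to the secondary slave, while larger jobs go to the
+ * primary slave.
+ */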
+
+/** pkt size based scheduler context */
+struct psd_scheduler_ctx {
+ uint32_t threshold;
+};
+
+/** pkt size based scheduler queue pair context */
+struct psd_scheduler_qp_ctx {
+ struct scheduler_slave primary_slave;
+ struct scheduler_slave secondary_slave;
+ uint32_t threshold;
+ uint8_t deq_idx;
+} __rte_cache_aligned;
+
+/** scheduling operation variables' wrapping */
+struct psd_schedule_op {
+ uint8_t slave_idx;
+ uint16_t pos;
+};
+
+static uint16_t
+schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ struct scheduler_qp_ctx *qp_ctx = qp;
+ struct psd_scheduler_qp_ctx *psd_qp_ctx = qp_ctx->private_qp_ctx;
+ struct rte_crypto_op *sched_ops[NB_PKT_SIZE_SLAVES][nb_ops];
+ uint32_t in_flight_ops[NB_PKT_SIZE_SLAVES] = {
+ psd_qp_ctx->primary_slave.nb_inflight_cops,
+ psd_qp_ctx->secondary_slave.nb_inflight_cops
+ };
+ struct psd_schedule_op enq_ops[NB_PKT_SIZE_SLAVES] = {
+ {PRIMARY_SLAVE_IDX, 0}, {SECONDARY_SLAVE_IDX, 0}
+ };
+ struct psd_schedule_op *p_enq_op;
+ uint16_t i, processed_ops_pri = 0, processed_ops_sec = 0;
+ uint32_t job_len;
+
+ if (unlikely(nb_ops == 0))
+ return 0;
+
+ for (i = 0; i < nb_ops && i < 4; i++) {
+ rte_prefetch0(ops[i]->sym);
+ rte_prefetch0(ops[i]->sym->session);
+ }
+
+ for (i = 0; (i < (nb_ops - 8)) && (nb_ops > 8); i += 4) {
+ rte_prefetch0(ops[i + 4]->sym);
+ rte_prefetch0(ops[i + 4]->sym->session);
+ rte_prefetch0(ops[i + 5]->sym);
+ rte_prefetch0(ops[i + 5]->sym->session);
+ rte_prefetch0(ops[i + 6]->sym);
+ rte_prefetch0(ops[i + 6]->sym->session);
+ rte_prefetch0(ops[i + 7]->sym);
+ rte_prefetch0(ops[i + 7]->sym->session);
+
+ /* job_len is initialized to the cipher data length; if
+ * that is 0, the auth data length is used instead
+ */
+ job_len = ops[i]->sym->cipher.data.length;
+ job_len += (ops[i]->sym->cipher.data.length == 0) *
+ ops[i]->sym->auth.data.length;
+ /* decide the target op based on the job length */
+ p_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];
+
+ /* stop scheduling cops before the queue is full; this
+ * prevents enqueue failures
+ */
+ if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
+ qp_ctx->max_nb_objs) {
+ i = nb_ops;
+ break;
+ }
+
+ sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i];
+ p_enq_op->pos++;
+
+ job_len = ops[i+1]->sym->cipher.data.length;
+ job_len += (ops[i+1]->sym->cipher.data.length == 0) *
+ ops[i+1]->sym->auth.data.length;
+ p_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];
+
+ if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
+ qp_ctx->max_nb_objs) {
+ i = nb_ops;
+ break;
+ }
+
+ sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i+1];
+ p_enq_op->pos++;
+
+ job_len = ops[i+2]->sym->cipher.data.length;
+ job_len += (ops[i+2]->sym->cipher.data.length == 0) *
+ ops[i+2]->sym->auth.data.length;
+ p_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];
+
+ if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
+ qp_ctx->max_nb_objs) {
+ i = nb_ops;
+ break;
+ }
+
+ sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i+2];
+ p_enq_op->pos++;
+
+ job_len = ops[i+3]->sym->cipher.data.length;
+ job_len += (ops[i+3]->sym->cipher.data.length == 0) *
+ ops[i+3]->sym->auth.data.length;
+ p_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];
+
+ if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
+ qp_ctx->max_nb_objs) {
+ i = nb_ops;
+ break;
+ }
+
+ sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i+3];
+ p_enq_op->pos++;
+ }
+
+ for (; i < nb_ops; i++) {
+ job_len = ops[i]->sym->cipher.data.length;
+ job_len += (ops[i]->sym->cipher.data.length == 0) *
+ ops[i]->sym->auth.data.length;
+ p_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];
+
+ if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
+ qp_ctx->max_nb_objs) {
+ i = nb_ops;
+ break;
+ }
+
+ sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i];
+ p_enq_op->pos++;
+ }
+
+ processed_ops_pri = rte_cryptodev_enqueue_burst(
+ psd_qp_ctx->primary_slave.dev_id,
+ psd_qp_ctx->primary_slave.qp_id,
+ sched_ops[PRIMARY_SLAVE_IDX],
+ enq_ops[PRIMARY_SLAVE_IDX].pos);
+ /* enqueue shall not fail as the slave queue is monitored */
+ RTE_ASSERT(processed_ops_pri == enq_ops[PRIMARY_SLAVE_IDX].pos);
+
+ psd_qp_ctx->primary_slave.nb_inflight_cops += processed_ops_pri;
+
+ processed_ops_sec = rte_cryptodev_enqueue_burst(
+ psd_qp_ctx->secondary_slave.dev_id,
+ psd_qp_ctx->secondary_slave.qp_id,
+ sched_ops[SECONDARY_SLAVE_IDX],
+ enq_ops[SECONDARY_SLAVE_IDX].pos);
+ RTE_ASSERT(processed_ops_sec == enq_ops[SECONDARY_SLAVE_IDX].pos);
+
+ psd_qp_ctx->secondary_slave.nb_inflight_cops += processed_ops_sec;
+
+ return processed_ops_pri + processed_ops_sec;
+}
+
+static uint16_t
+schedule_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct rte_ring *order_ring =
+ ((struct scheduler_qp_ctx *)qp)->order_ring;
+ uint16_t nb_ops_to_enq = get_max_enqueue_order_count(order_ring,
+ nb_ops);
+ uint16_t nb_ops_enqd = schedule_enqueue(qp, ops,
+ nb_ops_to_enq);
+
+ scheduler_order_insert(order_ring, ops, nb_ops_enqd);
+
+ return nb_ops_enqd;
+}
+
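+/** dequeue from the slave selected by deq_idx first; the index is toggled
+ * between the two slaves so that neither queue is starved across calls
+ */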
+static uint16_t
+schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ struct psd_scheduler_qp_ctx *qp_ctx =
+ ((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
+ struct scheduler_slave *slaves[NB_PKT_SIZE_SLAVES] = {
+ &qp_ctx->primary_slave, &qp_ctx->secondary_slave};
+ struct scheduler_slave *slave = slaves[qp_ctx->deq_idx];
+ uint16_t nb_deq_ops_pri = 0, nb_deq_ops_sec = 0;
+
+ if (slave->nb_inflight_cops) {
+ nb_deq_ops_pri = rte_cryptodev_dequeue_burst(slave->dev_id,
+ slave->qp_id, ops, nb_ops);
+ slave->nb_inflight_cops -= nb_deq_ops_pri;
+ }
+
+ qp_ctx->deq_idx = (~qp_ctx->deq_idx) & SLAVE_IDX_SWITCH_MASK;
+
+ if (nb_deq_ops_pri == nb_ops)
+ return nb_deq_ops_pri;
+
+ slave = slaves[qp_ctx->deq_idx];
+
+ if (slave->nb_inflight_cops) {
+ nb_deq_ops_sec = rte_cryptodev_dequeue_burst(slave->dev_id,
+ slave->qp_id, &ops[nb_deq_ops_pri],
+ nb_ops - nb_deq_ops_pri);
+ slave->nb_inflight_cops -= nb_deq_ops_sec;
+
+ if (!slave->nb_inflight_cops)
+ qp_ctx->deq_idx = (~qp_ctx->deq_idx) &
+ SLAVE_IDX_SWITCH_MASK;
+ }
+
+ return nb_deq_ops_pri + nb_deq_ops_sec;
+}
+
+static uint16_t
+schedule_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct rte_ring *order_ring =
+ ((struct scheduler_qp_ctx *)qp)->order_ring;
+
+ schedule_dequeue(qp, ops, nb_ops);
+
+ return scheduler_order_drain(order_ring, ops, nb_ops);
+}
+
+static int
+slave_attach(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint8_t slave_id)
+{
+ return 0;
+}
+
+static int
+slave_detach(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint8_t slave_id)
+{
+ return 0;
+}
+
+static int
+scheduler_start(struct rte_cryptodev *dev)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ struct psd_scheduler_ctx *psd_ctx = sched_ctx->private_ctx;
+ uint16_t i;
+
+	/* the packet size based scheduler needs at least 2 slaves */
+ if (sched_ctx->nb_slaves < NB_PKT_SIZE_SLAVES) {
+ CR_SCHED_LOG(ERR, "not enough slaves to start");
+ return -1;
+ }
+
+ for (i = 0; i < dev->data->nb_queue_pairs; i++) {
+ struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];
+ struct psd_scheduler_qp_ctx *ps_qp_ctx =
+ qp_ctx->private_qp_ctx;
+
+ ps_qp_ctx->primary_slave.dev_id =
+ sched_ctx->slaves[PRIMARY_SLAVE_IDX].dev_id;
+ ps_qp_ctx->primary_slave.qp_id = i;
+ ps_qp_ctx->primary_slave.nb_inflight_cops = 0;
+
+ ps_qp_ctx->secondary_slave.dev_id =
+ sched_ctx->slaves[SECONDARY_SLAVE_IDX].dev_id;
+ ps_qp_ctx->secondary_slave.qp_id = i;
+ ps_qp_ctx->secondary_slave.nb_inflight_cops = 0;
+
+ ps_qp_ctx->threshold = psd_ctx->threshold;
+ }
+
+ if (sched_ctx->reordering_enabled) {
+ dev->enqueue_burst = &schedule_enqueue_ordering;
+ dev->dequeue_burst = &schedule_dequeue_ordering;
+ } else {
+ dev->enqueue_burst = &schedule_enqueue;
+ dev->dequeue_burst = &schedule_dequeue;
+ }
+
+ return 0;
+}
+
+static int
+scheduler_stop(struct rte_cryptodev *dev)
+{
+ uint16_t i;
+
+ for (i = 0; i < dev->data->nb_queue_pairs; i++) {
+ struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];
+ struct psd_scheduler_qp_ctx *ps_qp_ctx = qp_ctx->private_qp_ctx;
+
+ if (ps_qp_ctx->primary_slave.nb_inflight_cops +
+ ps_qp_ctx->secondary_slave.nb_inflight_cops) {
+ CR_SCHED_LOG(ERR, "Some crypto ops left in slave queue");
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int
+scheduler_config_qp(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+ struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];
+ struct psd_scheduler_qp_ctx *ps_qp_ctx;
+
+ ps_qp_ctx = rte_zmalloc_socket(NULL, sizeof(*ps_qp_ctx), 0,
+ rte_socket_id());
+ if (!ps_qp_ctx) {
+		CR_SCHED_LOG(ERR, "failed to allocate memory for private queue pair");
+ return -ENOMEM;
+ }
+
+ qp_ctx->private_qp_ctx = (void *)ps_qp_ctx;
+
+ return 0;
+}
+
+static int
+scheduler_create_private_ctx(struct rte_cryptodev *dev)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ struct psd_scheduler_ctx *psd_ctx;
+
+ if (sched_ctx->private_ctx) {
+ rte_free(sched_ctx->private_ctx);
+ sched_ctx->private_ctx = NULL;
+ }
+
+ psd_ctx = rte_zmalloc_socket(NULL, sizeof(struct psd_scheduler_ctx), 0,
+ rte_socket_id());
+ if (!psd_ctx) {
+		CR_SCHED_LOG(ERR, "failed to allocate memory");
+ return -ENOMEM;
+ }
+
+ psd_ctx->threshold = DEF_PKT_SIZE_THRESHOLD;
+
+ sched_ctx->private_ctx = (void *)psd_ctx;
+
+ return 0;
+}
+
+static int
+scheduler_option_set(struct rte_cryptodev *dev, uint32_t option_type,
+ void *option)
+{
+ struct psd_scheduler_ctx *psd_ctx = ((struct scheduler_ctx *)
+ dev->data->dev_private)->private_ctx;
+ uint32_t threshold;
+
+ if ((enum rte_cryptodev_schedule_option_type)option_type !=
+ CDEV_SCHED_OPTION_THRESHOLD) {
+ CR_SCHED_LOG(ERR, "Option not supported");
+ return -EINVAL;
+ }
+
+ threshold = ((struct rte_cryptodev_scheduler_threshold_option *)
+ option)->threshold;
+ if (!rte_is_power_of_2(threshold)) {
+ CR_SCHED_LOG(ERR, "Threshold is not power of 2");
+ return -EINVAL;
+ }
+
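+	/* keep the threshold in mask form, ~(threshold - 1), as expected by
+	 * the enqueue path
+	 */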
+ psd_ctx->threshold = ~(threshold - 1);
+
+ return 0;
+}
+
+static int
+scheduler_option_get(struct rte_cryptodev *dev, uint32_t option_type,
+ void *option)
+{
+ struct psd_scheduler_ctx *psd_ctx = ((struct scheduler_ctx *)
+ dev->data->dev_private)->private_ctx;
+ struct rte_cryptodev_scheduler_threshold_option *threshold_option;
+
+ if ((enum rte_cryptodev_schedule_option_type)option_type !=
+ CDEV_SCHED_OPTION_THRESHOLD) {
+ CR_SCHED_LOG(ERR, "Option not supported");
+ return -EINVAL;
+ }
+
+ threshold_option = option;
+ threshold_option->threshold = (~psd_ctx->threshold) + 1;
+
+ return 0;
+}
+
+struct rte_cryptodev_scheduler_ops scheduler_ps_ops = {
+ slave_attach,
+ slave_detach,
+ scheduler_start,
+ scheduler_stop,
+ scheduler_config_qp,
+ scheduler_create_private_ctx,
+ scheduler_option_set,
+ scheduler_option_get
+};
+
+struct rte_cryptodev_scheduler psd_scheduler = {
+ .name = "packet-size-based-scheduler",
+	.description = "scheduler which will distribute crypto op "
+		"bursts based on the packet size",
+ .mode = CDEV_SCHED_MODE_PKT_SIZE_DISTR,
+ .ops = &scheduler_ps_ops
+};
+
+struct rte_cryptodev_scheduler *pkt_size_based_distr_scheduler = &psd_scheduler;
diff --git a/src/spdk/dpdk/drivers/crypto/scheduler/scheduler_pmd.c b/src/spdk/dpdk/drivers/crypto/scheduler/scheduler_pmd.c
new file mode 100644
index 00000000..a9221a94
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/scheduler/scheduler_pmd.c
@@ -0,0 +1,572 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+#include <rte_common.h>
+#include <rte_hexdump.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_bus_vdev.h>
+#include <rte_malloc.h>
+#include <rte_cpuflags.h>
+#include <rte_reorder.h>
+#include <rte_string_fns.h>
+
+#include "rte_cryptodev_scheduler.h"
+#include "scheduler_pmd_private.h"
+
+uint8_t cryptodev_driver_id;
+
+struct scheduler_init_params {
+ struct rte_cryptodev_pmd_init_params def_p;
+ uint32_t nb_slaves;
+ enum rte_cryptodev_scheduler_mode mode;
+ char mode_param_str[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN];
+ uint32_t enable_ordering;
+ uint16_t wc_pool[RTE_MAX_LCORE];
+ uint16_t nb_wc;
+ char slave_names[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES]
+ [RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN];
+};
+
+#define RTE_CRYPTODEV_VDEV_NAME ("name")
+#define RTE_CRYPTODEV_VDEV_SLAVE ("slave")
+#define RTE_CRYPTODEV_VDEV_MODE ("mode")
+#define RTE_CRYPTODEV_VDEV_MODE_PARAM ("mode_param")
+#define RTE_CRYPTODEV_VDEV_ORDERING ("ordering")
+#define RTE_CRYPTODEV_VDEV_MAX_NB_QP_ARG ("max_nb_queue_pairs")
+#define RTE_CRYPTODEV_VDEV_SOCKET_ID ("socket_id")
+#define RTE_CRYPTODEV_VDEV_COREMASK ("coremask")
+#define RTE_CRYPTODEV_VDEV_CORELIST ("corelist")
+
+const char *scheduler_valid_params[] = {
+ RTE_CRYPTODEV_VDEV_NAME,
+ RTE_CRYPTODEV_VDEV_SLAVE,
+ RTE_CRYPTODEV_VDEV_MODE,
+ RTE_CRYPTODEV_VDEV_MODE_PARAM,
+ RTE_CRYPTODEV_VDEV_ORDERING,
+ RTE_CRYPTODEV_VDEV_MAX_NB_QP_ARG,
+ RTE_CRYPTODEV_VDEV_SOCKET_ID,
+ RTE_CRYPTODEV_VDEV_COREMASK,
+ RTE_CRYPTODEV_VDEV_CORELIST
+};
+
+struct scheduler_parse_map {
+ const char *name;
+ uint32_t val;
+};
+
+const struct scheduler_parse_map scheduler_mode_map[] = {
+ {RTE_STR(SCHEDULER_MODE_NAME_ROUND_ROBIN),
+ CDEV_SCHED_MODE_ROUNDROBIN},
+ {RTE_STR(SCHEDULER_MODE_NAME_PKT_SIZE_DISTR),
+ CDEV_SCHED_MODE_PKT_SIZE_DISTR},
+ {RTE_STR(SCHEDULER_MODE_NAME_FAIL_OVER),
+ CDEV_SCHED_MODE_FAILOVER},
+ {RTE_STR(SCHEDULER_MODE_NAME_MULTI_CORE),
+ CDEV_SCHED_MODE_MULTICORE}
+};
+
+const struct scheduler_parse_map scheduler_ordering_map[] = {
+ {"enable", 1},
+ {"disable", 0}
+};
+
+#define CDEV_SCHED_MODE_PARAM_SEP_CHAR ':'
+
+static int
+cryptodev_scheduler_create(const char *name,
+ struct rte_vdev_device *vdev,
+ struct scheduler_init_params *init_params)
+{
+ struct rte_cryptodev *dev;
+ struct scheduler_ctx *sched_ctx;
+ uint32_t i;
+ int ret;
+
+ dev = rte_cryptodev_pmd_create(name, &vdev->device,
+ &init_params->def_p);
+ if (dev == NULL) {
+ CR_SCHED_LOG(ERR, "driver %s: failed to create cryptodev vdev",
+ name);
+ return -EFAULT;
+ }
+
+ dev->driver_id = cryptodev_driver_id;
+ dev->dev_ops = rte_crypto_scheduler_pmd_ops;
+
+ sched_ctx = dev->data->dev_private;
+ sched_ctx->max_nb_queue_pairs =
+ init_params->def_p.max_nb_queue_pairs;
+
+ if (init_params->mode == CDEV_SCHED_MODE_MULTICORE) {
+ uint16_t i;
+
+ sched_ctx->nb_wc = init_params->nb_wc;
+
+ for (i = 0; i < sched_ctx->nb_wc; i++) {
+ sched_ctx->wc_pool[i] = init_params->wc_pool[i];
+ CR_SCHED_LOG(INFO, " Worker core[%u]=%u added",
+ i, sched_ctx->wc_pool[i]);
+ }
+ }
+
+ if (init_params->mode > CDEV_SCHED_MODE_USERDEFINED &&
+ init_params->mode < CDEV_SCHED_MODE_COUNT) {
+ union {
+ struct rte_cryptodev_scheduler_threshold_option
+ threshold_option;
+ } option;
+ enum rte_cryptodev_schedule_option_type option_type;
+ char param_name[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN] = {0};
+ char param_val[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN] = {0};
+ char *s, *end;
+
+ ret = rte_cryptodev_scheduler_mode_set(dev->data->dev_id,
+ init_params->mode);
+ if (ret < 0) {
+ rte_cryptodev_pmd_release_device(dev);
+ return ret;
+ }
+
+ for (i = 0; i < RTE_DIM(scheduler_mode_map); i++) {
+ if (scheduler_mode_map[i].val != sched_ctx->mode)
+ continue;
+
+ CR_SCHED_LOG(INFO, " Scheduling mode = %s",
+ scheduler_mode_map[i].name);
+ break;
+ }
+
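+		/* mode_param is expected as "<name>:<value>"; the
+		 * packet-size scheduler takes its threshold via the key
+		 * RTE_CRYPTODEV_SCHEDULER_PARAM_THRES
+		 */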
+ if (strlen(init_params->mode_param_str) > 0) {
+ s = strchr(init_params->mode_param_str,
+ CDEV_SCHED_MODE_PARAM_SEP_CHAR);
+ if (s == NULL) {
+ CR_SCHED_LOG(ERR, "Invalid mode param");
+ return -EINVAL;
+ }
+
+ strlcpy(param_name, init_params->mode_param_str,
+ s - init_params->mode_param_str + 1);
+ s++;
+ strlcpy(param_val, s,
+ RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN);
+
+ switch (init_params->mode) {
+ case CDEV_SCHED_MODE_PKT_SIZE_DISTR:
+ if (strcmp(param_name,
+ RTE_CRYPTODEV_SCHEDULER_PARAM_THRES)
+ != 0) {
+ CR_SCHED_LOG(ERR, "Invalid mode param");
+ return -EINVAL;
+ }
+ option_type = CDEV_SCHED_OPTION_THRESHOLD;
+
+ option.threshold_option.threshold =
+ strtoul(param_val, &end, 0);
+ break;
+ default:
+ CR_SCHED_LOG(ERR, "Invalid mode param");
+ return -EINVAL;
+ }
+
+ if (sched_ctx->ops.option_set(dev, option_type,
+ (void *)&option) < 0) {
+ CR_SCHED_LOG(ERR, "Invalid mode param");
+ return -EINVAL;
+ }
+
+ RTE_LOG(INFO, PMD, " Sched mode param (%s = %s)\n",
+ param_name, param_val);
+ }
+ }
+
+ sched_ctx->reordering_enabled = init_params->enable_ordering;
+
+ for (i = 0; i < RTE_DIM(scheduler_ordering_map); i++) {
+ if (scheduler_ordering_map[i].val !=
+ sched_ctx->reordering_enabled)
+ continue;
+
+ CR_SCHED_LOG(INFO, " Packet ordering = %s",
+ scheduler_ordering_map[i].name);
+
+ break;
+ }
+
+ for (i = 0; i < init_params->nb_slaves; i++) {
+ sched_ctx->init_slave_names[sched_ctx->nb_init_slaves] =
+ rte_zmalloc_socket(
+ NULL,
+ RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN, 0,
+ SOCKET_ID_ANY);
+
+ if (!sched_ctx->init_slave_names[
+ sched_ctx->nb_init_slaves]) {
+ CR_SCHED_LOG(ERR, "driver %s: Insufficient memory",
+ name);
+ return -ENOMEM;
+ }
+
+ strncpy(sched_ctx->init_slave_names[
+ sched_ctx->nb_init_slaves],
+ init_params->slave_names[i],
+ RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN - 1);
+
+ sched_ctx->nb_init_slaves++;
+ }
+
+ /*
+ * Initialize capabilities structure as an empty structure,
+ * in case device information is requested when no slaves are attached
+ */
+ sched_ctx->capabilities = rte_zmalloc_socket(NULL,
+ sizeof(struct rte_cryptodev_capabilities),
+ 0, SOCKET_ID_ANY);
+
+ if (!sched_ctx->capabilities) {
+ CR_SCHED_LOG(ERR, "Not enough memory for capability "
+ "information");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int
+cryptodev_scheduler_remove(struct rte_vdev_device *vdev)
+{
+ const char *name;
+ struct rte_cryptodev *dev;
+ struct scheduler_ctx *sched_ctx;
+
+ if (vdev == NULL)
+ return -EINVAL;
+
+ name = rte_vdev_device_name(vdev);
+ dev = rte_cryptodev_pmd_get_named_dev(name);
+ if (dev == NULL)
+ return -EINVAL;
+
+ sched_ctx = dev->data->dev_private;
+
+ if (sched_ctx->nb_slaves) {
+ uint32_t i;
+
+ for (i = 0; i < sched_ctx->nb_slaves; i++)
+ rte_cryptodev_scheduler_slave_detach(dev->data->dev_id,
+ sched_ctx->slaves[i].dev_id);
+ }
+
+ return rte_cryptodev_pmd_destroy(dev);
+}
+
+/** Parse integer from integer argument */
+static int
+parse_integer_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ int *i = (int *) extra_args;
+
+ *i = atoi(value);
+ if (*i < 0) {
+		CR_SCHED_LOG(ERR, "Argument has to be non-negative.");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/** Parse coremask argument (hexadecimal bitmask of worker lcores) */
+static int
+parse_coremask_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ int i, j, val;
+ uint16_t idx = 0;
+ char c;
+ struct scheduler_init_params *params = extra_args;
+
+ params->nb_wc = 0;
+
+ if (value == NULL)
+ return -1;
+	/* Skip leading and trailing blanks and an optional 0x/0X prefix. */
+ while (isblank(*value))
+ value++;
+ if (value[0] == '0' && ((value[1] == 'x') || (value[1] == 'X')))
+ value += 2;
+ i = strlen(value);
+ while ((i > 0) && isblank(value[i - 1]))
+ i--;
+
+ if (i == 0)
+ return -1;
+
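+	/* walk the mask from its least significant hex digit; every set bit
+	 * maps to one worker lcore id
+	 */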
+ for (i = i - 1; i >= 0 && idx < RTE_MAX_LCORE; i--) {
+ c = value[i];
+ if (isxdigit(c) == 0) {
+ /* invalid characters */
+ return -1;
+ }
+ if (isdigit(c))
+ val = c - '0';
+ else if (isupper(c))
+ val = c - 'A' + 10;
+ else
+ val = c - 'a' + 10;
+
+ for (j = 0; j < 4 && idx < RTE_MAX_LCORE; j++, idx++) {
+ if ((1 << j) & val)
+ params->wc_pool[params->nb_wc++] = idx;
+ }
+ }
+
+ return 0;
+}
+
+/** Parse corelist argument (list of worker lcore ids) */
+static int
+parse_corelist_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ struct scheduler_init_params *params = extra_args;
+
+ params->nb_wc = 0;
+
+ const char *token = value;
+
+ while (isdigit(token[0])) {
+ char *rval;
+ unsigned int core = strtoul(token, &rval, 10);
+
+		if (core >= RTE_MAX_LCORE) {
+			CR_SCHED_LOG(ERR, "Invalid worker core %u, should be smaller "
+				"than %u.", core, RTE_MAX_LCORE);
+			return -EINVAL;
+		}
+ params->wc_pool[params->nb_wc++] = (uint16_t)core;
+ token = (const char *)rval;
+ if (token[0] == '\0')
+ break;
+ token++;
+ }
+
+ return 0;
+}
+
+/** Parse name */
+static int
+parse_name_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ struct rte_cryptodev_pmd_init_params *params = extra_args;
+
+ if (strlen(value) >= RTE_CRYPTODEV_NAME_MAX_LEN - 1) {
+ CR_SCHED_LOG(ERR, "Invalid name %s, should be less than "
+ "%u bytes.", value,
+ RTE_CRYPTODEV_NAME_MAX_LEN - 1);
+ return -EINVAL;
+ }
+
+ strncpy(params->name, value, RTE_CRYPTODEV_NAME_MAX_LEN);
+
+ return 0;
+}
+
+/** Parse slave */
+static int
+parse_slave_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ struct scheduler_init_params *param = extra_args;
+
+ if (param->nb_slaves >= RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES) {
+ CR_SCHED_LOG(ERR, "Too many slaves.");
+ return -ENOMEM;
+ }
+
+ strncpy(param->slave_names[param->nb_slaves++], value,
+ RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN - 1);
+
+ return 0;
+}
+
+static int
+parse_mode_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ struct scheduler_init_params *param = extra_args;
+ uint32_t i;
+
+ for (i = 0; i < RTE_DIM(scheduler_mode_map); i++) {
+ if (strcmp(value, scheduler_mode_map[i].name) == 0) {
+ param->mode = (enum rte_cryptodev_scheduler_mode)
+ scheduler_mode_map[i].val;
+
+ break;
+ }
+ }
+
+ if (i == RTE_DIM(scheduler_mode_map)) {
+ CR_SCHED_LOG(ERR, "Unrecognized input.");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+parse_mode_param_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ struct scheduler_init_params *param = extra_args;
+
+ strlcpy(param->mode_param_str, value,
+ RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN);
+
+ return 0;
+}
+
+static int
+parse_ordering_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ struct scheduler_init_params *param = extra_args;
+ uint32_t i;
+
+ for (i = 0; i < RTE_DIM(scheduler_ordering_map); i++) {
+ if (strcmp(value, scheduler_ordering_map[i].name) == 0) {
+ param->enable_ordering =
+ scheduler_ordering_map[i].val;
+ break;
+ }
+ }
+
+ if (i == RTE_DIM(scheduler_ordering_map)) {
+ CR_SCHED_LOG(ERR, "Unrecognized input.");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+scheduler_parse_init_params(struct scheduler_init_params *params,
+ const char *input_args)
+{
+ struct rte_kvargs *kvlist = NULL;
+ int ret = 0;
+
+ if (params == NULL)
+ return -EINVAL;
+
+ if (input_args) {
+ kvlist = rte_kvargs_parse(input_args,
+ scheduler_valid_params);
+ if (kvlist == NULL)
+ return -1;
+
+ ret = rte_kvargs_process(kvlist,
+ RTE_CRYPTODEV_VDEV_MAX_NB_QP_ARG,
+ &parse_integer_arg,
+ &params->def_p.max_nb_queue_pairs);
+ if (ret < 0)
+ goto free_kvlist;
+
+ ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_SOCKET_ID,
+ &parse_integer_arg,
+ &params->def_p.socket_id);
+ if (ret < 0)
+ goto free_kvlist;
+
+ ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_COREMASK,
+ &parse_coremask_arg,
+ params);
+ if (ret < 0)
+ goto free_kvlist;
+
+ ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_CORELIST,
+ &parse_corelist_arg,
+ params);
+ if (ret < 0)
+ goto free_kvlist;
+
+ ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_NAME,
+ &parse_name_arg,
+ &params->def_p);
+ if (ret < 0)
+ goto free_kvlist;
+
+ ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_SLAVE,
+ &parse_slave_arg, params);
+ if (ret < 0)
+ goto free_kvlist;
+
+ ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_MODE,
+ &parse_mode_arg, params);
+ if (ret < 0)
+ goto free_kvlist;
+
+ ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_MODE_PARAM,
+ &parse_mode_param_arg, params);
+ if (ret < 0)
+ goto free_kvlist;
+
+ ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_ORDERING,
+ &parse_ordering_arg, params);
+ if (ret < 0)
+ goto free_kvlist;
+ }
+
+free_kvlist:
+ rte_kvargs_free(kvlist);
+ return ret;
+}
+
+static int
+cryptodev_scheduler_probe(struct rte_vdev_device *vdev)
+{
+ struct scheduler_init_params init_params = {
+ .def_p = {
+ "",
+ sizeof(struct scheduler_ctx),
+ rte_socket_id(),
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
+ },
+ .nb_slaves = 0,
+ .mode = CDEV_SCHED_MODE_NOT_SET,
+ .enable_ordering = 0,
+ .slave_names = { {0} }
+ };
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+
+	if (scheduler_parse_init_params(&init_params,
+			rte_vdev_device_args(vdev)) < 0)
+		return -EINVAL;
+
+ return cryptodev_scheduler_create(name,
+ vdev,
+ &init_params);
+}
+
+static struct rte_vdev_driver cryptodev_scheduler_pmd_drv = {
+ .probe = cryptodev_scheduler_probe,
+ .remove = cryptodev_scheduler_remove
+};
+
+static struct cryptodev_driver scheduler_crypto_drv;
+
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_SCHEDULER_PMD,
+ cryptodev_scheduler_pmd_drv);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_SCHEDULER_PMD,
+ "max_nb_queue_pairs=<int> "
+ "socket_id=<int> "
+ "slave=<name>");
+RTE_PMD_REGISTER_CRYPTO_DRIVER(scheduler_crypto_drv,
+ cryptodev_scheduler_pmd_drv.driver,
+ cryptodev_driver_id);
diff --git a/src/spdk/dpdk/drivers/crypto/scheduler/scheduler_pmd_ops.c b/src/spdk/dpdk/drivers/crypto/scheduler/scheduler_pmd_ops.c
new file mode 100644
index 00000000..778071ca
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/scheduler/scheduler_pmd_ops.c
@@ -0,0 +1,545 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_dev.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_reorder.h>
+
+#include "scheduler_pmd_private.h"
+
+/** Attach the slaves predefined by the scheduler's EAL options */
+static int
+scheduler_attach_init_slave(struct rte_cryptodev *dev)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ uint8_t scheduler_id = dev->data->dev_id;
+ int i;
+
+ for (i = sched_ctx->nb_init_slaves - 1; i >= 0; i--) {
+ const char *dev_name = sched_ctx->init_slave_names[i];
+ struct rte_cryptodev *slave_dev =
+ rte_cryptodev_pmd_get_named_dev(dev_name);
+ int status;
+
+ if (!slave_dev) {
+ CR_SCHED_LOG(ERR, "Failed to locate slave dev %s",
+ dev_name);
+ return -EINVAL;
+ }
+
+ status = rte_cryptodev_scheduler_slave_attach(
+ scheduler_id, slave_dev->data->dev_id);
+
+ if (status < 0) {
+ CR_SCHED_LOG(ERR, "Failed to attach slave cryptodev %u",
+ slave_dev->data->dev_id);
+ return status;
+ }
+
+ CR_SCHED_LOG(INFO, "Scheduler %s attached slave %s",
+ dev->data->name,
+ sched_ctx->init_slave_names[i]);
+
+ rte_free(sched_ctx->init_slave_names[i]);
+ sched_ctx->init_slave_names[i] = NULL;
+
+ sched_ctx->nb_init_slaves -= 1;
+ }
+
+ return 0;
+}
+
+/** Configure device */
+static int
+scheduler_pmd_config(struct rte_cryptodev *dev,
+ struct rte_cryptodev_config *config)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ uint32_t i;
+ int ret;
+
+	/* scheduler_attach_init_slave() may be called from several entry
+	 * points, but only the first call does any real work.
+	 */
+ ret = scheduler_attach_init_slave(dev);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < sched_ctx->nb_slaves; i++) {
+ uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
+
+ ret = rte_cryptodev_configure(slave_dev_id, config);
+ if (ret < 0)
+ break;
+ }
+
+ return ret;
+}
+
+static int
+update_order_ring(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];
+
+ if (sched_ctx->reordering_enabled) {
+ char order_ring_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ uint32_t buff_size = rte_align32pow2(
+ sched_ctx->nb_slaves * PER_SLAVE_BUFF_SIZE);
+
+ if (qp_ctx->order_ring) {
+ rte_ring_free(qp_ctx->order_ring);
+ qp_ctx->order_ring = NULL;
+ }
+
+ if (!buff_size)
+ return 0;
+
+ if (snprintf(order_ring_name, RTE_CRYPTODEV_NAME_MAX_LEN,
+ "%s_rb_%u_%u", RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD),
+ dev->data->dev_id, qp_id) < 0) {
+			CR_SCHED_LOG(ERR, "failed to create unique reorder buffer "
+					"name");
+ return -ENOMEM;
+ }
+
+ qp_ctx->order_ring = rte_ring_create(order_ring_name,
+ buff_size, rte_socket_id(),
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+ if (!qp_ctx->order_ring) {
+ CR_SCHED_LOG(ERR, "failed to create order ring");
+ return -ENOMEM;
+ }
+ } else {
+ if (qp_ctx->order_ring) {
+ rte_ring_free(qp_ctx->order_ring);
+ qp_ctx->order_ring = NULL;
+ }
+ }
+
+ return 0;
+}
+
+/** Start device */
+static int
+scheduler_pmd_start(struct rte_cryptodev *dev)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ uint32_t i;
+ int ret;
+
+ if (dev->data->dev_started)
+ return 0;
+
+	/* scheduler_attach_init_slave() may be called from several entry
+	 * points, but only the first call does any real work.
+	 */
+ ret = scheduler_attach_init_slave(dev);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < dev->data->nb_queue_pairs; i++) {
+ ret = update_order_ring(dev, i);
+ if (ret < 0) {
+ CR_SCHED_LOG(ERR, "Failed to update reorder buffer");
+ return ret;
+ }
+ }
+
+ if (sched_ctx->mode == CDEV_SCHED_MODE_NOT_SET) {
+ CR_SCHED_LOG(ERR, "Scheduler mode is not set");
+ return -1;
+ }
+
+ if (!sched_ctx->nb_slaves) {
+ CR_SCHED_LOG(ERR, "No slave in the scheduler");
+ return -1;
+ }
+
+ RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.slave_attach, -ENOTSUP);
+
+ for (i = 0; i < sched_ctx->nb_slaves; i++) {
+ uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
+
+ if ((*sched_ctx->ops.slave_attach)(dev, slave_dev_id) < 0) {
+ CR_SCHED_LOG(ERR, "Failed to attach slave");
+ return -ENOTSUP;
+ }
+ }
+
+ RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.scheduler_start, -ENOTSUP);
+
+ if ((*sched_ctx->ops.scheduler_start)(dev) < 0) {
+ CR_SCHED_LOG(ERR, "Scheduler start failed");
+ return -1;
+ }
+
+ /* start all slaves */
+ for (i = 0; i < sched_ctx->nb_slaves; i++) {
+ uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
+ struct rte_cryptodev *slave_dev =
+ rte_cryptodev_pmd_get_dev(slave_dev_id);
+
+ ret = (*slave_dev->dev_ops->dev_start)(slave_dev);
+ if (ret < 0) {
+ CR_SCHED_LOG(ERR, "Failed to start slave dev %u",
+ slave_dev_id);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/** Stop device */
+static void
+scheduler_pmd_stop(struct rte_cryptodev *dev)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ uint32_t i;
+
+ if (!dev->data->dev_started)
+ return;
+
+ /* stop all slaves first */
+ for (i = 0; i < sched_ctx->nb_slaves; i++) {
+ uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
+ struct rte_cryptodev *slave_dev =
+ rte_cryptodev_pmd_get_dev(slave_dev_id);
+
+ (*slave_dev->dev_ops->dev_stop)(slave_dev);
+ }
+
+ if (*sched_ctx->ops.scheduler_stop)
+ (*sched_ctx->ops.scheduler_stop)(dev);
+
+ for (i = 0; i < sched_ctx->nb_slaves; i++) {
+ uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
+
+ if (*sched_ctx->ops.slave_detach)
+ (*sched_ctx->ops.slave_detach)(dev, slave_dev_id);
+ }
+}
+
+/** Close device */
+static int
+scheduler_pmd_close(struct rte_cryptodev *dev)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ uint32_t i;
+ int ret;
+
+ /* the dev should be stopped before being closed */
+ if (dev->data->dev_started)
+ return -EBUSY;
+
+ /* close all slaves first */
+ for (i = 0; i < sched_ctx->nb_slaves; i++) {
+ uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
+ struct rte_cryptodev *slave_dev =
+ rte_cryptodev_pmd_get_dev(slave_dev_id);
+
+ ret = (*slave_dev->dev_ops->dev_close)(slave_dev);
+ if (ret < 0)
+ return ret;
+ }
+
+ for (i = 0; i < dev->data->nb_queue_pairs; i++) {
+ struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];
+
+ if (qp_ctx->order_ring) {
+ rte_ring_free(qp_ctx->order_ring);
+ qp_ctx->order_ring = NULL;
+ }
+
+ if (qp_ctx->private_qp_ctx) {
+ rte_free(qp_ctx->private_qp_ctx);
+ qp_ctx->private_qp_ctx = NULL;
+ }
+ }
+
+ if (sched_ctx->private_ctx) {
+ rte_free(sched_ctx->private_ctx);
+ sched_ctx->private_ctx = NULL;
+ }
+
+ if (sched_ctx->capabilities) {
+ rte_free(sched_ctx->capabilities);
+ sched_ctx->capabilities = NULL;
+ }
+
+ return 0;
+}
+
+/** Get device statistics */
+static void
+scheduler_pmd_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ uint32_t i;
+
+ for (i = 0; i < sched_ctx->nb_slaves; i++) {
+ uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
+ struct rte_cryptodev *slave_dev =
+ rte_cryptodev_pmd_get_dev(slave_dev_id);
+ struct rte_cryptodev_stats slave_stats = {0};
+
+ (*slave_dev->dev_ops->stats_get)(slave_dev, &slave_stats);
+
+ stats->enqueued_count += slave_stats.enqueued_count;
+ stats->dequeued_count += slave_stats.dequeued_count;
+
+ stats->enqueue_err_count += slave_stats.enqueue_err_count;
+ stats->dequeue_err_count += slave_stats.dequeue_err_count;
+ }
+}
+
+/** Reset device statistics */
+static void
+scheduler_pmd_stats_reset(struct rte_cryptodev *dev)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ uint32_t i;
+
+ for (i = 0; i < sched_ctx->nb_slaves; i++) {
+ uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
+ struct rte_cryptodev *slave_dev =
+ rte_cryptodev_pmd_get_dev(slave_dev_id);
+
+ (*slave_dev->dev_ops->stats_reset)(slave_dev);
+ }
+}
+
+/** Get device info */
+static void
+scheduler_pmd_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *dev_info)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ uint32_t max_nb_sess = 0;
+ uint16_t headroom_sz = 0;
+ uint16_t tailroom_sz = 0;
+ uint32_t i;
+
+ if (!dev_info)
+ return;
+
+	/* scheduler_attach_init_slave() may be called from several entry
+	 * points, but only the first call does any real work.
+	 */
+ scheduler_attach_init_slave(dev);
+
+ for (i = 0; i < sched_ctx->nb_slaves; i++) {
+ uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
+ struct rte_cryptodev_info slave_info;
+
+ rte_cryptodev_info_get(slave_dev_id, &slave_info);
+ uint32_t dev_max_sess = slave_info.sym.max_nb_sessions;
+ if (dev_max_sess != 0) {
+ if (max_nb_sess == 0 || dev_max_sess < max_nb_sess)
+ max_nb_sess = slave_info.sym.max_nb_sessions;
+ }
+
+ /* Get the max headroom requirement among slave PMDs */
+ headroom_sz = slave_info.min_mbuf_headroom_req >
+ headroom_sz ?
+ slave_info.min_mbuf_headroom_req :
+ headroom_sz;
+
+ /* Get the max tailroom requirement among slave PMDs */
+ tailroom_sz = slave_info.min_mbuf_tailroom_req >
+ tailroom_sz ?
+ slave_info.min_mbuf_tailroom_req :
+ tailroom_sz;
+ }
+
+ dev_info->driver_id = dev->driver_id;
+ dev_info->feature_flags = dev->feature_flags;
+ dev_info->capabilities = sched_ctx->capabilities;
+ dev_info->max_nb_queue_pairs = sched_ctx->max_nb_queue_pairs;
+ dev_info->min_mbuf_headroom_req = headroom_sz;
+ dev_info->min_mbuf_tailroom_req = tailroom_sz;
+ dev_info->sym.max_nb_sessions = max_nb_sess;
+}
+
+/** Release queue pair */
+static int
+scheduler_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+ struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];
+
+ if (!qp_ctx)
+ return 0;
+
+ if (qp_ctx->order_ring)
+ rte_ring_free(qp_ctx->order_ring);
+ if (qp_ctx->private_qp_ctx)
+ rte_free(qp_ctx->private_qp_ctx);
+
+ rte_free(qp_ctx);
+ dev->data->queue_pairs[qp_id] = NULL;
+
+ return 0;
+}
+
+/** Setup a queue pair */
+static int
+scheduler_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+ const struct rte_cryptodev_qp_conf *qp_conf, int socket_id,
+ struct rte_mempool *session_pool)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ struct scheduler_qp_ctx *qp_ctx;
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ uint32_t i;
+ int ret;
+
+ if (snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
+			"CRYPTO_SCHE PMD %u QP %u",
+ dev->data->dev_id, qp_id) < 0) {
+ CR_SCHED_LOG(ERR, "Failed to create unique queue pair name");
+ return -EFAULT;
+ }
+
+ /* Free memory prior to re-allocation if needed. */
+ if (dev->data->queue_pairs[qp_id] != NULL)
+ scheduler_pmd_qp_release(dev, qp_id);
+
+ for (i = 0; i < sched_ctx->nb_slaves; i++) {
+ uint8_t slave_id = sched_ctx->slaves[i].dev_id;
+
+ /*
+ * All slaves will share the same session mempool
+ * for session-less operations, so the objects
+ * must be big enough for all the drivers used.
+ */
+ ret = rte_cryptodev_queue_pair_setup(slave_id, qp_id,
+ qp_conf, socket_id, session_pool);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Allocate the queue pair data structure. */
+ qp_ctx = rte_zmalloc_socket(name, sizeof(*qp_ctx), RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (qp_ctx == NULL)
+ return -ENOMEM;
+
+ /* The actual available object number = nb_descriptors - 1 */
+ qp_ctx->max_nb_objs = qp_conf->nb_descriptors - 1;
+
+ dev->data->queue_pairs[qp_id] = qp_ctx;
+
+	/* scheduler_attach_init_slave() may be called from several entry
+	 * points, but only the first call does any real work.
+	 */
+ ret = scheduler_attach_init_slave(dev);
+ if (ret < 0) {
+ CR_SCHED_LOG(ERR, "Failed to attach slave");
+ scheduler_pmd_qp_release(dev, qp_id);
+ return ret;
+ }
+
+ if (*sched_ctx->ops.config_queue_pair) {
+ if ((*sched_ctx->ops.config_queue_pair)(dev, qp_id) < 0) {
+ CR_SCHED_LOG(ERR, "Unable to configure queue pair");
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+/** Return the number of allocated queue pairs */
+static uint32_t
+scheduler_pmd_qp_count(struct rte_cryptodev *dev)
+{
+ return dev->data->nb_queue_pairs;
+}
+
+static uint32_t
+scheduler_pmd_sym_session_get_size(struct rte_cryptodev *dev)
+{
+	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+	uint8_t i = 0;
+	uint32_t max_priv_sess_size = 0;
+
+	/* Check what is the maximum private session size for all slaves */
+	for (i = 0; i < sched_ctx->nb_slaves; i++) {
+		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
+		struct rte_cryptodev *slave_dev =
+				&rte_cryptodevs[slave_dev_id];
+		uint32_t priv_sess_size =
+			(*slave_dev->dev_ops->sym_session_get_size)(slave_dev);
+
+ if (max_priv_sess_size < priv_sess_size)
+ max_priv_sess_size = priv_sess_size;
+ }
+
+ return max_priv_sess_size;
+}
+
+static int
+scheduler_pmd_sym_session_configure(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mempool)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ uint32_t i;
+ int ret;
+
+ for (i = 0; i < sched_ctx->nb_slaves; i++) {
+ struct scheduler_slave *slave = &sched_ctx->slaves[i];
+
+ ret = rte_cryptodev_sym_session_init(slave->dev_id, sess,
+ xform, mempool);
+ if (ret < 0) {
+ CR_SCHED_LOG(ERR, "unable to config sym session");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/** Clear the memory of session so it doesn't leave key material behind */
+static void
+scheduler_pmd_sym_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ uint32_t i;
+
+ /* Clear private data of slaves */
+ for (i = 0; i < sched_ctx->nb_slaves; i++) {
+ struct scheduler_slave *slave = &sched_ctx->slaves[i];
+
+ rte_cryptodev_sym_session_clear(slave->dev_id, sess);
+ }
+}
+
+struct rte_cryptodev_ops scheduler_pmd_ops = {
+ .dev_configure = scheduler_pmd_config,
+ .dev_start = scheduler_pmd_start,
+ .dev_stop = scheduler_pmd_stop,
+ .dev_close = scheduler_pmd_close,
+
+ .stats_get = scheduler_pmd_stats_get,
+ .stats_reset = scheduler_pmd_stats_reset,
+
+ .dev_infos_get = scheduler_pmd_info_get,
+
+ .queue_pair_setup = scheduler_pmd_qp_setup,
+ .queue_pair_release = scheduler_pmd_qp_release,
+ .queue_pair_count = scheduler_pmd_qp_count,
+
+ .sym_session_get_size = scheduler_pmd_sym_session_get_size,
+ .sym_session_configure = scheduler_pmd_sym_session_configure,
+ .sym_session_clear = scheduler_pmd_sym_session_clear,
+};
+
+struct rte_cryptodev_ops *rte_crypto_scheduler_pmd_ops = &scheduler_pmd_ops;
diff --git a/src/spdk/dpdk/drivers/crypto/scheduler/scheduler_pmd_private.h b/src/spdk/dpdk/drivers/crypto/scheduler/scheduler_pmd_private.h
new file mode 100644
index 00000000..d5e602a2
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/scheduler/scheduler_pmd_private.h
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#ifndef _SCHEDULER_PMD_PRIVATE_H
+#define _SCHEDULER_PMD_PRIVATE_H
+
+#include "rte_cryptodev_scheduler.h"
+
+#define CRYPTODEV_NAME_SCHEDULER_PMD crypto_scheduler
+/**< Scheduler Crypto PMD device name */
+
+#define PER_SLAVE_BUFF_SIZE (256)
+
+extern int scheduler_logtype_driver;
+
+#define CR_SCHED_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, scheduler_logtype_driver, \
+ "%s() line %u: "fmt "\n", __func__, __LINE__, ##args)
+
+struct scheduler_slave {
+ uint8_t dev_id;
+ uint16_t qp_id;
+ uint32_t nb_inflight_cops;
+
+ uint8_t driver_id;
+};
+
+struct scheduler_ctx {
+ void *private_ctx;
+ /**< private scheduler context pointer */
+
+ struct rte_cryptodev_capabilities *capabilities;
+ uint32_t nb_capabilities;
+
+ uint32_t max_nb_queue_pairs;
+
+ struct scheduler_slave slaves[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
+ uint32_t nb_slaves;
+
+ enum rte_cryptodev_scheduler_mode mode;
+
+ struct rte_cryptodev_scheduler_ops ops;
+
+ uint8_t reordering_enabled;
+
+ char name[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN];
+ char description[RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN];
+ uint16_t wc_pool[RTE_MAX_LCORE];
+ uint16_t nb_wc;
+
+ char *init_slave_names[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
+ int nb_init_slaves;
+} __rte_cache_aligned;
+
+struct scheduler_qp_ctx {
+ void *private_qp_ctx;
+
+ uint32_t max_nb_objs;
+
+ struct rte_ring *order_ring;
+ uint32_t seqn;
+} __rte_cache_aligned;
+
+
+extern uint8_t cryptodev_driver_id;
+
+static __rte_always_inline uint16_t
+get_max_enqueue_order_count(struct rte_ring *order_ring, uint16_t nb_ops)
+{
+ uint32_t count = rte_ring_free_count(order_ring);
+
+ return count > nb_ops ? nb_ops : count;
+}
+
+static __rte_always_inline void
+scheduler_order_insert(struct rte_ring *order_ring,
+ struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ rte_ring_sp_enqueue_burst(order_ring, (void **)ops, nb_ops, NULL);
+}
+
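+/* peek at the op sitting 'pos' entries past the ring's consumer head
+ * without dequeuing it
+ */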
+#define SCHEDULER_GET_RING_OBJ(order_ring, pos, op) do { \
+ struct rte_crypto_op **ring = (void *)&order_ring[1]; \
+ op = ring[(order_ring->cons.head + pos) & order_ring->mask]; \
+} while (0)
+
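+/* drain completed ops from the head of the order ring, stopping at the
+ * first op that is still marked NOT_PROCESSED so the original enqueue
+ * order is preserved
+ */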
+static __rte_always_inline uint16_t
+scheduler_order_drain(struct rte_ring *order_ring,
+ struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ struct rte_crypto_op *op;
+ uint32_t nb_objs = rte_ring_count(order_ring);
+ uint32_t nb_ops_to_deq = 0;
+ uint32_t nb_ops_deqd = 0;
+
+ if (nb_objs > nb_ops)
+ nb_objs = nb_ops;
+
+ while (nb_ops_to_deq < nb_objs) {
+ SCHEDULER_GET_RING_OBJ(order_ring, nb_ops_to_deq, op);
+ if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+ break;
+ nb_ops_to_deq++;
+ }
+
+ if (nb_ops_to_deq)
+ nb_ops_deqd = rte_ring_sc_dequeue_bulk(order_ring,
+ (void **)ops, nb_ops_to_deq, NULL);
+
+ return nb_ops_deqd;
+}
+
+/** device specific operations function pointer structure */
+extern struct rte_cryptodev_ops *rte_crypto_scheduler_pmd_ops;
+
+#endif /* _SCHEDULER_PMD_PRIVATE_H */
diff --git a/src/spdk/dpdk/drivers/crypto/scheduler/scheduler_roundrobin.c b/src/spdk/dpdk/drivers/crypto/scheduler/scheduler_roundrobin.c
new file mode 100644
index 00000000..c7082a64
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/scheduler/scheduler_roundrobin.c
@@ -0,0 +1,212 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#include <rte_cryptodev.h>
+#include <rte_malloc.h>
+
+#include "rte_cryptodev_scheduler_operations.h"
+#include "scheduler_pmd_private.h"
+
+struct rr_scheduler_qp_ctx {
+ struct scheduler_slave slaves[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
+ uint32_t nb_slaves;
+
+ uint32_t last_enq_slave_idx;
+ uint32_t last_deq_slave_idx;
+};
+
+static uint16_t
+schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ struct rr_scheduler_qp_ctx *rr_qp_ctx =
+ ((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
+ uint32_t slave_idx = rr_qp_ctx->last_enq_slave_idx;
+ struct scheduler_slave *slave = &rr_qp_ctx->slaves[slave_idx];
+ uint16_t i, processed_ops;
+
+ if (unlikely(nb_ops == 0))
+ return 0;
+
+ for (i = 0; i < nb_ops && i < 4; i++)
+ rte_prefetch0(ops[i]->sym->session);
+
+ processed_ops = rte_cryptodev_enqueue_burst(slave->dev_id,
+ slave->qp_id, ops, nb_ops);
+
+ slave->nb_inflight_cops += processed_ops;
+
+ rr_qp_ctx->last_enq_slave_idx += 1;
+ rr_qp_ctx->last_enq_slave_idx %= rr_qp_ctx->nb_slaves;
+
+ return processed_ops;
+}
+
+static uint16_t
+schedule_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct rte_ring *order_ring =
+ ((struct scheduler_qp_ctx *)qp)->order_ring;
+ uint16_t nb_ops_to_enq = get_max_enqueue_order_count(order_ring,
+ nb_ops);
+ uint16_t nb_ops_enqd = schedule_enqueue(qp, ops,
+ nb_ops_to_enq);
+
+ scheduler_order_insert(order_ring, ops, nb_ops_enqd);
+
+ return nb_ops_enqd;
+}
+
+
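+/* dequeue from the first slave, starting at last_deq_slave_idx, that has
+ * in-flight ops; return 0 if none of the slaves has any
+ */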
+static uint16_t
+schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ struct rr_scheduler_qp_ctx *rr_qp_ctx =
+ ((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
+ struct scheduler_slave *slave;
+ uint32_t last_slave_idx = rr_qp_ctx->last_deq_slave_idx;
+ uint16_t nb_deq_ops;
+
+ if (unlikely(rr_qp_ctx->slaves[last_slave_idx].nb_inflight_cops == 0)) {
+ do {
+ last_slave_idx += 1;
+
+ if (unlikely(last_slave_idx >= rr_qp_ctx->nb_slaves))
+ last_slave_idx = 0;
+			/* looped back to the start: no slave has inflight cops */
+ if (last_slave_idx == rr_qp_ctx->last_deq_slave_idx)
+ return 0;
+ } while (rr_qp_ctx->slaves[last_slave_idx].nb_inflight_cops
+ == 0);
+ }
+
+ slave = &rr_qp_ctx->slaves[last_slave_idx];
+
+ nb_deq_ops = rte_cryptodev_dequeue_burst(slave->dev_id,
+ slave->qp_id, ops, nb_ops);
+
+ last_slave_idx += 1;
+ last_slave_idx %= rr_qp_ctx->nb_slaves;
+
+ rr_qp_ctx->last_deq_slave_idx = last_slave_idx;
+
+ slave->nb_inflight_cops -= nb_deq_ops;
+
+ return nb_deq_ops;
+}
+
+static uint16_t
+schedule_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct rte_ring *order_ring =
+ ((struct scheduler_qp_ctx *)qp)->order_ring;
+
+ schedule_dequeue(qp, ops, nb_ops);
+
+ return scheduler_order_drain(order_ring, ops, nb_ops);
+}
+
+static int
+slave_attach(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint8_t slave_id)
+{
+ return 0;
+}
+
+static int
+slave_detach(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint8_t slave_id)
+{
+ return 0;
+}
+
+static int
+scheduler_start(struct rte_cryptodev *dev)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ uint16_t i;
+
+ if (sched_ctx->reordering_enabled) {
+ dev->enqueue_burst = &schedule_enqueue_ordering;
+ dev->dequeue_burst = &schedule_dequeue_ordering;
+ } else {
+ dev->enqueue_burst = &schedule_enqueue;
+ dev->dequeue_burst = &schedule_dequeue;
+ }
+
+ for (i = 0; i < dev->data->nb_queue_pairs; i++) {
+ struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];
+ struct rr_scheduler_qp_ctx *rr_qp_ctx =
+ qp_ctx->private_qp_ctx;
+ uint32_t j;
+
+ memset(rr_qp_ctx->slaves, 0,
+ RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES *
+ sizeof(struct scheduler_slave));
+ for (j = 0; j < sched_ctx->nb_slaves; j++) {
+ rr_qp_ctx->slaves[j].dev_id =
+ sched_ctx->slaves[j].dev_id;
+ rr_qp_ctx->slaves[j].qp_id = i;
+ }
+
+ rr_qp_ctx->nb_slaves = sched_ctx->nb_slaves;
+
+ rr_qp_ctx->last_enq_slave_idx = 0;
+ rr_qp_ctx->last_deq_slave_idx = 0;
+ }
+
+ return 0;
+}
+
+static int
+scheduler_stop(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+static int
+scheduler_config_qp(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+ struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];
+ struct rr_scheduler_qp_ctx *rr_qp_ctx;
+
+ rr_qp_ctx = rte_zmalloc_socket(NULL, sizeof(*rr_qp_ctx), 0,
+ rte_socket_id());
+ if (!rr_qp_ctx) {
+		CR_SCHED_LOG(ERR, "failed to allocate memory for private queue pair");
+ return -ENOMEM;
+ }
+
+ qp_ctx->private_qp_ctx = (void *)rr_qp_ctx;
+
+ return 0;
+}
+
+static int
+scheduler_create_private_ctx(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+struct rte_cryptodev_scheduler_ops scheduler_rr_ops = {
+ slave_attach,
+ slave_detach,
+ scheduler_start,
+ scheduler_stop,
+ scheduler_config_qp,
+ scheduler_create_private_ctx,
+ NULL, /* option_set */
+ NULL /* option_get */
+};
+
+struct rte_cryptodev_scheduler scheduler = {
+ .name = "roundrobin-scheduler",
+	.description = "scheduler which will round robin bursts across "
+		"slave crypto devices",
+ .mode = CDEV_SCHED_MODE_ROUNDROBIN,
+ .ops = &scheduler_rr_ops
+};
+
+struct rte_cryptodev_scheduler *roundrobin_scheduler = &scheduler;
diff --git a/src/spdk/dpdk/drivers/crypto/snow3g/Makefile b/src/spdk/dpdk/drivers/crypto/snow3g/Makefile
new file mode 100644
index 00000000..ee5027d0
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/snow3g/Makefile
@@ -0,0 +1,38 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2016 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+ifneq ($(MAKECMDGOALS),clean)
+ifeq ($(LIBSSO_SNOW3G_PATH),)
+$(error "Please define LIBSSO_SNOW3G_PATH environment variable")
+endif
+endif
+
+# library name
+LIB = librte_pmd_snow3g.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_pmd_snow3g_version.map
+
+# external library dependencies
+CFLAGS += -I$(LIBSSO_SNOW3G_PATH)
+CFLAGS += -I$(LIBSSO_SNOW3G_PATH)/include
+CFLAGS += -I$(LIBSSO_SNOW3G_PATH)/build
+LDLIBS += -L$(LIBSSO_SNOW3G_PATH)/build -lsso_snow3g
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_cryptodev
+LDLIBS += -lrte_bus_vdev
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += rte_snow3g_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += rte_snow3g_pmd_ops.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/crypto/snow3g/rte_pmd_snow3g_version.map b/src/spdk/dpdk/drivers/crypto/snow3g/rte_pmd_snow3g_version.map
new file mode 100644
index 00000000..dc4d417b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/snow3g/rte_pmd_snow3g_version.map
@@ -0,0 +1,3 @@
+DPDK_16.04 {
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/crypto/snow3g/rte_snow3g_pmd.c b/src/spdk/dpdk/drivers/crypto/snow3g/rte_snow3g_pmd.c
new file mode 100644
index 00000000..a17536b7
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/snow3g/rte_snow3g_pmd.c
@@ -0,0 +1,625 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2018 Intel Corporation
+ */
+
+#include <rte_common.h>
+#include <rte_hexdump.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_bus_vdev.h>
+#include <rte_malloc.h>
+#include <rte_cpuflags.h>
+
+#include "rte_snow3g_pmd_private.h"
+
+#define SNOW3G_IV_LENGTH 16
+#define SNOW3G_MAX_BURST 8
+#define BYTE_LEN 8
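+/* cipher/auth offsets and lengths are given in bits; >> 3 converts them
+ * to bytes for the byte-oriented library calls
+ */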
+
+static uint8_t cryptodev_driver_id;
+
+/** Get xform chain order. */
+static enum snow3g_operation
+snow3g_get_mode(const struct rte_crypto_sym_xform *xform)
+{
+ if (xform == NULL)
+ return SNOW3G_OP_NOT_SUPPORTED;
+
+	if (xform->next != NULL && xform->next->next != NULL)
+		return SNOW3G_OP_NOT_SUPPORTED;
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ if (xform->next == NULL)
+ return SNOW3G_OP_ONLY_AUTH;
+ else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+ return SNOW3G_OP_AUTH_CIPHER;
+ else
+ return SNOW3G_OP_NOT_SUPPORTED;
+ }
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ if (xform->next == NULL)
+ return SNOW3G_OP_ONLY_CIPHER;
+ else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ return SNOW3G_OP_CIPHER_AUTH;
+ else
+ return SNOW3G_OP_NOT_SUPPORTED;
+ }
+
+ return SNOW3G_OP_NOT_SUPPORTED;
+}
+
+
+/** Parse crypto xform chain and set private session parameters. */
+int
+snow3g_set_session_parameters(struct snow3g_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ const struct rte_crypto_sym_xform *auth_xform = NULL;
+ const struct rte_crypto_sym_xform *cipher_xform = NULL;
+ enum snow3g_operation mode;
+
+ /* Select Crypto operation - hash then cipher / cipher then hash */
+ mode = snow3g_get_mode(xform);
+
+ switch (mode) {
+ case SNOW3G_OP_CIPHER_AUTH:
+ auth_xform = xform->next;
+
+ /* Fall-through */
+ case SNOW3G_OP_ONLY_CIPHER:
+ cipher_xform = xform;
+ break;
+ case SNOW3G_OP_AUTH_CIPHER:
+ cipher_xform = xform->next;
+ /* Fall-through */
+ case SNOW3G_OP_ONLY_AUTH:
+ auth_xform = xform;
+ break;
+ case SNOW3G_OP_NOT_SUPPORTED:
+ default:
+ SNOW3G_LOG(ERR, "Unsupported operation chain order parameter");
+ return -ENOTSUP;
+ }
+
+ if (cipher_xform) {
+ /* Only SNOW 3G UEA2 supported */
+ if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_SNOW3G_UEA2)
+ return -ENOTSUP;
+
+ if (cipher_xform->cipher.iv.length != SNOW3G_IV_LENGTH) {
+ SNOW3G_LOG(ERR, "Wrong IV length");
+ return -EINVAL;
+ }
+ sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;
+
+ /* Initialize key */
+ sso_snow3g_init_key_sched(cipher_xform->cipher.key.data,
+ &sess->pKeySched_cipher);
+ }
+
+ if (auth_xform) {
+ /* Only SNOW 3G UIA2 supported */
+ if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_SNOW3G_UIA2)
+ return -ENOTSUP;
+
+ if (auth_xform->auth.digest_length != SNOW3G_DIGEST_LENGTH) {
+ SNOW3G_LOG(ERR, "Wrong digest length");
+ return -EINVAL;
+ }
+
+ sess->auth_op = auth_xform->auth.op;
+
+ if (auth_xform->auth.iv.length != SNOW3G_IV_LENGTH) {
+ SNOW3G_LOG(ERR, "Wrong IV length");
+ return -EINVAL;
+ }
+ sess->auth_iv_offset = auth_xform->auth.iv.offset;
+
+ /* Initialize key */
+ sso_snow3g_init_key_sched(auth_xform->auth.key.data,
+ &sess->pKeySched_hash);
+ }
+
+
+ sess->op = mode;
+
+ return 0;
+}
+
+/** Get SNOW 3G session. */
+static struct snow3g_session *
+snow3g_get_session(struct snow3g_qp *qp, struct rte_crypto_op *op)
+{
+ struct snow3g_session *sess = NULL;
+
+ if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+ if (likely(op->sym->session != NULL))
+ sess = (struct snow3g_session *)
+ get_sym_session_private_data(
+ op->sym->session,
+ cryptodev_driver_id);
+ } else {
+ void *_sess = NULL;
+ void *_sess_private_data = NULL;
+
+ if (rte_mempool_get(qp->sess_mp, (void **)&_sess))
+ return NULL;
+
+		if (rte_mempool_get(qp->sess_mp,
+				(void **)&_sess_private_data)) {
+			rte_mempool_put(qp->sess_mp, _sess);
+			return NULL;
+		}
+
+ sess = (struct snow3g_session *)_sess_private_data;
+
+ if (unlikely(snow3g_set_session_parameters(sess,
+ op->sym->xform) != 0)) {
+ rte_mempool_put(qp->sess_mp, _sess);
+ rte_mempool_put(qp->sess_mp, _sess_private_data);
+ sess = NULL;
+ }
+ op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
+ set_sym_session_private_data(op->sym->session,
+ cryptodev_driver_id, _sess_private_data);
+ }
+
+ if (unlikely(sess == NULL))
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+
+
+ return sess;
+}
+
+/** Encrypt/decrypt mbufs with same cipher key. */
+static uint8_t
+process_snow3g_cipher_op(struct rte_crypto_op **ops,
+ struct snow3g_session *session,
+ uint8_t num_ops)
+{
+ unsigned i;
+ uint8_t processed_ops = 0;
+ uint8_t *src[SNOW3G_MAX_BURST], *dst[SNOW3G_MAX_BURST];
+ uint8_t *iv[SNOW3G_MAX_BURST];
+ uint32_t num_bytes[SNOW3G_MAX_BURST];
+
+ for (i = 0; i < num_ops; i++) {
+ src[i] = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
+ (ops[i]->sym->cipher.data.offset >> 3);
+ dst[i] = ops[i]->sym->m_dst ?
+ rte_pktmbuf_mtod(ops[i]->sym->m_dst, uint8_t *) +
+ (ops[i]->sym->cipher.data.offset >> 3) :
+ rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
+ (ops[i]->sym->cipher.data.offset >> 3);
+ iv[i] = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
+ session->cipher_iv_offset);
+ num_bytes[i] = ops[i]->sym->cipher.data.length >> 3;
+
+ processed_ops++;
+ }
+
+ sso_snow3g_f8_n_buffer(&session->pKeySched_cipher, iv, src, dst,
+ num_bytes, processed_ops);
+
+ return processed_ops;
+}
+
+/** Encrypt/decrypt mbuf (bit level function). */
+static uint8_t
+process_snow3g_cipher_op_bit(struct rte_crypto_op *op,
+ struct snow3g_session *session)
+{
+ uint8_t *src, *dst;
+ uint8_t *iv;
+ uint32_t length_in_bits, offset_in_bits;
+
+ offset_in_bits = op->sym->cipher.data.offset;
+ src = rte_pktmbuf_mtod(op->sym->m_src, uint8_t *);
+ if (op->sym->m_dst == NULL) {
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ SNOW3G_LOG(ERR, "bit-level in-place not supported\n");
+ return 0;
+ }
+ dst = rte_pktmbuf_mtod(op->sym->m_dst, uint8_t *);
+ iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+ session->cipher_iv_offset);
+ length_in_bits = op->sym->cipher.data.length;
+
+ sso_snow3g_f8_1_buffer_bit(&session->pKeySched_cipher, iv,
+ src, dst, length_in_bits, offset_in_bits);
+
+ return 1;
+}
+
+/** Generate/verify hash from mbufs with same hash key. */
+static int
+process_snow3g_hash_op(struct snow3g_qp *qp, struct rte_crypto_op **ops,
+ struct snow3g_session *session,
+ uint8_t num_ops)
+{
+ unsigned i;
+ uint8_t processed_ops = 0;
+ uint8_t *src, *dst;
+ uint32_t length_in_bits;
+ uint8_t *iv;
+
+ for (i = 0; i < num_ops; i++) {
+ /* Data must be byte aligned */
+ if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+			SNOW3G_LOG(ERR, "Auth data offset is not byte-aligned");
+ break;
+ }
+
+ length_in_bits = ops[i]->sym->auth.data.length;
+
+ src = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
+ (ops[i]->sym->auth.data.offset >> 3);
+ iv = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
+ session->auth_iv_offset);
+
+ if (session->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
+ dst = qp->temp_digest;
+
+ sso_snow3g_f9_1_buffer(&session->pKeySched_hash,
+ iv, src,
+ length_in_bits, dst);
+ /* Verify digest. */
+ if (memcmp(dst, ops[i]->sym->auth.digest.data,
+ SNOW3G_DIGEST_LENGTH) != 0)
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ } else {
+ dst = ops[i]->sym->auth.digest.data;
+
+ sso_snow3g_f9_1_buffer(&session->pKeySched_hash,
+ iv, src,
+ length_in_bits, dst);
+ }
+ processed_ops++;
+ }
+
+ return processed_ops;
+}
+
+/** Process a batch of crypto ops which share the same session. */
+static int
+process_ops(struct rte_crypto_op **ops, struct snow3g_session *session,
+ struct snow3g_qp *qp, uint8_t num_ops,
+ uint16_t *accumulated_enqueued_ops)
+{
+ unsigned i;
+ unsigned enqueued_ops, processed_ops;
+
+#ifdef RTE_LIBRTE_PMD_SNOW3G_DEBUG
+ for (i = 0; i < num_ops; i++) {
+ if (!rte_pktmbuf_is_contiguous(ops[i]->sym->m_src) ||
+ (ops[i]->sym->m_dst != NULL &&
+ !rte_pktmbuf_is_contiguous(
+ ops[i]->sym->m_dst))) {
+ SNOW3G_LOG(ERR, "PMD supports only contiguous mbufs, "
+ "op (%p) provides noncontiguous mbuf as "
+ "source/destination buffer.\n", ops[i]);
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ return 0;
+ }
+ }
+#endif
+
+ switch (session->op) {
+ case SNOW3G_OP_ONLY_CIPHER:
+ processed_ops = process_snow3g_cipher_op(ops,
+ session, num_ops);
+ break;
+ case SNOW3G_OP_ONLY_AUTH:
+ processed_ops = process_snow3g_hash_op(qp, ops, session,
+ num_ops);
+ break;
+ case SNOW3G_OP_CIPHER_AUTH:
+ processed_ops = process_snow3g_cipher_op(ops, session,
+ num_ops);
+ process_snow3g_hash_op(qp, ops, session, processed_ops);
+ break;
+ case SNOW3G_OP_AUTH_CIPHER:
+ processed_ops = process_snow3g_hash_op(qp, ops, session,
+ num_ops);
+ process_snow3g_cipher_op(ops, session, processed_ops);
+ break;
+ default:
+ /* Operation not supported. */
+ processed_ops = 0;
+ }
+
+ for (i = 0; i < num_ops; i++) {
+ /*
+ * If there was no error/authentication failure,
+ * change status to successful.
+ */
+ if (ops[i]->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ /* Free session if a session-less crypto op. */
+ if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+ memset(session, 0, sizeof(struct snow3g_session));
+ memset(ops[i]->sym->session, 0,
+ rte_cryptodev_sym_get_header_session_size());
+ rte_mempool_put(qp->sess_mp, session);
+ rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
+ ops[i]->sym->session = NULL;
+ }
+ }
+
+ enqueued_ops = rte_ring_enqueue_burst(qp->processed_ops,
+ (void **)ops, processed_ops, NULL);
+ qp->qp_stats.enqueued_count += enqueued_ops;
+ *accumulated_enqueued_ops += enqueued_ops;
+
+ return enqueued_ops;
+}
+
+/** Process a crypto op with length/offset in bits. */
+static int
+process_op_bit(struct rte_crypto_op *op, struct snow3g_session *session,
+ struct snow3g_qp *qp, uint16_t *accumulated_enqueued_ops)
+{
+ unsigned enqueued_op, processed_op;
+
+ switch (session->op) {
+ case SNOW3G_OP_ONLY_CIPHER:
+ processed_op = process_snow3g_cipher_op_bit(op,
+ session);
+ break;
+ case SNOW3G_OP_ONLY_AUTH:
+ processed_op = process_snow3g_hash_op(qp, &op, session, 1);
+ break;
+ case SNOW3G_OP_CIPHER_AUTH:
+ processed_op = process_snow3g_cipher_op_bit(op, session);
+ if (processed_op == 1)
+ process_snow3g_hash_op(qp, &op, session, 1);
+ break;
+ case SNOW3G_OP_AUTH_CIPHER:
+ processed_op = process_snow3g_hash_op(qp, &op, session, 1);
+ if (processed_op == 1)
+ process_snow3g_cipher_op_bit(op, session);
+ break;
+ default:
+ /* Operation not supported. */
+ processed_op = 0;
+ }
+
+ /*
+ * If there was no error/authentication failure,
+ * change status to successful.
+ */
+ if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+ /* Free session if a session-less crypto op. */
+ if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+ memset(op->sym->session, 0, sizeof(struct snow3g_session));
+ rte_cryptodev_sym_session_free(op->sym->session);
+ op->sym->session = NULL;
+ }
+
+ enqueued_op = rte_ring_enqueue_burst(qp->processed_ops,
+ (void **)&op, processed_op, NULL);
+ qp->qp_stats.enqueued_count += enqueued_op;
+ *accumulated_enqueued_ops += enqueued_op;
+
+ return enqueued_op;
+}
+
+static uint16_t
+snow3g_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct rte_crypto_op *c_ops[SNOW3G_MAX_BURST];
+ struct rte_crypto_op *curr_c_op;
+
+ struct snow3g_session *prev_sess = NULL, *curr_sess = NULL;
+ struct snow3g_qp *qp = queue_pair;
+ unsigned i;
+ uint8_t burst_size = 0;
+ uint16_t enqueued_ops = 0;
+ uint8_t processed_ops;
+
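+ /*
+ * Walk the burst: byte-aligned ops that share a session are batched
+ * and processed together via process_ops(); ops with bit-level
+ * lengths/offsets are processed one at a time via process_op_bit().
+ */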
+ for (i = 0; i < nb_ops; i++) {
+ curr_c_op = ops[i];
+
+ /* Set status as enqueued (not processed yet) by default. */
+ curr_c_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+
+ curr_sess = snow3g_get_session(qp, curr_c_op);
+ if (unlikely(curr_sess == NULL ||
+ curr_sess->op == SNOW3G_OP_NOT_SUPPORTED)) {
+ curr_c_op->status =
+ RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+ break;
+ }
+
+ /* If length/offset is at bit-level, process this buffer alone. */
+ if (((curr_c_op->sym->cipher.data.length % BYTE_LEN) != 0)
+ || ((curr_c_op->sym->cipher.data.offset
+ % BYTE_LEN) != 0)) {
+ /* Process the ops of the previous session. */
+ if (prev_sess != NULL) {
+ processed_ops = process_ops(c_ops, prev_sess,
+ qp, burst_size, &enqueued_ops);
+ if (processed_ops < burst_size) {
+ burst_size = 0;
+ break;
+ }
+
+ burst_size = 0;
+ prev_sess = NULL;
+ }
+
+ processed_ops = process_op_bit(curr_c_op, curr_sess,
+ qp, &enqueued_ops);
+ if (processed_ops != 1)
+ break;
+
+ continue;
+ }
+
+ /* Batch ops that share the same session. */
+ if (prev_sess == NULL) {
+ prev_sess = curr_sess;
+ c_ops[burst_size++] = curr_c_op;
+ } else if (curr_sess == prev_sess) {
+ c_ops[burst_size++] = curr_c_op;
+ /*
+ * When there are enough ops to process in a batch,
+ * process them, and start a new batch.
+ */
+ if (burst_size == SNOW3G_MAX_BURST) {
+ processed_ops = process_ops(c_ops, prev_sess,
+ qp, burst_size, &enqueued_ops);
+ if (processed_ops < burst_size) {
+ burst_size = 0;
+ break;
+ }
+
+ burst_size = 0;
+ prev_sess = NULL;
+ }
+ } else {
+ /*
+ * Different session, process the ops
+ * of the previous session.
+ */
+ processed_ops = process_ops(c_ops, prev_sess,
+ qp, burst_size, &enqueued_ops);
+ if (processed_ops < burst_size) {
+ burst_size = 0;
+ break;
+ }
+
+ burst_size = 0;
+ prev_sess = curr_sess;
+
+ c_ops[burst_size++] = curr_c_op;
+ }
+ }
+
+ if (burst_size != 0) {
+ /* Process the crypto ops of the last session. */
+ processed_ops = process_ops(c_ops, prev_sess,
+ qp, burst_size, &enqueued_ops);
+ }
+
+ qp->qp_stats.enqueue_err_count += nb_ops - enqueued_ops;
+ return enqueued_ops;
+}
+
+static uint16_t
+snow3g_pmd_dequeue_burst(void *queue_pair,
+ struct rte_crypto_op **c_ops, uint16_t nb_ops)
+{
+ struct snow3g_qp *qp = queue_pair;
+
+ unsigned nb_dequeued;
+
+ nb_dequeued = rte_ring_dequeue_burst(qp->processed_ops,
+ (void **)c_ops, nb_ops, NULL);
+ qp->qp_stats.dequeued_count += nb_dequeued;
+
+ return nb_dequeued;
+}
+
+static int cryptodev_snow3g_remove(struct rte_vdev_device *vdev);
+
+static int
+cryptodev_snow3g_create(const char *name,
+ struct rte_vdev_device *vdev,
+ struct rte_cryptodev_pmd_init_params *init_params)
+{
+ struct rte_cryptodev *dev;
+ struct snow3g_private *internals;
+ uint64_t cpu_flags = RTE_CRYPTODEV_FF_CPU_SSE;
+
+ dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
+ if (dev == NULL) {
+ SNOW3G_LOG(ERR, "failed to create cryptodev vdev");
+ goto init_error;
+ }
+
+ dev->driver_id = cryptodev_driver_id;
+ dev->dev_ops = rte_snow3g_pmd_ops;
+
+ /* Register RX/TX burst functions for data path. */
+ dev->dequeue_burst = snow3g_pmd_dequeue_burst;
+ dev->enqueue_burst = snow3g_pmd_enqueue_burst;
+
+ dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+ cpu_flags;
+
+ internals = dev->data->dev_private;
+
+ internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
+
+ return 0;
+init_error:
+ SNOW3G_LOG(ERR, "driver %s: cryptodev_snow3g_create failed",
+ init_params->name);
+
+ cryptodev_snow3g_remove(vdev);
+ return -EFAULT;
+}
+
+static int
+cryptodev_snow3g_probe(struct rte_vdev_device *vdev)
+{
+ struct rte_cryptodev_pmd_init_params init_params = {
+ "",
+ sizeof(struct snow3g_private),
+ rte_socket_id(),
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
+ };
+ const char *name;
+ const char *input_args;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+ input_args = rte_vdev_device_args(vdev);
+
+ rte_cryptodev_pmd_parse_input_args(&init_params, input_args);
+
+ return cryptodev_snow3g_create(name, vdev, &init_params);
+}
+
+static int
+cryptodev_snow3g_remove(struct rte_vdev_device *vdev)
+{
+ struct rte_cryptodev *cryptodev;
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+
+ cryptodev = rte_cryptodev_pmd_get_named_dev(name);
+ if (cryptodev == NULL)
+ return -ENODEV;
+
+ return rte_cryptodev_pmd_destroy(cryptodev);
+}
+
+static struct rte_vdev_driver cryptodev_snow3g_pmd_drv = {
+ .probe = cryptodev_snow3g_probe,
+ .remove = cryptodev_snow3g_remove
+};
+
+static struct cryptodev_driver snow3g_crypto_drv;
+
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_SNOW3G_PMD, cryptodev_snow3g_pmd_drv);
+RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_SNOW3G_PMD, cryptodev_snow3g_pmd);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_SNOW3G_PMD,
+ "max_nb_queue_pairs=<int> "
+ "socket_id=<int>");
+RTE_PMD_REGISTER_CRYPTO_DRIVER(snow3g_crypto_drv,
+ cryptodev_snow3g_pmd_drv.driver, cryptodev_driver_id);
+
+RTE_INIT(snow3g_init_log)
+{
+ snow3g_logtype_driver = rte_log_register("pmd.crypto.snow3g");
+}
diff --git a/src/spdk/dpdk/drivers/crypto/snow3g/rte_snow3g_pmd_ops.c b/src/spdk/dpdk/drivers/crypto/snow3g/rte_snow3g_pmd_ops.c
new file mode 100644
index 00000000..cfbc9522
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/snow3g/rte_snow3g_pmd_ops.c
@@ -0,0 +1,321 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2018 Intel Corporation
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "rte_snow3g_pmd_private.h"
+
+static const struct rte_cryptodev_capabilities snow3g_pmd_capabilities[] = {
+ { /* SNOW 3G (UIA2) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 4,
+ .max = 4,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* SNOW 3G (UEA2) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+/** Configure device */
+static int
+snow3g_pmd_config(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused struct rte_cryptodev_config *config)
+{
+ return 0;
+}
+
+/** Start device */
+static int
+snow3g_pmd_start(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+/** Stop device */
+static void
+snow3g_pmd_stop(__rte_unused struct rte_cryptodev *dev)
+{
+}
+
+/** Close device */
+static int
+snow3g_pmd_close(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+
+/** Get device statistics */
+static void
+snow3g_pmd_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct snow3g_qp *qp = dev->data->queue_pairs[qp_id];
+
+ stats->enqueued_count += qp->qp_stats.enqueued_count;
+ stats->dequeued_count += qp->qp_stats.dequeued_count;
+
+ stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
+ stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
+ }
+}
+
+/** Reset device statistics */
+static void
+snow3g_pmd_stats_reset(struct rte_cryptodev *dev)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct snow3g_qp *qp = dev->data->queue_pairs[qp_id];
+
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ }
+}
+
+
+/** Get device info */
+static void
+snow3g_pmd_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *dev_info)
+{
+ struct snow3g_private *internals = dev->data->dev_private;
+
+ if (dev_info != NULL) {
+ dev_info->driver_id = dev->driver_id;
+ dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
+ /* No limit on the number of sessions */
+ dev_info->sym.max_nb_sessions = 0;
+ dev_info->feature_flags = dev->feature_flags;
+ dev_info->capabilities = snow3g_pmd_capabilities;
+ }
+}
+
+/** Release queue pair */
+static int
+snow3g_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+ if (dev->data->queue_pairs[qp_id] != NULL) {
+ rte_free(dev->data->queue_pairs[qp_id]);
+ dev->data->queue_pairs[qp_id] = NULL;
+ }
+ return 0;
+}
+
+/** Set a unique name for the queue pair, based on the device id and queue pair id */
+static int
+snow3g_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
+ struct snow3g_qp *qp)
+{
+ unsigned n = snprintf(qp->name, sizeof(qp->name),
+ "snow3g_pmd_%u_qp_%u",
+ dev->data->dev_id, qp->id);
+
+ if (n >= sizeof(qp->name))
+ return -1;
+
+ return 0;
+}
+
+/** Create a ring to place processed ops on */
+static struct rte_ring *
+snow3g_pmd_qp_create_processed_ops_ring(struct snow3g_qp *qp,
+ unsigned ring_size, int socket_id)
+{
+ struct rte_ring *r;
+
+ r = rte_ring_lookup(qp->name);
+ if (r) {
+ if (rte_ring_get_size(r) >= ring_size) {
+ SNOW3G_LOG(INFO, "Reusing existing ring %s"
+ " for processed packets",
+ qp->name);
+ return r;
+ }
+
+ SNOW3G_LOG(ERR, "Unable to reuse existing ring %s"
+ " for processed packets",
+ qp->name);
+ return NULL;
+ }
+
+ return rte_ring_create(qp->name, ring_size, socket_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+}
+
+/** Setup a queue pair */
+static int
+snow3g_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id, struct rte_mempool *session_pool)
+{
+ struct snow3g_qp *qp = NULL;
+
+ /* Free memory prior to re-allocation if needed. */
+ if (dev->data->queue_pairs[qp_id] != NULL)
+ snow3g_pmd_qp_release(dev, qp_id);
+
+ /* Allocate the queue pair data structure. */
+ qp = rte_zmalloc_socket("SNOW 3G PMD Queue Pair", sizeof(*qp),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (qp == NULL)
+ return -ENOMEM;
+
+ qp->id = qp_id;
+ dev->data->queue_pairs[qp_id] = qp;
+
+ if (snow3g_pmd_qp_set_unique_name(dev, qp))
+ goto qp_setup_cleanup;
+
+ qp->processed_ops = snow3g_pmd_qp_create_processed_ops_ring(qp,
+ qp_conf->nb_descriptors, socket_id);
+ if (qp->processed_ops == NULL)
+ goto qp_setup_cleanup;
+
+ qp->sess_mp = session_pool;
+
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+
+ return 0;
+
+qp_setup_cleanup:
+ if (qp)
+ rte_free(qp);
+
+ return -1;
+}
+
+/** Return the number of allocated queue pairs */
+static uint32_t
+snow3g_pmd_qp_count(struct rte_cryptodev *dev)
+{
+ return dev->data->nb_queue_pairs;
+}
+
+/** Returns the size of the SNOW 3G session structure */
+static unsigned
+snow3g_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+ return sizeof(struct snow3g_session);
+}
+
+/** Configure a SNOW 3G session from a crypto xform chain */
+static int
+snow3g_pmd_sym_session_configure(struct rte_cryptodev *dev __rte_unused,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mempool)
+{
+ void *sess_private_data;
+ int ret;
+
+ if (unlikely(sess == NULL)) {
+ SNOW3G_LOG(ERR, "invalid session struct");
+ return -EINVAL;
+ }
+
+ if (rte_mempool_get(mempool, &sess_private_data)) {
+ SNOW3G_LOG(ERR,
+ "Couldn't get object from session mempool");
+ return -ENOMEM;
+ }
+
+ ret = snow3g_set_session_parameters(sess_private_data, xform);
+ if (ret != 0) {
+ SNOW3G_LOG(ERR, "failed configure session parameters");
+
+ /* Return session to mempool */
+ rte_mempool_put(mempool, sess_private_data);
+ return ret;
+ }
+
+ set_sym_session_private_data(sess, dev->driver_id,
+ sess_private_data);
+
+ return 0;
+}
+
+/** Clear the memory of session so it doesn't leave key material behind */
+static void
+snow3g_pmd_sym_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
+{
+ uint8_t index = dev->driver_id;
+ void *sess_priv = get_sym_session_private_data(sess, index);
+
+ /* Zero out the whole structure */
+ if (sess_priv) {
+ memset(sess_priv, 0, sizeof(struct snow3g_session));
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+ set_sym_session_private_data(sess, index, NULL);
+ rte_mempool_put(sess_mp, sess_priv);
+ }
+}
+
+struct rte_cryptodev_ops snow3g_pmd_ops = {
+ .dev_configure = snow3g_pmd_config,
+ .dev_start = snow3g_pmd_start,
+ .dev_stop = snow3g_pmd_stop,
+ .dev_close = snow3g_pmd_close,
+
+ .stats_get = snow3g_pmd_stats_get,
+ .stats_reset = snow3g_pmd_stats_reset,
+
+ .dev_infos_get = snow3g_pmd_info_get,
+
+ .queue_pair_setup = snow3g_pmd_qp_setup,
+ .queue_pair_release = snow3g_pmd_qp_release,
+ .queue_pair_count = snow3g_pmd_qp_count,
+
+ .sym_session_get_size = snow3g_pmd_sym_session_get_size,
+ .sym_session_configure = snow3g_pmd_sym_session_configure,
+ .sym_session_clear = snow3g_pmd_sym_session_clear
+};
+
+struct rte_cryptodev_ops *rte_snow3g_pmd_ops = &snow3g_pmd_ops;
diff --git a/src/spdk/dpdk/drivers/crypto/snow3g/rte_snow3g_pmd_private.h b/src/spdk/dpdk/drivers/crypto/snow3g/rte_snow3g_pmd_private.h
new file mode 100644
index 00000000..b7807b62
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/snow3g/rte_snow3g_pmd_private.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2018 Intel Corporation
+ */
+
+#ifndef _RTE_SNOW3G_PMD_PRIVATE_H_
+#define _RTE_SNOW3G_PMD_PRIVATE_H_
+
+#include <sso_snow3g.h>
+
+#define CRYPTODEV_NAME_SNOW3G_PMD crypto_snow3g
+/**< SNOW 3G PMD device name */
+
+/** SNOW 3G PMD LOGTYPE DRIVER */
+int snow3g_logtype_driver;
+
+#define SNOW3G_LOG(level, fmt, ...) \
+ rte_log(RTE_LOG_ ## level, snow3g_logtype_driver, \
+ "%s() line %u: " fmt "\n", __func__, __LINE__, \
+ ## __VA_ARGS__)
+
+#define SNOW3G_DIGEST_LENGTH 4
+
+/** private data structure for each virtual SNOW 3G device */
+struct snow3g_private {
+ unsigned max_nb_queue_pairs;
+ /**< Max number of queue pairs supported by device */
+};
+
+/** SNOW 3G buffer queue pair */
+struct snow3g_qp {
+ uint16_t id;
+ /**< Queue Pair Identifier */
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ /**< Unique Queue Pair Name */
+ struct rte_ring *processed_ops;
+ /**< Ring for placing processed ops */
+ struct rte_mempool *sess_mp;
+ /**< Session Mempool */
+ struct rte_cryptodev_stats qp_stats;
+ /**< Queue pair statistics */
+ uint8_t temp_digest[SNOW3G_DIGEST_LENGTH];
+ /**< Buffer used to store the digest generated
+ * by the driver when verifying a digest provided
+ * by the user (using authentication verify operation)
+ */
+} __rte_cache_aligned;
+
+enum snow3g_operation {
+ SNOW3G_OP_ONLY_CIPHER,
+ SNOW3G_OP_ONLY_AUTH,
+ SNOW3G_OP_CIPHER_AUTH,
+ SNOW3G_OP_AUTH_CIPHER,
+ SNOW3G_OP_NOT_SUPPORTED
+};
+
+/** SNOW 3G private session structure */
+struct snow3g_session {
+ enum snow3g_operation op;
+ enum rte_crypto_auth_operation auth_op;
+ sso_snow3g_key_schedule_t pKeySched_cipher;
+ sso_snow3g_key_schedule_t pKeySched_hash;
+ uint16_t cipher_iv_offset;
+ uint16_t auth_iv_offset;
+} __rte_cache_aligned;
+
+
+extern int
+snow3g_set_session_parameters(struct snow3g_session *sess,
+ const struct rte_crypto_sym_xform *xform);
+
+
+/** device specific operations function pointer structure */
+extern struct rte_cryptodev_ops *rte_snow3g_pmd_ops;
+
+
+
+#endif /* _RTE_SNOW3G_PMD_PRIVATE_H_ */
diff --git a/src/spdk/dpdk/drivers/crypto/virtio/Makefile b/src/spdk/dpdk/drivers/crypto/virtio/Makefile
new file mode 100644
index 00000000..be7b828f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/virtio/Makefile
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_virtio_crypto.a
+
+#
+# include virtio_crypto.h
+#
+CFLAGS += -I$(RTE_SDK)/lib/librte_vhost
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+EXPORT_MAP := rte_pmd_virtio_crypto_version.map
+
+LIBABIVER := 1
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_VIRTIO_CRYPTO) += virtqueue.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_VIRTIO_CRYPTO) += virtio_pci.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_VIRTIO_CRYPTO) += virtio_rxtx.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_VIRTIO_CRYPTO) += virtio_cryptodev.c
+
+# this lib depends upon:
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool
+LDLIBS += -lrte_cryptodev
+LDLIBS += -lrte_pci -lrte_bus_pci
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/crypto/virtio/meson.build b/src/spdk/dpdk/drivers/crypto/virtio/meson.build
new file mode 100644
index 00000000..b15b3f9f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/virtio/meson.build
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+
+includes += include_directories('../../../lib/librte_vhost')
+deps += 'bus_pci'
+name = 'virtio_crypto'
+sources = files('virtio_cryptodev.c', 'virtio_pci.c',
+ 'virtio_rxtx.c', 'virtqueue.c')
diff --git a/src/spdk/dpdk/drivers/crypto/virtio/rte_pmd_virtio_crypto_version.map b/src/spdk/dpdk/drivers/crypto/virtio/rte_pmd_virtio_crypto_version.map
new file mode 100644
index 00000000..de8e412f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/virtio/rte_pmd_virtio_crypto_version.map
@@ -0,0 +1,3 @@
+DPDK_18.05 {
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/crypto/virtio/virtio_crypto_algs.h b/src/spdk/dpdk/drivers/crypto/virtio/virtio_crypto_algs.h
new file mode 100644
index 00000000..4c44af37
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/virtio/virtio_crypto_algs.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+ */
+
+#ifndef _VIRTIO_CRYPTO_ALGS_H_
+#define _VIRTIO_CRYPTO_ALGS_H_
+
+#include <rte_memory.h>
+
+#include "virtio_crypto.h"
+
+struct virtio_crypto_session {
+ uint64_t session_id;
+
+ struct {
+ uint16_t offset;
+ uint16_t length;
+ } iv;
+
+ struct {
+ uint32_t length;
+ phys_addr_t phys_addr;
+ } aad;
+
+ struct virtio_crypto_op_ctrl_req ctrl;
+};
+
+#endif /* _VIRTIO_CRYPTO_ALGS_H_ */
diff --git a/src/spdk/dpdk/drivers/crypto/virtio/virtio_crypto_capabilities.h b/src/spdk/dpdk/drivers/crypto/virtio/virtio_crypto_capabilities.h
new file mode 100644
index 00000000..03c30dee
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/virtio/virtio_crypto_capabilities.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+ */
+
+#ifndef _VIRTIO_CRYPTO_CAPABILITIES_H_
+#define _VIRTIO_CRYPTO_CAPABILITIES_H_
+
+#define VIRTIO_SYM_CAPABILITIES \
+ { /* SHA1 HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA1_HMAC, \
+ .block_size = 64, \
+ .key_size = { \
+ .min = 1, \
+ .max = 64, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 1, \
+ .max = 20, \
+ .increment = 1 \
+ }, \
+ .iv_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* AES CBC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_AES_CBC, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 32, \
+ .increment = 8 \
+ }, \
+ .iv_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }
+
+#endif /* _VIRTIO_CRYPTO_CAPABILITIES_H_ */
diff --git a/src/spdk/dpdk/drivers/crypto/virtio/virtio_cryptodev.c b/src/spdk/dpdk/drivers/crypto/virtio/virtio_cryptodev.c
new file mode 100644
index 00000000..568b5a40
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/virtio/virtio_cryptodev.c
@@ -0,0 +1,1505 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+ */
+#include <stdbool.h>
+#include <unistd.h>
+
+#include <rte_common.h>
+#include <rte_errno.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_eal.h>
+
+#include "virtio_cryptodev.h"
+#include "virtqueue.h"
+#include "virtio_crypto_algs.h"
+#include "virtio_crypto_capabilities.h"
+
+int virtio_crypto_logtype_init;
+int virtio_crypto_logtype_session;
+int virtio_crypto_logtype_rx;
+int virtio_crypto_logtype_tx;
+int virtio_crypto_logtype_driver;
+
+static int virtio_crypto_dev_configure(struct rte_cryptodev *dev,
+ struct rte_cryptodev_config *config);
+static int virtio_crypto_dev_start(struct rte_cryptodev *dev);
+static void virtio_crypto_dev_stop(struct rte_cryptodev *dev);
+static int virtio_crypto_dev_close(struct rte_cryptodev *dev);
+static void virtio_crypto_dev_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *dev_info);
+static void virtio_crypto_dev_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats);
+static void virtio_crypto_dev_stats_reset(struct rte_cryptodev *dev);
+static int virtio_crypto_qp_setup(struct rte_cryptodev *dev,
+ uint16_t queue_pair_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id,
+ struct rte_mempool *session_pool);
+static int virtio_crypto_qp_release(struct rte_cryptodev *dev,
+ uint16_t queue_pair_id);
+static void virtio_crypto_dev_free_mbufs(struct rte_cryptodev *dev);
+static unsigned int virtio_crypto_sym_get_session_private_size(
+ struct rte_cryptodev *dev);
+static void virtio_crypto_sym_clear_session(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess);
+static int virtio_crypto_sym_configure_session(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *session,
+ struct rte_mempool *mp);
+
+/*
+ * The set of PCI devices this driver supports
+ */
+static const struct rte_pci_id pci_id_virtio_crypto_map[] = {
+ { RTE_PCI_DEVICE(VIRTIO_CRYPTO_PCI_VENDORID,
+ VIRTIO_CRYPTO_PCI_DEVICEID) },
+ { .vendor_id = 0, /* sentinel */ },
+};
+
+static const struct rte_cryptodev_capabilities virtio_capabilities[] = {
+ VIRTIO_SYM_CAPABILITIES,
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+uint8_t cryptodev_virtio_driver_id;
+
+#define NUM_ENTRY_SYM_CREATE_SESSION 4
+
+static int
+virtio_crypto_send_command(struct virtqueue *vq,
+ struct virtio_crypto_op_ctrl_req *ctrl, uint8_t *cipher_key,
+ uint8_t *auth_key, struct virtio_crypto_session *session)
+{
+ uint8_t idx = 0;
+ uint8_t needed = 1;
+ uint32_t head = 0;
+ uint32_t len_cipher_key = 0;
+ uint32_t len_auth_key = 0;
+ uint32_t len_ctrl_req = sizeof(struct virtio_crypto_op_ctrl_req);
+ uint32_t len_session_input = sizeof(struct virtio_crypto_session_input);
+ uint32_t len_total = 0;
+ uint32_t input_offset = 0;
+ void *virt_addr_started = NULL;
+ phys_addr_t phys_addr_started;
+ struct vring_desc *desc;
+ uint32_t desc_offset;
+ struct virtio_crypto_session_input *input;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (session == NULL) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("session is NULL.");
+ return -EINVAL;
+ }
+ /* Cipher-only sessions are supported (auth_key may be NULL), but a cipher key is always required. */
+ if (!cipher_key) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("cipher key is NULL.");
+ return -EINVAL;
+ }
+
+ head = vq->vq_desc_head_idx;
+ VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_desc_head_idx = %d, vq = %p",
+ head, vq);
+
+ if (vq->vq_free_cnt < needed) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("Not enough entry");
+ return -ENOSPC;
+ }
+
+ /* calculate the length of cipher key */
+ if (cipher_key) {
+ switch (ctrl->u.sym_create_session.op_type) {
+ case VIRTIO_CRYPTO_SYM_OP_CIPHER:
+ len_cipher_key
+ = ctrl->u.sym_create_session.u.cipher
+ .para.keylen;
+ break;
+ case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
+ len_cipher_key
+ = ctrl->u.sym_create_session.u.chain
+ .para.cipher_param.keylen;
+ break;
+ default:
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("invalid op type");
+ return -EINVAL;
+ }
+ }
+
+ /* calculate the length of auth key */
+ if (auth_key) {
+ len_auth_key =
+ ctrl->u.sym_create_session.u.chain.para.u.mac_param
+ .auth_key_len;
+ }
+
+ /*
+ * malloc memory to store indirect vring_desc entries, including
+ * ctrl request, cipher key, auth key, session input and desc vring
+ */
+ desc_offset = len_ctrl_req + len_cipher_key + len_auth_key
+ + len_session_input;
+ virt_addr_started = rte_malloc(NULL,
+ desc_offset + NUM_ENTRY_SYM_CREATE_SESSION
+ * sizeof(struct vring_desc), RTE_CACHE_LINE_SIZE);
+ if (virt_addr_started == NULL) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("not enough heap memory");
+ return -ENOSPC;
+ }
+ phys_addr_started = rte_malloc_virt2iova(virt_addr_started);
+
+ /* address to store indirect vring desc entries */
+ desc = (struct vring_desc *)
+ ((uint8_t *)virt_addr_started + desc_offset);
+
+ /* ctrl req part */
+ memcpy(virt_addr_started, ctrl, len_ctrl_req);
+ desc[idx].addr = phys_addr_started;
+ desc[idx].len = len_ctrl_req;
+ desc[idx].flags = VRING_DESC_F_NEXT;
+ desc[idx].next = idx + 1;
+ idx++;
+ len_total += len_ctrl_req;
+ input_offset += len_ctrl_req;
+
+ /* cipher key part */
+ if (len_cipher_key > 0) {
+ memcpy((uint8_t *)virt_addr_started + len_total,
+ cipher_key, len_cipher_key);
+
+ desc[idx].addr = phys_addr_started + len_total;
+ desc[idx].len = len_cipher_key;
+ desc[idx].flags = VRING_DESC_F_NEXT;
+ desc[idx].next = idx + 1;
+ idx++;
+ len_total += len_cipher_key;
+ input_offset += len_cipher_key;
+ }
+
+ /* auth key part */
+ if (len_auth_key > 0) {
+ memcpy((uint8_t *)virt_addr_started + len_total,
+ auth_key, len_auth_key);
+
+ desc[idx].addr = phys_addr_started + len_total;
+ desc[idx].len = len_auth_key;
+ desc[idx].flags = VRING_DESC_F_NEXT;
+ desc[idx].next = idx + 1;
+ idx++;
+ len_total += len_auth_key;
+ input_offset += len_auth_key;
+ }
+
+ /* input part */
+ input = (struct virtio_crypto_session_input *)
+ ((uint8_t *)virt_addr_started + input_offset);
+ input->status = VIRTIO_CRYPTO_ERR;
+ input->session_id = ~0ULL;
+ desc[idx].addr = phys_addr_started + len_total;
+ desc[idx].len = len_session_input;
+ desc[idx].flags = VRING_DESC_F_WRITE;
+ idx++;
+
+ /* Use a single ring entry pointing at the indirect descriptor table. */
+ vq->vq_ring.desc[head].addr = phys_addr_started + desc_offset;
+ vq->vq_ring.desc[head].len = idx * sizeof(struct vring_desc);
+ vq->vq_ring.desc[head].flags = VRING_DESC_F_INDIRECT;
+ vq->vq_free_cnt--;
+
+ vq->vq_desc_head_idx = vq->vq_ring.desc[head].next;
+
+ vq_update_avail_ring(vq, head);
+ vq_update_avail_idx(vq);
+
+ VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_queue_index = %d",
+ vq->vq_queue_index);
+
+ virtqueue_notify(vq);
+
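+ /*
+ * The control path is synchronous: busy-wait (with a short sleep)
+ * until the backend places the request on the used ring, then
+ * reclaim the descriptors below.
+ */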
+ rte_rmb();
+ while (vq->vq_used_cons_idx == vq->vq_ring.used->idx) {
+ rte_rmb();
+ usleep(100);
+ }
+
+ while (vq->vq_used_cons_idx != vq->vq_ring.used->idx) {
+ uint32_t idx, desc_idx, used_idx;
+ struct vring_used_elem *uep;
+
+ used_idx = (uint32_t)(vq->vq_used_cons_idx
+ & (vq->vq_nentries - 1));
+ uep = &vq->vq_ring.used->ring[used_idx];
+ idx = (uint32_t) uep->id;
+ desc_idx = idx;
+
+ while (vq->vq_ring.desc[desc_idx].flags & VRING_DESC_F_NEXT) {
+ desc_idx = vq->vq_ring.desc[desc_idx].next;
+ vq->vq_free_cnt++;
+ }
+
+ vq->vq_ring.desc[desc_idx].next = vq->vq_desc_head_idx;
+ vq->vq_desc_head_idx = idx;
+
+ vq->vq_used_cons_idx++;
+ vq->vq_free_cnt++;
+ }
+
+ VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_free_cnt=%d\n"
+ "vq->vq_desc_head_idx=%d",
+ vq->vq_free_cnt, vq->vq_desc_head_idx);
+
+ /* get the result */
+ if (input->status != VIRTIO_CRYPTO_OK) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("Something wrong on backend! "
+ "status=%u, session_id=%" PRIu64 "",
+ input->status, input->session_id);
+ rte_free(virt_addr_started);
+ ret = -1;
+ } else {
+ session->session_id = input->session_id;
+
+ VIRTIO_CRYPTO_SESSION_LOG_INFO("Create session successfully, "
+ "session_id=%" PRIu64 "", input->session_id);
+ rte_free(virt_addr_started);
+ ret = 0;
+ }
+
+ return ret;
+}
+
+void
+virtio_crypto_queue_release(struct virtqueue *vq)
+{
+ struct virtio_crypto_hw *hw;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (vq) {
+ hw = vq->hw;
+ /* Select and deactivate the queue */
+ VTPCI_OPS(hw)->del_queue(hw, vq);
+
+ rte_memzone_free(vq->mz);
+ rte_mempool_free(vq->mpool);
+ rte_free(vq);
+ }
+}
+
+#define MPOOL_MAX_NAME_SZ 32
+
+int
+virtio_crypto_queue_setup(struct rte_cryptodev *dev,
+ int queue_type,
+ uint16_t vtpci_queue_idx,
+ uint16_t nb_desc,
+ int socket_id,
+ struct virtqueue **pvq)
+{
+ char vq_name[VIRTQUEUE_MAX_NAME_SZ];
+ char mpool_name[MPOOL_MAX_NAME_SZ];
+ const struct rte_memzone *mz;
+ unsigned int vq_size, size;
+ struct virtio_crypto_hw *hw = dev->data->dev_private;
+ struct virtqueue *vq = NULL;
+ uint32_t i = 0;
+ uint32_t j;
+
+ PMD_INIT_FUNC_TRACE();
+
+ VIRTIO_CRYPTO_INIT_LOG_DBG("setting up queue: %u", vtpci_queue_idx);
+
+ /*
+ * Read the virtqueue size from the Queue Size field.
+ * It is always a power of 2; a value of 0 means the virtqueue
+ * does not exist.
+ */
+ vq_size = VTPCI_OPS(hw)->get_queue_num(hw, vtpci_queue_idx);
+ if (vq_size == 0) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("virtqueue does not exist");
+ return -EINVAL;
+ }
+ VIRTIO_CRYPTO_INIT_LOG_DBG("vq_size: %u", vq_size);
+
+ if (!rte_is_power_of_2(vq_size)) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("virtqueue size is not powerof 2");
+ return -EINVAL;
+ }
+
+ if (queue_type == VTCRYPTO_DATAQ) {
+ snprintf(vq_name, sizeof(vq_name), "dev%d_dataqueue%d",
+ dev->data->dev_id, vtpci_queue_idx);
+ snprintf(mpool_name, sizeof(mpool_name),
+ "dev%d_dataqueue%d_mpool",
+ dev->data->dev_id, vtpci_queue_idx);
+ } else if (queue_type == VTCRYPTO_CTRLQ) {
+ snprintf(vq_name, sizeof(vq_name), "dev%d_controlqueue",
+ dev->data->dev_id);
+ snprintf(mpool_name, sizeof(mpool_name),
+ "dev%d_controlqueue_mpool",
+ dev->data->dev_id);
+ }
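+ /* Allocate the virtqueue structure plus per-descriptor extra data. */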
+ size = RTE_ALIGN_CEIL(sizeof(*vq) +
+ vq_size * sizeof(struct vq_desc_extra),
+ RTE_CACHE_LINE_SIZE);
+ vq = rte_zmalloc_socket(vq_name, size, RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (vq == NULL) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("Can not allocate virtqueue");
+ return -ENOMEM;
+ }
+
+ if (queue_type == VTCRYPTO_DATAQ) {
+ /* pre-allocate a mempool and use it in the data plane to
+ * improve performance
+ */
+ vq->mpool = rte_mempool_lookup(mpool_name);
+ if (vq->mpool == NULL)
+ vq->mpool = rte_mempool_create(mpool_name,
+ vq_size,
+ sizeof(struct virtio_crypto_op_cookie),
+ RTE_CACHE_LINE_SIZE, 0,
+ NULL, NULL, NULL, NULL, socket_id,
+ 0);
+ if (!vq->mpool) {
+ VIRTIO_CRYPTO_DRV_LOG_ERR("Virtio Crypto PMD "
+ "Cannot create mempool");
+ goto mpool_create_err;
+ }
+ for (i = 0; i < vq_size; i++) {
+ vq->vq_descx[i].cookie =
+ rte_zmalloc("crypto PMD op cookie pointer",
+ sizeof(struct virtio_crypto_op_cookie),
+ RTE_CACHE_LINE_SIZE);
+ if (vq->vq_descx[i].cookie == NULL) {
+ VIRTIO_CRYPTO_DRV_LOG_ERR("Failed to "
+ "alloc mem for cookie");
+ goto cookie_alloc_err;
+ }
+ }
+ }
+
+ vq->hw = hw;
+ vq->dev_id = dev->data->dev_id;
+ vq->vq_queue_index = vtpci_queue_idx;
+ vq->vq_nentries = vq_size;
+
+ /*
+ * Using part of the vring entries is permitted, but the maximum
+ * is vq_size
+ */
+ if (nb_desc == 0 || nb_desc > vq_size)
+ nb_desc = vq_size;
+ vq->vq_free_cnt = nb_desc;
+
+ /*
+ * Reserve a memzone for vring elements
+ */
+ size = vring_size(vq_size, VIRTIO_PCI_VRING_ALIGN);
+ vq->vq_ring_size = RTE_ALIGN_CEIL(size, VIRTIO_PCI_VRING_ALIGN);
+ VIRTIO_CRYPTO_INIT_LOG_DBG("%s vring_size: %d, rounded_vring_size: %d",
+ (queue_type == VTCRYPTO_DATAQ) ? "dataq" : "ctrlq",
+ size, vq->vq_ring_size);
+
+ mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size,
+ socket_id, 0, VIRTIO_PCI_VRING_ALIGN);
+ if (mz == NULL) {
+ if (rte_errno == EEXIST)
+ mz = rte_memzone_lookup(vq_name);
+ if (mz == NULL) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("not enough memory");
+ goto mz_reserve_err;
+ }
+ }
+
+ /*
+ * The virtio PCI VIRTIO_PCI_QUEUE_PFN register is 32 bits wide
+ * and only accepts a 32-bit page frame number.
+ * Check that the allocated physical memory does not exceed 16TB.
+ */
+ if ((mz->phys_addr + vq->vq_ring_size - 1)
+ >> (VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("vring address shouldn't be "
+ "above 16TB!");
+ goto vring_addr_err;
+ }
+
+ memset(mz->addr, 0, mz->len);
+ vq->mz = mz;
+ vq->vq_ring_mem = mz->phys_addr;
+ vq->vq_ring_virt_mem = mz->addr;
+ VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_ring_mem(physical): 0x%"PRIx64,
+ (uint64_t)mz->phys_addr);
+ VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_ring_virt_mem: 0x%"PRIx64,
+ (uint64_t)(uintptr_t)mz->addr);
+
+ *pvq = vq;
+
+ return 0;
+
+vring_addr_err:
+ rte_memzone_free(mz);
+mz_reserve_err:
+cookie_alloc_err:
+ rte_mempool_free(vq->mpool);
+ if (i != 0) {
+ for (j = 0; j < i; j++)
+ rte_free(vq->vq_descx[j].cookie);
+ }
+mpool_create_err:
+ rte_free(vq);
+ return -ENOMEM;
+}
+
+static int
+virtio_crypto_ctrlq_setup(struct rte_cryptodev *dev, uint16_t queue_idx)
+{
+ int ret;
+ struct virtqueue *vq;
+ struct virtio_crypto_hw *hw = dev->data->dev_private;
+
+ /* if virtio device has started, do not touch the virtqueues */
+ if (dev->data->dev_started)
+ return 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ ret = virtio_crypto_queue_setup(dev, VTCRYPTO_CTRLQ, queue_idx,
+ 0, SOCKET_ID_ANY, &vq);
+ if (ret < 0) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("control vq initialization failed");
+ return ret;
+ }
+
+ hw->cvq = vq;
+
+ return 0;
+}
+
+static void
+virtio_crypto_free_queues(struct rte_cryptodev *dev)
+{
+ unsigned int i;
+ struct virtio_crypto_hw *hw = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* control queue release */
+ virtio_crypto_queue_release(hw->cvq);
+
+ /* data queue release */
+ for (i = 0; i < hw->max_dataqueues; i++)
+ virtio_crypto_queue_release(dev->data->queue_pairs[i]);
+}
+
+static int
+virtio_crypto_dev_close(struct rte_cryptodev *dev __rte_unused)
+{
+ return 0;
+}
+
+/*
+ * dev_ops for virtio, bare necessities for basic operation
+ */
+static struct rte_cryptodev_ops virtio_crypto_dev_ops = {
+ /* Device related operations */
+ .dev_configure = virtio_crypto_dev_configure,
+ .dev_start = virtio_crypto_dev_start,
+ .dev_stop = virtio_crypto_dev_stop,
+ .dev_close = virtio_crypto_dev_close,
+ .dev_infos_get = virtio_crypto_dev_info_get,
+
+ .stats_get = virtio_crypto_dev_stats_get,
+ .stats_reset = virtio_crypto_dev_stats_reset,
+
+ .queue_pair_setup = virtio_crypto_qp_setup,
+ .queue_pair_release = virtio_crypto_qp_release,
+ .queue_pair_count = NULL,
+
+ /* Crypto related operations */
+ .sym_session_get_size = virtio_crypto_sym_get_session_private_size,
+ .sym_session_configure = virtio_crypto_sym_configure_session,
+ .sym_session_clear = virtio_crypto_sym_clear_session
+};
+
+static void
+virtio_crypto_update_stats(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ unsigned int i;
+ struct virtio_crypto_hw *hw = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (stats == NULL) {
+ VIRTIO_CRYPTO_DRV_LOG_ERR("invalid pointer");
+ return;
+ }
+
+ for (i = 0; i < hw->max_dataqueues; i++) {
+ const struct virtqueue *data_queue
+ = dev->data->queue_pairs[i];
+ if (data_queue == NULL)
+ continue;
+
+ stats->enqueued_count += data_queue->packets_sent_total;
+ stats->enqueue_err_count += data_queue->packets_sent_failed;
+
+ stats->dequeued_count += data_queue->packets_received_total;
+ stats->dequeue_err_count
+ += data_queue->packets_received_failed;
+ }
+}
+
+static void
+virtio_crypto_dev_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ virtio_crypto_update_stats(dev, stats);
+}
+
+static void
+virtio_crypto_dev_stats_reset(struct rte_cryptodev *dev)
+{
+ unsigned int i;
+ struct virtio_crypto_hw *hw = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ for (i = 0; i < hw->max_dataqueues; i++) {
+ struct virtqueue *data_queue = dev->data->queue_pairs[i];
+ if (data_queue == NULL)
+ continue;
+
+ data_queue->packets_sent_total = 0;
+ data_queue->packets_sent_failed = 0;
+
+ data_queue->packets_received_total = 0;
+ data_queue->packets_received_failed = 0;
+ }
+}
+
+static int
+virtio_crypto_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id,
+ struct rte_mempool *session_pool __rte_unused)
+{
+ int ret;
+ struct virtqueue *vq;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* if virtio dev is started, do not touch the virtqueues */
+ if (dev->data->dev_started)
+ return 0;
+
+ ret = virtio_crypto_queue_setup(dev, VTCRYPTO_DATAQ, queue_pair_id,
+ qp_conf->nb_descriptors, socket_id, &vq);
+ if (ret < 0) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR(
+ "virtio crypto data queue initialization failed\n");
+ return ret;
+ }
+
+ dev->data->queue_pairs[queue_pair_id] = vq;
+
+ return 0;
+}
+
+static int
+virtio_crypto_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
+{
+ struct virtqueue *vq
+ = (struct virtqueue *)dev->data->queue_pairs[queue_pair_id];
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (vq == NULL) {
+ VIRTIO_CRYPTO_DRV_LOG_DBG("vq already freed");
+ return 0;
+ }
+
+ virtio_crypto_queue_release(vq);
+ return 0;
+}
+
+static int
+virtio_negotiate_features(struct virtio_crypto_hw *hw, uint64_t req_features)
+{
+ uint64_t host_features;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Prepare guest_features: features that the driver wants to support */
+ VIRTIO_CRYPTO_INIT_LOG_DBG("guest_features before negotiate = %" PRIx64,
+ req_features);
+
+ /* Read device(host) feature bits */
+ host_features = VTPCI_OPS(hw)->get_features(hw);
+ VIRTIO_CRYPTO_INIT_LOG_DBG("host_features before negotiate = %" PRIx64,
+ host_features);
+
+ /*
+ * Negotiate features: the subset of device feature bits accepted by
+ * the driver is written back as the guest feature bits.
+ */
+ hw->guest_features = req_features;
+ hw->guest_features = vtpci_cryptodev_negotiate_features(hw,
+ host_features);
+ VIRTIO_CRYPTO_INIT_LOG_DBG("features after negotiate = %" PRIx64,
+ hw->guest_features);
+
+ if (hw->modern) {
+ if (!vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR(
+ "VIRTIO_F_VERSION_1 features is not enabled.");
+ return -1;
+ }
+ vtpci_cryptodev_set_status(hw,
+ VIRTIO_CONFIG_STATUS_FEATURES_OK);
+ if (!(vtpci_cryptodev_get_status(hw) &
+ VIRTIO_CONFIG_STATUS_FEATURES_OK)) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("failed to set FEATURES_OK "
+ "status!");
+ return -1;
+ }
+ }
+
+ hw->req_guest_features = req_features;
+
+ return 0;
+}
+
+/* reset device and renegotiate features if needed */
+static int
+virtio_crypto_init_device(struct rte_cryptodev *cryptodev,
+ uint64_t req_features)
+{
+ struct virtio_crypto_hw *hw = cryptodev->data->dev_private;
+ struct virtio_crypto_config local_config;
+ struct virtio_crypto_config *config = &local_config;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Reset the device although not necessary at startup */
+ vtpci_cryptodev_reset(hw);
+
+ /* Tell the host we've noticed this device. */
+ vtpci_cryptodev_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);
+
+ /* Tell the host we know how to drive the device. */
+ vtpci_cryptodev_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);
+ if (virtio_negotiate_features(hw, req_features) < 0)
+ return -1;
+
+ /* Get status of the device */
+ vtpci_read_cryptodev_config(hw,
+ offsetof(struct virtio_crypto_config, status),
+ &config->status, sizeof(config->status));
+ if (config->status != VIRTIO_CRYPTO_S_HW_READY) {
+ VIRTIO_CRYPTO_DRV_LOG_ERR("accelerator hardware is "
+ "not ready");
+ return -1;
+ }
+
+ /* Get number of data queues */
+ vtpci_read_cryptodev_config(hw,
+ offsetof(struct virtio_crypto_config, max_dataqueues),
+ &config->max_dataqueues,
+ sizeof(config->max_dataqueues));
+ hw->max_dataqueues = config->max_dataqueues;
+
+ VIRTIO_CRYPTO_INIT_LOG_DBG("hw->max_dataqueues=%d",
+ hw->max_dataqueues);
+
+ return 0;
+}
+
+/*
+ * This function is called from the probe() path.
+ * It returns 0 on success.
+ */
+static int
+crypto_virtio_create(const char *name, struct rte_pci_device *pci_dev,
+ struct rte_cryptodev_pmd_init_params *init_params)
+{
+ struct rte_cryptodev *cryptodev;
+ struct virtio_crypto_hw *hw;
+
+ PMD_INIT_FUNC_TRACE();
+
+ cryptodev = rte_cryptodev_pmd_create(name, &pci_dev->device,
+ init_params);
+ if (cryptodev == NULL)
+ return -ENODEV;
+
+ cryptodev->driver_id = cryptodev_virtio_driver_id;
+ cryptodev->dev_ops = &virtio_crypto_dev_ops;
+
+ cryptodev->enqueue_burst = virtio_crypto_pkt_tx_burst;
+ cryptodev->dequeue_burst = virtio_crypto_pkt_rx_burst;
+
+ cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;
+
+ hw = cryptodev->data->dev_private;
+ hw->dev_id = cryptodev->data->dev_id;
+ hw->virtio_dev_capabilities = virtio_capabilities;
+
+ VIRTIO_CRYPTO_INIT_LOG_DBG("dev %d vendorID=0x%x deviceID=0x%x",
+ cryptodev->data->dev_id, pci_dev->id.vendor_id,
+ pci_dev->id.device_id);
+
+ /* pci device init */
+ if (vtpci_cryptodev_init(pci_dev, hw))
+ return -1;
+
+ if (virtio_crypto_init_device(cryptodev,
+ VIRTIO_CRYPTO_PMD_GUEST_FEATURES) < 0)
+ return -1;
+
+ return 0;
+}
+
+static int
+virtio_crypto_dev_uninit(struct rte_cryptodev *cryptodev)
+{
+ struct virtio_crypto_hw *hw = cryptodev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY)
+ return -EPERM;
+
+ if (cryptodev->data->dev_started) {
+ virtio_crypto_dev_stop(cryptodev);
+ virtio_crypto_dev_close(cryptodev);
+ }
+
+ cryptodev->dev_ops = NULL;
+ cryptodev->enqueue_burst = NULL;
+ cryptodev->dequeue_burst = NULL;
+
+ /* release control queue */
+ virtio_crypto_queue_release(hw->cvq);
+
+ rte_free(cryptodev->data);
+ cryptodev->data = NULL;
+
+ VIRTIO_CRYPTO_DRV_LOG_INFO("dev_uninit completed");
+
+ return 0;
+}
+
+static int
+virtio_crypto_dev_configure(struct rte_cryptodev *cryptodev,
+ struct rte_cryptodev_config *config __rte_unused)
+{
+ struct virtio_crypto_hw *hw = cryptodev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (virtio_crypto_init_device(cryptodev,
+ VIRTIO_CRYPTO_PMD_GUEST_FEATURES) < 0)
+ return -1;
+
+ /* Set up the control queue:
+ * queue indexes [0, 1, ..., (config->max_dataqueues - 1)] are data queues,
+ * and queue index config->max_dataqueues is the control queue.
+ */
+ if (virtio_crypto_ctrlq_setup(cryptodev, hw->max_dataqueues) < 0) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("control queue setup error");
+ return -1;
+ }
+ virtio_crypto_ctrlq_start(cryptodev);
+
+ return 0;
+}
+
+static void
+virtio_crypto_dev_stop(struct rte_cryptodev *dev)
+{
+ struct virtio_crypto_hw *hw = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+ VIRTIO_CRYPTO_DRV_LOG_DBG("virtio_dev_stop");
+
+ vtpci_cryptodev_reset(hw);
+
+ virtio_crypto_dev_free_mbufs(dev);
+ virtio_crypto_free_queues(dev);
+
+ dev->data->dev_started = 0;
+}
+
+static int
+virtio_crypto_dev_start(struct rte_cryptodev *dev)
+{
+ struct virtio_crypto_hw *hw = dev->data->dev_private;
+
+ if (dev->data->dev_started)
+ return 0;
+
+ /* Do final configuration before queue engine starts */
+ virtio_crypto_dataq_start(dev);
+ vtpci_cryptodev_reinit_complete(hw);
+
+ dev->data->dev_started = 1;
+
+ return 0;
+}
+
+static void
+virtio_crypto_dev_free_mbufs(struct rte_cryptodev *dev)
+{
+ uint32_t i;
+ struct virtio_crypto_hw *hw = dev->data->dev_private;
+
+ for (i = 0; i < hw->max_dataqueues; i++) {
+ VIRTIO_CRYPTO_INIT_LOG_DBG("Before freeing dataq[%d] used "
+ "and unused buf", i);
+ VIRTQUEUE_DUMP((struct virtqueue *)
+ dev->data->queue_pairs[i]);
+
+ VIRTIO_CRYPTO_INIT_LOG_DBG("queue_pairs[%d]=%p",
+ i, dev->data->queue_pairs[i]);
+
+ virtqueue_detatch_unused(dev->data->queue_pairs[i]);
+
+ VIRTIO_CRYPTO_INIT_LOG_DBG("After freeing dataq[%d] used and "
+ "unused buf", i);
+ VIRTQUEUE_DUMP(
+ (struct virtqueue *)dev->data->queue_pairs[i]);
+ }
+}
+
+static unsigned int
+virtio_crypto_sym_get_session_private_size(
+ struct rte_cryptodev *dev __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ return RTE_ALIGN_CEIL(sizeof(struct virtio_crypto_session), 16);
+}
+
+static int
+virtio_crypto_check_sym_session_paras(
+ struct rte_cryptodev *dev)
+{
+ struct virtio_crypto_hw *hw;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (unlikely(dev == NULL)) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("dev is NULL");
+ return -1;
+ }
+ if (unlikely(dev->data == NULL)) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("dev->data is NULL");
+ return -1;
+ }
+ hw = dev->data->dev_private;
+ if (unlikely(hw == NULL)) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("hw is NULL");
+ return -1;
+ }
+ if (unlikely(hw->cvq == NULL)) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("vq is NULL");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+virtio_crypto_check_sym_clear_session_paras(
+ struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ if (sess == NULL) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("sym_session is NULL");
+ return -1;
+ }
+
+ return virtio_crypto_check_sym_session_paras(dev);
+}
+
+#define NUM_ENTRY_SYM_CLEAR_SESSION 2
+
+static void
+virtio_crypto_sym_clear_session(
+ struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
+{
+ struct virtio_crypto_hw *hw;
+ struct virtqueue *vq;
+ struct virtio_crypto_session *session;
+ struct virtio_crypto_op_ctrl_req *ctrl;
+ struct vring_desc *desc;
+ uint8_t *status;
+ uint8_t needed = 1;
+ uint32_t head;
+ uint8_t *malloc_virt_addr;
+ uint64_t malloc_phys_addr;
+ uint8_t len_inhdr = sizeof(struct virtio_crypto_inhdr);
+ uint32_t len_op_ctrl_req = sizeof(struct virtio_crypto_op_ctrl_req);
+ uint32_t desc_offset = len_op_ctrl_req + len_inhdr;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (virtio_crypto_check_sym_clear_session_paras(dev, sess) < 0)
+ return;
+
+ hw = dev->data->dev_private;
+ vq = hw->cvq;
+ session = (struct virtio_crypto_session *)get_sym_session_private_data(
+ sess, cryptodev_virtio_driver_id);
+ if (session == NULL) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("Invalid session parameter");
+ return;
+ }
+
+ VIRTIO_CRYPTO_SESSION_LOG_INFO("vq->vq_desc_head_idx = %d, "
+ "vq = %p", vq->vq_desc_head_idx, vq);
+
+ if (vq->vq_free_cnt < needed) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR(
+ "vq->vq_free_cnt = %d is less than %d, "
+ "not enough", vq->vq_free_cnt, needed);
+ return;
+ }
+
+ /*
+ * malloc memory to store information of ctrl request op,
+ * returned status and desc vring
+ */
+ malloc_virt_addr = rte_malloc(NULL, len_op_ctrl_req + len_inhdr
+ + NUM_ENTRY_SYM_CLEAR_SESSION
+ * sizeof(struct vring_desc), RTE_CACHE_LINE_SIZE);
+ if (malloc_virt_addr == NULL) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("not enough heap room");
+ return;
+ }
+ malloc_phys_addr = rte_malloc_virt2iova(malloc_virt_addr);
+
+ /* assign ctrl request op part */
+ ctrl = (struct virtio_crypto_op_ctrl_req *)malloc_virt_addr;
+ ctrl->header.opcode = VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION;
+ /* default data virtqueue is 0 */
+ ctrl->header.queue_id = 0;
+ ctrl->u.destroy_session.session_id = session->session_id;
+
+ /* status part */
+ status = &(((struct virtio_crypto_inhdr *)
+ ((uint8_t *)malloc_virt_addr + len_op_ctrl_req))->status);
+ *status = VIRTIO_CRYPTO_ERR;
+
+ /* indirect desc vring part */
+ desc = (struct vring_desc *)((uint8_t *)malloc_virt_addr
+ + desc_offset);
+
+ /* ctrl request part */
+ desc[0].addr = malloc_phys_addr;
+ desc[0].len = len_op_ctrl_req;
+ desc[0].flags = VRING_DESC_F_NEXT;
+ desc[0].next = 1;
+
+ /* status part */
+ desc[1].addr = malloc_phys_addr + len_op_ctrl_req;
+ desc[1].len = len_inhdr;
+ desc[1].flags = VRING_DESC_F_WRITE;
+
+ /* Use a single ring entry pointing at the indirect descriptor table. */
+ head = vq->vq_desc_head_idx;
+ vq->vq_ring.desc[head].flags = VRING_DESC_F_INDIRECT;
+ vq->vq_ring.desc[head].addr = malloc_phys_addr + desc_offset;
+ vq->vq_ring.desc[head].len
+ = NUM_ENTRY_SYM_CLEAR_SESSION
+ * sizeof(struct vring_desc);
+ vq->vq_free_cnt -= needed;
+
+ vq->vq_desc_head_idx = vq->vq_ring.desc[head].next;
+
+ vq_update_avail_ring(vq, head);
+ vq_update_avail_idx(vq);
+
+ VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_queue_index = %d",
+ vq->vq_queue_index);
+
+ virtqueue_notify(vq);
+
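+ /* Same synchronous polling pattern as session creation: wait for the
+ * backend to consume the request, then reclaim the descriptors.
+ */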
+ rte_rmb();
+ while (vq->vq_used_cons_idx == vq->vq_ring.used->idx) {
+ rte_rmb();
+ usleep(100);
+ }
+
+ while (vq->vq_used_cons_idx != vq->vq_ring.used->idx) {
+ uint32_t idx, desc_idx, used_idx;
+ struct vring_used_elem *uep;
+
+ used_idx = (uint32_t)(vq->vq_used_cons_idx
+ & (vq->vq_nentries - 1));
+ uep = &vq->vq_ring.used->ring[used_idx];
+ idx = (uint32_t) uep->id;
+ desc_idx = idx;
+ while (vq->vq_ring.desc[desc_idx].flags
+ & VRING_DESC_F_NEXT) {
+ desc_idx = vq->vq_ring.desc[desc_idx].next;
+ vq->vq_free_cnt++;
+ }
+
+ vq->vq_ring.desc[desc_idx].next = vq->vq_desc_head_idx;
+ vq->vq_desc_head_idx = idx;
+ vq->vq_used_cons_idx++;
+ vq->vq_free_cnt++;
+ }
+
+ if (*status != VIRTIO_CRYPTO_OK) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("Close session failed "
+ "status=%"PRIu32", session_id=%"PRIu64"",
+ *status, session->session_id);
+ rte_free(malloc_virt_addr);
+ return;
+ }
+
+ VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_free_cnt=%d\n"
+ "vq->vq_desc_head_idx=%d",
+ vq->vq_free_cnt, vq->vq_desc_head_idx);
+
+ VIRTIO_CRYPTO_SESSION_LOG_INFO("Close session %"PRIu64" successfully ",
+ session->session_id);
+
+ memset(session, 0, sizeof(struct virtio_crypto_session));
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(session);
+ set_sym_session_private_data(sess, cryptodev_virtio_driver_id, NULL);
+ rte_mempool_put(sess_mp, session);
+ rte_free(malloc_virt_addr);
+}
+
+static struct rte_crypto_cipher_xform *
+virtio_crypto_get_cipher_xform(struct rte_crypto_sym_xform *xform)
+{
+ do {
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+ return &xform->cipher;
+
+ xform = xform->next;
+ } while (xform);
+
+ return NULL;
+}
+
+static struct rte_crypto_auth_xform *
+virtio_crypto_get_auth_xform(struct rte_crypto_sym_xform *xform)
+{
+ do {
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ return &xform->auth;
+
+ xform = xform->next;
+ } while (xform);
+
+ return NULL;
+}
+
+/** Get xform chain order */
+static int
+virtio_crypto_get_chain_order(struct rte_crypto_sym_xform *xform)
+{
+ if (xform == NULL)
+ return -1;
+
+ /* Cipher Only */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next == NULL)
+ return VIRTIO_CRYPTO_CMD_CIPHER;
+
+ /* Authentication Only */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next == NULL)
+ return VIRTIO_CRYPTO_CMD_AUTH;
+
+ /* Authenticate then Cipher */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+ return VIRTIO_CRYPTO_CMD_HASH_CIPHER;
+
+ /* Cipher then Authenticate */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ return VIRTIO_CRYPTO_CMD_CIPHER_HASH;
+
+ return -1;
+}
+
+static int
+virtio_crypto_sym_pad_cipher_param(
+ struct virtio_crypto_cipher_session_para *para,
+ struct rte_crypto_cipher_xform *cipher_xform)
+{
+ switch (cipher_xform->algo) {
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ para->algo = VIRTIO_CRYPTO_CIPHER_AES_CBC;
+ break;
+ default:
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("Crypto: Unsupported "
+ "Cipher alg %u", cipher_xform->algo);
+ return -1;
+ }
+
+ para->keylen = cipher_xform->key.length;
+ switch (cipher_xform->op) {
+ case RTE_CRYPTO_CIPHER_OP_ENCRYPT:
+ para->op = VIRTIO_CRYPTO_OP_ENCRYPT;
+ break;
+ case RTE_CRYPTO_CIPHER_OP_DECRYPT:
+ para->op = VIRTIO_CRYPTO_OP_DECRYPT;
+ break;
+ default:
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("Unsupported cipher operation "
+ "parameter");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+virtio_crypto_sym_pad_auth_param(
+ struct virtio_crypto_op_ctrl_req *ctrl,
+ struct rte_crypto_auth_xform *auth_xform)
+{
+ uint32_t *algo;
+ struct virtio_crypto_alg_chain_session_para *para =
+ &(ctrl->u.sym_create_session.u.chain.para);
+
+ switch (ctrl->u.sym_create_session.u.chain.para.hash_mode) {
+ case VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN:
+ algo = &(para->u.hash_param.algo);
+ break;
+ case VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH:
+ algo = &(para->u.mac_param.algo);
+ break;
+ default:
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("Unsupported hash mode %u "
+ "specified",
+ ctrl->u.sym_create_session.u.chain.para.hash_mode);
+ return -1;
+ }
+
+ switch (auth_xform->algo) {
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ *algo = VIRTIO_CRYPTO_MAC_HMAC_SHA1;
+ break;
+ default:
+ VIRTIO_CRYPTO_SESSION_LOG_ERR(
+ "Crypto: Undefined Hash algo %u specified",
+ auth_xform->algo);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+virtio_crypto_sym_pad_op_ctrl_req(
+ struct virtio_crypto_op_ctrl_req *ctrl,
+ struct rte_crypto_sym_xform *xform, bool is_chained,
+ uint8_t **cipher_key_data, uint8_t **auth_key_data,
+ struct virtio_crypto_session *session)
+{
+ int ret;
+ struct rte_crypto_auth_xform *auth_xform = NULL;
+ struct rte_crypto_cipher_xform *cipher_xform = NULL;
+
+ /* Get cipher xform from crypto xform chain */
+ cipher_xform = virtio_crypto_get_cipher_xform(xform);
+ if (cipher_xform) {
+ if (cipher_xform->iv.length > VIRTIO_CRYPTO_MAX_IV_SIZE) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR(
+ "cipher IV size cannot be longer than %u",
+ VIRTIO_CRYPTO_MAX_IV_SIZE);
+ return -1;
+ }
+ if (is_chained)
+ ret = virtio_crypto_sym_pad_cipher_param(
+ &ctrl->u.sym_create_session.u.chain.para
+ .cipher_param, cipher_xform);
+ else
+ ret = virtio_crypto_sym_pad_cipher_param(
+ &ctrl->u.sym_create_session.u.cipher.para,
+ cipher_xform);
+
+ if (ret < 0) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR(
+ "pad cipher parameter failed");
+ return -1;
+ }
+
+ *cipher_key_data = cipher_xform->key.data;
+
+ session->iv.offset = cipher_xform->iv.offset;
+ session->iv.length = cipher_xform->iv.length;
+ }
+
+ /* Get auth xform from crypto xform chain */
+ auth_xform = virtio_crypto_get_auth_xform(xform);
+ if (auth_xform) {
+ /* FIXME: support VIRTIO_CRYPTO_SYM_HASH_MODE_NESTED */
+ struct virtio_crypto_alg_chain_session_para *para =
+ &(ctrl->u.sym_create_session.u.chain.para);
+ if (auth_xform->key.length) {
+ para->hash_mode = VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH;
+ para->u.mac_param.auth_key_len =
+ (uint32_t)auth_xform->key.length;
+ para->u.mac_param.hash_result_len =
+ auth_xform->digest_length;
+
+ *auth_key_data = auth_xform->key.data;
+ } else {
+ para->hash_mode = VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN;
+ para->u.hash_param.hash_result_len =
+ auth_xform->digest_length;
+ }
+
+ ret = virtio_crypto_sym_pad_auth_param(ctrl, auth_xform);
+ if (ret < 0) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("pad auth parameter "
+ "failed");
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int
+virtio_crypto_check_sym_configure_session_paras(
+ struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sym_sess,
+ struct rte_mempool *mempool)
+{
+ if (unlikely(xform == NULL) || unlikely(sym_sess == NULL) ||
+ unlikely(mempool == NULL)) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("NULL pointer");
+ return -1;
+ }
+
+ if (virtio_crypto_check_sym_session_paras(dev) < 0)
+ return -1;
+
+ return 0;
+}
+
+static int
+virtio_crypto_sym_configure_session(
+ struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mempool)
+{
+ int ret;
+ struct virtio_crypto_session crypto_sess;
+ void *session_private = &crypto_sess;
+ struct virtio_crypto_session *session;
+ struct virtio_crypto_op_ctrl_req *ctrl_req;
+ enum virtio_crypto_cmd_id cmd_id;
+ uint8_t *cipher_key_data = NULL;
+ uint8_t *auth_key_data = NULL;
+ struct virtio_crypto_hw *hw;
+ struct virtqueue *control_vq;
+
+ PMD_INIT_FUNC_TRACE();
+
+ ret = virtio_crypto_check_sym_configure_session_paras(dev, xform,
+ sess, mempool);
+ if (ret < 0) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("Invalid parameters");
+ return ret;
+ }
+
+ if (rte_mempool_get(mempool, &session_private)) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR(
+ "Couldn't get object from session mempool");
+ return -ENOMEM;
+ }
+
+ session = (struct virtio_crypto_session *)session_private;
+ memset(session, 0, sizeof(struct virtio_crypto_session));
+ ctrl_req = &session->ctrl;
+ ctrl_req->header.opcode = VIRTIO_CRYPTO_CIPHER_CREATE_SESSION;
+ /* FIXME: support multiqueue */
+ ctrl_req->header.queue_id = 0;
+
+ hw = dev->data->dev_private;
+ control_vq = hw->cvq;
+
+ cmd_id = virtio_crypto_get_chain_order(xform);
+ if (cmd_id == VIRTIO_CRYPTO_CMD_CIPHER_HASH)
+ ctrl_req->u.sym_create_session.u.chain.para.alg_chain_order
+ = VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH;
+ if (cmd_id == VIRTIO_CRYPTO_CMD_HASH_CIPHER)
+ ctrl_req->u.sym_create_session.u.chain.para.alg_chain_order
+ = VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER;
+
+ switch (cmd_id) {
+ case VIRTIO_CRYPTO_CMD_CIPHER_HASH:
+ case VIRTIO_CRYPTO_CMD_HASH_CIPHER:
+ ctrl_req->u.sym_create_session.op_type
+ = VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING;
+
+ ret = virtio_crypto_sym_pad_op_ctrl_req(ctrl_req,
+ xform, true, &cipher_key_data, &auth_key_data, session);
+ if (ret < 0) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR(
+ "padding sym op ctrl req failed");
+ goto error_out;
+ }
+ ret = virtio_crypto_send_command(control_vq, ctrl_req,
+ cipher_key_data, auth_key_data, session);
+ if (ret < 0) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR(
+ "create session failed: %d", ret);
+ goto error_out;
+ }
+ break;
+ case VIRTIO_CRYPTO_CMD_CIPHER:
+ ctrl_req->u.sym_create_session.op_type
+ = VIRTIO_CRYPTO_SYM_OP_CIPHER;
+ ret = virtio_crypto_sym_pad_op_ctrl_req(ctrl_req, xform,
+ false, &cipher_key_data, &auth_key_data, session);
+ if (ret < 0) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR(
+ "padding sym op ctrl req failed");
+ goto error_out;
+ }
+ ret = virtio_crypto_send_command(control_vq, ctrl_req,
+ cipher_key_data, NULL, session);
+ if (ret < 0) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR(
+ "create session failed: %d", ret);
+ goto error_out;
+ }
+ break;
+ default:
+ VIRTIO_CRYPTO_SESSION_LOG_ERR(
+ "Unsupported operation chain order parameter");
+ goto error_out;
+ }
+
+ set_sym_session_private_data(sess, dev->driver_id,
+ session_private);
+
+ return 0;
+
+error_out:
+ return -1;
+}
+
+static void
+virtio_crypto_dev_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *info)
+{
+ struct virtio_crypto_hw *hw = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (info != NULL) {
+ info->driver_id = cryptodev_virtio_driver_id;
+ info->feature_flags = dev->feature_flags;
+ info->max_nb_queue_pairs = hw->max_dataqueues;
+ /* No limit on the number of sessions */
+ info->sym.max_nb_sessions = 0;
+ info->capabilities = hw->virtio_dev_capabilities;
+ }
+}
+
+static int
+crypto_virtio_pci_probe(
+ struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ struct rte_cryptodev_pmd_init_params init_params = {
+ .name = "",
+ .socket_id = rte_socket_id(),
+ .private_data_size = sizeof(struct virtio_crypto_hw)
+ };
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+
+ VIRTIO_CRYPTO_DRV_LOG_DBG("Found Crypto device at %02x:%02x.%x",
+ pci_dev->addr.bus,
+ pci_dev->addr.devid,
+ pci_dev->addr.function);
+
+ rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
+
+ return crypto_virtio_create(name, pci_dev, &init_params);
+}
+
+static int
+crypto_virtio_pci_remove(
+ struct rte_pci_device *pci_dev __rte_unused)
+{
+ struct rte_cryptodev *cryptodev;
+ char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+
+ if (pci_dev == NULL)
+ return -EINVAL;
+
+ rte_pci_device_name(&pci_dev->addr, cryptodev_name,
+ sizeof(cryptodev_name));
+
+ cryptodev = rte_cryptodev_pmd_get_named_dev(cryptodev_name);
+ if (cryptodev == NULL)
+ return -ENODEV;
+
+ return virtio_crypto_dev_uninit(cryptodev);
+}
+
+static struct rte_pci_driver rte_virtio_crypto_driver = {
+ .id_table = pci_id_virtio_crypto_map,
+ .drv_flags = 0,
+ .probe = crypto_virtio_pci_probe,
+ .remove = crypto_virtio_pci_remove
+};
+
+static struct cryptodev_driver virtio_crypto_drv;
+
+RTE_PMD_REGISTER_PCI(CRYPTODEV_NAME_VIRTIO_PMD, rte_virtio_crypto_driver);
+RTE_PMD_REGISTER_CRYPTO_DRIVER(virtio_crypto_drv,
+ rte_virtio_crypto_driver.driver,
+ cryptodev_virtio_driver_id);
+
+RTE_INIT(virtio_crypto_init_log)
+{
+ virtio_crypto_logtype_init = rte_log_register("pmd.crypto.virtio.init");
+ if (virtio_crypto_logtype_init >= 0)
+ rte_log_set_level(virtio_crypto_logtype_init, RTE_LOG_NOTICE);
+
+ virtio_crypto_logtype_session =
+ rte_log_register("pmd.crypto.virtio.session");
+ if (virtio_crypto_logtype_session >= 0)
+ rte_log_set_level(virtio_crypto_logtype_session,
+ RTE_LOG_NOTICE);
+
+ virtio_crypto_logtype_rx = rte_log_register("pmd.crypto.virtio.rx");
+ if (virtio_crypto_logtype_rx >= 0)
+ rte_log_set_level(virtio_crypto_logtype_rx, RTE_LOG_NOTICE);
+
+ virtio_crypto_logtype_tx = rte_log_register("pmd.crypto.virtio.tx");
+ if (virtio_crypto_logtype_tx >= 0)
+ rte_log_set_level(virtio_crypto_logtype_tx, RTE_LOG_NOTICE);
+
+ virtio_crypto_logtype_driver =
+ rte_log_register("pmd.crypto.virtio.driver");
+ if (virtio_crypto_logtype_driver >= 0)
+ rte_log_set_level(virtio_crypto_logtype_driver, RTE_LOG_NOTICE);
+}
diff --git a/src/spdk/dpdk/drivers/crypto/virtio/virtio_cryptodev.h b/src/spdk/dpdk/drivers/crypto/virtio/virtio_cryptodev.h
new file mode 100644
index 00000000..0fd7b722
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/virtio/virtio_cryptodev.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+ */
+
+#ifndef _VIRTIO_CRYPTODEV_H_
+#define _VIRTIO_CRYPTODEV_H_
+
+#include "virtio_crypto.h"
+#include "virtio_pci.h"
+#include "virtio_ring.h"
+
+/* Features desired/implemented by this driver. */
+#define VIRTIO_CRYPTO_PMD_GUEST_FEATURES (1ULL << VIRTIO_F_VERSION_1)
+
+#define CRYPTODEV_NAME_VIRTIO_PMD crypto_virtio
+
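+/*
+ * Maximum number of indirect descriptors per op: request header, IV, AAD,
+ * source data, destination data, digest and status
+ * (see virtqueue_crypto_sym_enqueue_xmit()).
+ */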
+#define NUM_ENTRY_VIRTIO_CRYPTO_OP 7
+
+#define VIRTIO_CRYPTO_MAX_IV_SIZE 16
+
+extern uint8_t cryptodev_virtio_driver_id;
+
+enum virtio_crypto_cmd_id {
+ VIRTIO_CRYPTO_CMD_CIPHER = 0,
+ VIRTIO_CRYPTO_CMD_AUTH = 1,
+ VIRTIO_CRYPTO_CMD_CIPHER_HASH = 2,
+ VIRTIO_CRYPTO_CMD_HASH_CIPHER = 3
+};
+
+struct virtio_crypto_op_cookie {
+ struct virtio_crypto_op_data_req data_req;
+ struct virtio_crypto_inhdr inhdr;
+ struct vring_desc desc[NUM_ENTRY_VIRTIO_CRYPTO_OP];
+ uint8_t iv[VIRTIO_CRYPTO_MAX_IV_SIZE];
+};
+
+/*
+ * Control queue function prototype
+ */
+void virtio_crypto_ctrlq_start(struct rte_cryptodev *dev);
+
+/*
+ * Data queue function prototype
+ */
+void virtio_crypto_dataq_start(struct rte_cryptodev *dev);
+
+int virtio_crypto_queue_setup(struct rte_cryptodev *dev,
+ int queue_type,
+ uint16_t vtpci_queue_idx,
+ uint16_t nb_desc,
+ int socket_id,
+ struct virtqueue **pvq);
+
+void virtio_crypto_queue_release(struct virtqueue *vq);
+
+uint16_t virtio_crypto_pkt_tx_burst(void *tx_queue,
+ struct rte_crypto_op **tx_pkts,
+ uint16_t nb_pkts);
+
+uint16_t virtio_crypto_pkt_rx_burst(void *tx_queue,
+ struct rte_crypto_op **tx_pkts,
+ uint16_t nb_pkts);
+
+#endif /* _VIRTIO_CRYPTODEV_H_ */
diff --git a/src/spdk/dpdk/drivers/crypto/virtio/virtio_logs.h b/src/spdk/dpdk/drivers/crypto/virtio/virtio_logs.h
new file mode 100644
index 00000000..26a286cf
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/virtio/virtio_logs.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+ */
+
+#ifndef _VIRTIO_LOGS_H_
+#define _VIRTIO_LOGS_H_
+
+#include <rte_log.h>
+
+#define PMD_INIT_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, RTE_LOGTYPE_PMD, \
+ "PMD: %s(): " fmt "\n", __func__, ##args)
+
+#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
+
+extern int virtio_crypto_logtype_init;
+
+#define VIRTIO_CRYPTO_INIT_LOG_IMPL(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, virtio_crypto_logtype_init, \
+ "INIT: %s(): " fmt "\n", __func__, ##args)
+
+#define VIRTIO_CRYPTO_INIT_LOG_INFO(fmt, args...) \
+ VIRTIO_CRYPTO_INIT_LOG_IMPL(INFO, fmt, ## args)
+
+#define VIRTIO_CRYPTO_INIT_LOG_DBG(fmt, args...) \
+ VIRTIO_CRYPTO_INIT_LOG_IMPL(DEBUG, fmt, ## args)
+
+#define VIRTIO_CRYPTO_INIT_LOG_ERR(fmt, args...) \
+ VIRTIO_CRYPTO_INIT_LOG_IMPL(ERR, fmt, ## args)
+
+extern int virtio_crypto_logtype_session;
+
+#define VIRTIO_CRYPTO_SESSION_LOG_IMPL(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, virtio_crypto_logtype_session, \
+ "SESSION: %s(): " fmt "\n", __func__, ##args)
+
+#define VIRTIO_CRYPTO_SESSION_LOG_INFO(fmt, args...) \
+ VIRTIO_CRYPTO_SESSION_LOG_IMPL(INFO, fmt, ## args)
+
+#define VIRTIO_CRYPTO_SESSION_LOG_DBG(fmt, args...) \
+ VIRTIO_CRYPTO_SESSION_LOG_IMPL(DEBUG, fmt, ## args)
+
+#define VIRTIO_CRYPTO_SESSION_LOG_ERR(fmt, args...) \
+ VIRTIO_CRYPTO_SESSION_LOG_IMPL(ERR, fmt, ## args)
+
+extern int virtio_crypto_logtype_rx;
+
+#define VIRTIO_CRYPTO_RX_LOG_IMPL(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, virtio_crypto_logtype_rx, \
+ "RX: %s(): " fmt "\n", __func__, ##args)
+
+#define VIRTIO_CRYPTO_RX_LOG_INFO(fmt, args...) \
+ VIRTIO_CRYPTO_RX_LOG_IMPL(INFO, fmt, ## args)
+
+#define VIRTIO_CRYPTO_RX_LOG_DBG(fmt, args...) \
+ VIRTIO_CRYPTO_RX_LOG_IMPL(DEBUG, fmt, ## args)
+
+#define VIRTIO_CRYPTO_RX_LOG_ERR(fmt, args...) \
+ VIRTIO_CRYPTO_RX_LOG_IMPL(ERR, fmt, ## args)
+
+extern int virtio_crypto_logtype_tx;
+
+#define VIRTIO_CRYPTO_TX_LOG_IMPL(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, virtio_crypto_logtype_tx, \
+ "TX: %s(): " fmt "\n", __func__, ##args)
+
+#define VIRTIO_CRYPTO_TX_LOG_INFO(fmt, args...) \
+ VIRTIO_CRYPTO_TX_LOG_IMPL(INFO, fmt, ## args)
+
+#define VIRTIO_CRYPTO_TX_LOG_DBG(fmt, args...) \
+ VIRTIO_CRYPTO_TX_LOG_IMPL(DEBUG, fmt, ## args)
+
+#define VIRTIO_CRYPTO_TX_LOG_ERR(fmt, args...) \
+ VIRTIO_CRYPTO_TX_LOG_IMPL(ERR, fmt, ## args)
+
+extern int virtio_crypto_logtype_driver;
+
+#define VIRTIO_CRYPTO_DRV_LOG_IMPL(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, virtio_crypto_logtype_driver, \
+ "DRIVER: %s(): " fmt "\n", __func__, ##args)
+
+#define VIRTIO_CRYPTO_DRV_LOG_INFO(fmt, args...) \
+ VIRTIO_CRYPTO_DRV_LOG_IMPL(INFO, fmt, ## args)
+
+#define VIRTIO_CRYPTO_DRV_LOG_DBG(fmt, args...) \
+ VIRTIO_CRYPTO_DRV_LOG_IMPL(DEBUG, fmt, ## args)
+
+#define VIRTIO_CRYPTO_DRV_LOG_ERR(fmt, args...) \
+ VIRTIO_CRYPTO_DRV_LOG_IMPL(ERR, fmt, ## args)
+
+#endif /* _VIRTIO_LOGS_H_ */
diff --git a/src/spdk/dpdk/drivers/crypto/virtio/virtio_pci.c b/src/spdk/dpdk/drivers/crypto/virtio/virtio_pci.c
new file mode 100644
index 00000000..832c465b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/virtio/virtio_pci.c
@@ -0,0 +1,462 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+ */
+
+#include <stdint.h>
+
+#ifdef RTE_EXEC_ENV_LINUXAPP
+ #include <dirent.h>
+ #include <fcntl.h>
+#endif
+
+#include <rte_io.h>
+#include <rte_bus.h>
+
+#include "virtio_pci.h"
+#include "virtqueue.h"
+
+/*
+ * The following macros are derived from linux/pci_regs.h; however,
+ * we can't simply include that header here, as there is no such
+ * file on non-Linux platforms.
+ */
+#define PCI_CAPABILITY_LIST 0x34
+#define PCI_CAP_ID_VNDR 0x09
+#define PCI_CAP_ID_MSIX 0x11
+
+/*
+ * The remaining space is defined by each driver as the per-driver
+ * configuration space.
+ */
+#define VIRTIO_PCI_CONFIG(hw) \
+ (((hw)->use_msix == VIRTIO_MSIX_ENABLED) ? 24 : 20)
+
+struct virtio_hw_internal virtio_hw_internal[RTE_MAX_VIRTIO_CRYPTO];
+
+static inline int
+check_vq_phys_addr_ok(struct virtqueue *vq)
+{
+ /* The virtio PCI device VIRTIO_PCI_QUEUE_PFN register is 32-bit,
+ * and only accepts a 32-bit page frame number.
+ * Check if the allocated physical memory exceeds 16TB.
+ */
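+ /* With VIRTIO_PCI_QUEUE_ADDR_SHIFT = 12, a 32-bit page frame number
+ * covers 2^(32 + 12) bytes = 16TB, which is the limit enforced here.
+ */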
+ if ((vq->vq_ring_mem + vq->vq_ring_size - 1) >>
+ (VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("vring address shouldn't be above 16TB!");
+ return 0;
+ }
+
+ return 1;
+}
+
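+/*
+ * Write a 64-bit value as two 32-bit MMIO writes (low word first); the
+ * modern common config exposes queue addresses as lo/hi register pairs.
+ */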
+static inline void
+io_write64_twopart(uint64_t val, uint32_t *lo, uint32_t *hi)
+{
+ rte_write32(val & ((1ULL << 32) - 1), lo);
+ rte_write32(val >> 32, hi);
+}
+
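+/*
+ * Read the device-specific config space byte by byte, retrying the whole
+ * read if the config generation counter changes in the middle (a
+ * seqlock-style consistency check).
+ */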
+static void
+modern_read_dev_config(struct virtio_crypto_hw *hw, size_t offset,
+ void *dst, int length)
+{
+ int i;
+ uint8_t *p;
+ uint8_t old_gen, new_gen;
+
+ do {
+ old_gen = rte_read8(&hw->common_cfg->config_generation);
+
+ p = dst;
+ for (i = 0; i < length; i++)
+ *p++ = rte_read8((uint8_t *)hw->dev_cfg + offset + i);
+
+ new_gen = rte_read8(&hw->common_cfg->config_generation);
+ } while (old_gen != new_gen);
+}
+
+static void
+modern_write_dev_config(struct virtio_crypto_hw *hw, size_t offset,
+ const void *src, int length)
+{
+ int i;
+ const uint8_t *p = src;
+
+ for (i = 0; i < length; i++)
+ rte_write8((*p++), (((uint8_t *)hw->dev_cfg) + offset + i));
+}
+
+static uint64_t
+modern_get_features(struct virtio_crypto_hw *hw)
+{
+ uint32_t features_lo, features_hi;
+
+ rte_write32(0, &hw->common_cfg->device_feature_select);
+ features_lo = rte_read32(&hw->common_cfg->device_feature);
+
+ rte_write32(1, &hw->common_cfg->device_feature_select);
+ features_hi = rte_read32(&hw->common_cfg->device_feature);
+
+ return ((uint64_t)features_hi << 32) | features_lo;
+}
+
+static void
+modern_set_features(struct virtio_crypto_hw *hw, uint64_t features)
+{
+ rte_write32(0, &hw->common_cfg->guest_feature_select);
+ rte_write32(features & ((1ULL << 32) - 1),
+ &hw->common_cfg->guest_feature);
+
+ rte_write32(1, &hw->common_cfg->guest_feature_select);
+ rte_write32(features >> 32,
+ &hw->common_cfg->guest_feature);
+}
+
+static uint8_t
+modern_get_status(struct virtio_crypto_hw *hw)
+{
+ return rte_read8(&hw->common_cfg->device_status);
+}
+
+static void
+modern_set_status(struct virtio_crypto_hw *hw, uint8_t status)
+{
+ rte_write8(status, &hw->common_cfg->device_status);
+}
+
+static void
+modern_reset(struct virtio_crypto_hw *hw)
+{
+ modern_set_status(hw, VIRTIO_CONFIG_STATUS_RESET);
+ modern_get_status(hw);
+}
+
+static uint8_t
+modern_get_isr(struct virtio_crypto_hw *hw)
+{
+ return rte_read8(hw->isr);
+}
+
+static uint16_t
+modern_set_config_irq(struct virtio_crypto_hw *hw, uint16_t vec)
+{
+ rte_write16(vec, &hw->common_cfg->msix_config);
+ return rte_read16(&hw->common_cfg->msix_config);
+}
+
+static uint16_t
+modern_set_queue_irq(struct virtio_crypto_hw *hw, struct virtqueue *vq,
+ uint16_t vec)
+{
+ rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);
+ rte_write16(vec, &hw->common_cfg->queue_msix_vector);
+ return rte_read16(&hw->common_cfg->queue_msix_vector);
+}
+
+static uint16_t
+modern_get_queue_num(struct virtio_crypto_hw *hw, uint16_t queue_id)
+{
+ rte_write16(queue_id, &hw->common_cfg->queue_select);
+ return rte_read16(&hw->common_cfg->queue_size);
+}
+
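+/*
+ * Program one virtqueue into the device: the descriptor table, avail ring
+ * and used ring are carved out of the queue's ring memory (the used ring
+ * aligned up to VIRTIO_PCI_VRING_ALIGN), and the per-queue notify address
+ * is derived from queue_notify_off and the notify multiplier.
+ */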
+static int
+modern_setup_queue(struct virtio_crypto_hw *hw, struct virtqueue *vq)
+{
+ uint64_t desc_addr, avail_addr, used_addr;
+ uint16_t notify_off;
+
+ if (!check_vq_phys_addr_ok(vq))
+ return -1;
+
+ desc_addr = vq->vq_ring_mem;
+ avail_addr = desc_addr + vq->vq_nentries * sizeof(struct vring_desc);
+ used_addr = RTE_ALIGN_CEIL(avail_addr + offsetof(struct vring_avail,
+ ring[vq->vq_nentries]),
+ VIRTIO_PCI_VRING_ALIGN);
+
+ rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);
+
+ io_write64_twopart(desc_addr, &hw->common_cfg->queue_desc_lo,
+ &hw->common_cfg->queue_desc_hi);
+ io_write64_twopart(avail_addr, &hw->common_cfg->queue_avail_lo,
+ &hw->common_cfg->queue_avail_hi);
+ io_write64_twopart(used_addr, &hw->common_cfg->queue_used_lo,
+ &hw->common_cfg->queue_used_hi);
+
+ notify_off = rte_read16(&hw->common_cfg->queue_notify_off);
+ vq->notify_addr = (void *)((uint8_t *)hw->notify_base +
+ notify_off * hw->notify_off_multiplier);
+
+ rte_write16(1, &hw->common_cfg->queue_enable);
+
+ VIRTIO_CRYPTO_INIT_LOG_DBG("queue %u addresses:", vq->vq_queue_index);
+ VIRTIO_CRYPTO_INIT_LOG_DBG("\t desc_addr: %" PRIx64, desc_addr);
+ VIRTIO_CRYPTO_INIT_LOG_DBG("\t avail_addr: %" PRIx64, avail_addr);
+ VIRTIO_CRYPTO_INIT_LOG_DBG("\t used_addr: %" PRIx64, used_addr);
+ VIRTIO_CRYPTO_INIT_LOG_DBG("\t notify addr: %p (notify offset: %u)",
+ vq->notify_addr, notify_off);
+
+ return 0;
+}
+
+static void
+modern_del_queue(struct virtio_crypto_hw *hw, struct virtqueue *vq)
+{
+ rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);
+
+ io_write64_twopart(0, &hw->common_cfg->queue_desc_lo,
+ &hw->common_cfg->queue_desc_hi);
+ io_write64_twopart(0, &hw->common_cfg->queue_avail_lo,
+ &hw->common_cfg->queue_avail_hi);
+ io_write64_twopart(0, &hw->common_cfg->queue_used_lo,
+ &hw->common_cfg->queue_used_hi);
+
+ rte_write16(0, &hw->common_cfg->queue_enable);
+}
+
+static void
+modern_notify_queue(struct virtio_crypto_hw *hw __rte_unused,
+ struct virtqueue *vq)
+{
+ rte_write16(vq->vq_queue_index, vq->notify_addr);
+}
+
+const struct virtio_pci_ops virtio_crypto_modern_ops = {
+ .read_dev_cfg = modern_read_dev_config,
+ .write_dev_cfg = modern_write_dev_config,
+ .reset = modern_reset,
+ .get_status = modern_get_status,
+ .set_status = modern_set_status,
+ .get_features = modern_get_features,
+ .set_features = modern_set_features,
+ .get_isr = modern_get_isr,
+ .set_config_irq = modern_set_config_irq,
+ .set_queue_irq = modern_set_queue_irq,
+ .get_queue_num = modern_get_queue_num,
+ .setup_queue = modern_setup_queue,
+ .del_queue = modern_del_queue,
+ .notify_queue = modern_notify_queue,
+};
+
+void
+vtpci_read_cryptodev_config(struct virtio_crypto_hw *hw, size_t offset,
+ void *dst, int length)
+{
+ VTPCI_OPS(hw)->read_dev_cfg(hw, offset, dst, length);
+}
+
+void
+vtpci_write_cryptodev_config(struct virtio_crypto_hw *hw, size_t offset,
+ const void *src, int length)
+{
+ VTPCI_OPS(hw)->write_dev_cfg(hw, offset, src, length);
+}
+
+uint64_t
+vtpci_cryptodev_negotiate_features(struct virtio_crypto_hw *hw,
+ uint64_t host_features)
+{
+ uint64_t features;
+
+ /*
+ * Limit negotiated features to what the driver, virtqueue, and
+ * host all support.
+ */
+ features = host_features & hw->guest_features;
+ VTPCI_OPS(hw)->set_features(hw, features);
+
+ return features;
+}
+
+void
+vtpci_cryptodev_reset(struct virtio_crypto_hw *hw)
+{
+ VTPCI_OPS(hw)->set_status(hw, VIRTIO_CONFIG_STATUS_RESET);
+ /* flush status write */
+ VTPCI_OPS(hw)->get_status(hw);
+}
+
+void
+vtpci_cryptodev_reinit_complete(struct virtio_crypto_hw *hw)
+{
+ vtpci_cryptodev_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER_OK);
+}
+
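+/*
+ * Device status bits are cumulative: OR the new bit into the current
+ * status unless the device is being reset.
+ */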
+void
+vtpci_cryptodev_set_status(struct virtio_crypto_hw *hw, uint8_t status)
+{
+ if (status != VIRTIO_CONFIG_STATUS_RESET)
+ status |= VTPCI_OPS(hw)->get_status(hw);
+
+ VTPCI_OPS(hw)->set_status(hw, status);
+}
+
+uint8_t
+vtpci_cryptodev_get_status(struct virtio_crypto_hw *hw)
+{
+ return VTPCI_OPS(hw)->get_status(hw);
+}
+
+uint8_t
+vtpci_cryptodev_isr(struct virtio_crypto_hw *hw)
+{
+ return VTPCI_OPS(hw)->get_isr(hw);
+}
+
+static void *
+get_cfg_addr(struct rte_pci_device *dev, struct virtio_pci_cap *cap)
+{
+ uint8_t bar = cap->bar;
+ uint32_t length = cap->length;
+ uint32_t offset = cap->offset;
+ uint8_t *base;
+
+ if (bar >= PCI_MAX_RESOURCE) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("invalid bar: %u", bar);
+ return NULL;
+ }
+
+ if (offset + length < offset) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("offset(%u) + length(%u) overflows",
+ offset, length);
+ return NULL;
+ }
+
+ if (offset + length > dev->mem_resource[bar].len) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR(
+ "invalid cap: overflows bar space: %u > %" PRIu64,
+ offset + length, dev->mem_resource[bar].len);
+ return NULL;
+ }
+
+ base = dev->mem_resource[bar].addr;
+ if (base == NULL) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("bar %u base addr is NULL", bar);
+ return NULL;
+ }
+
+ return base + offset;
+}
+
+#define PCI_MSIX_ENABLE 0x8000
+
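+/*
+ * Walk the PCI capability list, note whether MSI-X is enabled, and map
+ * the modern virtio capability structures (common, notify, ISR and
+ * device-specific config) from the BARs they point at.
+ */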
+static int
+virtio_read_caps(struct rte_pci_device *dev, struct virtio_crypto_hw *hw)
+{
+ uint8_t pos;
+ struct virtio_pci_cap cap;
+ int ret;
+
+ if (rte_pci_map_device(dev)) {
+ VIRTIO_CRYPTO_INIT_LOG_DBG("failed to map pci device!");
+ return -1;
+ }
+
+ ret = rte_pci_read_config(dev, &pos, 1, PCI_CAPABILITY_LIST);
+ if (ret < 0) {
+ VIRTIO_CRYPTO_INIT_LOG_DBG("failed to read pci capability list");
+ return -1;
+ }
+
+ while (pos) {
+ ret = rte_pci_read_config(dev, &cap, sizeof(cap), pos);
+ if (ret < 0) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR(
+ "failed to read pci cap at pos: %x", pos);
+ break;
+ }
+
+ if (cap.cap_vndr == PCI_CAP_ID_MSIX) {
+ /* Transitional devices would also have this capability,
+ * so we also check whether MSI-X is enabled.
+ * The 1st byte is the cap ID; the 2nd byte is the position of
+ * the next cap; the next two bytes are the flags.
+ */
+ uint16_t flags = ((uint16_t *)&cap)[1];
+
+ if (flags & PCI_MSIX_ENABLE)
+ hw->use_msix = VIRTIO_MSIX_ENABLED;
+ else
+ hw->use_msix = VIRTIO_MSIX_DISABLED;
+ }
+
+ if (cap.cap_vndr != PCI_CAP_ID_VNDR) {
+ VIRTIO_CRYPTO_INIT_LOG_DBG(
+ "[%2x] skipping non VNDR cap id: %02x",
+ pos, cap.cap_vndr);
+ goto next;
+ }
+
+ VIRTIO_CRYPTO_INIT_LOG_DBG(
+ "[%2x] cfg type: %u, bar: %u, offset: %04x, len: %u",
+ pos, cap.cfg_type, cap.bar, cap.offset, cap.length);
+
+ switch (cap.cfg_type) {
+ case VIRTIO_PCI_CAP_COMMON_CFG:
+ hw->common_cfg = get_cfg_addr(dev, &cap);
+ break;
+ case VIRTIO_PCI_CAP_NOTIFY_CFG:
+ rte_pci_read_config(dev, &hw->notify_off_multiplier,
+ 4, pos + sizeof(cap));
+ hw->notify_base = get_cfg_addr(dev, &cap);
+ break;
+ case VIRTIO_PCI_CAP_DEVICE_CFG:
+ hw->dev_cfg = get_cfg_addr(dev, &cap);
+ break;
+ case VIRTIO_PCI_CAP_ISR_CFG:
+ hw->isr = get_cfg_addr(dev, &cap);
+ break;
+ }
+
+next:
+ pos = cap.cap_next;
+ }
+
+ if (hw->common_cfg == NULL || hw->notify_base == NULL ||
+ hw->dev_cfg == NULL || hw->isr == NULL) {
+ VIRTIO_CRYPTO_INIT_LOG_INFO("no modern virtio pci device found.");
+ return -1;
+ }
+
+ VIRTIO_CRYPTO_INIT_LOG_INFO("found modern virtio pci device.");
+
+ VIRTIO_CRYPTO_INIT_LOG_DBG("common cfg mapped at: %p", hw->common_cfg);
+ VIRTIO_CRYPTO_INIT_LOG_DBG("device cfg mapped at: %p", hw->dev_cfg);
+ VIRTIO_CRYPTO_INIT_LOG_DBG("isr cfg mapped at: %p", hw->isr);
+ VIRTIO_CRYPTO_INIT_LOG_DBG("notify base: %p, notify off multiplier: %u",
+ hw->notify_base, hw->notify_off_multiplier);
+
+ return 0;
+}
+
+/*
+ * Return -1:
+ * if there is an error mapping with VFIO/UIO.
+ * if there is a port map error when the driver type is KDRV_NONE.
+ * if the device is whitelisted but the driver type is KDRV_UNKNOWN.
+ * Return 1 if a kernel driver is managing the device.
+ * Return 0 on success.
+ */
+int
+vtpci_cryptodev_init(struct rte_pci_device *dev, struct virtio_crypto_hw *hw)
+{
+ /*
+ * Check whether we can read the virtio PCI caps, which exist
+ * only on modern PCI devices. If that fails, fall back to
+ * legacy virtio handling.
+ */
+ if (virtio_read_caps(dev, hw) == 0) {
+ VIRTIO_CRYPTO_INIT_LOG_INFO("modern virtio pci detected.");
+ virtio_hw_internal[hw->dev_id].vtpci_ops =
+ &virtio_crypto_modern_ops;
+ hw->modern = 1;
+ return 0;
+ }
+
+ /*
+ * virtio crypto conforms to virtio 1.0 and doesn't support
+ * legacy mode
+ */
+ return -1;
+}
diff --git a/src/spdk/dpdk/drivers/crypto/virtio/virtio_pci.h b/src/spdk/dpdk/drivers/crypto/virtio/virtio_pci.h
new file mode 100644
index 00000000..604ec366
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/virtio/virtio_pci.h
@@ -0,0 +1,253 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+ */
+
+#ifndef _VIRTIO_PCI_H_
+#define _VIRTIO_PCI_H_
+
+#include <stdint.h>
+
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_cryptodev.h>
+
+#include "virtio_crypto.h"
+
+struct virtqueue;
+
+/* VirtIO PCI vendor/device ID. */
+#define VIRTIO_CRYPTO_PCI_VENDORID 0x1AF4
+#define VIRTIO_CRYPTO_PCI_DEVICEID 0x1054
+
+/* VirtIO ABI version; this must match exactly. */
+#define VIRTIO_PCI_ABI_VERSION 0
+
+/*
+ * VirtIO Header, located in BAR 0.
+ */
+#define VIRTIO_PCI_HOST_FEATURES 0 /* host's supported features (32bit, RO)*/
+#define VIRTIO_PCI_GUEST_FEATURES 4 /* guest's supported features (32, RW) */
+#define VIRTIO_PCI_QUEUE_PFN 8 /* physical address of VQ (32, RW) */
+#define VIRTIO_PCI_QUEUE_NUM 12 /* number of ring entries (16, RO) */
+#define VIRTIO_PCI_QUEUE_SEL 14 /* current VQ selection (16, RW) */
+#define VIRTIO_PCI_QUEUE_NOTIFY 16 /* notify host regarding VQ (16, RW) */
+#define VIRTIO_PCI_STATUS 18 /* device status register (8, RW) */
+#define VIRTIO_PCI_ISR 19 /* interrupt status register, reading
+ * also clears the register (8, RO)
+ */
+/* Only if MSIX is enabled: */
+
+/* configuration change vector (16, RW) */
+#define VIRTIO_MSI_CONFIG_VECTOR 20
+/* vector for selected VQ notifications */
+#define VIRTIO_MSI_QUEUE_VECTOR 22
+
+/* The bit of the ISR which indicates a device has an interrupt. */
+#define VIRTIO_PCI_ISR_INTR 0x1
+/* The bit of the ISR which indicates a device configuration change. */
+#define VIRTIO_PCI_ISR_CONFIG 0x2
+/* Vector value used to disable MSI for queue. */
+#define VIRTIO_MSI_NO_VECTOR 0xFFFF
+
+/* Status byte for guest to report progress. */
+#define VIRTIO_CONFIG_STATUS_RESET 0x00
+#define VIRTIO_CONFIG_STATUS_ACK 0x01
+#define VIRTIO_CONFIG_STATUS_DRIVER 0x02
+#define VIRTIO_CONFIG_STATUS_DRIVER_OK 0x04
+#define VIRTIO_CONFIG_STATUS_FEATURES_OK 0x08
+#define VIRTIO_CONFIG_STATUS_FAILED 0x80
+
+/*
+ * Each virtqueue indirect descriptor list must be physically contiguous.
+ * To allow us to malloc(9) each list individually, limit the number
+ * supported to what will fit in one page. With 4KB pages, this is a limit
+ * of 256 descriptors. If there is ever a need for more, we can switch to
+ * contigmalloc(9) for the larger allocations, similar to what
+ * bus_dmamem_alloc(9) does.
+ *
+ * Note the sizeof(struct vring_desc) is 16 bytes.
+ */
+#define VIRTIO_MAX_INDIRECT ((int) (PAGE_SIZE / 16))
+
+/* Do we get callbacks when the ring is completely used, even if we've
+ * suppressed them?
+ */
+#define VIRTIO_F_NOTIFY_ON_EMPTY 24
+
+/* Can the device handle any descriptor layout? */
+#define VIRTIO_F_ANY_LAYOUT 27
+
+/* We support indirect buffer descriptors */
+#define VIRTIO_RING_F_INDIRECT_DESC 28
+
+#define VIRTIO_F_VERSION_1 32
+#define VIRTIO_F_IOMMU_PLATFORM 33
+
+/* The Guest publishes the used index for which it expects an interrupt
+ * at the end of the avail ring. Host should ignore the avail->flags field.
+ */
+/* The Host publishes the avail index for which it expects a kick
+ * at the end of the used ring. Guest should ignore the used->flags field.
+ */
+#define VIRTIO_RING_F_EVENT_IDX 29
+
+/* Common configuration */
+#define VIRTIO_PCI_CAP_COMMON_CFG 1
+/* Notifications */
+#define VIRTIO_PCI_CAP_NOTIFY_CFG 2
+/* ISR Status */
+#define VIRTIO_PCI_CAP_ISR_CFG 3
+/* Device specific configuration */
+#define VIRTIO_PCI_CAP_DEVICE_CFG 4
+/* PCI configuration access */
+#define VIRTIO_PCI_CAP_PCI_CFG 5
+
+/* This is the PCI capability header: */
+struct virtio_pci_cap {
+ uint8_t cap_vndr; /* Generic PCI field: PCI_CAP_ID_VNDR */
+ uint8_t cap_next; /* Generic PCI field: next ptr. */
+ uint8_t cap_len; /* Generic PCI field: capability length */
+ uint8_t cfg_type; /* Identifies the structure. */
+ uint8_t bar; /* Where to find it. */
+ uint8_t padding[3]; /* Pad to full dword. */
+ uint32_t offset; /* Offset within bar. */
+ uint32_t length; /* Length of the structure, in bytes. */
+};
+
+struct virtio_pci_notify_cap {
+ struct virtio_pci_cap cap;
+ uint32_t notify_off_multiplier; /* Multiplier for queue_notify_off. */
+};
+
+/* Fields in VIRTIO_PCI_CAP_COMMON_CFG: */
+struct virtio_pci_common_cfg {
+ /* About the whole device. */
+ uint32_t device_feature_select; /* read-write */
+ uint32_t device_feature; /* read-only */
+ uint32_t guest_feature_select; /* read-write */
+ uint32_t guest_feature; /* read-write */
+ uint16_t msix_config; /* read-write */
+ uint16_t num_queues; /* read-only */
+ uint8_t device_status; /* read-write */
+ uint8_t config_generation; /* read-only */
+
+ /* About a specific virtqueue. */
+ uint16_t queue_select; /* read-write */
+ uint16_t queue_size; /* read-write, power of 2. */
+ uint16_t queue_msix_vector; /* read-write */
+ uint16_t queue_enable; /* read-write */
+ uint16_t queue_notify_off; /* read-only */
+ uint32_t queue_desc_lo; /* read-write */
+ uint32_t queue_desc_hi; /* read-write */
+ uint32_t queue_avail_lo; /* read-write */
+ uint32_t queue_avail_hi; /* read-write */
+ uint32_t queue_used_lo; /* read-write */
+ uint32_t queue_used_hi; /* read-write */
+};
+
+struct virtio_crypto_hw;
+
+struct virtio_pci_ops {
+ void (*read_dev_cfg)(struct virtio_crypto_hw *hw, size_t offset,
+ void *dst, int len);
+ void (*write_dev_cfg)(struct virtio_crypto_hw *hw, size_t offset,
+ const void *src, int len);
+ void (*reset)(struct virtio_crypto_hw *hw);
+
+ uint8_t (*get_status)(struct virtio_crypto_hw *hw);
+ void (*set_status)(struct virtio_crypto_hw *hw, uint8_t status);
+
+ uint64_t (*get_features)(struct virtio_crypto_hw *hw);
+ void (*set_features)(struct virtio_crypto_hw *hw, uint64_t features);
+
+ uint8_t (*get_isr)(struct virtio_crypto_hw *hw);
+
+ uint16_t (*set_config_irq)(struct virtio_crypto_hw *hw, uint16_t vec);
+
+ uint16_t (*set_queue_irq)(struct virtio_crypto_hw *hw,
+ struct virtqueue *vq, uint16_t vec);
+
+ uint16_t (*get_queue_num)(struct virtio_crypto_hw *hw,
+ uint16_t queue_id);
+ int (*setup_queue)(struct virtio_crypto_hw *hw, struct virtqueue *vq);
+ void (*del_queue)(struct virtio_crypto_hw *hw, struct virtqueue *vq);
+ void (*notify_queue)(struct virtio_crypto_hw *hw, struct virtqueue *vq);
+};
+
+struct virtio_crypto_hw {
+ /* control queue */
+ struct virtqueue *cvq;
+ uint16_t dev_id;
+ uint16_t max_dataqueues;
+ uint64_t req_guest_features;
+ uint64_t guest_features;
+ uint8_t use_msix;
+ uint8_t modern;
+ uint32_t notify_off_multiplier;
+ uint8_t *isr;
+ uint16_t *notify_base;
+ struct virtio_pci_common_cfg *common_cfg;
+ struct virtio_crypto_config *dev_cfg;
+ const struct rte_cryptodev_capabilities *virtio_dev_capabilities;
+};
+
+/*
+ * While virtio_crypto_hw is stored in shared memory, this structure
+ * holds information that may vary locally across processes in the
+ * multi-process model, for example the vtpci_ops pointer.
+ */
+struct virtio_hw_internal {
+ const struct virtio_pci_ops *vtpci_ops;
+ struct rte_pci_ioport io;
+};
+
+#define VTPCI_OPS(hw) (virtio_hw_internal[(hw)->dev_id].vtpci_ops)
+#define VTPCI_IO(hw) (&virtio_hw_internal[(hw)->dev_id].io)
+
+extern struct virtio_hw_internal virtio_hw_internal[RTE_MAX_VIRTIO_CRYPTO];
+
+/*
+ * How many bits to shift physical queue address written to QUEUE_PFN.
+ * 12 is historical, and due to x86 page size.
+ */
+#define VIRTIO_PCI_QUEUE_ADDR_SHIFT 12
+
+/* The alignment to use between consumer and producer parts of vring. */
+#define VIRTIO_PCI_VRING_ALIGN 4096
+
+enum virtio_msix_status {
+ VIRTIO_MSIX_NONE = 0,
+ VIRTIO_MSIX_DISABLED = 1,
+ VIRTIO_MSIX_ENABLED = 2
+};
+
+static inline int
+vtpci_with_feature(struct virtio_crypto_hw *hw, uint64_t bit)
+{
+ return (hw->guest_features & (1ULL << bit)) != 0;
+}
+
+/*
+ * Function declaration from virtio_pci.c
+ */
+int vtpci_cryptodev_init(struct rte_pci_device *dev,
+ struct virtio_crypto_hw *hw);
+void vtpci_cryptodev_reset(struct virtio_crypto_hw *hw);
+
+void vtpci_cryptodev_reinit_complete(struct virtio_crypto_hw *hw);
+
+uint8_t vtpci_cryptodev_get_status(struct virtio_crypto_hw *hw);
+void vtpci_cryptodev_set_status(struct virtio_crypto_hw *hw, uint8_t status);
+
+uint64_t vtpci_cryptodev_negotiate_features(struct virtio_crypto_hw *hw,
+ uint64_t host_features);
+
+void vtpci_write_cryptodev_config(struct virtio_crypto_hw *hw, size_t offset,
+ const void *src, int length);
+
+void vtpci_read_cryptodev_config(struct virtio_crypto_hw *hw, size_t offset,
+ void *dst, int length);
+
+uint8_t vtpci_cryptodev_isr(struct virtio_crypto_hw *hw);
+
+#endif /* _VIRTIO_PCI_H_ */
diff --git a/src/spdk/dpdk/drivers/crypto/virtio/virtio_ring.h b/src/spdk/dpdk/drivers/crypto/virtio/virtio_ring.h
new file mode 100644
index 00000000..ee306745
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/virtio/virtio_ring.h
@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+ */
+
+#ifndef _VIRTIO_RING_H_
+#define _VIRTIO_RING_H_
+
+#include <stdint.h>
+
+#include <rte_common.h>
+
+/* This marks a buffer as continuing via the next field. */
+#define VRING_DESC_F_NEXT 1
+/* This marks a buffer as write-only (otherwise read-only). */
+#define VRING_DESC_F_WRITE 2
+/* This means the buffer contains a list of buffer descriptors. */
+#define VRING_DESC_F_INDIRECT 4
+
+/* The Host uses this in used->flags to advise the Guest: don't kick me
+ * when you add a buffer. It's unreliable, so it's simply an
+ * optimization. Guest will still kick if it's out of buffers.
+ */
+#define VRING_USED_F_NO_NOTIFY 1
+/* The Guest uses this in avail->flags to advise the Host: don't
+ * interrupt me when you consume a buffer. It's unreliable, so it's
+ * simply an optimization.
+ */
+#define VRING_AVAIL_F_NO_INTERRUPT 1
+
+/* VirtIO ring descriptors: 16 bytes.
+ * These can chain together via "next".
+ */
+struct vring_desc {
+ uint64_t addr; /* Address (guest-physical). */
+ uint32_t len; /* Length. */
+ uint16_t flags; /* The flags as indicated above. */
+ uint16_t next; /* We chain unused descriptors via this. */
+};
+
+struct vring_avail {
+ uint16_t flags;
+ uint16_t idx;
+ uint16_t ring[0];
+};
+
+/* id is a 16bit index. uint32_t is used here for ids for padding reasons. */
+struct vring_used_elem {
+ /* Index of start of used descriptor chain. */
+ uint32_t id;
+ /* Total length of the descriptor chain which was written to. */
+ uint32_t len;
+};
+
+struct vring_used {
+ uint16_t flags;
+ volatile uint16_t idx;
+ struct vring_used_elem ring[0];
+};
+
+struct vring {
+ unsigned int num;
+ struct vring_desc *desc;
+ struct vring_avail *avail;
+ struct vring_used *used;
+};
+
+/* The standard layout for the ring is a contiguous chunk of memory which
+ * looks like this. We assume num is a power of 2.
+ *
+ * struct vring {
+ * // The actual descriptors (16 bytes each)
+ * struct vring_desc desc[num];
+ *
+ * // A ring of available descriptor heads with free-running index.
+ * __u16 avail_flags;
+ * __u16 avail_idx;
+ * __u16 available[num];
+ * __u16 used_event_idx;
+ *
+ * // Padding to the next align boundary.
+ * char pad[];
+ *
+ * // A ring of used descriptor heads with free-running index.
+ * __u16 used_flags;
+ * __u16 used_idx;
+ * struct vring_used_elem used[num];
+ * __u16 avail_event_idx;
+ * };
+ *
+ * NOTE: for VirtIO PCI, align is 4096.
+ */
+
+/*
+ * We publish the used event index at the end of the available ring, and vice
+ * versa. They are at the end for backwards compatibility.
+ */
+#define vring_used_event(vr) ((vr)->avail->ring[(vr)->num])
+#define vring_avail_event(vr) (*(uint16_t *)&(vr)->used->ring[(vr)->num])
+
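+/*
+ * Total ring size in bytes for a given queue depth and alignment.
+ * Illustrative example: num = 1024 and align = 4096 gives 16KB of
+ * descriptors plus the avail ring, padded up to a 4KB boundary, plus
+ * the used ring (roughly 28KB in total).
+ */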
+static inline size_t
+vring_size(unsigned int num, unsigned long align)
+{
+ size_t size;
+
+ size = num * sizeof(struct vring_desc);
+ size += sizeof(struct vring_avail) + (num * sizeof(uint16_t));
+ size = RTE_ALIGN_CEIL(size, align);
+ size += sizeof(struct vring_used) +
+ (num * sizeof(struct vring_used_elem));
+ return size;
+}
+
+static inline void
+vring_init(struct vring *vr, unsigned int num, uint8_t *p,
+ unsigned long align)
+{
+ vr->num = num;
+ vr->desc = (struct vring_desc *) p;
+ vr->avail = (struct vring_avail *) (p +
+ num * sizeof(struct vring_desc));
+ vr->used = (void *)
+ RTE_ALIGN_CEIL((uintptr_t)(&vr->avail->ring[num]), align);
+}
+
+/*
+ * The following is used with VIRTIO_RING_F_EVENT_IDX.
+ * Assuming a given event_idx value from the other side, if we have
+ * just incremented index from old to new_idx, should we trigger an
+ * event?
+ */
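+/*
+ * Illustrative example: with old = 5, new_idx = 8 and event_idx = 6,
+ * (uint16_t)(8 - 6 - 1) = 1 is less than (uint16_t)(8 - 5) = 3, so an
+ * event is needed; with event_idx = 9 the left side wraps to a large
+ * value and no event fires.
+ */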
+static inline int
+vring_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
+{
+ return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
+}
+
+#endif /* _VIRTIO_RING_H_ */
diff --git a/src/spdk/dpdk/drivers/crypto/virtio/virtio_rxtx.c b/src/spdk/dpdk/drivers/crypto/virtio/virtio_rxtx.c
new file mode 100644
index 00000000..e32a1ecd
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/virtio/virtio_rxtx.c
@@ -0,0 +1,527 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+ */
+#include <rte_cryptodev_pmd.h>
+
+#include "virtqueue.h"
+#include "virtio_cryptodev.h"
+#include "virtio_crypto_algs.h"
+
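+/*
+ * Return the descriptor chain starting at desc_idx to the free list,
+ * splicing it onto the tail of any existing free chain and updating
+ * vq_free_cnt and the head/tail indexes.
+ */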
+static void
+vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
+{
+ struct vring_desc *dp, *dp_tail;
+ struct vq_desc_extra *dxp;
+ uint16_t desc_idx_last = desc_idx;
+
+ dp = &vq->vq_ring.desc[desc_idx];
+ dxp = &vq->vq_descx[desc_idx];
+ vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt + dxp->ndescs);
+ if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
+ while (dp->flags & VRING_DESC_F_NEXT) {
+ desc_idx_last = dp->next;
+ dp = &vq->vq_ring.desc[dp->next];
+ }
+ }
+ dxp->ndescs = 0;
+
+ /*
+ * We must append the existing free chain, if any, to the end of
+ * the newly freed chain. If the virtqueue was completely used, then
+ * head would be VQ_RING_DESC_CHAIN_END (ASSERTed above).
+ */
+ if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END) {
+ vq->vq_desc_head_idx = desc_idx;
+ } else {
+ dp_tail = &vq->vq_ring.desc[vq->vq_desc_tail_idx];
+ dp_tail->next = desc_idx;
+ }
+
+ vq->vq_desc_tail_idx = desc_idx_last;
+ dp->next = VQ_RING_DESC_CHAIN_END;
+}
+
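+/*
+ * Drain up to num completed ops from the used ring: translate the
+ * virtio_crypto_inhdr status into an rte_crypto_op status, return the op
+ * cookie to the mempool and free the descriptor chain.
+ */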
+static uint16_t
+virtqueue_dequeue_burst_rx(struct virtqueue *vq,
+ struct rte_crypto_op **rx_pkts, uint16_t num)
+{
+ struct vring_used_elem *uep;
+ struct rte_crypto_op *cop;
+ uint16_t used_idx, desc_idx;
+ uint16_t i;
+ struct virtio_crypto_inhdr *inhdr;
+ struct virtio_crypto_op_cookie *op_cookie;
+
+ /* Caller does the check */
+ for (i = 0; i < num ; i++) {
+ used_idx = (uint16_t)(vq->vq_used_cons_idx
+ & (vq->vq_nentries - 1));
+ uep = &vq->vq_ring.used->ring[used_idx];
+ desc_idx = (uint16_t)uep->id;
+ cop = (struct rte_crypto_op *)
+ vq->vq_descx[desc_idx].crypto_op;
+ if (unlikely(cop == NULL)) {
+ VIRTIO_CRYPTO_RX_LOG_DBG("vring descriptor with no "
+ "mbuf cookie at %u",
+ vq->vq_used_cons_idx);
+ break;
+ }
+
+ op_cookie = (struct virtio_crypto_op_cookie *)
+ vq->vq_descx[desc_idx].cookie;
+ inhdr = &(op_cookie->inhdr);
+ switch (inhdr->status) {
+ case VIRTIO_CRYPTO_OK:
+ cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ break;
+ case VIRTIO_CRYPTO_ERR:
+ cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ vq->packets_received_failed++;
+ break;
+ case VIRTIO_CRYPTO_BADMSG:
+ cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ vq->packets_received_failed++;
+ break;
+ case VIRTIO_CRYPTO_NOTSUPP:
+ cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ vq->packets_received_failed++;
+ break;
+ case VIRTIO_CRYPTO_INVSESS:
+ cop->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+ vq->packets_received_failed++;
+ break;
+ default:
+ break;
+ }
+
+ vq->packets_received_total++;
+
+ rx_pkts[i] = cop;
+ rte_mempool_put(vq->mpool, op_cookie);
+
+ vq->vq_used_cons_idx++;
+ vq_ring_free_chain(vq, desc_idx);
+ vq->vq_descx[desc_idx].crypto_op = NULL;
+ }
+
+ return i;
+}
+
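+/*
+ * Build the per-op data request header from the session's create-session
+ * control request: pick the opcode from the cipher direction and copy the
+ * cipher/chain lengths and offsets out of the rte_crypto_sym_op.
+ */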
+static int
+virtqueue_crypto_sym_pkt_header_arrange(
+ struct rte_crypto_op *cop,
+ struct virtio_crypto_op_data_req *data,
+ struct virtio_crypto_session *session)
+{
+ struct rte_crypto_sym_op *sym_op = cop->sym;
+ struct virtio_crypto_op_data_req *req_data = data;
+ struct virtio_crypto_op_ctrl_req *ctrl = &session->ctrl;
+ struct virtio_crypto_sym_create_session_req *sym_sess_req =
+ &ctrl->u.sym_create_session;
+ struct virtio_crypto_alg_chain_session_para *chain_para =
+ &sym_sess_req->u.chain.para;
+ struct virtio_crypto_cipher_session_para *cipher_para;
+
+ req_data->header.session_id = session->session_id;
+
+ switch (sym_sess_req->op_type) {
+ case VIRTIO_CRYPTO_SYM_OP_CIPHER:
+ req_data->u.sym_req.op_type = VIRTIO_CRYPTO_SYM_OP_CIPHER;
+
+ cipher_para = &sym_sess_req->u.cipher.para;
+ if (cipher_para->op == VIRTIO_CRYPTO_OP_ENCRYPT)
+ req_data->header.opcode = VIRTIO_CRYPTO_CIPHER_ENCRYPT;
+ else
+ req_data->header.opcode = VIRTIO_CRYPTO_CIPHER_DECRYPT;
+
+ req_data->u.sym_req.u.cipher.para.iv_len
+ = session->iv.length;
+
+ req_data->u.sym_req.u.cipher.para.src_data_len =
+ (sym_op->cipher.data.length +
+ sym_op->cipher.data.offset);
+ req_data->u.sym_req.u.cipher.para.dst_data_len =
+ req_data->u.sym_req.u.cipher.para.src_data_len;
+ break;
+ case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
+ req_data->u.sym_req.op_type =
+ VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING;
+
+ cipher_para = &chain_para->cipher_param;
+ if (cipher_para->op == VIRTIO_CRYPTO_OP_ENCRYPT)
+ req_data->header.opcode = VIRTIO_CRYPTO_CIPHER_ENCRYPT;
+ else
+ req_data->header.opcode = VIRTIO_CRYPTO_CIPHER_DECRYPT;
+
+ req_data->u.sym_req.u.chain.para.iv_len = session->iv.length;
+ req_data->u.sym_req.u.chain.para.aad_len = session->aad.length;
+
+ req_data->u.sym_req.u.chain.para.src_data_len =
+ (sym_op->cipher.data.length +
+ sym_op->cipher.data.offset);
+ req_data->u.sym_req.u.chain.para.dst_data_len =
+ req_data->u.sym_req.u.chain.para.src_data_len;
+ req_data->u.sym_req.u.chain.para.cipher_start_src_offset =
+ sym_op->cipher.data.offset;
+ req_data->u.sym_req.u.chain.para.len_to_cipher =
+ sym_op->cipher.data.length;
+ req_data->u.sym_req.u.chain.para.hash_start_src_offset =
+ sym_op->auth.data.offset;
+ req_data->u.sym_req.u.chain.para.len_to_hash =
+ sym_op->auth.data.length;
+ req_data->u.sym_req.u.chain.para.aad_len =
+ chain_para->aad_len;
+
+ if (chain_para->hash_mode == VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN)
+ req_data->u.sym_req.u.chain.para.hash_result_len =
+ chain_para->u.hash_param.hash_result_len;
+ if (chain_para->hash_mode ==
+ VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH)
+ req_data->u.sym_req.u.chain.para.hash_result_len =
+ chain_para->u.mac_param.hash_result_len;
+ break;
+ default:
+ return -1;
+ }
+
+ return 0;
+}
+
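+/*
+ * Enqueue one symmetric op using a single indirect descriptor. The op
+ * cookie taken from the mempool is laid out as virtio_crypto_op_data_req,
+ * then virtio_crypto_inhdr (status), then the indirect descriptor table,
+ * then the IV copy, matching struct virtio_crypto_op_cookie.
+ */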
+static int
+virtqueue_crypto_sym_enqueue_xmit(
+ struct virtqueue *txvq,
+ struct rte_crypto_op *cop)
+{
+ uint16_t idx = 0;
+ uint16_t num_entry;
+ uint16_t needed = 1;
+ uint16_t head_idx;
+ struct vq_desc_extra *dxp;
+ struct vring_desc *start_dp;
+ struct vring_desc *desc;
+ uint64_t indirect_op_data_req_phys_addr;
+ uint16_t req_data_len = sizeof(struct virtio_crypto_op_data_req);
+ uint32_t indirect_vring_addr_offset = req_data_len +
+ sizeof(struct virtio_crypto_inhdr);
+ uint32_t indirect_iv_addr_offset = indirect_vring_addr_offset +
+ sizeof(struct vring_desc) * NUM_ENTRY_VIRTIO_CRYPTO_OP;
+ struct rte_crypto_sym_op *sym_op = cop->sym;
+ struct virtio_crypto_session *session =
+ (struct virtio_crypto_session *)get_sym_session_private_data(
+ cop->sym->session, cryptodev_virtio_driver_id);
+ struct virtio_crypto_op_data_req *op_data_req;
+ uint32_t hash_result_len = 0;
+ struct virtio_crypto_op_cookie *crypto_op_cookie;
+ struct virtio_crypto_alg_chain_session_para *para;
+
+ if (unlikely(sym_op->m_src->nb_segs != 1))
+ return -EMSGSIZE;
+ if (unlikely(txvq->vq_free_cnt == 0))
+ return -ENOSPC;
+ if (unlikely(txvq->vq_free_cnt < needed))
+ return -EMSGSIZE;
+ head_idx = txvq->vq_desc_head_idx;
+ if (unlikely(head_idx >= txvq->vq_nentries))
+ return -EFAULT;
+ if (unlikely(session == NULL))
+ return -EFAULT;
+
+ dxp = &txvq->vq_descx[head_idx];
+
+ if (rte_mempool_get(txvq->mpool, &dxp->cookie)) {
+ VIRTIO_CRYPTO_TX_LOG_ERR("can not get cookie");
+ return -EFAULT;
+ }
+ crypto_op_cookie = dxp->cookie;
+ indirect_op_data_req_phys_addr =
+ rte_mempool_virt2iova(crypto_op_cookie);
+ op_data_req = (struct virtio_crypto_op_data_req *)crypto_op_cookie;
+
+ if (virtqueue_crypto_sym_pkt_header_arrange(cop, op_data_req, session))
+ return -EFAULT;
+
+ /* status is initialized to VIRTIO_CRYPTO_ERR */
+ ((struct virtio_crypto_inhdr *)
+ ((uint8_t *)op_data_req + req_data_len))->status =
+ VIRTIO_CRYPTO_ERR;
+
+ /* point to indirect vring entry */
+ desc = (struct vring_desc *)
+ ((uint8_t *)op_data_req + indirect_vring_addr_offset);
+ for (idx = 0; idx < (NUM_ENTRY_VIRTIO_CRYPTO_OP - 1); idx++)
+ desc[idx].next = idx + 1;
+ desc[NUM_ENTRY_VIRTIO_CRYPTO_OP - 1].next = VQ_RING_DESC_CHAIN_END;
+
+ idx = 0;
+
+ /* indirect vring: first part, virtio_crypto_op_data_req */
+ desc[idx].addr = indirect_op_data_req_phys_addr;
+ desc[idx].len = req_data_len;
+ desc[idx++].flags = VRING_DESC_F_NEXT;
+
+ /* indirect vring: iv of cipher */
+ if (session->iv.length) {
+ if (cop->phys_addr)
+ desc[idx].addr = cop->phys_addr + session->iv.offset;
+ else {
+ rte_memcpy(crypto_op_cookie->iv,
+ rte_crypto_op_ctod_offset(cop,
+ uint8_t *, session->iv.offset),
+ session->iv.length);
+ desc[idx].addr = indirect_op_data_req_phys_addr +
+ indirect_iv_addr_offset;
+ }
+
+ desc[idx].len = session->iv.length;
+ desc[idx++].flags = VRING_DESC_F_NEXT;
+ }
+
+ /* indirect vring: additional auth data */
+ if (session->aad.length) {
+ desc[idx].addr = session->aad.phys_addr;
+ desc[idx].len = session->aad.length;
+ desc[idx++].flags = VRING_DESC_F_NEXT;
+ }
+
+ /* indirect vring: src data */
+ desc[idx].addr = rte_pktmbuf_mtophys_offset(sym_op->m_src, 0);
+ desc[idx].len = (sym_op->cipher.data.offset
+ + sym_op->cipher.data.length);
+ desc[idx++].flags = VRING_DESC_F_NEXT;
+
+ /* indirect vring: dst data */
+ if (sym_op->m_dst) {
+ desc[idx].addr = rte_pktmbuf_mtophys_offset(sym_op->m_dst, 0);
+ desc[idx].len = (sym_op->cipher.data.offset
+ + sym_op->cipher.data.length);
+ } else {
+ desc[idx].addr = rte_pktmbuf_mtophys_offset(sym_op->m_src, 0);
+ desc[idx].len = (sym_op->cipher.data.offset
+ + sym_op->cipher.data.length);
+ }
+ desc[idx++].flags = VRING_DESC_F_WRITE | VRING_DESC_F_NEXT;
+
+ /* indirect vring: digest result */
+ para = &(session->ctrl.u.sym_create_session.u.chain.para);
+ if (para->hash_mode == VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN)
+ hash_result_len = para->u.hash_param.hash_result_len;
+ if (para->hash_mode == VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH)
+ hash_result_len = para->u.mac_param.hash_result_len;
+ if (hash_result_len > 0) {
+ desc[idx].addr = sym_op->auth.digest.phys_addr;
+ desc[idx].len = hash_result_len;
+ desc[idx++].flags = VRING_DESC_F_WRITE | VRING_DESC_F_NEXT;
+ }
+
+ /* indirect vring: last part, status returned */
+ desc[idx].addr = indirect_op_data_req_phys_addr + req_data_len;
+ desc[idx].len = sizeof(struct virtio_crypto_inhdr);
+ desc[idx++].flags = VRING_DESC_F_WRITE;
+
+ num_entry = idx;
+
+ /* save the info needed when receiving packets */
+ dxp->crypto_op = (void *)cop;
+ dxp->ndescs = needed;
+
+ /* use a single buffer */
+ start_dp = txvq->vq_ring.desc;
+ start_dp[head_idx].addr = indirect_op_data_req_phys_addr +
+ indirect_vring_addr_offset;
+ start_dp[head_idx].len = num_entry * sizeof(struct vring_desc);
+ start_dp[head_idx].flags = VRING_DESC_F_INDIRECT;
+
+ idx = start_dp[head_idx].next;
+ txvq->vq_desc_head_idx = idx;
+ if (txvq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
+ txvq->vq_desc_tail_idx = idx;
+ txvq->vq_free_cnt = (uint16_t)(txvq->vq_free_cnt - needed);
+ vq_update_avail_ring(txvq, head_idx);
+
+ return 0;
+}
+
+static int
+virtqueue_crypto_enqueue_xmit(struct virtqueue *txvq,
+ struct rte_crypto_op *cop)
+{
+ int ret;
+
+ switch (cop->type) {
+ case RTE_CRYPTO_OP_TYPE_SYMMETRIC:
+ ret = virtqueue_crypto_sym_enqueue_xmit(txvq, cop);
+ break;
+ default:
+ VIRTIO_CRYPTO_TX_LOG_ERR("invalid crypto op type %u",
+ cop->type);
+ ret = -EFAULT;
+ break;
+ }
+
+ return ret;
+}
+
+static int
+virtio_crypto_vring_start(struct virtqueue *vq)
+{
+ struct virtio_crypto_hw *hw = vq->hw;
+ int i, size = vq->vq_nentries;
+ struct vring *vr = &vq->vq_ring;
+ uint8_t *ring_mem = vq->vq_ring_virt_mem;
+
+ PMD_INIT_FUNC_TRACE();
+
+ vring_init(vr, size, ring_mem, VIRTIO_PCI_VRING_ALIGN);
+ vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
+ vq->vq_free_cnt = vq->vq_nentries;
+
+ /* Chain all the descriptors in the ring with an END */
+ for (i = 0; i < size - 1; i++)
+ vr->desc[i].next = (uint16_t)(i + 1);
+ vr->desc[i].next = VQ_RING_DESC_CHAIN_END;
+
+ /*
+ * Disable the device (host) from interrupting the guest
+ */
+ virtqueue_disable_intr(vq);
+
+ /*
+ * Set the guest physical address of the virtqueue
+ * in the device's VIRTIO_PCI_QUEUE_PFN config register
+ * to share it with the backend
+ */
+ if (VTPCI_OPS(hw)->setup_queue(hw, vq) < 0) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("setup_queue failed");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void
+virtio_crypto_ctrlq_start(struct rte_cryptodev *dev)
+{
+ struct virtio_crypto_hw *hw = dev->data->dev_private;
+
+ if (hw->cvq) {
+ virtio_crypto_vring_start(hw->cvq);
+ VIRTQUEUE_DUMP((struct virtqueue *)hw->cvq);
+ }
+}
+
+void
+virtio_crypto_dataq_start(struct rte_cryptodev *dev)
+{
+ /*
+ * Start data vrings
+ * - Set up the vring structure for each data queue
+ */
+ uint16_t i;
+ struct virtio_crypto_hw *hw = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Start data vring. */
+ for (i = 0; i < hw->max_dataqueues; i++) {
+ virtio_crypto_vring_start(dev->data->queue_pairs[i]);
+ VIRTQUEUE_DUMP((struct virtqueue *)dev->data->queue_pairs[i]);
+ }
+}
+
+/* vring size of data queue is 1024 */
+#define VIRTIO_MBUF_BURST_SZ 1024
+
+uint16_t
+virtio_crypto_pkt_rx_burst(void *tx_queue, struct rte_crypto_op **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct virtqueue *txvq = tx_queue;
+ uint16_t nb_used, num, nb_rx;
+
+ nb_used = VIRTQUEUE_NUSED(txvq);
+
+ virtio_rmb();
+
+ num = (uint16_t)(likely(nb_used <= nb_pkts) ? nb_used : nb_pkts);
+ num = (uint16_t)(likely(num <= VIRTIO_MBUF_BURST_SZ)
+ ? num : VIRTIO_MBUF_BURST_SZ);
+
+ if (num == 0)
+ return 0;
+
+ nb_rx = virtqueue_dequeue_burst_rx(txvq, rx_pkts, num);
+ VIRTIO_CRYPTO_RX_LOG_DBG("used:%d dequeue:%d", nb_used, num);
+
+ return nb_rx;
+}
+
+uint16_t
+virtio_crypto_pkt_tx_burst(void *tx_queue, struct rte_crypto_op **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct virtqueue *txvq;
+ uint16_t nb_tx;
+ int error;
+
+ if (unlikely(nb_pkts < 1))
+ return nb_pkts;
+ if (unlikely(tx_queue == NULL)) {
+ VIRTIO_CRYPTO_TX_LOG_ERR("tx_queue is NULL");
+ return 0;
+ }
+ txvq = tx_queue;
+
+ VIRTIO_CRYPTO_TX_LOG_DBG("%d packets to xmit", nb_pkts);
+
+ for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
+ struct rte_mbuf *txm = tx_pkts[nb_tx]->sym->m_src;
+ /* nb_segs is always 1 in the virtio crypto case */
+ int need = txm->nb_segs - txvq->vq_free_cnt;
+
+ /*
+ * A positive value indicates there is not enough space in the
+ * vring descriptors
+ */
+ if (unlikely(need > 0)) {
+ /*
+ * try again because the receive process may have
+ * freed some space
+ */
+ need = txm->nb_segs - txvq->vq_free_cnt;
+ if (unlikely(need > 0)) {
+ VIRTIO_CRYPTO_TX_LOG_DBG("No free tx "
+ "descriptors to transmit");
+ break;
+ }
+ }
+
+ txvq->packets_sent_total++;
+
+ /* Enqueue Packet buffers */
+ error = virtqueue_crypto_enqueue_xmit(txvq, tx_pkts[nb_tx]);
+ if (unlikely(error)) {
+ if (error == ENOSPC)
+ VIRTIO_CRYPTO_TX_LOG_ERR(
+ "virtqueue_enqueue Free count = 0");
+ else if (error == EMSGSIZE)
+ VIRTIO_CRYPTO_TX_LOG_ERR(
+ "virtqueue_enqueue Free count < 1");
+ else
+ VIRTIO_CRYPTO_TX_LOG_ERR(
+ "virtqueue_enqueue error: %d", error);
+ txvq->packets_sent_failed++;
+ break;
+ }
+ }
+
+ if (likely(nb_tx)) {
+ vq_update_avail_idx(txvq);
+
+ if (unlikely(virtqueue_kick_prepare(txvq))) {
+ virtqueue_notify(txvq);
+ VIRTIO_CRYPTO_TX_LOG_DBG("Notified backend after xmit");
+ }
+ }
+
+ return nb_tx;
+}
diff --git a/src/spdk/dpdk/drivers/crypto/virtio/virtqueue.c b/src/spdk/dpdk/drivers/crypto/virtio/virtqueue.c
new file mode 100644
index 00000000..fd8be581
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/virtio/virtqueue.c
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+ */
+
+#include <stdint.h>
+
+#include <rte_mbuf.h>
+#include <rte_crypto.h>
+#include <rte_malloc.h>
+
+#include "virtqueue.h"
+
+void
+virtqueue_disable_intr(struct virtqueue *vq)
+{
+ /*
+ * Set VRING_AVAIL_F_NO_INTERRUPT to hint the host
+ * not to interrupt when it consumes packets.
+ * Note: this is only a hint to the host.
+ */
+ vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
+}
+
+void
+virtqueue_detatch_unused(struct virtqueue *vq)
+{
+ struct rte_crypto_op *cop = NULL;
+
+ int idx;
+
+ if (vq != NULL)
+ for (idx = 0; idx < vq->vq_nentries; idx++) {
+ cop = vq->vq_descx[idx].crypto_op;
+ if (cop) {
+ if (cop->sym->m_src)
+ rte_pktmbuf_free(cop->sym->m_src);
+ if (cop->sym->m_dst)
+ rte_pktmbuf_free(cop->sym->m_dst);
+ rte_crypto_op_free(cop);
+ vq->vq_descx[idx].crypto_op = NULL;
+ }
+ }
+}
diff --git a/src/spdk/dpdk/drivers/crypto/virtio/virtqueue.h b/src/spdk/dpdk/drivers/crypto/virtio/virtqueue.h
new file mode 100644
index 00000000..bf10c657
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/virtio/virtqueue.h
@@ -0,0 +1,171 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+ */
+
+#ifndef _VIRTQUEUE_H_
+#define _VIRTQUEUE_H_
+
+#include <stdint.h>
+
+#include <rte_atomic.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_mempool.h>
+
+#include "virtio_pci.h"
+#include "virtio_ring.h"
+#include "virtio_logs.h"
+#include "virtio_crypto.h"
+
+struct rte_mbuf;
+
+/*
+ * Per virtio_config.h in Linux.
+ * For virtio_pci on SMP, we don't need to order with respect to MMIO
+ * accesses through relaxed memory I/O windows, so smp_mb() et al are
+ * sufficient.
+ */
+#define virtio_mb() rte_smp_mb()
+#define virtio_rmb() rte_smp_rmb()
+#define virtio_wmb() rte_smp_wmb()
+
+#define VIRTQUEUE_MAX_NAME_SZ 32
+
+enum { VTCRYPTO_DATAQ = 0, VTCRYPTO_CTRLQ = 1 };
+
+/**
+ * The maximum virtqueue size is 2^15. Use that value as the end of
+ * descriptor chain terminator since it will never be a valid index
+ * in the descriptor table. This is used to verify we are correctly
+ * handling vq_free_cnt.
+ */
+#define VQ_RING_DESC_CHAIN_END 32768
+
+struct vq_desc_extra {
+ void *crypto_op;
+ void *cookie;
+ uint16_t ndescs;
+};
+
+struct virtqueue {
+ /**< virtio_crypto_hw structure pointer. */
+ struct virtio_crypto_hw *hw;
+ /**< mem zone to populate RX ring. */
+ const struct rte_memzone *mz;
+ /**< memzone to populate hdr and request. */
+ struct rte_mempool *mpool;
+ uint8_t dev_id; /**< Device identifier. */
+ uint16_t vq_queue_index; /**< PCI queue index */
+
+ void *vq_ring_virt_mem; /**< linear address of vring*/
+ unsigned int vq_ring_size;
+ phys_addr_t vq_ring_mem; /**< physical address of vring */
+
+ struct vring vq_ring; /**< vring keeping desc, used and avail */
+ uint16_t vq_free_cnt; /**< num of desc available */
+ uint16_t vq_nentries; /**< vring desc numbers */
+
+ /**
+ * Head of the free chain in the descriptor table. If
+ * there are no free descriptors, this will be set to
+ * VQ_RING_DESC_CHAIN_END.
+ */
+ uint16_t vq_desc_head_idx;
+ uint16_t vq_desc_tail_idx;
+ /**
+ * Last consumed descriptor in the used table,
+ * trails vq_ring.used->idx.
+ */
+ uint16_t vq_used_cons_idx;
+ uint16_t vq_avail_idx;
+
+ /* Statistics */
+ uint64_t packets_sent_total;
+ uint64_t packets_sent_failed;
+ uint64_t packets_received_total;
+ uint64_t packets_received_failed;
+
+ uint16_t *notify_addr;
+
+ struct vq_desc_extra vq_descx[0];
+};
+
+/**
+ * Tell the backend not to interrupt us.
+ */
+void virtqueue_disable_intr(struct virtqueue *vq);
+
+/**
+ * Get all mbufs to be freed.
+ */
+void virtqueue_detatch_unused(struct virtqueue *vq);
+
+static inline int
+virtqueue_full(const struct virtqueue *vq)
+{
+ return vq->vq_free_cnt == 0;
+}
+
+#define VIRTQUEUE_NUSED(vq) \
+ ((uint16_t)((vq)->vq_ring.used->idx - (vq)->vq_used_cons_idx))
+
+static inline void
+vq_update_avail_idx(struct virtqueue *vq)
+{
+ virtio_wmb();
+ vq->vq_ring.avail->idx = vq->vq_avail_idx;
+}
+
+static inline void
+vq_update_avail_ring(struct virtqueue *vq, uint16_t desc_idx)
+{
+ uint16_t avail_idx;
+ /*
+ * Place the head of the descriptor chain into the next slot and make
+ * it usable to the host. The chain is made available now rather than
+ * deferring to virtqueue_notify() in the hopes that if the host is
+ * currently running on another CPU, we can keep it processing the new
+ * descriptor.
+ */
+ avail_idx = (uint16_t)(vq->vq_avail_idx & (vq->vq_nentries - 1));
+ if (unlikely(vq->vq_ring.avail->ring[avail_idx] != desc_idx))
+ vq->vq_ring.avail->ring[avail_idx] = desc_idx;
+ vq->vq_avail_idx++;
+}
+
+static inline int
+virtqueue_kick_prepare(struct virtqueue *vq)
+{
+ return !(vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY);
+}
+
+static inline void
+virtqueue_notify(struct virtqueue *vq)
+{
+ /*
+ * Ensure updated avail->idx is visible to host.
+ * For virtio on IA, the notification is through an I/O port
+ * operation, which is itself a serializing instruction.
+ */
+ VTPCI_OPS(vq->hw)->notify_queue(vq->hw, vq);
+}
+
+/**
+ * Dump virtqueue internal structures, for debug purpose only.
+ */
+#define VIRTQUEUE_DUMP(vq) do { \
+ uint16_t used_idx, nused; \
+ used_idx = (vq)->vq_ring.used->idx; \
+ nused = (uint16_t)(used_idx - (vq)->vq_used_cons_idx); \
+ VIRTIO_CRYPTO_INIT_LOG_DBG(\
+ "VQ: - size=%d; free=%d; used=%d; desc_head_idx=%d;" \
+ " avail.idx=%d; used_cons_idx=%d; used.idx=%d;" \
+ " avail.flags=0x%x; used.flags=0x%x", \
+ (vq)->vq_nentries, (vq)->vq_free_cnt, nused, \
+ (vq)->vq_desc_head_idx, (vq)->vq_ring.avail->idx, \
+ (vq)->vq_used_cons_idx, (vq)->vq_ring.used->idx, \
+ (vq)->vq_ring.avail->flags, (vq)->vq_ring.used->flags); \
+} while (0)
+
+#endif /* _VIRTQUEUE_H_ */
diff --git a/src/spdk/dpdk/drivers/crypto/zuc/Makefile b/src/spdk/dpdk/drivers/crypto/zuc/Makefile
new file mode 100644
index 00000000..68d84eeb
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/zuc/Makefile
@@ -0,0 +1,38 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2016 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+ifneq ($(MAKECMDGOALS),clean)
+ifeq ($(LIBSSO_ZUC_PATH),)
+$(error "Please define LIBSSO_ZUC_PATH environment variable")
+endif
+endif
+
+# library name
+LIB = librte_pmd_zuc.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_pmd_zuc_version.map
+
+# external library dependencies
+CFLAGS += -I$(LIBSSO_ZUC_PATH)
+CFLAGS += -I$(LIBSSO_ZUC_PATH)/include
+CFLAGS += -I$(LIBSSO_ZUC_PATH)/build
+LDLIBS += -L$(LIBSSO_ZUC_PATH)/build -lsso_zuc
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_cryptodev
+LDLIBS += -lrte_bus_vdev
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_ZUC) += rte_zuc_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_ZUC) += rte_zuc_pmd_ops.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/crypto/zuc/rte_pmd_zuc_version.map b/src/spdk/dpdk/drivers/crypto/zuc/rte_pmd_zuc_version.map
new file mode 100644
index 00000000..cc5829e3
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/zuc/rte_pmd_zuc_version.map
@@ -0,0 +1,3 @@
+DPDK_16.11 {
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/crypto/zuc/rte_zuc_pmd.c b/src/spdk/dpdk/drivers/crypto/zuc/rte_zuc_pmd.c
new file mode 100644
index 00000000..313f4590
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/zuc/rte_zuc_pmd.c
@@ -0,0 +1,548 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2018 Intel Corporation
+ */
+
+#include <rte_common.h>
+#include <rte_hexdump.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_bus_vdev.h>
+#include <rte_malloc.h>
+#include <rte_cpuflags.h>
+
+#include "rte_zuc_pmd_private.h"
+#define ZUC_MAX_BURST 4
+#define BYTE_LEN 8
+
+static uint8_t cryptodev_driver_id;
+
+/** Get xform chain order. */
+static enum zuc_operation
+zuc_get_mode(const struct rte_crypto_sym_xform *xform)
+{
+ if (xform == NULL)
+ return ZUC_OP_NOT_SUPPORTED;
+
+ if (xform->next)
+ if (xform->next->next != NULL)
+ return ZUC_OP_NOT_SUPPORTED;
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ if (xform->next == NULL)
+ return ZUC_OP_ONLY_AUTH;
+ else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+ return ZUC_OP_AUTH_CIPHER;
+ else
+ return ZUC_OP_NOT_SUPPORTED;
+ }
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ if (xform->next == NULL)
+ return ZUC_OP_ONLY_CIPHER;
+ else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ return ZUC_OP_CIPHER_AUTH;
+ else
+ return ZUC_OP_NOT_SUPPORTED;
+ }
+
+ return ZUC_OP_NOT_SUPPORTED;
+}
+
+
+/** Parse crypto xform chain and set private session parameters. */
+int
+zuc_set_session_parameters(struct zuc_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ const struct rte_crypto_sym_xform *auth_xform = NULL;
+ const struct rte_crypto_sym_xform *cipher_xform = NULL;
+ enum zuc_operation mode;
+
+ /* Select Crypto operation - hash then cipher / cipher then hash */
+ mode = zuc_get_mode(xform);
+
+ switch (mode) {
+ case ZUC_OP_CIPHER_AUTH:
+ auth_xform = xform->next;
+
+ /* Fall-through */
+ case ZUC_OP_ONLY_CIPHER:
+ cipher_xform = xform;
+ break;
+ case ZUC_OP_AUTH_CIPHER:
+ cipher_xform = xform->next;
+ /* Fall-through */
+ case ZUC_OP_ONLY_AUTH:
+ auth_xform = xform;
+ break;
+ case ZUC_OP_NOT_SUPPORTED:
+ default:
+ ZUC_LOG(ERR, "Unsupported operation chain order parameter");
+ return -ENOTSUP;
+ }
+
+ if (cipher_xform) {
+ /* Only ZUC EEA3 supported */
+ if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_ZUC_EEA3)
+ return -ENOTSUP;
+
+ if (cipher_xform->cipher.iv.length != ZUC_IV_KEY_LENGTH) {
+ ZUC_LOG(ERR, "Wrong IV length");
+ return -EINVAL;
+ }
+ sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;
+
+ /* Copy the key */
+ memcpy(sess->pKey_cipher, cipher_xform->cipher.key.data,
+ ZUC_IV_KEY_LENGTH);
+ }
+
+ if (auth_xform) {
+ /* Only ZUC EIA3 supported */
+ if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_ZUC_EIA3)
+ return -ENOTSUP;
+
+ if (auth_xform->auth.digest_length != ZUC_DIGEST_LENGTH) {
+ ZUC_LOG(ERR, "Wrong digest length");
+ return -EINVAL;
+ }
+
+ sess->auth_op = auth_xform->auth.op;
+
+ if (auth_xform->auth.iv.length != ZUC_IV_KEY_LENGTH) {
+ ZUC_LOG(ERR, "Wrong IV length");
+ return -EINVAL;
+ }
+ sess->auth_iv_offset = auth_xform->auth.iv.offset;
+
+ /* Copy the key */
+ memcpy(sess->pKey_hash, auth_xform->auth.key.data,
+ ZUC_IV_KEY_LENGTH);
+ }
+
+
+ sess->op = mode;
+
+ return 0;
+}
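+
+/*
+ * A sketch of the xform chain this parser accepts for cipher-then-auth
+ * (ZUC_OP_CIPHER_AUTH). The helper name and the iv_offset layout are
+ * illustrative assumptions; applications choose where IVs live in the
+ * op private data.
+ */
+static __rte_unused void
+zuc_sketch_cipher_auth_chain(struct rte_crypto_sym_xform *cipher,
+		struct rte_crypto_sym_xform *auth,
+		uint8_t *cipher_key, uint8_t *auth_key, uint16_t iv_offset)
+{
+	memset(cipher, 0, sizeof(*cipher));
+	cipher->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+	cipher->cipher.algo = RTE_CRYPTO_CIPHER_ZUC_EEA3;
+	cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
+	cipher->cipher.key.data = cipher_key;
+	cipher->cipher.key.length = ZUC_IV_KEY_LENGTH;
+	cipher->cipher.iv.offset = iv_offset;
+	cipher->cipher.iv.length = ZUC_IV_KEY_LENGTH;
+	cipher->next = auth;
+
+	memset(auth, 0, sizeof(*auth));
+	auth->type = RTE_CRYPTO_SYM_XFORM_AUTH;
+	auth->auth.algo = RTE_CRYPTO_AUTH_ZUC_EIA3;
+	auth->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
+	auth->auth.key.data = auth_key;
+	auth->auth.key.length = ZUC_IV_KEY_LENGTH;
+	auth->auth.digest_length = ZUC_DIGEST_LENGTH;
+	auth->auth.iv.offset = iv_offset + ZUC_IV_KEY_LENGTH;
+	auth->auth.iv.length = ZUC_IV_KEY_LENGTH;
+	auth->next = NULL;
+}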
+
+/** Get ZUC session. */
+static struct zuc_session *
+zuc_get_session(struct zuc_qp *qp, struct rte_crypto_op *op)
+{
+ struct zuc_session *sess = NULL;
+
+ if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+ if (likely(op->sym->session != NULL))
+ sess = (struct zuc_session *)get_sym_session_private_data(
+ op->sym->session,
+ cryptodev_driver_id);
+ } else {
+ void *_sess = NULL;
+ void *_sess_private_data = NULL;
+
+ if (rte_mempool_get(qp->sess_mp, (void **)&_sess))
+ return NULL;
+
+ if (rte_mempool_get(qp->sess_mp, (void **)&_sess_private_data))
+ return NULL;
+
+ sess = (struct zuc_session *)_sess_private_data;
+
+ if (unlikely(zuc_set_session_parameters(sess,
+ op->sym->xform) != 0)) {
+ rte_mempool_put(qp->sess_mp, _sess);
+ rte_mempool_put(qp->sess_mp, _sess_private_data);
+ sess = NULL;
+ }
+ op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
+ set_sym_session_private_data(op->sym->session,
+ cryptodev_driver_id, _sess_private_data);
+ }
+
+ if (unlikely(sess == NULL))
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+
+
+ return sess;
+}
+
+/** Encrypt/decrypt mbufs. */
+static uint8_t
+process_zuc_cipher_op(struct rte_crypto_op **ops,
+ struct zuc_session **sessions,
+ uint8_t num_ops)
+{
+ unsigned i;
+ uint8_t processed_ops = 0;
+ uint8_t *src[ZUC_MAX_BURST], *dst[ZUC_MAX_BURST];
+ uint8_t *iv[ZUC_MAX_BURST];
+ uint32_t num_bytes[ZUC_MAX_BURST];
+ uint8_t *cipher_keys[ZUC_MAX_BURST];
+ struct zuc_session *sess;
+
+ for (i = 0; i < num_ops; i++) {
+ if (((ops[i]->sym->cipher.data.length % BYTE_LEN) != 0)
+ || ((ops[i]->sym->cipher.data.offset
+ % BYTE_LEN) != 0)) {
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+			ZUC_LOG(ERR, "Data length or offset not byte-aligned");
+ break;
+ }
+
+ sess = sessions[i];
+
+#ifdef RTE_LIBRTE_PMD_ZUC_DEBUG
+ if (!rte_pktmbuf_is_contiguous(ops[i]->sym->m_src) ||
+ (ops[i]->sym->m_dst != NULL &&
+ !rte_pktmbuf_is_contiguous(
+ ops[i]->sym->m_dst))) {
+ ZUC_LOG(ERR, "PMD supports only contiguous mbufs, "
+ "op (%p) provides noncontiguous mbuf as "
+ "source/destination buffer.\n", ops[i]);
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ break;
+ }
+#endif
+
+ src[i] = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
+ (ops[i]->sym->cipher.data.offset >> 3);
+ dst[i] = ops[i]->sym->m_dst ?
+ rte_pktmbuf_mtod(ops[i]->sym->m_dst, uint8_t *) +
+ (ops[i]->sym->cipher.data.offset >> 3) :
+ rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
+ (ops[i]->sym->cipher.data.offset >> 3);
+ iv[i] = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
+ sess->cipher_iv_offset);
+ num_bytes[i] = ops[i]->sym->cipher.data.length >> 3;
+
+ cipher_keys[i] = sess->pKey_cipher;
+
+ processed_ops++;
+ }
+
+ sso_zuc_eea3_n_buffer(cipher_keys, iv, src, dst,
+ num_bytes, processed_ops);
+
+ return processed_ops;
+}
+
+/** Generate/verify hash from mbufs. */
+static int
+process_zuc_hash_op(struct zuc_qp *qp, struct rte_crypto_op **ops,
+ struct zuc_session **sessions,
+ uint8_t num_ops)
+{
+ unsigned i;
+ uint8_t processed_ops = 0;
+ uint8_t *src;
+ uint32_t *dst;
+ uint32_t length_in_bits;
+ uint8_t *iv;
+ struct zuc_session *sess;
+
+ for (i = 0; i < num_ops; i++) {
+ /* Data must be byte aligned */
+ if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+			ZUC_LOG(ERR, "Auth offset not byte-aligned");
+ break;
+ }
+
+ sess = sessions[i];
+
+ length_in_bits = ops[i]->sym->auth.data.length;
+
+ src = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
+ (ops[i]->sym->auth.data.offset >> 3);
+ iv = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
+ sess->auth_iv_offset);
+
+ if (sess->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
+ dst = (uint32_t *)qp->temp_digest;
+
+ sso_zuc_eia3_1_buffer(sess->pKey_hash,
+ iv, src,
+ length_in_bits, dst);
+ /* Verify digest. */
+ if (memcmp(dst, ops[i]->sym->auth.digest.data,
+ ZUC_DIGEST_LENGTH) != 0)
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ } else {
+ dst = (uint32_t *)ops[i]->sym->auth.digest.data;
+
+ sso_zuc_eia3_1_buffer(sess->pKey_hash,
+ iv, src,
+ length_in_bits, dst);
+ }
+ processed_ops++;
+ }
+
+ return processed_ops;
+}
+
+/** Process a batch of crypto ops which share the same operation type. */
+static int
+process_ops(struct rte_crypto_op **ops, enum zuc_operation op_type,
+ struct zuc_session **sessions,
+ struct zuc_qp *qp, uint8_t num_ops,
+ uint16_t *accumulated_enqueued_ops)
+{
+ unsigned i;
+ unsigned enqueued_ops, processed_ops;
+
+ switch (op_type) {
+ case ZUC_OP_ONLY_CIPHER:
+ processed_ops = process_zuc_cipher_op(ops,
+ sessions, num_ops);
+ break;
+ case ZUC_OP_ONLY_AUTH:
+ processed_ops = process_zuc_hash_op(qp, ops, sessions,
+ num_ops);
+ break;
+ case ZUC_OP_CIPHER_AUTH:
+ processed_ops = process_zuc_cipher_op(ops, sessions,
+ num_ops);
+ process_zuc_hash_op(qp, ops, sessions, processed_ops);
+ break;
+ case ZUC_OP_AUTH_CIPHER:
+ processed_ops = process_zuc_hash_op(qp, ops, sessions,
+ num_ops);
+ process_zuc_cipher_op(ops, sessions, processed_ops);
+ break;
+ default:
+ /* Operation not supported. */
+ processed_ops = 0;
+ }
+
+ for (i = 0; i < num_ops; i++) {
+ /*
+ * If there was no error/authentication failure,
+ * change status to successful.
+ */
+ if (ops[i]->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ /* Free session if a session-less crypto op. */
+ if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+ memset(sessions[i], 0, sizeof(struct zuc_session));
+ memset(ops[i]->sym->session, 0,
+ rte_cryptodev_sym_get_header_session_size());
+ rte_mempool_put(qp->sess_mp, sessions[i]);
+ rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
+ ops[i]->sym->session = NULL;
+ }
+ }
+
+ enqueued_ops = rte_ring_enqueue_burst(qp->processed_ops,
+ (void **)ops, processed_ops, NULL);
+ qp->qp_stats.enqueued_count += enqueued_ops;
+ *accumulated_enqueued_ops += enqueued_ops;
+
+ return enqueued_ops;
+}
+
+static uint16_t
+zuc_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct rte_crypto_op *c_ops[ZUC_MAX_BURST];
+ struct rte_crypto_op *curr_c_op;
+
+ struct zuc_session *curr_sess;
+ struct zuc_session *sessions[ZUC_MAX_BURST];
+ enum zuc_operation prev_zuc_op = ZUC_OP_NOT_SUPPORTED;
+ enum zuc_operation curr_zuc_op;
+ struct zuc_qp *qp = queue_pair;
+ unsigned i;
+ uint8_t burst_size = 0;
+ uint16_t enqueued_ops = 0;
+ uint8_t processed_ops;
+
+ for (i = 0; i < nb_ops; i++) {
+ curr_c_op = ops[i];
+
+ curr_sess = zuc_get_session(qp, curr_c_op);
+ if (unlikely(curr_sess == NULL)) {
+ curr_c_op->status =
+ RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+ break;
+ }
+
+ curr_zuc_op = curr_sess->op;
+
+ /*
+ * Batch ops that share the same operation type
+ * (cipher only, auth only...).
+ */
+ if (burst_size == 0) {
+ prev_zuc_op = curr_zuc_op;
+ c_ops[0] = curr_c_op;
+ sessions[0] = curr_sess;
+ burst_size++;
+ } else if (curr_zuc_op == prev_zuc_op) {
+ c_ops[burst_size] = curr_c_op;
+ sessions[burst_size] = curr_sess;
+ burst_size++;
+ /*
+ * When there are enough ops to process in a batch,
+ * process them, and start a new batch.
+ */
+ if (burst_size == ZUC_MAX_BURST) {
+ processed_ops = process_ops(c_ops, curr_zuc_op,
+ sessions, qp, burst_size,
+ &enqueued_ops);
+ if (processed_ops < burst_size) {
+ burst_size = 0;
+ break;
+ }
+
+ burst_size = 0;
+ }
+ } else {
+ /*
+ * Different operation type, process the ops
+ * of the previous type.
+ */
+ processed_ops = process_ops(c_ops, prev_zuc_op,
+ sessions, qp, burst_size,
+ &enqueued_ops);
+ if (processed_ops < burst_size) {
+ burst_size = 0;
+ break;
+ }
+
+ burst_size = 0;
+ prev_zuc_op = curr_zuc_op;
+
+ c_ops[0] = curr_c_op;
+ sessions[0] = curr_sess;
+ burst_size++;
+ }
+ }
+
+ if (burst_size != 0) {
+ /* Process the crypto ops of the last operation type. */
+ processed_ops = process_ops(c_ops, prev_zuc_op,
+ sessions, qp, burst_size,
+ &enqueued_ops);
+ }
+
+ qp->qp_stats.enqueue_err_count += nb_ops - enqueued_ops;
+ return enqueued_ops;
+}
+
+static uint16_t
+zuc_pmd_dequeue_burst(void *queue_pair,
+ struct rte_crypto_op **c_ops, uint16_t nb_ops)
+{
+ struct zuc_qp *qp = queue_pair;
+
+ unsigned nb_dequeued;
+
+ nb_dequeued = rte_ring_dequeue_burst(qp->processed_ops,
+ (void **)c_ops, nb_ops, NULL);
+ qp->qp_stats.dequeued_count += nb_dequeued;
+
+ return nb_dequeued;
+}
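+
+/*
+ * These two handlers back rte_cryptodev_enqueue_burst() and
+ * rte_cryptodev_dequeue_burst(). A sketch of the application-side loop
+ * (dev_id, qp_id and the op arrays are the caller's, shown here only as
+ * assumptions):
+ *
+ *	uint16_t sent = rte_cryptodev_enqueue_burst(dev_id, qp_id,
+ *			ops, nb_ops);
+ *	uint16_t done = 0;
+ *
+ *	while (done < sent)
+ *		done += rte_cryptodev_dequeue_burst(dev_id, qp_id,
+ *				deq_ops + done, sent - done);
+ */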
+
+static int cryptodev_zuc_remove(struct rte_vdev_device *vdev);
+
+static int
+cryptodev_zuc_create(const char *name,
+ struct rte_vdev_device *vdev,
+ struct rte_cryptodev_pmd_init_params *init_params)
+{
+ struct rte_cryptodev *dev;
+ struct zuc_private *internals;
+ uint64_t cpu_flags = RTE_CRYPTODEV_FF_CPU_SSE;
+
+
+ dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
+ if (dev == NULL) {
+ ZUC_LOG(ERR, "failed to create cryptodev vdev");
+ goto init_error;
+ }
+
+ dev->driver_id = cryptodev_driver_id;
+ dev->dev_ops = rte_zuc_pmd_ops;
+
+	/* Register enqueue/dequeue burst functions for the data path. */
+ dev->dequeue_burst = zuc_pmd_dequeue_burst;
+ dev->enqueue_burst = zuc_pmd_enqueue_burst;
+
+ dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+ cpu_flags;
+
+ internals = dev->data->dev_private;
+
+ internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
+
+ return 0;
+init_error:
+ ZUC_LOG(ERR, "driver %s: failed",
+ init_params->name);
+
+ cryptodev_zuc_remove(vdev);
+ return -EFAULT;
+}
+
+static int
+cryptodev_zuc_probe(struct rte_vdev_device *vdev)
+{
+ struct rte_cryptodev_pmd_init_params init_params = {
+ "",
+ sizeof(struct zuc_private),
+ rte_socket_id(),
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
+ };
+ const char *name;
+ const char *input_args;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+ input_args = rte_vdev_device_args(vdev);
+
+ rte_cryptodev_pmd_parse_input_args(&init_params, input_args);
+
+ return cryptodev_zuc_create(name, vdev, &init_params);
+}
+
+static int
+cryptodev_zuc_remove(struct rte_vdev_device *vdev)
+{
+
+ struct rte_cryptodev *cryptodev;
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+
+ cryptodev = rte_cryptodev_pmd_get_named_dev(name);
+ if (cryptodev == NULL)
+ return -ENODEV;
+
+ return rte_cryptodev_pmd_destroy(cryptodev);
+}
+
+static struct rte_vdev_driver cryptodev_zuc_pmd_drv = {
+ .probe = cryptodev_zuc_probe,
+ .remove = cryptodev_zuc_remove
+};
+
+static struct cryptodev_driver zuc_crypto_drv;
+
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_ZUC_PMD, cryptodev_zuc_pmd_drv);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_ZUC_PMD,
+ "max_nb_queue_pairs=<int> "
+ "socket_id=<int>");
+RTE_PMD_REGISTER_CRYPTO_DRIVER(zuc_crypto_drv, cryptodev_zuc_pmd_drv.driver,
+ cryptodev_driver_id);
+
+RTE_INIT(zuc_init_log)
+{
+ zuc_logtype_driver = rte_log_register("pmd.crypto.zuc");
+}
diff --git a/src/spdk/dpdk/drivers/crypto/zuc/rte_zuc_pmd_ops.c b/src/spdk/dpdk/drivers/crypto/zuc/rte_zuc_pmd_ops.c
new file mode 100644
index 00000000..6da39654
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/zuc/rte_zuc_pmd_ops.c
@@ -0,0 +1,322 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2018 Intel Corporation
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "rte_zuc_pmd_private.h"
+
+static const struct rte_cryptodev_capabilities zuc_pmd_capabilities[] = {
+ { /* ZUC (EIA3) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_ZUC_EIA3,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 4,
+ .max = 4,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* ZUC (EEA3) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_ZUC_EEA3,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
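+
+/*
+ * A sketch of how an application can probe the table above before
+ * creating a session (dev_id is an assumption of the example):
+ *
+ *	struct rte_cryptodev_sym_capability_idx idx = {
+ *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ *		.algo.cipher = RTE_CRYPTO_CIPHER_ZUC_EEA3,
+ *	};
+ *	const struct rte_cryptodev_symmetric_capability *cap =
+ *		rte_cryptodev_sym_capability_get(dev_id, &idx);
+ *
+ *	if (cap == NULL ||
+ *	    rte_cryptodev_sym_capability_check_cipher(cap, 16, 16) != 0)
+ *		... ZUC-EEA3 with a 16-byte key and IV is not usable ...
+ */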
+
+/** Configure device */
+static int
+zuc_pmd_config(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused struct rte_cryptodev_config *config)
+{
+ return 0;
+}
+
+/** Start device */
+static int
+zuc_pmd_start(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+/** Stop device */
+static void
+zuc_pmd_stop(__rte_unused struct rte_cryptodev *dev)
+{
+}
+
+/** Close device */
+static int
+zuc_pmd_close(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+
+/** Get device statistics */
+static void
+zuc_pmd_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct zuc_qp *qp = dev->data->queue_pairs[qp_id];
+
+ stats->enqueued_count += qp->qp_stats.enqueued_count;
+ stats->dequeued_count += qp->qp_stats.dequeued_count;
+
+ stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
+ stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
+ }
+}
+
+/** Reset device statistics */
+static void
+zuc_pmd_stats_reset(struct rte_cryptodev *dev)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct zuc_qp *qp = dev->data->queue_pairs[qp_id];
+
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ }
+}
+
+
+/** Get device info */
+static void
+zuc_pmd_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *dev_info)
+{
+ struct zuc_private *internals = dev->data->dev_private;
+
+ if (dev_info != NULL) {
+ dev_info->driver_id = dev->driver_id;
+ dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
+		/* No limit on the number of sessions. */
+ dev_info->sym.max_nb_sessions = 0;
+ dev_info->feature_flags = dev->feature_flags;
+ dev_info->capabilities = zuc_pmd_capabilities;
+ }
+}
+
+/** Release queue pair */
+static int
+zuc_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+ if (dev->data->queue_pairs[qp_id] != NULL) {
+ rte_free(dev->data->queue_pairs[qp_id]);
+ dev->data->queue_pairs[qp_id] = NULL;
+ }
+ return 0;
+}
+
+/** Set a unique name for the queue pair based on the device id and qp id. */
+static int
+zuc_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
+ struct zuc_qp *qp)
+{
+ unsigned n = snprintf(qp->name, sizeof(qp->name),
+ "zuc_pmd_%u_qp_%u",
+ dev->data->dev_id, qp->id);
+
+ if (n >= sizeof(qp->name))
+ return -1;
+
+ return 0;
+}
+
+/** Create a ring to place processed ops on */
+static struct rte_ring *
+zuc_pmd_qp_create_processed_ops_ring(struct zuc_qp *qp,
+ unsigned ring_size, int socket_id)
+{
+ struct rte_ring *r;
+
+ r = rte_ring_lookup(qp->name);
+ if (r) {
+ if (rte_ring_get_size(r) >= ring_size) {
+ ZUC_LOG(INFO, "Reusing existing ring %s"
+ " for processed packets",
+ qp->name);
+ return r;
+ }
+
+ ZUC_LOG(ERR, "Unable to reuse existing ring %s"
+ " for processed packets",
+ qp->name);
+ return NULL;
+ }
+
+ return rte_ring_create(qp->name, ring_size, socket_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+}
+
+/** Setup a queue pair */
+static int
+zuc_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id, struct rte_mempool *session_pool)
+{
+ struct zuc_qp *qp = NULL;
+
+ /* Free memory prior to re-allocation if needed. */
+ if (dev->data->queue_pairs[qp_id] != NULL)
+ zuc_pmd_qp_release(dev, qp_id);
+
+ /* Allocate the queue pair data structure. */
+ qp = rte_zmalloc_socket("ZUC PMD Queue Pair", sizeof(*qp),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (qp == NULL)
+ return (-ENOMEM);
+
+ qp->id = qp_id;
+ dev->data->queue_pairs[qp_id] = qp;
+
+ if (zuc_pmd_qp_set_unique_name(dev, qp))
+ goto qp_setup_cleanup;
+
+ qp->processed_ops = zuc_pmd_qp_create_processed_ops_ring(qp,
+ qp_conf->nb_descriptors, socket_id);
+ if (qp->processed_ops == NULL)
+ goto qp_setup_cleanup;
+
+ qp->sess_mp = session_pool;
+
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+
+ return 0;
+
+qp_setup_cleanup:
+ if (qp)
+ rte_free(qp);
+
+ return -1;
+}
+
+/** Return the number of allocated queue pairs */
+static uint32_t
+zuc_pmd_qp_count(struct rte_cryptodev *dev)
+{
+ return dev->data->nb_queue_pairs;
+}
+
+/** Returns the size of the ZUC session structure */
+static unsigned
+zuc_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+ return sizeof(struct zuc_session);
+}
+
+/** Configure a ZUC session from a crypto xform chain */
+static int
+zuc_pmd_sym_session_configure(struct rte_cryptodev *dev __rte_unused,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mempool)
+{
+ void *sess_private_data;
+ int ret;
+
+ if (unlikely(sess == NULL)) {
+ ZUC_LOG(ERR, "invalid session struct");
+ return -EINVAL;
+ }
+
+ if (rte_mempool_get(mempool, &sess_private_data)) {
+ ZUC_LOG(ERR,
+ "Couldn't get object from session mempool");
+
+ return -ENOMEM;
+ }
+
+ ret = zuc_set_session_parameters(sess_private_data, xform);
+ if (ret != 0) {
+		ZUC_LOG(ERR, "failed to configure session parameters");
+
+ /* Return session to mempool */
+ rte_mempool_put(mempool, sess_private_data);
+ return ret;
+ }
+
+ set_sym_session_private_data(sess, dev->driver_id,
+ sess_private_data);
+
+ return 0;
+}
+
+/** Clear the session memory so it does not leave key material behind */
+static void
+zuc_pmd_sym_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
+{
+ uint8_t index = dev->driver_id;
+ void *sess_priv = get_sym_session_private_data(sess, index);
+
+ /* Zero out the whole structure */
+ if (sess_priv) {
+ memset(sess_priv, 0, sizeof(struct zuc_session));
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+ set_sym_session_private_data(sess, index, NULL);
+ rte_mempool_put(sess_mp, sess_priv);
+ }
+}
+
+struct rte_cryptodev_ops zuc_pmd_ops = {
+ .dev_configure = zuc_pmd_config,
+ .dev_start = zuc_pmd_start,
+ .dev_stop = zuc_pmd_stop,
+ .dev_close = zuc_pmd_close,
+
+ .stats_get = zuc_pmd_stats_get,
+ .stats_reset = zuc_pmd_stats_reset,
+
+ .dev_infos_get = zuc_pmd_info_get,
+
+ .queue_pair_setup = zuc_pmd_qp_setup,
+ .queue_pair_release = zuc_pmd_qp_release,
+ .queue_pair_count = zuc_pmd_qp_count,
+
+ .sym_session_get_size = zuc_pmd_sym_session_get_size,
+ .sym_session_configure = zuc_pmd_sym_session_configure,
+ .sym_session_clear = zuc_pmd_sym_session_clear
+};
+
+struct rte_cryptodev_ops *rte_zuc_pmd_ops = &zuc_pmd_ops;
diff --git a/src/spdk/dpdk/drivers/crypto/zuc/rte_zuc_pmd_private.h b/src/spdk/dpdk/drivers/crypto/zuc/rte_zuc_pmd_private.h
new file mode 100644
index 00000000..5e5906dd
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/zuc/rte_zuc_pmd_private.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2018 Intel Corporation
+ */
+
+#ifndef _RTE_ZUC_PMD_PRIVATE_H_
+#define _RTE_ZUC_PMD_PRIVATE_H_
+
+#include <sso_zuc.h>
+
+#define CRYPTODEV_NAME_ZUC_PMD crypto_zuc
+/**< ZUC PMD device name */
+
+/** ZUC PMD driver logtype */
+int zuc_logtype_driver;
+#define ZUC_LOG(level, fmt, ...) \
+ rte_log(RTE_LOG_ ## level, zuc_logtype_driver, \
+ "%s()... line %u: " fmt "\n", __func__, __LINE__, \
+ ## __VA_ARGS__)
+
+#define ZUC_IV_KEY_LENGTH 16
+#define ZUC_DIGEST_LENGTH 4
+
+/** private data structure for each virtual ZUC device */
+struct zuc_private {
+ unsigned max_nb_queue_pairs;
+ /**< Max number of queue pairs supported by device */
+};
+
+/** ZUC buffer queue pair */
+struct zuc_qp {
+ uint16_t id;
+ /**< Queue Pair Identifier */
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ /**< Unique Queue Pair Name */
+ struct rte_ring *processed_ops;
+ /**< Ring for placing processed ops */
+ struct rte_mempool *sess_mp;
+ /**< Session Mempool */
+ struct rte_cryptodev_stats qp_stats;
+ /**< Queue pair statistics */
+ uint8_t temp_digest[ZUC_DIGEST_LENGTH];
+ /**< Buffer used to store the digest generated
+ * by the driver when verifying a digest provided
+ * by the user (using authentication verify operation)
+ */
+} __rte_cache_aligned;
+
+enum zuc_operation {
+ ZUC_OP_ONLY_CIPHER,
+ ZUC_OP_ONLY_AUTH,
+ ZUC_OP_CIPHER_AUTH,
+ ZUC_OP_AUTH_CIPHER,
+ ZUC_OP_NOT_SUPPORTED
+};
+
+/** ZUC private session structure */
+struct zuc_session {
+ enum zuc_operation op;
+ enum rte_crypto_auth_operation auth_op;
+ uint8_t pKey_cipher[ZUC_IV_KEY_LENGTH];
+ uint8_t pKey_hash[ZUC_IV_KEY_LENGTH];
+ uint16_t cipher_iv_offset;
+ uint16_t auth_iv_offset;
+} __rte_cache_aligned;
+
+
+extern int
+zuc_set_session_parameters(struct zuc_session *sess,
+ const struct rte_crypto_sym_xform *xform);
+
+
+/** device specific operations function pointer structure */
+extern struct rte_cryptodev_ops *rte_zuc_pmd_ops;
+
+
+
+#endif /* _RTE_ZUC_PMD_PRIVATE_H_ */